* Add logging setup
* Fix fusing layers message
* Fix logging does not have end
* Add logging
* Change logging to use logger
* Update yolo.py (I tried this in a cloned branch, and everything seems to work fine)
* Update yolo.py

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
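The hunks below wire a new `set_logging()` helper from `utils.general` into the entry-point scripts and route status messages through Python's `logging` module instead of bare `print()`. The helper's body is not part of this diff; the following is a minimal sketch of what such a helper typically looks like. The `rank` parameter and the message format are assumptions for illustration, not taken from the diff.

```python
# Minimal sketch of a set_logging() helper like the one imported from
# utils.general in the hunks below. The rank argument and format string are
# assumptions; the real implementation is not shown in this diff.
import logging

def set_logging(rank=-1):
    # Configure the root logger once per process. Under distributed training,
    # only the main process (rank -1 or 0) logs at INFO; workers stay quieter.
    logging.basicConfig(format='%(message)s',
                        level=logging.INFO if rank in (-1, 0) else logging.WARNING)
```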
@@ -13,7 +13,8 @@ from numpy import random
 from models.experimental import attempt_load
 from utils.datasets import LoadStreams, LoadImages
 from utils.general import (
-    check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
+    check_img_size, non_max_suppression, apply_classifier, scale_coords,
+    xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
 from utils.torch_utils import select_device, load_classifier, time_synchronized
@@ -23,6 +24,7 @@ def detect(save_img=False):
     webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
     # Initialize
+    set_logging()
     device = select_device(opt.device)
     if os.path.exists(out):
         shutil.rmtree(out)  # delete output folder
@@ -9,6 +9,7 @@ import argparse
 import torch
 from utils.google_utils import attempt_download
+from utils.general import set_logging
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
@@ -18,6 +19,7 @@ if __name__ == '__main__':
     opt = parser.parse_args()
     opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
     print(opt)
+    set_logging()
     # Input
     img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection
@@ -9,7 +9,7 @@ import torch.nn as nn
 from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat
 from models.experimental import MixConv2d, CrossConv, C3
-from utils.general import check_anchor_order, make_divisible, check_file
+from utils.general import check_anchor_order, make_divisible, check_file, set_logging
 from utils.torch_utils import (
     time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, select_device)
@@ -156,7 +156,7 @@ class Model(nn.Module):
     # print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-        print('Fusing layers... ', end='')
+        print('Fusing layers... ')
         for m in self.model.modules():
             if type(m) is Conv:
                 m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatability
@@ -239,6 +239,7 @@ if __name__ == '__main__':
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     opt = parser.parse_args()
     opt.cfg = check_file(opt.cfg)  # check file
+    set_logging()
     device = select_device(opt.device)
     # Create model
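The yolo.py hunk drops `end=''` from the "Fusing layers..." message. That matters once the message is meant to go through the logging module: a log handler emits one complete record per call, so a half-line message cannot be continued on the same output line the way chained `print(..., end='')` calls can. A small illustration of the difference (the logger name and format are placeholders):

```python
# Illustration only: logging emits one full record per call, so there is no
# equivalent of print('...', end='') for continuing the same output line.
import logging

logging.basicConfig(format='%(message)s', level=logging.INFO)
logger = logging.getLogger('models.yolo')  # placeholder logger name

print('Fusing layers... ', end='')  # next print() continues this line
print('done')                       # -> "Fusing layers... done"

logger.info('Fusing layers... ')    # always terminates the line
logger.info('done')                 # -> two separate log records
```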
@@ -13,8 +13,8 @@ from tqdm import tqdm
 from models.experimental import attempt_load
 from utils.datasets import create_dataloader
 from utils.general import (
-    coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression,
-    scale_coords, xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class)
+    coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression, scale_coords,
+    xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class, set_logging)
 from utils.torch_utils import select_device, time_synchronized
@@ -39,6 +39,7 @@ def test(data,
         device = next(model.parameters()).device  # get model device
     else:  # called directly
+        set_logging()
         device = select_device(opt.device, batch_size=batch_size)
         merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
         if save_txt:
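In the test() hunk, `set_logging()` runs only on the "called directly" branch: when test() is invoked from the training script, the caller has already configured logging, so the evaluation code does not reconfigure it. A sketch of that pattern in isolation; `evaluate()` and its `model` argument are illustrative names, not the real API:

```python
# Sketch of the "configure logging only when standalone" pattern used above.
# evaluate() and its model parameter are made-up names for illustration.
import logging

def evaluate(model=None):
    if model is None:  # called directly, e.g. from the command line
        logging.basicConfig(format='%(message)s', level=logging.INFO)
        # ... load the model here ...
    # when a model is passed in, the caller is assumed to have set up logging
    logging.getLogger(__name__).info('starting evaluation')
```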
@@ -71,7 +71,7 @@ def train(hyp, opt, device, tb_writer=None):
         state_dict = ckpt['model'].float().state_dict()  # to FP32
         state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
         model.load_state_dict(state_dict, strict=False)  # load
-        logging.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
+        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
     else:
         model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create
@@ -234,7 +234,7 @@ def train(hyp, opt, device, tb_writer=None):
         if rank != -1:
             dataloader.sampler.set_epoch(epoch)
         pbar = enumerate(dataloader)
-        logging.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
+        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
         if rank in [-1, 0]:
             pbar = tqdm(pbar, total=nb)  # progress bar
         optimizer.zero_grad()
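The train() hunks swap `logging.info(...)` for `logger.info(...)`, i.e. from the root logging module to a named, module-level logger. The logger's definition sits outside these hunks; a minimal sketch of the usual `logging.getLogger(__name__)` idiom it presumably relies on, with placeholder example values:

```python
# Sketch of the module-level logger that logger.info(...) above presumably
# refers to; its exact definition is not shown in these hunks.
import logging

logger = logging.getLogger(__name__)  # named per module, shares root handlers

if __name__ == '__main__':
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    # placeholder values standing in for len(state_dict), len(model.state_dict()), weights
    logger.info('Transferred %g/%g items from %s' % (3, 4, 'weights.pt'))
```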