
Fix Logging (#719)

* Add logging setup

* Fix fusing layers message

* Fix logging message that does not have a line end

* Add logging

* Change logging to use logger

* Update yolo.py

I tried this in a cloned branch, and everything seems to work fine

* Update yolo.py

Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
NanoCode012 committed 4 years ago (commit 0892c44bc4)
5 changed files with 13 additions and 7 deletions:
  1. detect.py (+3, -1)
  2. models/export.py (+2, -0)
  3. models/yolo.py (+3, -2)
  4. test.py (+3, -2)
  5. train.py (+2, -2)
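
Every entry point in this commit calls set_logging() from utils.general before emitting output. The helper itself is not part of this diff; a minimal sketch of what such a setup function could look like (the rank argument, format string, and level choice are assumptions, not taken from the commit):

import logging

def set_logging(rank=-1):
    # Assumed sketch of a utils.general.set_logging-style helper: configure the
    # root logger once so that later logging.info()/logger.info() calls print.
    # In distributed runs only the main process (rank -1 or 0) logs at INFO;
    # worker ranks are limited to warnings to keep the console readable.
    logging.basicConfig(format='%(message)s',
                        level=logging.INFO if rank in [-1, 0] else logging.WARN)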

detect.py (+3, -1)

  from models.experimental import attempt_load
  from utils.datasets import LoadStreams, LoadImages
  from utils.general import (
-     check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
+     check_img_size, non_max_suppression, apply_classifier, scale_coords,
+     xyxy2xywh, plot_one_box, strip_optimizer, set_logging)
  from utils.torch_utils import select_device, load_classifier, time_synchronized

  webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')

  # Initialize
+ set_logging()
  device = select_device(opt.device)
  if os.path.exists(out):
      shutil.rmtree(out)  # delete output folder

models/export.py (+2, -0)

  import torch

  from utils.google_utils import attempt_download
+ from utils.general import set_logging

  if __name__ == '__main__':
      parser = argparse.ArgumentParser()
      opt = parser.parse_args()
      opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand
      print(opt)
+     set_logging()

      # Input
      img = torch.zeros((opt.batch_size, 3, *opt.img_size))  # image size(1,3,320,192) iDetection

models/yolo.py (+3, -2)



  from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat
  from models.experimental import MixConv2d, CrossConv, C3
- from utils.general import check_anchor_order, make_divisible, check_file
+ from utils.general import check_anchor_order, make_divisible, check_file, set_logging
  from utils.torch_utils import (
      time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, select_device)

  # print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

  def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-     print('Fusing layers... ', end='')
+     print('Fusing layers... ')
      for m in self.model.modules():
          if type(m) is Conv:
              m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatability

  parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
  opt = parser.parse_args()
  opt.cfg = check_file(opt.cfg)  # check file
+ set_logging()
  device = select_device(opt.device)

  # Create model
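
In models/yolo.py, fuse() drops end='' from the 'Fusing layers...' message so it terminates its own console line. An unterminated print mixed with logging output tends to leave records glued to the open line; a standalone illustration (not code from the repo), with log records routed to stdout so the effect shows up on one stream:

import logging
import sys

# Route log records to stdout so both kinds of output share one stream;
# this roughly mirrors what a set_logging()-style basicConfig call does.
logging.basicConfig(format='%(message)s', level=logging.INFO, stream=sys.stdout)

print('Fusing layers... ', end='')       # old style: leaves the console line open
logging.info('model summary goes here')  # this record is appended to the open line

print('Fusing layers... ')               # new style: the message owns a complete line
logging.info('model summary goes here')  # log output now starts on a fresh line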

test.py (+3, -2)

  from models.experimental import attempt_load
  from utils.datasets import create_dataloader
  from utils.general import (
-     coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression,
-     scale_coords, xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class)
+     coco80_to_coco91_class, check_dataset, check_file, check_img_size, compute_loss, non_max_suppression, scale_coords,
+     xyxy2xywh, clip_coords, plot_images, xywh2xyxy, box_iou, output_to_target, ap_per_class, set_logging)
  from utils.torch_utils import select_device, time_synchronized

      device = next(model.parameters()).device  # get model device

  else:  # called directly
+     set_logging()
      device = select_device(opt.device, batch_size=batch_size)
      merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
      if save_txt:
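
test.py only configures logging in the 'called directly' branch; when test() is invoked from training, the caller's existing logging setup and device are reused rather than overridden. A sketch of that pattern, assuming the repo is on the import path (the wrapper's name and signature are illustrative, not the repo's test()):

from utils.general import set_logging        # added to utils.general by this commit
from utils.torch_utils import select_device  # imported by test.py in the hunk above

def run_eval(model=None, device='', batch_size=32):
    # Illustrative wrapper only, not the repo's test() signature.
    if model is not None:
        # Called from train.py: reuse the trainer's model/device and its logging
        # configuration; calling set_logging() here would override the caller's setup.
        device = next(model.parameters()).device
    else:
        # Called directly from the command line: configure logging first,
        # then select a device, as in the hunk above.
        set_logging()
        device = select_device(device, batch_size=batch_size)
    return device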

train.py (+2, -2)

      state_dict = ckpt['model'].float().state_dict()  # to FP32
      state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
      model.load_state_dict(state_dict, strict=False)  # load
-     logging.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
+     logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
  else:
      model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create

  if rank != -1:
      dataloader.sampler.set_epoch(epoch)
  pbar = enumerate(dataloader)
- logging.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
+ logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
  if rank in [-1, 0]:
      pbar = tqdm(pbar, total=nb)  # progress bar
  optimizer.zero_grad()
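
train.py switches from module-level logging.info(...) calls to a named logger object. Where that object is created is not shown in these hunks; the usual pattern, assumed here, is a module-level logging.getLogger(__name__) configured once through set_logging():

import logging

from utils.general import set_logging  # logging setup helper referenced throughout this commit

logger = logging.getLogger(__name__)   # presumed module-level logger behind logger.info(...) above

if __name__ == '__main__':
    set_logging()                      # configure the root logger once at startup
    logger.info('records from the named logger now reach the console')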
