* Logger consolidation
* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
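The hunks below consolidate logging: per-module `logging.getLogger(__name__)` loggers and bare `logging.*` calls are replaced by a single `LOGGER` defined in `utils/general.py` and imported wherever it is needed. A minimal sketch of the pattern, assuming a hypothetical two-file package (`mylib/log.py` and its consumer are illustrative names, not the YOLOv5 layout):

```python
# mylib/log.py -- hypothetical module illustrating the shared-logger pattern
import logging
import os


def set_logging(name="mylib", verbose=True):
    # Configure the root handler once: INFO on the main process, WARNING on other DDP ranks
    rank = int(os.getenv("RANK", -1))
    level = logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING
    logging.basicConfig(format="%(message)s", level=level)
    return logging.getLogger(name)


LOGGER = set_logging()  # defined exactly once, at import time

# mylib/datasets.py (or any other module) -- just import the shared instance:
#   from mylib.log import LOGGER
#   LOGGER.info("every message now flows through one configurable logger")
```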
@@ -3,7 +3,6 @@
 Common modules
 """
-import logging
 import math
 import warnings
 from copy import copy
@@ -18,12 +17,10 @@ from PIL import Image
 from torch.cuda import amp
 from utils.datasets import exif_transpose, letterbox
-from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh
+from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import time_sync
-LOGGER = logging.getLogger(__name__)
 def autopad(k, p=None):  # kernel, padding
     # Pad to 'same'
@@ -7,7 +7,6 @@ Usage:
 """
 import argparse
-import logging
 import math
 import os
 import random
@@ -201,8 +200,8 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
-        logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
-                        'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
+        LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
+                       'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
         model = torch.nn.DataParallel(model)
     # SyncBatchNorm
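Besides adding the `WARNING:` prefix, this hunk reroutes the message from the root logger (`logging.warning(...)`) to the shared named logger. The practical difference is that the named logger's level and handlers can be adjusted in one place without touching root-logger traffic; a small illustration (the logger name here is arbitrary):

```python
import logging

logging.basicConfig(format="%(message)s", level=logging.INFO)
LOGGER = logging.getLogger("yolov5")  # name chosen for illustration

logging.warning("root logger message")    # module-level call goes to the root logger
LOGGER.warning("shared logger message")   # named logger, configured centrally

LOGGER.setLevel(logging.ERROR)            # quieting the named logger...
LOGGER.warning("now suppressed")          # ...drops its WARNING messages
logging.warning("still printed")          # ...but leaves root-logger calls untouched
```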
@@ -3,14 +3,13 @@
 Image augmentation functions
 """
-import logging
 import math
 import random
 import cv2
 import numpy as np
-from utils.general import check_version, colorstr, resample_segments, segment2box
+from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box
 from utils.metrics import bbox_ioa
@@ -32,11 +31,11 @@ class Albumentations:
                 A.ImageCompression(quality_lower=75, p=0.0)],
                 bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
-            logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
+            LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
         except ImportError:  # package not installed, skip
             pass
         except Exception as e:
-            logging.info(colorstr('albumentations: ') + f'{e}')
+            LOGGER.info(colorstr('albumentations: ') + f'{e}')
     def __call__(self, im, labels, p=1.0):
         if self.transform and random.random() < p:
@@ -6,7 +6,6 @@ Dataloaders and dataset utils
 import glob
 import hashlib
 import json
-import logging
 import os
 import random
 import shutil
@@ -335,7 +334,7 @@ class LoadStreams:
             if success:
                 self.imgs[i] = im
             else:
-                LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.')
+                LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')
                 self.imgs[i] *= 0
                 cap.open(stream)  # re-open stream if signal was lost
             time.sleep(1 / self.fps[i])  # wait time
@@ -427,7 +426,7 @@ class LoadImagesAndLabels(Dataset):
             d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
             tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
             if cache['msgs']:
-                logging.info('\n'.join(cache['msgs']))  # display warnings
+                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
         assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}'
         # Read cache
@@ -525,9 +524,9 @@ class LoadImagesAndLabels(Dataset):
         pbar.close()
         if msgs:
-            logging.info('\n'.join(msgs))
+            LOGGER.info('\n'.join(msgs))
         if nf == 0:
-            logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
+            LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}')
         x['hash'] = get_hash(self.label_files + self.img_files)
         x['results'] = nf, nm, ne, nc, len(self.img_files)
         x['msgs'] = msgs  # warnings
@@ -535,9 +534,9 @@ class LoadImagesAndLabels(Dataset):
         try:
             np.save(path, x)  # save cache for next time
             path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
-            logging.info(f'{prefix}New cache created: {path}')
+            LOGGER.info(f'{prefix}New cache created: {path}')
         except Exception as e:
-            logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # path not writeable
+            LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}')  # not writeable
         return x
     def __len__(self):
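Two things happen in the dataset hunks above: the deprecated `Logger.warn` alias is replaced by `Logger.warning`, and messages that report real problems (unresponsive streams, missing labels, unwritable cache directories) move from INFO to WARNING severity. The severity change matters because `set_logging` drops non-primary DDP ranks to WARNING level, where INFO messages are filtered out; a quick sketch of that filtering (logger name illustrative):

```python
import logging

# Mimic a non-primary DDP rank, which set_logging configures at WARNING level
logging.basicConfig(format="%(message)s", level=logging.WARNING)
LOGGER = logging.getLogger("yolov5")

LOGGER.info("New cache created: ...")              # filtered out at WARNING level
LOGGER.warning("WARNING: No labels found in ...")  # still emitted
```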
@@ -45,7 +45,7 @@ ROOT = FILE.parents[1]  # YOLOv5 root directory
 def set_logging(name=None, verbose=True):
     # Sets level and returns logger
     rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
-    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN)
+    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
     return logging.getLogger(name)
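`logging.WARN` and `logging.WARNING` refer to the same numeric level; `WARNING` is simply the documented spelling, so this change is about consistency rather than behaviour:

```python
import logging

assert logging.WARN == logging.WARNING == 30  # WARN is an older, undocumented alias
```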
@@ -4,7 +4,6 @@ PyTorch utils
 """
 import datetime
-import logging
 import math
 import os
 import platform
@@ -100,7 +99,6 @@ def profile(input, ops, n=10, device=None):
     # profile(input, [m1, m2], n=100)  # profile over 100 iterations
     results = []
-    logging.basicConfig(format="%(message)s", level=logging.INFO)
     device = device or select_device()
     print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}"
           f"{'input':>24s}{'output':>24s}")