Remove NCOLS from tqdm (#5804)

* Remove NCOLS from tqdm

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  For more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in:
parent
53349dac8e
commit
7c6bae0ae6
10
train.py
10
train.py
|
|
@@ -39,10 +39,10 @@ from utils.autobatch import check_train_batch_size
 from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
 from utils.downloads import attempt_download
-from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size,
-                           check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path,
-                           init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods,
-                           one_cycle, print_args, print_mutation, strip_optimizer)
+from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements,
+                           check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds,
+                           intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle,
+                           print_args, print_mutation, strip_optimizer)
 from utils.loggers import Loggers
 from utils.loggers.wandb.wandb_utils import check_wandb_resume
 from utils.loss import ComputeLoss
||||
|
|
@@ -289,7 +289,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         pbar = enumerate(train_loader)
         LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size'))
         if RANK in [-1, 0]:
-            pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+            pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
         optimizer.zero_grad()
         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
             ni = i + nb * epoch  # number integrated batches (since train start)
|
|
|
|||
|
|
@@ -838,4 +838,4 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False):


 # Variables
-NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns  # terminal window size
+NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns  # terminal window size for tqdm
|
|
|
|||
4
val.py
4
val.py
|
|
@@ -26,7 +26,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
 from models.common import DetectMultiBackend
 from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
-from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
+from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
                            coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                            scale_coords, xywh2xyxy, xyxy2xywh)
 from utils.metrics import ConfusionMatrix, ap_per_class
||||
|
|
@@ -164,7 +164,7 @@ def run(data,
     dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
     loss = torch.zeros(3, device=device)
     jdict, stats, ap, ap_class = [], [], [], []
-    pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
+    pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')  # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
         if pt or engine:
|
|
|||
Loading…
Reference in New Issue