# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""

import os
import warnings

import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter

from utils.general import colorstr, cv2, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel

LOGGERS = ('csv', 'tb', 'wandb')  # text-file, TensorBoard, Weights & Biases
RANK = int(os.getenv('RANK', -1))
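# Note: the RANK environment variable read above is set by torch.distributed launchers (e.g. torchrun);
# the -1 default indicates single-process, non-distributed training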

try:
    import wandb

    assert hasattr(wandb, '__version__')  # verify package import not local dir
    if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}:
        try:
            wandb_login_success = wandb.login(timeout=30)
        except wandb.errors.UsageError:  # known non-TTY terminal issue
            wandb_login_success = False
        if not wandb_login_success:
            wandb = None
except (ImportError, AssertionError):
    wandb = None


class Loggers:
    # YOLOv5 Loggers class
    def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):
        self.save_dir = save_dir
        self.weights = weights
        self.opt = opt
        self.hyp = hyp
        self.logger = logger  # for printing results to console
        self.include = include
        self.keys = [
            'train/box_loss',
            'train/obj_loss',
            'train/cls_loss',  # train loss
            'metrics/precision',
            'metrics/recall',
            'metrics/mAP_0.5',
            'metrics/mAP_0.5:0.95',  # metrics
            'val/box_loss',
            'val/obj_loss',
            'val/cls_loss',  # val loss
            'x/lr0',
            'x/lr1',
            'x/lr2']  # params
        self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95']
        for k in LOGGERS:
            setattr(self, k, None)  # init empty logger dictionary
        self.csv = True  # always log to csv

        # Message
        if not wandb:
            prefix = colorstr('Weights & Biases: ')
            s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
            self.logger.info(emojis(s))

        # TensorBoard
        s = self.save_dir
        if 'tb' in self.include and not self.opt.evolve:
            prefix = colorstr('TensorBoard: ')
            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
            self.tb = SummaryWriter(str(s))

        # W&B
        if wandb and 'wandb' in self.include:
            wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
            # Temporary warning: nested artifacts are not supported by wandb releases after 0.12.10
            if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'):
                self.logger.warning(
                    "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected."
                )
        else:
            self.wandb = None

    def on_train_start(self):
        # Callback runs on train start
        pass

    def on_pretrain_routine_end(self):
        # Callback runs on pre-train routine end
        paths = self.save_dir.glob('*labels*.jpg')  # training labels
        if self.wandb:
            self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})

    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):
        # Callback runs on train batch end
        if plots:
            if ni == 0:
                if self.tb and not self.opt.sync_bn:  # --sync-bn known issue https://github.com/ultralytics/yolov5/issues/3754
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore')  # suppress jit trace warning
                        self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
            if ni < 3:
                f = self.save_dir / f'train_batch{ni}.jpg'  # filename
                plot_images(imgs, targets, paths, f)
            if self.wandb and ni == 10:
                files = sorted(self.save_dir.glob('train*.jpg'))
                self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})

    def on_train_epoch_end(self, epoch):
        # Callback runs on train epoch end
        if self.wandb:
            self.wandb.current_epoch = epoch + 1

    def on_val_image_end(self, pred, predn, path, names, im):
        # Callback runs on val image end
        if self.wandb:
            self.wandb.val_one_image(pred, predn, path, names, im)

    def on_val_end(self):
        # Callback runs on val end
        if self.wandb:
            files = sorted(self.save_dir.glob('val*.jpg'))
            self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})

    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
        # Callback runs at the end of each fit (train+val) epoch
        x = dict(zip(self.keys, vals))
        if self.csv:
            file = self.save_dir / 'results.csv'
            n = len(x) + 1  # number of cols
            s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n')  # add header
            with open(file, 'a') as f:
                f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
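            # results.csv layout: a header row of 'epoch' plus self.keys is written once on first use,
            # then one row per epoch of [epoch] + vals, each field formatted to a width of 20 characters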

        if self.tb:
            for k, v in x.items():
                self.tb.add_scalar(k, v, epoch)

        if self.wandb:
            if best_fitness == fi:
                best_results = [epoch] + vals[3:7]
                for i, name in enumerate(self.best_keys):
                    self.wandb.wandb_run.summary[name] = best_results[i]  # log best results in the summary
            self.wandb.log(x)
            self.wandb.end_epoch(best_result=best_fitness == fi)

    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
        # Callback runs on model save event
        if self.wandb:
            if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)

    def on_train_end(self, last, best, plots, epoch, results):
        # Callback runs on training end
        if plots:
            plot_results(file=self.save_dir / 'results.csv')  # save results.png
        files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
        self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}")

        if self.tb:
            for f in files:
                self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')

        if self.wandb:
            self.wandb.log(dict(zip(self.keys[3:10], results)))
            self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
            # Log the final model checkpoint as a wandb artifact. TODO: refactor this into WandbLogger.log_model
            if not self.opt.evolve:
                wandb.log_artifact(str(best if best.exists() else last),
                                   type='model',
                                   name=f'run_{self.wandb.wandb_run.id}_model',
                                   aliases=['latest', 'best', 'stripped'])
            self.wandb.finish_run()

    def on_params_update(self, params):
        # Update hyperparams or configs of the experiment
        # params: A dict containing {param: value} pairs
        if self.wandb:
            self.wandb.wandb_run.config.update(params, allow_val_change=True)
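

# Example usage (illustrative sketch; the variable names and call sites below are assumptions,
# meant only to show how a training script such as train.py could drive these callbacks):
#
#   loggers = Loggers(save_dir=save_dir, weights=weights, opt=opt, hyp=hyp, logger=LOGGER)
#   loggers.on_pretrain_routine_end()
#   for epoch in range(start_epoch, epochs):
#       for ni, (imgs, targets, paths, _) in enumerate(train_loader):
#           ...  # forward/backward/optimizer step
#           loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots)
#       loggers.on_train_epoch_end(epoch)
#       ...  # validation produces vals, best_fitness, fi
#       loggers.on_fit_epoch_end(vals, epoch, best_fitness, fi)
#       loggers.on_model_save(last, epoch, final_epoch, best_fitness, fi)
#   loggers.on_train_end(last, best, plots, epoch, results)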