* [pre-commit.ci] pre-commit suggestions updates:
  - [github.com/asottile/pyupgrade: v2.31.0 → v2.31.1](https://github.com/asottile/pyupgrade/compare/v2.31.0...v2.31.1)
  - [github.com/pre-commit/mirrors-yapf: v0.31.0 → v0.32.0](https://github.com/pre-commit/mirrors-yapf/compare/v0.31.0...v0.32.0)
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* Update yolo.py
* Update activations.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* Update activations.py
* Update tf.py
* Update tf.py
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>

* modify Dataloader
@@ -24,7 +24,7 @@ repos:
       - id: check-docstring-first

   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.31.0
+    rev: v2.31.1
     hooks:
       - id: pyupgrade
         args: [--py36-plus]
@@ -37,7 +37,7 @@ repos:
         name: Sort imports

   - repo: https://github.com/pre-commit/mirrors-yapf
-    rev: v0.31.0
+    rev: v0.32.0
     hooks:
       - id: yapf
         name: YAPF formatting
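For orientation, here is a small Python sketch of the kind of modernizations the bumped pyupgrade hook applies under `--py36-plus`. The examples are typical pyupgrade fixers, not an exhaustive or authoritative list:

```python
# Illustrative pyupgrade --py36-plus rewrites (assumed typical fixers).

class Base:
    def __init__(self):
        self.ready = True

class Child(Base):
    def __init__(self):
        # pyupgrade rewrites the legacy `super(Child, self).__init__()`
        # to the zero-argument form below:
        super().__init__()

name = "world"
# pyupgrade can rewrite `"hello {}".format(name)` into an f-string:
greeting = f"hello {name}"
print(greeting, Child().ready)
```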
@@ -50,6 +50,7 @@ class TFBN(keras.layers.Layer):

 class TFPad(keras.layers.Layer):
+
     def __init__(self, pad):
         super().__init__()
         self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
@@ -206,6 +207,7 @@ class TFSPPF(keras.layers.Layer):

 class TFDetect(keras.layers.Layer):
     # TF YOLOv5 Detect layer
+
     def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer
         super().__init__()
         self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
@@ -255,6 +257,7 @@ class TFDetect(keras.layers.Layer):

 class TFUpsample(keras.layers.Layer):
     # TF version of torch.nn.Upsample()
+
     def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'
         super().__init__()
         assert scale_factor == 2, "scale_factor must be 2"
@@ -269,6 +272,7 @@ class TFUpsample(keras.layers.Layer):

 class TFConcat(keras.layers.Layer):
     # TF version of torch.concat()
+
     def __init__(self, dimension=1, w=None):
         super().__init__()
         assert dimension == 1, "convert only NCHW to NHWC concat"
@@ -331,6 +335,7 @@ def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)

 class TFModel:
     # TF YOLOv5 model
+
     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes
         super().__init__()
         if isinstance(cfg, dict):
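The `TFPad` layer touched above reproduces PyTorch's symmetric zero padding for NHWC tensors. A minimal runnable sketch; the `call` method is assumed from the surrounding tf.py source, since the hunk only shows `__init__`:

```python
import tensorflow as tf
from tensorflow import keras

class TFPad(keras.layers.Layer):
    # Symmetric zero padding for NHWC tensors (PyTorch pads NCHW instead)
    def __init__(self, pad):
        super().__init__()
        # paddings per axis: [[batch], [height], [width], [channels]]
        self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])

    def call(self, inputs):  # assumed from the surrounding tf.py source
        return tf.pad(inputs, self.pad, mode='constant', constant_values=0)

x = tf.zeros((1, 32, 32, 3))  # NHWC input
print(TFPad(1)(x).shape)      # (1, 34, 34, 3)
```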
@@ -88,6 +88,7 @@ class Detect(nn.Module):

 class Model(nn.Module):
     # YOLOv5 model
+
     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
         super().__init__()
         if isinstance(cfg, dict):
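For reference, a hypothetical minimal use of the `Model` class touched above (assumes a YOLOv5 checkout on `sys.path` with `models/yolov5s.yaml` resolvable from the working directory):

```python
import torch
from models.yolo import Model  # assumes a YOLOv5 repo checkout on sys.path

model = Model(cfg='yolov5s.yaml', ch=3, nc=80)  # 3-channel input, 80 classes
model.eval()
img = torch.zeros(1, 3, 640, 640)  # dummy NCHW image
with torch.no_grad():
    pred = model(img)  # forward pass through the parsed model
```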
@@ -8,29 +8,32 @@ import torch.nn as nn
 import torch.nn.functional as F


-# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
-class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
+class SiLU(nn.Module):
+    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
     @staticmethod
     def forward(x):
         return x * torch.sigmoid(x)


-class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
+class Hardswish(nn.Module):
+    # Hard-SiLU activation
     @staticmethod
     def forward(x):
         # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
         return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX


-# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
 class Mish(nn.Module):
+    # Mish activation https://github.com/digantamisra98/Mish
     @staticmethod
     def forward(x):
         return x * F.softplus(x).tanh()


 class MemoryEfficientMish(nn.Module):
+    # Mish activation memory-efficient
+
     class F(torch.autograd.Function):
+
         @staticmethod
         def forward(ctx, x):
             ctx.save_for_backward(x)
@@ -47,8 +50,8 @@ class MemoryEfficientMish(nn.Module):
         return self.F.apply(x)


-# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
 class FReLU(nn.Module):
+    # FReLU activation https://arxiv.org/abs/2007.11824
     def __init__(self, c1, k=3):  # ch_in, kernel
         super().__init__()
         self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
@@ -58,12 +61,12 @@ class FReLU(nn.Module):
         return torch.max(x, self.bn(self.conv(x)))


-# ACON https://arxiv.org/pdf/2009.04759.pdf ----------------------------------------------------------------------------
 class AconC(nn.Module):
-    r""" ACON activation (activate or not).
+    r""" ACON activation (activate or not)
     AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
     according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
     """
+
     def __init__(self, c1):
         super().__init__()
         self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
@@ -76,10 +79,11 @@ class AconC(nn.Module):

 class MetaAconC(nn.Module):
-    r""" ACON activation (activate or not).
+    r""" ACON activation (activate or not)
     MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
     according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
     """
+
     def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
         super().__init__()
         c2 = max(r, c1 // r)
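A quick sanity-check sketch for the export-friendly activations above: SiLU written as x·sigmoid(x), Hardswish written via `hardtanh`, and Mish written via `softplus` should all match PyTorch's built-ins numerically (tolerances here are assumed reasonable, not taken from the repo):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(4, 8)

# SiLU: x * sigmoid(x) should equal nn.SiLU()
assert torch.allclose(x * torch.sigmoid(x), nn.SiLU()(x), atol=1e-6)

# Hard-SiLU via hardtanh (TorchScript/CoreML/ONNX-friendly) vs nn.Hardswish()
y = x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0
assert torch.allclose(y, nn.Hardswish()(x), atol=1e-6)

# Mish: x * tanh(softplus(x)) vs the built-in (F.mish requires torch >= 1.9)
assert torch.allclose(x * F.softplus(x).tanh(), F.mish(x), atol=1e-6)
print('activation equivalence checks passed')
```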
@@ -8,6 +8,7 @@ class Callbacks:
     """"
     Handles all registered callbacks for YOLOv5 Hooks
     """
+
     def __init__(self):
         # Define the available callbacks
         self._callbacks = {
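The `Callbacks` class is a simple hook registry. A minimal sketch of the pattern; the class and method names below are illustrative rather than the exact YOLOv5 API, which registers many more hooks:

```python
class MiniCallbacks:
    # Minimal hook registry in the spirit of YOLOv5's Callbacks class
    def __init__(self):
        self._callbacks = {'on_train_start': [], 'on_fit_epoch_end': []}

    def register_action(self, hook, callback):
        self._callbacks[hook].append(callback)

    def run(self, hook, *args, **kwargs):
        for cb in self._callbacks[hook]:
            cb(*args, **kwargs)

cbs = MiniCallbacks()
cbs.register_action('on_fit_epoch_end', lambda epoch: print(f'epoch {epoch} done'))
cbs.run('on_fit_epoch_end', 0)
```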
@@ -145,6 +145,7 @@ class InfiniteDataLoader(dataloader.DataLoader):

     Uses same syntax as vanilla DataLoader
     """
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
@@ -164,6 +165,7 @@ class _RepeatSampler:
     Args:
         sampler (Sampler)
     """
+
     def __init__(self, sampler):
         self.sampler = sampler
@@ -978,6 +980,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil
         autodownload: Attempt to download dataset if not found locally
         verbose: Print stats dictionary
     """
+
     def round_labels(labels):
         # Update labels to integer class and 6 decimal place floats
         return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels]
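The `_RepeatSampler`/`InfiniteDataLoader` pair above exists so that DataLoader workers persist across epochs instead of being re-spawned each epoch. A self-contained sketch of the repeating-sampler idea:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

class _RepeatSampler:
    # Wraps a (batch) sampler so iteration never terminates
    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)

ds = TensorDataset(torch.arange(10).float())
dl = DataLoader(ds, batch_size=4)
batches = iter(_RepeatSampler(dl.batch_sampler))
# 10 samples / batch size 4 = 3 batches per epoch; the 4th call wraps around
for _ in range(4):
    print(next(batches))
```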
@@ -116,6 +116,7 @@ class WandbLogger():
     For more on how this logger is used, see the Weights & Biases documentation:
     https://docs.wandb.com/guides/integrations/yolov5
     """
+
     def __init__(self, opt, run_id=None, job_type='Training'):
         """
         - Initialize WandbLogger instance
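For orientation, a hypothetical minimal W&B logging flow (assumes `wandb` is installed and the user is logged in; the real `WandbLogger` wires calls like these into the training loop via the callback hooks above, and the metric names here are illustrative):

```python
import wandb  # assumes `pip install wandb` and `wandb login`

run = wandb.init(project='YOLOv5', job_type='Training')  # start a run
wandb.log({'train/box_loss': 0.05, 'metrics/mAP_0.5': 0.61})  # log metrics
run.finish()  # close the run
```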
@@ -260,6 +260,7 @@ def box_iou(box1, box2):
         iou (Tensor[N, M]): the NxM matrix containing the pairwise
             IoU values for every element in boxes1 and boxes2
     """
+
     def box_area(box):
         # box = 4xn
         return (box[2] - box[0]) * (box[3] - box[1])
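A worked example of the `box_iou` computation above, with the pairwise intersection written out broadcast-style. Boxes are in xyxy format; the expected IoU is 1/7 (one overlapping unit square over a union of 7):

```python
import torch

def box_area(box):  # box = 4xn, xyxy
    return (box[2] - box[0]) * (box[3] - box[1])

box1 = torch.tensor([[0., 0., 2., 2.]])  # N = 1
box2 = torch.tensor([[1., 1., 3., 3.]])  # M = 1

# Pairwise intersection: min of right/bottom edges minus max of left/top edges
inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) -
         torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
iou = inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter)
print(iou)  # tensor([[0.1429]]) = 1 / (4 + 4 - 1)
```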
@@ -284,6 +284,7 @@ class ModelEMA:
     Keeps a moving average of everything in the model state_dict (parameters and buffers)
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
     """
+
     def __init__(self, model, decay=0.9999, tau=2000, updates=0):
         # Create EMA
         self.ema = deepcopy(de_parallel(model)).eval()  # FP32 EMA
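The `decay` argument above is not applied as a constant: `ModelEMA` ramps the effective decay from 0 toward the nominal `decay` as updates accumulate, with `tau` controlling the warmup speed. A sketch of that ramp using the defaults from the `__init__` signature:

```python
import math

decay, tau = 0.9999, 2000  # defaults from the __init__ signature above

def effective_decay(updates):
    # Exponential ramp: small decay early (the EMA tracks the model closely),
    # approaching the nominal `decay` as training progresses
    return decay * (1 - math.exp(-updates / tau))

for updates in (1, 100, 2000, 20000):
    print(updates, round(effective_decay(updates), 6))
```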