
Update FLOPs description (#3422)

* Update README.md

* Changing FLOPS to FLOPs.

Co-authored-by: BuildTools <unconfigured@null.spigotmc.org>
chocosaj (GitHub) committed 3 years ago
commit 3cb9ad4fc4
5 files changed, 15 insertions and 15 deletions
1. README.md (+2, -2)
2. models/yolo.py (+3, -3)
3. requirements.txt (+1, -1)
4. tutorial.ipynb (+3, -3)
5. utils/torch_utils.py (+6, -6)

README.md (+2, -2)

@@ -30,7 +30,7 @@ This repository represents Ultralytics open-source research into future object d

[assets]: https://github.com/ultralytics/yolov5/releases

-Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>test<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>V100 (ms) | |params<br><sup>(M) |FLOPS<br><sup>640 (B)
+Model |size<br><sup>(pixels) |mAP<sup>val<br>0.5:0.95 |mAP<sup>test<br>0.5:0.95 |mAP<sup>val<br>0.5 |Speed<br><sup>V100 (ms) | |params<br><sup>(M) |FLOPs<br><sup>640 (B)
--- |--- |--- |--- |--- |--- |---|--- |---
[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
@@ -112,7 +112,7 @@ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, devi
YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)

Fusing layers...
-Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS
+Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs
image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s)
image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s)
Results saved to runs/detect/exp2
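The `Model Summary: ... 17.0 GFLOPs` line above is printed whenever a YOLOv5 model is loaded and fused. A minimal way to reproduce a comparable run is via PyTorch Hub; this is a hedged sketch (the hub entry point and demo image URL are the repo's standard ones, but the exact log text varies by release):

```python
# Load YOLOv5s via PyTorch Hub and run one of the demo images from the
# log above. Loading prints a summary line ending in "... GFLOPs".
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # downloads weights on first use
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # e.g. "2 persons, 1 tie", matching the log above
```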

models/yolo.py (+3, -3)

@@ -21,7 +21,7 @@ from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, s
    select_device, copy_attr

try:
-    import thop  # for FLOPS computation
+    import thop  # for FLOPs computation
except ImportError:
    thop = None

@@ -140,13 +140,13 @@ class Model(nn.Module):
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            if profile:
-                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
+                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                if m == self.model[0]:
-                    logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}")
+                    logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}")
                logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')

            x = m(x)  # run
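For context on the `* 2` above: `thop.profile` returns multiply-accumulate operations (MACs) plus a parameter count, and one MAC is conventionally two FLOPs (one multiply, one add), so dividing by 1E9 and doubling yields GFLOPs, which is why the lowercase-s unit is the correct spelling here. A self-contained sketch of that conversion (the layer and input shape are arbitrary examples, not from the repo):

```python
# MACs -> GFLOPs conversion as used in the profiling branch above.
import torch
import torch.nn as nn
import thop  # optional dependency, mirrored by the try/except above

m = nn.Conv2d(3, 16, kernel_size=3, padding=1)  # arbitrary example layer
x = torch.zeros(1, 3, 64, 64)                   # arbitrary example input
macs, params = thop.profile(m, inputs=(x,), verbose=False)
gflops = macs / 1E9 * 2  # 1 MAC = 1 multiply + 1 add = 2 FLOPs
print(f'{gflops:.4f} GFLOPs, {int(params)} params')
```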

requirements.txt (+1, -1)

@@ -27,4 +27,4 @@ pandas
# extras --------------------------------------
# Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
pycocotools>=2.0 # COCO mAP
-thop  # FLOPS computation
+thop  # FLOPs computation

tutorial.ipynb (+3, -3)

@@ -611,7 +611,7 @@
"YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
"\n",
"Fusing layers... \n",
"Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n",
"Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n",
"image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n",
"image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n",
"Results saved to runs/detect/exp\n",
@@ -734,7 +734,7 @@
"100% 168M/168M [00:05<00:00, 32.3MB/s]\n",
"\n",
"Fusing layers... \n",
"Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
"Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
" Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n",
@@ -964,7 +964,7 @@
" 22 [-1, 10] 1 0 models.common.Concat [1] \n",
" 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
" 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n",
"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n",
"\n",
"Transferred 362/362 items from yolov5s.pt\n",
"Scaled weight_decay = 0.0005\n",

utils/torch_utils.py (+6, -6)

@@ -18,7 +18,7 @@ import torch.nn.functional as F
import torchvision

try:
-    import thop  # for FLOPS computation
+    import thop  # for FLOPs computation
except ImportError:
    thop = None
logger = logging.getLogger(__name__)
@@ -105,13 +105,13 @@ def profile(x, ops, n=100, device=None):
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
-    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
+    print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
-            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
+            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
        except:
            flops = 0
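A hedged usage sketch for this helper, inferred only from the signature in the hunk header (`profile(x, ops, n=100, device=None)`); the modules benchmarked here are arbitrary examples:

```python
# Benchmark a couple of modules on a dummy batch; prints the
# Params / GFLOPs / forward (ms) / backward (ms) table shown above.
import torch
import torch.nn as nn
from utils.torch_utils import profile

x = torch.randn(16, 3, 640, 640)        # dummy input batch
ops = [nn.Conv2d(3, 16, 3, padding=1),  # arbitrary example modules
       nn.SiLU()]
profile(x, ops, n=10)                   # n=10 iterations for a quick check
```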

@@ -219,13 +219,13 @@ def model_info(model, verbose=False, img_size=640):
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

-    try:  # FLOPS
+    try:  # FLOPs
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
-        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
+        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPs
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
-        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
+        fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPs
    except (ImportError, Exception):
        fs = ''
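The final scaling deserves a note: the model is profiled once on a stride-sized input (at least 32×32), and since convolutional FLOPs grow linearly with spatial area, multiplying by (640/stride)² gives the 640×640 GFLOPs reported in the summaries above. Illustrative arithmetic only; the stride-level figure below is a hypothetical placeholder:

```python
# Worked example of the 640x640 GFLOPs scaling in model_info().
stride = 32
flops = 0.0425                 # hypothetical GFLOPs measured on a 32x32 input
img_size = [640, 640]
gflops = flops * img_size[0] / stride * img_size[1] / stride
print('%.1f GFLOPs' % gflops)  # 0.0425 * 20 * 20 = 17.0
```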

