|
|
@@ -564,7 +564,7 @@ |
|
|
|
"clear_output()\n", |
|
|
|
"print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" |
|
|
|
], |
|
|
|
"execution_count": 1, |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [ |
|
|
|
{ |
|
|
|
"output_type": "stream", |
|
|
@@ -600,7 +600,7 @@ |
|
|
|
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source inference/images/\n", |
|
|
|
"Image(filename='inference/output/zidane.jpg', width=600)" |
|
|
|
], |
|
|
|
"execution_count": 38, |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [ |
|
|
|
{ |
|
|
|
"output_type": "stream", |
|
|
@@ -641,7 +641,7 @@ |
|
|
|
"id": "4qbaa3iEcrcE" |
|
|
|
}, |
|
|
|
"source": [ |
|
|
|
"Available inference sources:\n", |
|
|
|
"Results are saved to `inference/output`. A full list of available inference sources:\n", |
|
|
|
"<img src=\"https://user-images.githubusercontent.com/26833433/98274798-2b7a7a80-1f94-11eb-91a4-70c73593e26b.jpg\" width=\"900\"> " |
|
|
|
] |
|
|
|
}, |
|
|
@@ -690,7 +690,7 @@ |
|
|
|
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", |
|
|
|
"!unzip -q tmp.zip -d ../ && rm tmp.zip" |
|
|
|
], |
|
|
|
"execution_count": 16, |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [ |
|
|
|
{ |
|
|
|
"output_type": "display_data", |
|
|
@@ -730,7 +730,7 @@ |
|
|
|
"# Run YOLOv5x on COCO val2017\n", |
|
|
|
"!python test.py --weights yolov5x.pt --data coco.yaml --img 640" |
|
|
|
], |
|
|
|
"execution_count": 17, |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [ |
|
|
|
{ |
|
|
|
"output_type": "stream", |
|
|
@@ -797,9 +797,10 @@ |
|
|
|
}, |
|
|
|
"source": [ |
|
|
|
"# Download COCO test-dev2017\n", |
|
|
|
"gdrive_download('1cXZR_ckHki6nddOmcysCuuJFM--T-Q6L','coco2017labels.zip') # annotations\n", |
|
|
|
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n", |
|
|
|
"!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n", |
|
|
|
"!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n", |
|
|
|
"!mv ./test2017 ./coco/images && mv ./coco ../ # move images into /coco and move /coco alongside /yolov5" |
|
|
|
"!mv ./test2017 ./coco/images && mv ./coco ../  # move images to /coco and move /coco next to /yolov5"
|
|
|
], |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [] |
|
|
@@ -852,7 +853,7 @@ |
|
|
|
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", |
|
|
|
"!unzip -q tmp.zip -d ../ && rm tmp.zip" |
|
|
|
], |
|
|
|
"execution_count": 22, |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [ |
|
|
|
{ |
|
|
|
"output_type": "display_data", |
|
|
@@ -916,7 +917,7 @@ |
|
|
|
"# Train YOLOv5s on COCO128 for 3 epochs\n", |
|
|
|
"!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" |
|
|
|
], |
|
|
|
"execution_count": 23, |
|
|
|
"execution_count": null, |
|
|
|
"outputs": [ |
|
|
|
{ |
|
|
|
"output_type": "stream", |
|
|
@@ -1023,7 +1024,7 @@ |
|
|
|
"source": [ |
|
|
|
"## 4.2 Local Logging\n", |
|
|
|
"\n", |
|
|
|
"All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View `train_batch*.jpg` to see training images, labels and augmentation effects. A **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." |
|
|
|
"All results are logged by default to the `runs/exp0` directory, with a new directory created for each new training as `runs/exp1`, `runs/exp2`, etc. View train and test jpgs to see mosaics, labels/predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." |
|
|
|
] |
|
|
|
}, |
|
|
|
{ |
|
|
@@ -1046,7 +1047,7 @@ |
|
|
|
}, |
|
|
|
"source": [ |
|
|
|
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667642-90fcb200-a583-11ea-8fa3-338bbf7da194.jpeg\" width=\"750\"> \n", |
|
|
|
"`test_batch0_gt.jpg` train batch 0 mosaics and labels\n", |
|
|
|
"`train_batch0.jpg` train batch 0 mosaics and labels\n", |
|
|
|
"\n", |
|
|
|
"> <img src=\"https://user-images.githubusercontent.com/26833433/83667626-8c37fe00-a583-11ea-997b-0923fe59b29b.jpeg\" width=\"750\"> \n", |
|
|
|
"`test_batch0_gt.jpg` shows test batch 0 ground truth\n", |
|
|
@@ -1061,7 +1062,7 @@ |
|
|
|
"id": "7KN5ghjE6ZWh" |
|
|
|
}, |
|
|
|
"source": [ |
|
|
|
"Training losses and performance metrics are also logged to Tensorboard and a custom `runs/exp0/results.txt` logfile. `results.txt` is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `yolov5s.pt` (orange)." |
|
|
|
"Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)." |
|
|
|
] |
|
|
|
}, |
|
|
|
{ |