  1. {
  2. "nbformat": 4,
  3. "nbformat_minor": 0,
  4. "metadata": {
  5. "colab": {
  6. "name": "YOLOv5 Tutorial",
  7. "provenance": [],
  8. "collapsed_sections": [],
  9. "toc_visible": true,
  10. "include_colab_link": true
  11. },
  12. "kernelspec": {
  13. "name": "python3",
  14. "display_name": "Python 3"
  15. },
  16. "accelerator": "GPU",
  17. "widgets": {
  18. "application/vnd.jupyter.widget-state+json": {
  19. "2e915d9016c846e095e382b6a02ee773": {
  20. "model_module": "@jupyter-widgets/controls",
  21. "model_name": "HBoxModel",
  22. "model_module_version": "1.5.0",
  23. "state": {
  24. "_view_name": "HBoxView",
  25. "_dom_classes": [],
  26. "_model_name": "HBoxModel",
  27. "_view_module": "@jupyter-widgets/controls",
  28. "_model_module_version": "1.5.0",
  29. "_view_count": null,
  30. "_view_module_version": "1.5.0",
  31. "box_style": "",
  32. "layout": "IPY_MODEL_cb7fc3a5c6cc4fde8d2c83e594a7c86e",
  33. "_model_module": "@jupyter-widgets/controls",
  34. "children": [
  35. "IPY_MODEL_ac3edef4e3434f4587e6cbf8aa048770",
  36. "IPY_MODEL_853ac234cc2a4236946fc516871e10eb"
  37. ]
  38. }
  39. },
  40. "cb7fc3a5c6cc4fde8d2c83e594a7c86e": {
  41. "model_module": "@jupyter-widgets/base",
  42. "model_name": "LayoutModel",
  43. "model_module_version": "1.2.0",
  44. "state": {
  45. "_view_name": "LayoutView",
  46. "grid_template_rows": null,
  47. "right": null,
  48. "justify_content": null,
  49. "_view_module": "@jupyter-widgets/base",
  50. "overflow": null,
  51. "_model_module_version": "1.2.0",
  52. "_view_count": null,
  53. "flex_flow": null,
  54. "width": null,
  55. "min_width": null,
  56. "border": null,
  57. "align_items": null,
  58. "bottom": null,
  59. "_model_module": "@jupyter-widgets/base",
  60. "top": null,
  61. "grid_column": null,
  62. "overflow_y": null,
  63. "overflow_x": null,
  64. "grid_auto_flow": null,
  65. "grid_area": null,
  66. "grid_template_columns": null,
  67. "flex": null,
  68. "_model_name": "LayoutModel",
  69. "justify_items": null,
  70. "grid_row": null,
  71. "max_height": null,
  72. "align_content": null,
  73. "visibility": null,
  74. "align_self": null,
  75. "height": null,
  76. "min_height": null,
  77. "padding": null,
  78. "grid_auto_rows": null,
  79. "grid_gap": null,
  80. "max_width": null,
  81. "order": null,
  82. "_view_module_version": "1.2.0",
  83. "grid_template_areas": null,
  84. "object_position": null,
  85. "object_fit": null,
  86. "grid_auto_columns": null,
  87. "margin": null,
  88. "display": null,
  89. "left": null
  90. }
  91. },
  92. "ac3edef4e3434f4587e6cbf8aa048770": {
  93. "model_module": "@jupyter-widgets/controls",
  94. "model_name": "FloatProgressModel",
  95. "model_module_version": "1.5.0",
  96. "state": {
  97. "_view_name": "ProgressView",
  98. "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1",
  99. "_dom_classes": [],
  100. "description": "100%",
  101. "_model_name": "FloatProgressModel",
  102. "bar_style": "success",
  103. "max": 818322941,
  104. "_view_module": "@jupyter-widgets/controls",
  105. "_model_module_version": "1.5.0",
  106. "value": 818322941,
  107. "_view_count": null,
  108. "_view_module_version": "1.5.0",
  109. "orientation": "horizontal",
  110. "min": 0,
  111. "description_tooltip": null,
  112. "_model_module": "@jupyter-widgets/controls",
  113. "layout": "IPY_MODEL_f454999c3a924c7bad0746fb453dec36"
  114. }
  115. },
  116. "853ac234cc2a4236946fc516871e10eb": {
  117. "model_module": "@jupyter-widgets/controls",
  118. "model_name": "HTMLModel",
  119. "model_module_version": "1.5.0",
  120. "state": {
  121. "_view_name": "HTMLView",
  122. "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8",
  123. "_dom_classes": [],
  124. "description": "",
  125. "_model_name": "HTMLModel",
  126. "placeholder": "​",
  127. "_view_module": "@jupyter-widgets/controls",
  128. "_model_module_version": "1.5.0",
  129. "value": " 780M/780M [03:59<00:00, 3.42MB/s]",
  130. "_view_count": null,
  131. "_view_module_version": "1.5.0",
  132. "description_tooltip": null,
  133. "_model_module": "@jupyter-widgets/controls",
  134. "layout": "IPY_MODEL_9da1a23b042c41618dd14b0e30aa7cbe"
  135. }
  136. },
  137. "13842ca90c0047e584b8d68d99dad2b1": {
  138. "model_module": "@jupyter-widgets/controls",
  139. "model_name": "ProgressStyleModel",
  140. "model_module_version": "1.5.0",
  141. "state": {
  142. "_view_name": "StyleView",
  143. "_model_name": "ProgressStyleModel",
  144. "description_width": "initial",
  145. "_view_module": "@jupyter-widgets/base",
  146. "_model_module_version": "1.5.0",
  147. "_view_count": null,
  148. "_view_module_version": "1.2.0",
  149. "bar_color": null,
  150. "_model_module": "@jupyter-widgets/controls"
  151. }
  152. },
  153. "f454999c3a924c7bad0746fb453dec36": {
  154. "model_module": "@jupyter-widgets/base",
  155. "model_name": "LayoutModel",
  156. "model_module_version": "1.2.0",
  157. "state": {
  158. "_view_name": "LayoutView",
  159. "grid_template_rows": null,
  160. "right": null,
  161. "justify_content": null,
  162. "_view_module": "@jupyter-widgets/base",
  163. "overflow": null,
  164. "_model_module_version": "1.2.0",
  165. "_view_count": null,
  166. "flex_flow": null,
  167. "width": null,
  168. "min_width": null,
  169. "border": null,
  170. "align_items": null,
  171. "bottom": null,
  172. "_model_module": "@jupyter-widgets/base",
  173. "top": null,
  174. "grid_column": null,
  175. "overflow_y": null,
  176. "overflow_x": null,
  177. "grid_auto_flow": null,
  178. "grid_area": null,
  179. "grid_template_columns": null,
  180. "flex": null,
  181. "_model_name": "LayoutModel",
  182. "justify_items": null,
  183. "grid_row": null,
  184. "max_height": null,
  185. "align_content": null,
  186. "visibility": null,
  187. "align_self": null,
  188. "height": null,
  189. "min_height": null,
  190. "padding": null,
  191. "grid_auto_rows": null,
  192. "grid_gap": null,
  193. "max_width": null,
  194. "order": null,
  195. "_view_module_version": "1.2.0",
  196. "grid_template_areas": null,
  197. "object_position": null,
  198. "object_fit": null,
  199. "grid_auto_columns": null,
  200. "margin": null,
  201. "display": null,
  202. "left": null
  203. }
  204. },
  205. "f94a7ca8c1f04761bf38fdc5f99664b8": {
  206. "model_module": "@jupyter-widgets/controls",
  207. "model_name": "DescriptionStyleModel",
  208. "model_module_version": "1.5.0",
  209. "state": {
  210. "_view_name": "StyleView",
  211. "_model_name": "DescriptionStyleModel",
  212. "description_width": "",
  213. "_view_module": "@jupyter-widgets/base",
  214. "_model_module_version": "1.5.0",
  215. "_view_count": null,
  216. "_view_module_version": "1.2.0",
  217. "_model_module": "@jupyter-widgets/controls"
  218. }
  219. },
  220. "9da1a23b042c41618dd14b0e30aa7cbe": {
  221. "model_module": "@jupyter-widgets/base",
  222. "model_name": "LayoutModel",
  223. "model_module_version": "1.2.0",
  224. "state": {
  225. "_view_name": "LayoutView",
  226. "grid_template_rows": null,
  227. "right": null,
  228. "justify_content": null,
  229. "_view_module": "@jupyter-widgets/base",
  230. "overflow": null,
  231. "_model_module_version": "1.2.0",
  232. "_view_count": null,
  233. "flex_flow": null,
  234. "width": null,
  235. "min_width": null,
  236. "border": null,
  237. "align_items": null,
  238. "bottom": null,
  239. "_model_module": "@jupyter-widgets/base",
  240. "top": null,
  241. "grid_column": null,
  242. "overflow_y": null,
  243. "overflow_x": null,
  244. "grid_auto_flow": null,
  245. "grid_area": null,
  246. "grid_template_columns": null,
  247. "flex": null,
  248. "_model_name": "LayoutModel",
  249. "justify_items": null,
  250. "grid_row": null,
  251. "max_height": null,
  252. "align_content": null,
  253. "visibility": null,
  254. "align_self": null,
  255. "height": null,
  256. "min_height": null,
  257. "padding": null,
  258. "grid_auto_rows": null,
  259. "grid_gap": null,
  260. "max_width": null,
  261. "order": null,
  262. "_view_module_version": "1.2.0",
  263. "grid_template_areas": null,
  264. "object_position": null,
  265. "object_fit": null,
  266. "grid_auto_columns": null,
  267. "margin": null,
  268. "display": null,
  269. "left": null
  270. }
  271. },
  272. "6ff8a710ded44391a624dec5c460b771": {
  273. "model_module": "@jupyter-widgets/controls",
  274. "model_name": "HBoxModel",
  275. "model_module_version": "1.5.0",
  276. "state": {
  277. "_view_name": "HBoxView",
  278. "_dom_classes": [],
  279. "_model_name": "HBoxModel",
  280. "_view_module": "@jupyter-widgets/controls",
  281. "_model_module_version": "1.5.0",
  282. "_view_count": null,
  283. "_view_module_version": "1.5.0",
  284. "box_style": "",
  285. "layout": "IPY_MODEL_3c19729b51cd45d4848035da06e96ff8",
  286. "_model_module": "@jupyter-widgets/controls",
  287. "children": [
  288. "IPY_MODEL_23b2f0ae3d46438c8de375987c77f580",
  289. "IPY_MODEL_dd9498c321a9422da6faf17a0be026d4"
  290. ]
  291. }
  292. },
  293. "3c19729b51cd45d4848035da06e96ff8": {
  294. "model_module": "@jupyter-widgets/base",
  295. "model_name": "LayoutModel",
  296. "model_module_version": "1.2.0",
  297. "state": {
  298. "_view_name": "LayoutView",
  299. "grid_template_rows": null,
  300. "right": null,
  301. "justify_content": null,
  302. "_view_module": "@jupyter-widgets/base",
  303. "overflow": null,
  304. "_model_module_version": "1.2.0",
  305. "_view_count": null,
  306. "flex_flow": null,
  307. "width": null,
  308. "min_width": null,
  309. "border": null,
  310. "align_items": null,
  311. "bottom": null,
  312. "_model_module": "@jupyter-widgets/base",
  313. "top": null,
  314. "grid_column": null,
  315. "overflow_y": null,
  316. "overflow_x": null,
  317. "grid_auto_flow": null,
  318. "grid_area": null,
  319. "grid_template_columns": null,
  320. "flex": null,
  321. "_model_name": "LayoutModel",
  322. "justify_items": null,
  323. "grid_row": null,
  324. "max_height": null,
  325. "align_content": null,
  326. "visibility": null,
  327. "align_self": null,
  328. "height": null,
  329. "min_height": null,
  330. "padding": null,
  331. "grid_auto_rows": null,
  332. "grid_gap": null,
  333. "max_width": null,
  334. "order": null,
  335. "_view_module_version": "1.2.0",
  336. "grid_template_areas": null,
  337. "object_position": null,
  338. "object_fit": null,
  339. "grid_auto_columns": null,
  340. "margin": null,
  341. "display": null,
  342. "left": null
  343. }
  344. },
  345. "23b2f0ae3d46438c8de375987c77f580": {
  346. "model_module": "@jupyter-widgets/controls",
  347. "model_name": "FloatProgressModel",
  348. "model_module_version": "1.5.0",
  349. "state": {
  350. "_view_name": "ProgressView",
  351. "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e",
  352. "_dom_classes": [],
  353. "description": "100%",
  354. "_model_name": "FloatProgressModel",
  355. "bar_style": "success",
  356. "max": 6984509,
  357. "_view_module": "@jupyter-widgets/controls",
  358. "_model_module_version": "1.5.0",
  359. "value": 6984509,
  360. "_view_count": null,
  361. "_view_module_version": "1.5.0",
  362. "orientation": "horizontal",
  363. "min": 0,
  364. "description_tooltip": null,
  365. "_model_module": "@jupyter-widgets/controls",
  366. "layout": "IPY_MODEL_ff8151449e444a14869684212b9ab14e"
  367. }
  368. },
  369. "dd9498c321a9422da6faf17a0be026d4": {
  370. "model_module": "@jupyter-widgets/controls",
  371. "model_name": "HTMLModel",
  372. "model_module_version": "1.5.0",
  373. "state": {
  374. "_view_name": "HTMLView",
  375. "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909",
  376. "_dom_classes": [],
  377. "description": "",
  378. "_model_name": "HTMLModel",
  379. "placeholder": "​",
  380. "_view_module": "@jupyter-widgets/controls",
  381. "_model_module_version": "1.5.0",
  382. "value": " 6.66M/6.66M [00:01<00:00, 6.08MB/s]",
  383. "_view_count": null,
  384. "_view_module_version": "1.5.0",
  385. "description_tooltip": null,
  386. "_model_module": "@jupyter-widgets/controls",
  387. "layout": "IPY_MODEL_8fda673769984e2b928ef820d34c85c3"
  388. }
  389. },
  390. "d8dda4b2ce864fd682e558b9a48f602e": {
  391. "model_module": "@jupyter-widgets/controls",
  392. "model_name": "ProgressStyleModel",
  393. "model_module_version": "1.5.0",
  394. "state": {
  395. "_view_name": "StyleView",
  396. "_model_name": "ProgressStyleModel",
  397. "description_width": "initial",
  398. "_view_module": "@jupyter-widgets/base",
  399. "_model_module_version": "1.5.0",
  400. "_view_count": null,
  401. "_view_module_version": "1.2.0",
  402. "bar_color": null,
  403. "_model_module": "@jupyter-widgets/controls"
  404. }
  405. },
  406. "ff8151449e444a14869684212b9ab14e": {
  407. "model_module": "@jupyter-widgets/base",
  408. "model_name": "LayoutModel",
  409. "model_module_version": "1.2.0",
  410. "state": {
  411. "_view_name": "LayoutView",
  412. "grid_template_rows": null,
  413. "right": null,
  414. "justify_content": null,
  415. "_view_module": "@jupyter-widgets/base",
  416. "overflow": null,
  417. "_model_module_version": "1.2.0",
  418. "_view_count": null,
  419. "flex_flow": null,
  420. "width": null,
  421. "min_width": null,
  422. "border": null,
  423. "align_items": null,
  424. "bottom": null,
  425. "_model_module": "@jupyter-widgets/base",
  426. "top": null,
  427. "grid_column": null,
  428. "overflow_y": null,
  429. "overflow_x": null,
  430. "grid_auto_flow": null,
  431. "grid_area": null,
  432. "grid_template_columns": null,
  433. "flex": null,
  434. "_model_name": "LayoutModel",
  435. "justify_items": null,
  436. "grid_row": null,
  437. "max_height": null,
  438. "align_content": null,
  439. "visibility": null,
  440. "align_self": null,
  441. "height": null,
  442. "min_height": null,
  443. "padding": null,
  444. "grid_auto_rows": null,
  445. "grid_gap": null,
  446. "max_width": null,
  447. "order": null,
  448. "_view_module_version": "1.2.0",
  449. "grid_template_areas": null,
  450. "object_position": null,
  451. "object_fit": null,
  452. "grid_auto_columns": null,
  453. "margin": null,
  454. "display": null,
  455. "left": null
  456. }
  457. },
  458. "0f84fe609bcf4aa9afdc32a8cf076909": {
  459. "model_module": "@jupyter-widgets/controls",
  460. "model_name": "DescriptionStyleModel",
  461. "model_module_version": "1.5.0",
  462. "state": {
  463. "_view_name": "StyleView",
  464. "_model_name": "DescriptionStyleModel",
  465. "description_width": "",
  466. "_view_module": "@jupyter-widgets/base",
  467. "_model_module_version": "1.5.0",
  468. "_view_count": null,
  469. "_view_module_version": "1.2.0",
  470. "_model_module": "@jupyter-widgets/controls"
  471. }
  472. },
  473. "8fda673769984e2b928ef820d34c85c3": {
  474. "model_module": "@jupyter-widgets/base",
  475. "model_name": "LayoutModel",
  476. "model_module_version": "1.2.0",
  477. "state": {
  478. "_view_name": "LayoutView",
  479. "grid_template_rows": null,
  480. "right": null,
  481. "justify_content": null,
  482. "_view_module": "@jupyter-widgets/base",
  483. "overflow": null,
  484. "_model_module_version": "1.2.0",
  485. "_view_count": null,
  486. "flex_flow": null,
  487. "width": null,
  488. "min_width": null,
  489. "border": null,
  490. "align_items": null,
  491. "bottom": null,
  492. "_model_module": "@jupyter-widgets/base",
  493. "top": null,
  494. "grid_column": null,
  495. "overflow_y": null,
  496. "overflow_x": null,
  497. "grid_auto_flow": null,
  498. "grid_area": null,
  499. "grid_template_columns": null,
  500. "flex": null,
  501. "_model_name": "LayoutModel",
  502. "justify_items": null,
  503. "grid_row": null,
  504. "max_height": null,
  505. "align_content": null,
  506. "visibility": null,
  507. "align_self": null,
  508. "height": null,
  509. "min_height": null,
  510. "padding": null,
  511. "grid_auto_rows": null,
  512. "grid_gap": null,
  513. "max_width": null,
  514. "order": null,
  515. "_view_module_version": "1.2.0",
  516. "grid_template_areas": null,
  517. "object_position": null,
  518. "object_fit": null,
  519. "grid_auto_columns": null,
  520. "margin": null,
  521. "display": null,
  522. "left": null
  523. }
  524. }
  525. }
  526. }
  527. },
  528. "cells": [
  529. {
  530. "cell_type": "markdown",
  531. "metadata": {
  532. "id": "view-in-github",
  533. "colab_type": "text"
  534. },
  535. "source": [
  536. "<a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
  537. ]
  538. },
  539. {
  540. "cell_type": "markdown",
  541. "metadata": {
  542. "id": "t6MPjfT5NrKQ"
  543. },
  544. "source": [
  545. "<a align=\"left\" href=\"https://ultralytics.com/yolov5\" target=\"_blank\">\n",
  546. "<img src=\"https://user-images.githubusercontent.com/26833433/125273437-35b3fc00-e30d-11eb-9079-46f313325424.png\"></a>\n",
  547. "\n",
  548. "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n",
  549. "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!"
  550. ]
  551. },
  552. {
  553. "cell_type": "markdown",
  554. "metadata": {
  555. "id": "7mGmQbAO5pQb"
  556. },
  557. "source": [
  558. "# Setup\n",
  559. "\n",
  560. "Clone repo, install dependencies and check PyTorch and GPU."
  561. ]
  562. },
  563. {
  564. "cell_type": "code",
  565. "metadata": {
  566. "id": "wbvMlHd_QwMG",
  567. "colab": {
  568. "base_uri": "https://localhost:8080/"
  569. },
  570. "outputId": "ada1dd8d-e0aa-4858-e893-dc320319ca30"
  571. },
  572. "source": [
  573. "!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
  574. "%cd yolov5\n",
  575. "%pip install -qr requirements.txt # install dependencies\n",
  576. "\n",
  577. "import torch\n",
  578. "from IPython.display import Image, clear_output # to display images\n",
  579. "\n",
  580. "clear_output()\n",
  581. "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")"
  582. ],
  583. "execution_count": null,
  584. "outputs": [
  585. {
  586. "output_type": "stream",
  587. "text": [
  588. "Setup complete. Using torch 1.9.0+cu102 (Tesla V100-SXM2-16GB)\n"
  589. ],
  590. "name": "stdout"
  591. }
  592. ]
  593. },
  594. {
  595. "cell_type": "markdown",
  596. "metadata": {
  597. "id": "4JnkELT0cIJg"
  598. },
  599. "source": [
  600. "# 1. Inference\n",
  601. "\n",
  602. "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n",
  603. "\n",
  604. "```shell\n",
  605. "python detect.py --source 0 # webcam\n",
  606. " file.jpg # image \n",
  607. " file.mp4 # video\n",
  608. " path/ # directory\n",
  609. " path/*.jpg # glob\n",
  610. " 'https://youtu.be/NUsoVlDFqZg' # YouTube\n",
  611. " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n",
  612. "```"
  613. ]
  614. },
  615. {
  616. "cell_type": "code",
  617. "metadata": {
  618. "id": "zR9ZbuQCH7FX",
  619. "colab": {
  620. "base_uri": "https://localhost:8080/"
  621. },
  622. "outputId": "a7a37616-a82b-4bdb-a463-6ead850b5615"
  623. },
  624. "source": [
  625. "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n",
  626. "Image(filename='runs/detect/exp/zidane.jpg', width=600)"
  627. ],
  628. "execution_count": null,
  629. "outputs": [
  630. {
  631. "output_type": "stream",
  632. "text": [
  633. "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n",
  634. "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
  635. "\n",
  636. "Fusing layers... \n",
  637. "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n",
  638. "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.008s)\n",
  639. "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n",
  640. "Results saved to runs/detect/exp\n",
  641. "Done. (0.091s)\n"
  642. ],
  643. "name": "stdout"
  644. }
  645. ]
  646. },
  647. {
  648. "cell_type": "markdown",
  649. "metadata": {
  650. "id": "hkAzDWJ7cWTr"
  651. },
  652. "source": [
  653. "&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\n",
  654. "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/127574988-6a558aa1-d268-44b9-bf6b-62d4c605cc72.jpg\" width=\"600\">"
  655. ]
  656. },
  657. {
  658. "cell_type": "markdown",
  659. "metadata": {
  660. "id": "0eq1SMWl6Sfn"
  661. },
  662. "source": [
  663. "# 2. Validate\n",
  664. "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
  665. ]
  666. },
  667. {
  668. "cell_type": "markdown",
  669. "metadata": {
  670. "id": "eyTZYGgRjnMc"
  671. },
  672. "source": [
  673. "## COCO val2017\n",
  674. "Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy."
  675. ]
  676. },
  677. {
  678. "cell_type": "code",
  679. "metadata": {
  680. "id": "WQPtK1QYVaD_",
  681. "colab": {
  682. "base_uri": "https://localhost:8080/",
  683. "height": 66,
  684. "referenced_widgets": [
  685. "2e915d9016c846e095e382b6a02ee773",
  686. "cb7fc3a5c6cc4fde8d2c83e594a7c86e",
  687. "ac3edef4e3434f4587e6cbf8aa048770",
  688. "853ac234cc2a4236946fc516871e10eb",
  689. "13842ca90c0047e584b8d68d99dad2b1",
  690. "f454999c3a924c7bad0746fb453dec36",
  691. "f94a7ca8c1f04761bf38fdc5f99664b8",
  692. "9da1a23b042c41618dd14b0e30aa7cbe"
  693. ]
  694. },
  695. "outputId": "3606f305-aa67-43fd-d5d6-93d1f311768c"
  696. },
  697. "source": [
  698. "# Download COCO val2017\n",
  699. "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
  700. "!unzip -q tmp.zip -d ../datasets && rm tmp.zip"
  701. ],
  702. "execution_count": null,
  703. "outputs": [
  704. {
  705. "output_type": "display_data",
  706. "data": {
  707. "application/vnd.jupyter.widget-view+json": {
  708. "model_id": "2e915d9016c846e095e382b6a02ee773",
  709. "version_minor": 0,
  710. "version_major": 2
  711. },
  712. "text/plain": [
  713. "HBox(children=(FloatProgress(value=0.0, max=818322941.0), HTML(value='')))"
  714. ]
  715. },
  716. "metadata": {
  717. "tags": []
  718. }
  719. },
  720. {
  721. "output_type": "stream",
  722. "text": [
  723. "\n"
  724. ],
  725. "name": "stdout"
  726. }
  727. ]
  728. },
  729. {
  730. "cell_type": "code",
  731. "metadata": {
  732. "id": "X58w8JLpMnjH",
  733. "colab": {
  734. "base_uri": "https://localhost:8080/"
  735. },
  736. "outputId": "20fbc423-f536-43ff-e70b-3acf6aeade99"
  737. },
  738. "source": [
  739. "# Run YOLOv5x on COCO val2017\n",
  740. "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
  741. ],
  742. "execution_count": null,
  743. "outputs": [
  744. {
  745. "output_type": "stream",
  746. "text": [
  747. "\u001b[34m\u001b[1mval: \u001b[0mdata=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n",
  748. "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
  749. "\n",
  750. "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
  751. "100% 168M/168M [00:05<00:00, 31.9MB/s]\n",
  752. "\n",
  753. "Fusing layers... \n",
  754. "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n",
  755. "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2653.03it/s]\n",
  756. "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../datasets/coco/val2017.cache\n",
  757. " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:18<00:00, 2.00it/s]\n",
  758. " all 5000 36335 0.746 0.626 0.68 0.49\n",
  759. "Speed: 0.1ms pre-process, 5.1ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n",
  760. "\n",
  761. "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n",
  762. "loading annotations into memory...\n",
  763. "Done (t=0.44s)\n",
  764. "creating index...\n",
  765. "index created!\n",
  766. "Loading and preparing results...\n",
  767. "DONE (t=4.82s)\n",
  768. "creating index...\n",
  769. "index created!\n",
  770. "Running per image evaluation...\n",
  771. "Evaluate annotation type *bbox*\n",
  772. "DONE (t=84.52s).\n",
  773. "Accumulating evaluation results...\n",
  774. "DONE (t=13.82s).\n",
  775. " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n",
  776. " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
  777. " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n",
  778. " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n",
  779. " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n",
  780. " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n",
  781. " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n",
  782. " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n",
  783. " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n",
  784. " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n",
  785. " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n",
  786. " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n",
  787. "Results saved to runs/val/exp\n"
  788. ],
  789. "name": "stdout"
  790. }
  791. ]
  792. },
  793. {
  794. "cell_type": "markdown",
  795. "metadata": {
  796. "id": "rc_KbFk0juX2"
  797. },
  798. "source": [
  799. "## COCO test-dev2017\n",
  800. "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images) to test model accuracy on the test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794."
  801. ]
  802. },
  803. {
  804. "cell_type": "code",
  805. "metadata": {
  806. "id": "V0AJnSeCIHyJ"
  807. },
  808. "source": [
  809. "# Download COCO test-dev2017\n",
  810. "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n",
  811. "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n",
  812. "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n",
  813. "%mv ./test2017 ../coco/images # move to /coco"
  814. ],
  815. "execution_count": null,
  816. "outputs": []
  817. },
  818. {
  819. "cell_type": "code",
  820. "metadata": {
  821. "id": "29GJXAP_lPrt"
  822. },
  823. "source": [
  824. "# Run YOLOv5s on COCO test-dev2017 using --task test\n",
  825. "!python val.py --weights yolov5s.pt --data coco.yaml --task test"
  826. ],
  827. "execution_count": null,
  828. "outputs": []
  829. },
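{
"cell_type": "markdown",
"metadata": {},
"source": [
"The test-dev predictions are written to a `*_predictions.json` file under `runs/val/exp*`. A minimal sketch of zipping that file for upload (the archive name below is only a placeholder; follow the evaluation server's naming instructions):\n",
"\n",
"```shell\n",
"zip -j yolov5_test-dev2017_results.zip runs/val/exp/*_predictions.json  # zip predictions for submission\n",
"```"
]
},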
  830. {
  831. "cell_type": "markdown",
  832. "metadata": {
  833. "id": "VUOiNLtMP5aG"
  834. },
  835. "source": [
  836. "# 3. Train\n",
  837. "\n",
  838. "Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start TensorBoard and train YOLOv5s from a pretrained checkpoint for 3 epochs (note that actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)."
  839. ]
  840. },
  841. {
  842. "cell_type": "code",
  843. "metadata": {
  844. "id": "Knxi2ncxWffW",
  845. "colab": {
  846. "base_uri": "https://localhost:8080/",
  847. "height": 66,
  848. "referenced_widgets": [
  849. "6ff8a710ded44391a624dec5c460b771",
  850. "3c19729b51cd45d4848035da06e96ff8",
  851. "23b2f0ae3d46438c8de375987c77f580",
  852. "dd9498c321a9422da6faf17a0be026d4",
  853. "d8dda4b2ce864fd682e558b9a48f602e",
  854. "ff8151449e444a14869684212b9ab14e",
  855. "0f84fe609bcf4aa9afdc32a8cf076909",
  856. "8fda673769984e2b928ef820d34c85c3"
  857. ]
  858. },
  859. "outputId": "4510c6b0-8d2a-436c-d3f4-c8f8470d913a"
  860. },
  861. "source": [
  862. "# Download COCO128\n",
  863. "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
  864. "!unzip -q tmp.zip -d ../ && rm tmp.zip"
  865. ],
  866. "execution_count": null,
  867. "outputs": [
  868. {
  869. "output_type": "display_data",
  870. "data": {
  871. "application/vnd.jupyter.widget-view+json": {
  872. "model_id": "6ff8a710ded44391a624dec5c460b771",
  873. "version_minor": 0,
  874. "version_major": 2
  875. },
  876. "text/plain": [
  877. "HBox(children=(FloatProgress(value=0.0, max=6984509.0), HTML(value='')))"
  878. ]
  879. },
  880. "metadata": {
  881. "tags": []
  882. }
  883. },
  884. {
  885. "output_type": "stream",
  886. "text": [
  887. "\n"
  888. ],
  889. "name": "stdout"
  890. }
  891. ]
  892. },
  893. {
  894. "cell_type": "markdown",
  895. "metadata": {
  896. "id": "_pOkGLv1dMqh"
  897. },
  898. "source": [
  899. "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n",
  900. "\n",
  901. "All training results are saved to `runs/train/` with incrementing run directories, e.g. `runs/train/exp2`, `runs/train/exp3`, etc.\n"
  902. ]
  903. },
  904. {
  905. "cell_type": "code",
  906. "metadata": {
  907. "id": "bOy5KI2ncnWd"
  908. },
  909. "source": [
  910. "# Tensorboard (optional)\n",
  911. "%load_ext tensorboard\n",
  912. "%tensorboard --logdir runs/train"
  913. ],
  914. "execution_count": null,
  915. "outputs": []
  916. },
  917. {
  918. "cell_type": "code",
  919. "metadata": {
  920. "id": "2fLAV42oNb7M"
  921. },
  922. "source": [
  923. "# Weights & Biases (optional)\n",
  924. "%pip install -q wandb\n",
  925. "import wandb\n",
  926. "wandb.login()"
  927. ],
  928. "execution_count": null,
  929. "outputs": []
  930. },
  931. {
  932. "cell_type": "code",
  933. "metadata": {
  934. "id": "1NcFxRcFdJ_O",
  935. "colab": {
  936. "base_uri": "https://localhost:8080/"
  937. },
  938. "outputId": "cd8ac17d-19a8-4e87-ab6a-31af1edac1ef"
  939. },
  940. "source": [
  941. "# Train YOLOv5s on COCO128 for 3 epochs\n",
  942. "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
  943. ],
  944. "execution_count": null,
  945. "outputs": [
  946. {
  947. "output_type": "stream",
  948. "text": [
  949. "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache_images=True, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, entity=None, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, upload_dataset=False, bbox_interval=-1, save_period=-1, artifact_alias=latest, local_rank=-1\n",
  950. "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
  951. "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
  952. "\n",
  953. "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
  954. "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n",
  955. "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
  956. "2021-07-29 22:56:52.096481: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n",
  957. "\n",
  958. "WARNING: Dataset not found, nonexistent paths: ['/content/datasets/coco128/images/train2017']\n",
  959. "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n",
  960. "100% 6.66M/6.66M [00:00<00:00, 44.0MB/s]\n",
  961. "Dataset autodownload success\n",
  962. "\n",
  963. "\n",
  964. " from n params module arguments \n",
  965. " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
  966. " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
  967. " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
  968. " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
  969. " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n",
  970. " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
  971. " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n",
  972. " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
  973. " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
  974. " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
  975. " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
  976. " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
  977. " 12 [-1, 6] 1 0 models.common.Concat [1] \n",
  978. " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n",
  979. " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n",
  980. " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
  981. " 16 [-1, 4] 1 0 models.common.Concat [1] \n",
  982. " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n",
  983. " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n",
  984. " 19 [-1, 14] 1 0 models.common.Concat [1] \n",
  985. " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n",
  986. " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n",
  987. " 22 [-1, 10] 1 0 models.common.Concat [1] \n",
  988. " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
  989. " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
  990. "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n",
  991. "\n",
  992. "Transferred 362/362 items from yolov5s.pt\n",
  993. "Scaled weight_decay = 0.0005\n",
  994. "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 59 weight, 62 weight (no decay), 62 bias\n",
  995. "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n",
  996. "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2021.98it/s]\n",
  997. "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../datasets/coco128/labels/train2017.cache\n",
  998. "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 273.58it/s]\n",
  999. "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 506004.63it/s]\n",
  1000. "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 121.71it/s]\n",
  1001. "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n",
  1002. "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n",
  1003. "Plotting labels... \n",
  1004. "\n",
  1005. "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n",
  1006. "Image sizes 640 train, 640 val\n",
  1007. "Using 2 dataloader workers\n",
  1008. "Logging results to runs/train/exp\n",
  1009. "Starting training for 3 epochs...\n",
  1010. "\n",
  1011. " Epoch gpu_mem box obj cls labels img_size\n",
  1012. " 0/2 3.64G 0.0441 0.06646 0.02229 290 640: 100% 8/8 [00:04<00:00, 1.93it/s]\n",
  1013. " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.45it/s]\n",
  1014. " all 128 929 0.696 0.562 0.644 0.419\n",
  1015. "\n",
  1016. " Epoch gpu_mem box obj cls labels img_size\n",
  1017. " 1/2 5.04G 0.04573 0.06289 0.021 226 640: 100% 8/8 [00:01<00:00, 5.46it/s]\n",
  1018. " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.16it/s]\n",
  1019. " all 128 929 0.71 0.567 0.654 0.424\n",
  1020. "\n",
  1021. " Epoch gpu_mem box obj cls labels img_size\n",
  1022. " 2/2 5.04G 0.04542 0.0715 0.02028 242 640: 100% 8/8 [00:01<00:00, 5.12it/s]\n",
  1023. " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.46it/s]\n",
  1024. " all 128 929 0.731 0.563 0.658 0.427\n",
  1025. "3 epochs completed in 0.006 hours.\n",
  1026. "\n",
  1027. "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
  1028. "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n"
  1029. ],
  1030. "name": "stdout"
  1031. }
  1032. ]
  1033. },
  1034. {
  1035. "cell_type": "markdown",
  1036. "metadata": {
  1037. "id": "15glLzbQx5u0"
  1038. },
  1039. "source": [
  1040. "# 4. Visualize"
  1041. ]
  1042. },
  1043. {
  1044. "cell_type": "markdown",
  1045. "metadata": {
  1046. "id": "DLI1JmHU7B0l"
  1047. },
  1048. "source": [
  1049. "## Weights & Biases Logging 🌟 NEW\n",
  1050. "\n",
  1051. "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well as improved visibility and collaboration for teams. To enable W&B, run `pip install wandb`, then train normally (you will be guided through setup on first use).\n",
  1052. "\n",
  1053. "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n",
  1054. "\n",
  1055. "<img align=\"left\" src=\"https://user-images.githubusercontent.com/26833433/125274843-a27bc600-e30e-11eb-9a44-62af0b7a50a2.png\" width=\"800\">"
  1056. ]
  1057. },
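{
"cell_type": "markdown",
"metadata": {},
"source": [
"W&B runs can also be grouped by passing `--project` and `--name` to `train.py` (a minimal sketch, assuming the W&B logger picks up the same `--project` and `--name` arguments; the names below are placeholders):\n",
"\n",
"```shell\n",
"python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --project yolov5-tutorial --name coco128-wandb\n",
"```"
]
},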
  1058. {
  1059. "cell_type": "markdown",
  1060. "metadata": {
  1061. "id": "-WPvRbS5Swl6"
  1062. },
  1063. "source": [
  1064. "## Local Logging\n",
  1065. "\n",
  1066. "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n",
  1067. "\n",
  1068. "> <img src=\"https://user-images.githubusercontent.com/26833433/124931219-48bf8700-e002-11eb-84f0-e05d95b118dd.jpg\" width=\"700\"> \n",
  1069. "`train_batch0.jpg` shows train batch 0 mosaics and labels\n",
  1070. "\n",
  1071. "> <img src=\"https://user-images.githubusercontent.com/26833433/124931217-4826f080-e002-11eb-87b9-ae0925a8c94b.jpg\" width=\"700\"> \n",
  1072. "`test_batch0_labels.jpg` shows val batch 0 labels\n",
  1073. "\n",
  1074. "> <img src=\"https://user-images.githubusercontent.com/26833433/124931209-46f5c380-e002-11eb-9bd5-7a3de2be9851.jpg\" width=\"700\"> \n",
  1075. "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n",
  1076. "\n",
  1077. "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n",
  1078. "\n",
  1079. "```python\n",
  1080. "from utils.plots import plot_results \n",
  1081. "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n",
  1082. "```\n",
  1083. "\n",
  1084. "<img align=\"left\" width=\"800\" alt=\"COCO128 Training Results\" src=\"https://user-images.githubusercontent.com/26833433/126906780-8c5e2990-6116-4de6-b78a-367244a33ccf.png\">"
  1085. ]
  1086. },
  1087. {
  1088. "cell_type": "markdown",
  1089. "metadata": {
  1090. "id": "Zelyeqbyt3GD"
  1091. },
  1092. "source": [
  1093. "# Environments\n",
  1094. "\n",
  1095. "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
  1096. "\n",
  1097. "- **Google Colab and Kaggle** notebooks with free GPU: <a href=\"https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"></a> <a href=\"https://www.kaggle.com/ultralytics/yolov5\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" alt=\"Open In Kaggle\"></a>\n",
  1098. "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n",
  1099. "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n",
  1100. "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) <a href=\"https://hub.docker.com/r/ultralytics/yolov5\"><img src=\"https://img.shields.io/docker/pulls/ultralytics/yolov5?logo=docker\" alt=\"Docker Pulls\"></a>\n"
  1101. ]
  1102. },
  1103. {
  1104. "cell_type": "markdown",
  1105. "metadata": {
  1106. "id": "6Qu7Iesl0p54"
  1107. },
  1108. "source": [
  1109. "# Status\n",
  1110. "\n",
  1111. "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n",
  1112. "\n",
  1113. "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
  1114. ]
  1115. },
  1116. {
  1117. "cell_type": "markdown",
  1118. "metadata": {
  1119. "id": "IEijrePND_2I"
  1120. },
  1121. "source": [
  1122. "# Appendix\n",
  1123. "\n",
  1124. "Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n"
  1125. ]
  1126. },
  1127. {
  1128. "cell_type": "code",
  1129. "metadata": {
  1130. "id": "mcKoSIK2WSzj"
  1131. },
  1132. "source": [
  1133. "# Reproduce\n",
  1134. "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n",
  1135. " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n",
  1136. " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP"
  1137. ],
  1138. "execution_count": null,
  1139. "outputs": []
  1140. },
  1141. {
  1142. "cell_type": "code",
  1143. "metadata": {
  1144. "id": "GMusP4OAxFu6"
  1145. },
  1146. "source": [
  1147. "# PyTorch Hub\n",
  1148. "import torch\n",
  1149. "\n",
  1150. "# Model\n",
  1151. "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n",
  1152. "\n",
  1153. "# Images\n",
  1154. "dir = 'https://ultralytics.com/images/'\n",
  1155. "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n",
  1156. "\n",
  1157. "# Inference\n",
  1158. "results = model(imgs)\n",
  1159. "results.print() # or .show(), .save()"
  1160. ],
  1161. "execution_count": null,
  1162. "outputs": []
  1163. },
  1164. {
  1165. "cell_type": "code",
  1166. "metadata": {
  1167. "id": "FGH0ZjkGjejy"
  1168. },
  1169. "source": [
  1170. "# Unit tests\n",
  1171. "%%shell\n",
  1172. "export PYTHONPATH=\"$PWD\" # to run *.py files in subdirectories\n",
  1173. "\n",
  1174. "rm -rf runs # remove runs/\n",
  1175. "for m in yolov5s; do # models\n",
  1176. " python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n",
  1177. " python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0 # train scratch\n",
  1178. " for d in 0 cpu; do # devices\n",
  1179. " python detect.py --weights $m.pt --device $d # detect official\n",
  1180. " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n",
  1181. " python val.py --weights $m.pt --device $d # val official\n",
  1182. " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n",
  1183. " done\n",
  1184. " python hubconf.py # hub\n",
  1185. " python models/yolo.py --cfg $m.yaml # inspect\n",
  1186. " python export.py --weights $m.pt --img 640 --batch 1 # export\n",
  1187. "done"
  1188. ],
  1189. "execution_count": null,
  1190. "outputs": []
  1191. },
  1192. {
  1193. "cell_type": "code",
  1194. "metadata": {
  1195. "id": "gogI-kwi3Tye"
  1196. },
  1197. "source": [
  1198. "# Profile\n",
  1199. "from utils.torch_utils import profile\n",
  1200. "\n",
  1201. "m1 = lambda x: x * torch.sigmoid(x)\n",
  1202. "m2 = torch.nn.SiLU()\n",
  1203. "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)"
  1204. ],
  1205. "execution_count": null,
  1206. "outputs": []
  1207. },
  1208. {
  1209. "cell_type": "code",
  1210. "metadata": {
  1211. "id": "RVRSOhEvUdb5"
  1212. },
  1213. "source": [
  1214. "# Evolve\n",
  1215. "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n",
  1216. "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)"
  1217. ],
  1218. "execution_count": null,
  1219. "outputs": []
  1220. },
  1221. {
  1222. "cell_type": "code",
  1223. "metadata": {
  1224. "id": "BSgFCAcMbk1R"
  1225. },
  1226. "source": [
  1227. "# VOC\n",
  1228. "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n",
  1229. " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}"
  1230. ],
  1231. "execution_count": null,
  1232. "outputs": []
  1233. }
  1234. ]
  1235. }