diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..3c6b6ab
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,216 @@
+# Repo-specific DockerIgnore -------------------------------------------------------------------------------------------
+#.git
+.cache
+.idea
+runs
+output
+coco
+storage.googleapis.com
+
+data/samples/*
+**/results*.txt
+*.jpg
+
+# Neural Network weights -----------------------------------------------------------------------------------------------
+**/*.weights
+**/*.pt
+**/*.pth
+**/*.onnx
+**/*.mlmodel
+**/*.torchscript
+
+
+# Below Copied From .gitignore -----------------------------------------------------------------------------------------
+
+
+# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+wandb/
+.installed.cfg
+*.egg
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# dotenv
+.env
+
+# virtualenv
+.venv*
+venv*/
+ENV*/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+
+# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
+
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+Icon?
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+
+# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea/*
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/dictionaries
+# Bokeh plots, TensorFlow frozen graphs, and videos (ignore files do not support trailing comments)
+*.html
+*.pb
+*.avi
+
+# Sensitive or high-churn files:
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+
+# Gradle:
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# CMake
+cmake-build-debug/
+cmake-build-release/
+
+# Mongo Explorer plugin:
+.idea/**/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..dad4239
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# This drops notebooks from GitHub language stats
+*.ipynb linguist-vendored
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..91ce33f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,252 @@
+# Repo-specific GitIgnore ----------------------------------------------------------------------------------------------
+*.jpg
+*.jpeg
+*.png
+*.bmp
+*.tif
+*.tiff
+*.heic
+*.JPG
+*.JPEG
+*.PNG
+*.BMP
+*.TIF
+*.TIFF
+*.HEIC
+*.mp4
+*.mov
+*.MOV
+*.avi
+*.data
+*.json
+
+*.cfg
+!cfg/yolov3*.cfg
+
+storage.googleapis.com
+runs/*
+data/*
+!data/images/zidane.jpg
+!data/images/bus.jpg
+!data/coco.names
+!data/coco_paper.names
+!data/coco.data
+!data/coco_*.data
+!data/coco_*.txt
+!data/trainvalno5k.shapes
+!data/*.sh
+
+pycocotools/*
+results*.txt
+gcp_test*.sh
+
+# Datasets -------------------------------------------------------------------------------------------------------------
+coco/
+coco128/
+VOC/
+
+# MATLAB GitIgnore -----------------------------------------------------------------------------------------------------
+*.m~
+*.mat
+!targets*.mat
+
+# Neural Network weights -----------------------------------------------------------------------------------------------
+*.weights
+*.pt
+*.onnx
+*.mlmodel
+*.torchscript
+darknet53.conv.74
+yolov3-tiny.conv.15
+
+# GitHub Python GitIgnore ----------------------------------------------------------------------------------------------
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+wandb/
+.installed.cfg
+*.egg
+
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# dotenv
+.env
+
+# virtualenv
+.venv*
+venv*/
+ENV*/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+
+# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore -----------------------------------------------
+
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+Icon?
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+
+# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea/*
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/dictionaries
+# Bokeh plots, TensorFlow frozen graphs, and videos (ignore files do not support trailing comments)
+*.html
+*.pb
+*.avi
+
+# Sensitive or high-churn files:
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+
+# Gradle:
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# CMake
+cmake-build-debug/
+cmake-build-release/
+
+# Mongo Explorer plugin:
+.idea/**/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..1f301b2
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,54 @@
+# Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch
+FROM nvcr.io/nvidia/pytorch:20.12-py3
+
+# Install linux packages
+RUN apt update && apt install -y screen libgl1-mesa-glx
+
+# Install python dependencies
+RUN python -m pip install --upgrade pip
+COPY requirements.txt .
+RUN pip install -r requirements.txt gsutil
+
+# Create working directory
+RUN mkdir -p /usr/src/app
+WORKDIR /usr/src/app
+
+# Copy contents
+COPY . /usr/src/app
+
+# Copy weights
+#RUN python3 -c "from models import *; \
+#attempt_download('weights/yolov5s.pt'); \
+#attempt_download('weights/yolov5m.pt'); \
+#attempt_download('weights/yolov5l.pt')"
+
+
+# --------------------------------------------------- Extras Below ---------------------------------------------------
+
+# Build and Push
+# t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t
+# for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done
+
+# Pull and Run
+# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
+
+# Pull and Run with local directory access
+# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t
+
+# Kill all
+# sudo docker kill $(sudo docker ps -q)
+
+# Kill all image-based
+# sudo docker kill $(sudo docker ps -a -q --filter ancestor=ultralytics/yolov5:latest)
+
+# Bash into running container
+# sudo docker exec -it 5a9b5863d93d bash
+
+# Bash into stopped container
+# id=5a9b5863d93d && sudo docker start $id && sudo docker exec -it $id bash
+
+# Send weights to GCP
+# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt
+
+# Clean up
+# docker system prune -a --volumes
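+
+# Run detection inside the container (illustrative command; assumes the demo images copied into the image)
+# t=ultralytics/yolov5:latest && sudo docker run --ipc=host --gpus all $t python3 detect.py --source data/images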
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..9e419e0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file
diff --git a/README.md b/README.md
index 0223bf2..297c50c 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,12 @@
-# RK3588_Detection
+Original repository: https://github.com/ultralytics/yolov5
+
+Environment requirements: Python >= 3.6
+
+Model training: python3 train.py
+
+Model export: python3 models/export.py --weights "xxx.pt"
+
+Convert to RKNN: python3 onnx_to_rknn.py
+
+Model inference: python3 rknn_detect_yolov5.py
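+
+For example, exporting a trained checkpoint (the path is illustrative): python3 models/export.py --weights "runs/train/exp/weights/best.pt"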
-Model conversion and acceleration code for RK-series development boards
\ No newline at end of file
diff --git a/detect.py b/detect.py
new file mode 100644
index 0000000..802b99f
--- /dev/null
+++ b/detect.py
@@ -0,0 +1,172 @@
+import argparse
+import time
+from pathlib import Path
+
+import cv2
+import torch
+import torch.backends.cudnn as cudnn
+from numpy import random
+
+from models.experimental import attempt_load
+from utils.datasets import LoadStreams, LoadImages
+from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, \
+ strip_optimizer, set_logging, increment_path
+from utils.plots import plot_one_box
+from utils.torch_utils import select_device, load_classifier, time_synchronized
+
+
+def detect(save_img=False):
+ source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
+ webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
+ ('rtsp://', 'rtmp://', 'http://'))
+
+ # Directories
+ save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+
+ # Initialize
+ set_logging()
+ device = select_device(opt.device)
+ half = device.type != 'cpu' # half precision only supported on CUDA
+
+ # Load model
+ model = attempt_load(weights, map_location=device) # load FP32 model
+ imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+ if half:
+ model.half() # to FP16
+
+ # Second-stage classifier
+ classify = False
+ if classify:
+ modelc = load_classifier(name='resnet101', n=2) # initialize
+        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
+        modelc.to(device).eval()
+
+ # Set Dataloader
+ vid_path, vid_writer = None, None
+ if webcam:
+ view_img = True
+ cudnn.benchmark = True # set True to speed up constant image size inference
+ dataset = LoadStreams(source, img_size=imgsz)
+ else:
+ save_img = True
+ dataset = LoadImages(source, img_size=imgsz)
+
+ # Get names and colors
+ names = model.module.names if hasattr(model, 'module') else model.names
+ colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
+
+ # Run inference
+ t0 = time.time()
+ img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
+ _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
+ for path, img, im0s, vid_cap in dataset:
+ img = torch.from_numpy(img).to(device)
+ img = img.half() if half else img.float() # uint8 to fp16/32
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
+ if img.ndimension() == 3:
+ img = img.unsqueeze(0)
+
+ # Inference
+ t1 = time_synchronized()
+ pred = model(img, augment=opt.augment)[0]
+
+ # Apply NMS
+ pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
+ t2 = time_synchronized()
+
+ # Apply Classifier
+ if classify:
+ pred = apply_classifier(pred, modelc, img, im0s)
+
+ # Process detections
+ for i, det in enumerate(pred): # detections per image
+ if webcam: # batch_size >= 1
+ p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
+ else:
+ p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+
+ p = Path(p) # to Path
+ save_path = str(save_dir / p.name) # img.jpg
+ txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
+ s += '%gx%g ' % img.shape[2:] # print string
+ gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
+ if len(det):
+ # Rescale boxes from img_size to im0 size
+ det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+
+ # Print results
+ for c in det[:, -1].unique():
+ n = (det[:, -1] == c).sum() # detections per class
+                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string, pluralize only when n > 1
+
+ # Write results
+ for *xyxy, conf, cls in reversed(det):
+ if save_txt: # Write to file
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
+ with open(txt_path + '.txt', 'a') as f:
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+ if save_img or view_img: # Add bbox to image
+ label = f'{names[int(cls)]} {conf:.2f}'
+ plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
+
+ # Print time (inference + NMS)
+ print(f'{s}Done. ({t2 - t1:.3f}s)')
+
+ # Stream results
+ if view_img:
+                cv2.imshow(str(p), im0)
+                cv2.waitKey(1)  # 1 ms; required for the OpenCV window to refresh
+
+ # Save results (image with detections)
+ if save_img:
+ if dataset.mode == 'image':
+ cv2.imwrite(save_path, im0)
+ else: # 'video'
+ if vid_path != save_path: # new video
+ vid_path = save_path
+ if isinstance(vid_writer, cv2.VideoWriter):
+ vid_writer.release() # release previous video writer
+
+ fourcc = 'mp4v' # output video codec
+ fps = vid_cap.get(cv2.CAP_PROP_FPS)
+ w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
+ vid_writer.write(im0)
+
+ if save_txt or save_img:
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+ print(f"Results saved to {save_dir}{s}")
+
+ print(f'Done. ({time.time() - t0:.3f}s)')
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
+ parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
+ parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+ parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
+ parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--view-img', action='store_true', help='display results')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
+ parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--update', action='store_true', help='update all models')
+ parser.add_argument('--project', default='runs/detect', help='save results to project/name')
+ parser.add_argument('--name', default='exp', help='save results to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ opt = parser.parse_args()
+ print(opt)
+
+ with torch.no_grad():
+ if opt.update: # update all models (to fix SourceChangeWarning)
+ for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
+ detect()
+ strip_optimizer(opt.weights)
+ else:
+ detect()
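+
+# Example usage (illustrative; pretrained weights are downloaded if missing):
+#   python3 detect.py --weights yolov5s.pt --source data/images --conf-thres 0.25
+#   python3 detect.py --source 0  # webcam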
diff --git a/hubconf.py b/hubconf.py
new file mode 100644
index 0000000..c4485a4
--- /dev/null
+++ b/hubconf.py
@@ -0,0 +1,141 @@
+"""File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
+
+Usage:
+ import torch
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80)
+"""
+
+from pathlib import Path
+
+import torch
+
+from models.yolo import Model
+from utils.general import set_logging
+from utils.google_utils import attempt_download
+
+dependencies = ['torch', 'yaml']
+set_logging()
+
+
+def create(name, pretrained, channels, classes, autoshape):
+ """Creates a specified YOLOv5 model
+
+ Arguments:
+ name (str): name of model, i.e. 'yolov5s'
+ pretrained (bool): load pretrained weights into the model
+ channels (int): number of input channels
+        classes (int): number of model classes
+        autoshape (bool): wrap the model with .autoshape() for file/URI/PIL/cv2/np inputs and NMS
+
+ Returns:
+ pytorch model
+ """
+ config = Path(__file__).parent / 'models' / f'{name}.yaml' # model.yaml path
+ try:
+ model = Model(config, channels, classes)
+ if pretrained:
+ fname = f'{name}.pt' # checkpoint filename
+ attempt_download(fname) # download if not found locally
+ ckpt = torch.load(fname, map_location=torch.device('cpu')) # load
+ state_dict = ckpt['model'].float().state_dict() # to FP32
+ state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter
+ model.load_state_dict(state_dict, strict=False) # load
+ if len(ckpt['model'].names) == classes:
+ model.names = ckpt['model'].names # set class names attribute
+ if autoshape:
+ model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
+ return model
+
+ except Exception as e:
+ help_url = 'https://github.com/ultralytics/yolov5/issues/36'
+        s = 'Cache may be out of date, try force_reload=True. See %s for help.' % help_url
+ raise Exception(s) from e
+
+
+def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True):
+ """YOLOv5-small model from https://github.com/ultralytics/yolov5
+
+ Arguments:
+ pretrained (bool): load pretrained weights into the model, default=False
+ channels (int): number of input channels, default=3
+ classes (int): number of model classes, default=80
+
+ Returns:
+ pytorch model
+ """
+ return create('yolov5s', pretrained, channels, classes, autoshape)
+
+
+def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True):
+ """YOLOv5-medium model from https://github.com/ultralytics/yolov5
+
+ Arguments:
+ pretrained (bool): load pretrained weights into the model, default=False
+ channels (int): number of input channels, default=3
+ classes (int): number of model classes, default=80
+
+ Returns:
+ pytorch model
+ """
+ return create('yolov5m', pretrained, channels, classes, autoshape)
+
+
+def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True):
+ """YOLOv5-large model from https://github.com/ultralytics/yolov5
+
+ Arguments:
+ pretrained (bool): load pretrained weights into the model, default=False
+ channels (int): number of input channels, default=3
+ classes (int): number of model classes, default=80
+
+ Returns:
+ pytorch model
+ """
+ return create('yolov5l', pretrained, channels, classes, autoshape)
+
+
+def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True):
+ """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5
+
+ Arguments:
+ pretrained (bool): load pretrained weights into the model, default=False
+ channels (int): number of input channels, default=3
+ classes (int): number of model classes, default=80
+
+ Returns:
+ pytorch model
+ """
+ return create('yolov5x', pretrained, channels, classes, autoshape)
+
+
+def custom(path_or_model='path/to/model.pt', autoshape=True):
+ """YOLOv5-custom model from https://github.com/ultralytics/yolov5
+
+ Arguments (3 options):
+ path_or_model (str): 'path/to/model.pt'
+ path_or_model (dict): torch.load('path/to/model.pt')
+ path_or_model (nn.Module): torch.load('path/to/model.pt')['model']
+
+ Returns:
+ pytorch model
+ """
+ model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint
+ if isinstance(model, dict):
+ model = model['model'] # load model
+
+ hub_model = Model(model.yaml).to(next(model.parameters()).device) # create
+ hub_model.load_state_dict(model.float().state_dict()) # load state_dict
+ hub_model.names = model.names # class names
+ return hub_model.autoshape() if autoshape else hub_model
+
+
+if __name__ == '__main__':
+ model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example
+ # model = custom(path_or_model='path/to/model.pt') # custom example
+
+ # Verify inference
+ from PIL import Image
+
+ imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')]
+ results = model(imgs)
+ results.show()
+ results.print()
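+
+    # A hedged sketch: if the hub cache is stale (see the error message in create() above),
+    # force_reload re-downloads the hub repo:
+    # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, force_reload=True)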
diff --git a/lighting_app.py b/lighting_app.py
new file mode 100644
index 0000000..e0f3385
--- /dev/null
+++ b/lighting_app.py
@@ -0,0 +1,41 @@
+import json
+import requests
+from flask import Flask, Response, request
+from concurrent.futures import ThreadPoolExecutor
+app = Flask(__name__)
+executor = ThreadPoolExecutor(3)
+
+
+def analysing(request_data):
+    patrol_host = '172.20.0.115'
+    patrol_port = 8000
+    request_data = json.loads(request_data)
+    file_path = request_data['file_path']
+    url = f"http://{patrol_host}:{patrol_port}/notifyresult"
+    # url = "http://172.20.0.115:8000/notifyresult"
+    headers = {'Content-Type': 'application/json;charset=UTF-8'}
+
+    # Notify call is currently disabled; result_data must be built from the analysis output first.
+    '''
+    print("--------------------------- url---------------------------", url)
+    res = requests.post(url=url, json=result_data, headers=headers)
+    print("---------------------------------res------------------------------------", res)
+    '''
+
+
+@app.route('/analysis', methods=['POST'])
+def picAnalyse():
+    print("---------------------------picAnalyse---start------------------------", request.args)
+    request_data = request.get_data().decode('utf-8')
+    print("---------------------------request_data---------------------------", request_data)
+    executor.submit(analysing, request_data)
+    # return Response()
+    return json.dumps({'success': True})
+
+
+if __name__ == '__main__':
+ app.run(host='0.0.0.0', port=8000)
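+
+# Example request (a sketch; the JSON key matches analysing() above, the file path is hypothetical):
+# curl -X POST http://localhost:8000/analysis -H 'Content-Type: application/json' \
+#      -d '{"file_path": "/data/images/sample.jpg"}'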
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/models/common.py b/models/common.py
new file mode 100644
index 0000000..ac97fe1
--- /dev/null
+++ b/models/common.py
@@ -0,0 +1,297 @@
+# This file contains modules common to various models
+
+import math
+import numpy as np
+import requests
+import torch
+import torch.nn as nn
+from PIL import Image, ImageDraw
+
+from utils.datasets import letterbox
+from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh
+from utils.plots import color_list
+
+
+def autopad(k, p=None): # kernel, padding
+ # Pad to 'same'
+ if p is None:
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
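+    # e.g. autopad(3) -> 1, autopad(5) -> 2, autopad((1, 3)) -> [0, 1]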
+ return p
+
+
+def DWConv(c1, c2, k=1, s=1, act=True):
+ # Depthwise convolution
+ return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
+
+
+class Conv(nn.Module):
+ # Standard convolution
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
+ super(Conv, self).__init__()
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+ self.bn = nn.BatchNorm2d(c2)
+ # self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+ self.act = nn.ReLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+
+ def forward(self, x):
+ return self.act(self.bn(self.conv(x)))
+
+ def fuseforward(self, x):
+ return self.act(self.conv(x))
+
+
+class Bottleneck(nn.Module):
+ # Standard bottleneck
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
+ super(Bottleneck, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = Conv(c_, c2, 3, 1, g=g)
+ self.add = shortcut and c1 == c2
+
+ def forward(self, x):
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class BottleneckCSP(nn.Module):
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
+ super(BottleneckCSP, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+ self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+ self.cv4 = Conv(2 * c_, c2, 1, 1)
+ self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
+ # self.act = nn.LeakyReLU(0.1, inplace=True)
+ self.act = nn.ReLU(inplace=True)
+ self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+
+ def forward(self, x):
+ y1 = self.cv3(self.m(self.cv1(x)))
+ y2 = self.cv2(x)
+ return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
+class C3(nn.Module):
+ # CSP Bottleneck with 3 convolutions
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
+ super(C3, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = Conv(c1, c_, 1, 1)
+ self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
+ self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+ # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
+
+ def forward(self, x):
+ return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
+
+
+class SPP(nn.Module):
+ # Spatial pyramid pooling layer used in YOLOv3-SPP
+ def __init__(self, c1, c2, k=(5, 9, 13)):
+ super(SPP, self).__init__()
+ c_ = c1 // 2 # hidden channels
+ self.cv1 = Conv(c1, c_, 1, 1)
+ self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
+ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
+
+ def forward(self, x):
+ x = self.cv1(x)
+ return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
+
+
+class Focus(nn.Module):
+    # Focus wh information into c-space
+    # NOTE: the usual slice-and-concat Focus is replaced here by a single stride-2 conv
+    # (likely for RKNN/NPU-friendly export); the s argument is ignored, stride is fixed at 2
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
+        super(Focus, self).__init__()
+        self.conv = Conv(c1, c2, k, 2, p, g, act)
+        # self.contract = Contract(gain=2)
+
+    def forward(self, x): # x(b,c1,w,h) -> y(b,c2,w/2,h/2)
+        return self.conv(x)
+
+
+class Contract(nn.Module):
+ # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
+ def __init__(self, gain=2):
+ super().__init__()
+ self.gain = gain
+
+ def forward(self, x):
+        N, C, H, W = x.size() # assert (H % s == 0) and (W % s == 0), 'Indivisible gain'
+ s = self.gain
+ x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
+ x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
+ return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)
+
+
+class Expand(nn.Module):
+ # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
+ def __init__(self, gain=2):
+ super().__init__()
+ self.gain = gain
+
+ def forward(self, x):
+        N, C, H, W = x.size() # assert C % s ** 2 == 0, 'Indivisible gain'
+ s = self.gain
+ x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
+ x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
+ return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)
+
+
+class Concat(nn.Module):
+ # Concatenate a list of tensors along dimension
+ def __init__(self, dimension=1):
+ super(Concat, self).__init__()
+ self.d = dimension
+
+ def forward(self, x):
+ return torch.cat(x, self.d)
+
+
+class NMS(nn.Module):
+ # Non-Maximum Suppression (NMS) module
+ conf = 0.25 # confidence threshold
+ iou = 0.45 # IoU threshold
+ classes = None # (optional list) filter by class
+
+ def __init__(self):
+ super(NMS, self).__init__()
+
+ def forward(self, x):
+ return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
+
+
+class autoShape(nn.Module):
+ # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
+ img_size = 640 # inference size (pixels)
+ conf = 0.25 # NMS confidence threshold
+ iou = 0.45 # NMS IoU threshold
+ classes = None # (optional list) filter by class
+
+ def __init__(self, model):
+ super(autoShape, self).__init__()
+ self.model = model.eval()
+
+ def autoshape(self):
+ print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
+ return self
+
+ def forward(self, imgs, size=640, augment=False, profile=False):
+        # Inference from various sources. For a height=720, width=1280 RGB image, example inputs are:
+ # filename: imgs = 'data/samples/zidane.jpg'
+ # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
+ # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3)
+ # PIL: = Image.open('image.jpg') # HWC x(720,1280,3)
+ # numpy: = np.zeros((720,1280,3)) # HWC
+ # torch: = torch.zeros(16,3,720,1280) # BCHW
+ # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
+
+ p = next(self.model.parameters()) # for device and type
+ if isinstance(imgs, torch.Tensor): # torch
+ return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
+
+ # Pre-process
+ n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
+ shape0, shape1 = [], [] # image and inference shapes
+ for i, im in enumerate(imgs):
+ if isinstance(im, str): # filename or uri
+ im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open
+ im = np.array(im) # to numpy
+ if im.shape[0] < 5: # image in CHW
+ im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
+ im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
+ s = im.shape[:2] # HWC
+ shape0.append(s) # image shape
+ g = (size / max(s)) # gain
+ shape1.append([y * g for y in s])
+ imgs[i] = im # update
+ shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
+ x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
+ x = np.stack(x, 0) if n > 1 else x[0][None] # stack
+ x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
+ x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
+
+ # Inference
+ with torch.no_grad():
+ y = self.model(x, augment, profile)[0] # forward
+ y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
+
+ # Post-process
+ for i in range(n):
+ scale_coords(shape1, y[i][:, :4], shape0[i])
+
+ return Detections(imgs, y, self.names)
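+
+    # Usage sketch (image paths hypothetical): wrap a Model with model.autoshape(), then pass mixed inputs:
+    #   results = model(['data/images/one.jpg', Image.open('data/images/two.jpg')], size=640)
+    #   results.print()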
+
+
+class Detections:
+ # detections class for YOLOv5 inference results
+ def __init__(self, imgs, pred, names=None):
+ super(Detections, self).__init__()
+ d = pred[0].device # device
+ gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
+ self.imgs = imgs # list of images as numpy arrays
+ self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
+ self.names = names # class names
+ self.xyxy = pred # xyxy pixels
+ self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
+ self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
+ self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
+ self.n = len(self.pred)
+
+    def display(self, pprint=False, show=False, save=False):
+        colors = color_list()
+        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
+            s = f'Image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
+            if pred is not None:
+                for c in pred[:, -1].unique():
+                    n = (pred[:, -1] == c).sum() # detections per class
+                    s += f'{n} {self.names[int(c)]}s, ' # add to string
+                if show or save:
+                    img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np
+                    for *box, conf, cls in pred: # xyxy, confidence, class
+                        # s += '%s %.2f, ' % (names[int(cls)], conf) # label
+                        ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot
+                    if save:
+                        f = f'results{i}.jpg'
+                        s += f"saved to '{f}'"
+                        img.save(f) # save
+                    if show:
+                        img.show(f'Image {i}') # show
+            if pprint:
+                print(s)
+
+ def print(self):
+ self.display(pprint=True) # print results
+
+ def show(self):
+ self.display(show=True) # show results
+
+ def save(self):
+ self.display(save=True) # save results
+
+ def __len__(self):
+ return self.n
+
+ def tolist(self):
+ # return a list of Detections objects, i.e. 'for result in results.tolist():'
+ x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
+ for d in x:
+ for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
+ setattr(d, k, getattr(d, k)[0]) # pop out of list
+ return x
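+
+    # e.g. per-image iteration (sketch):
+    #   for r in results.tolist():
+    #       print(r.xyxy) # (x1, y1, x2, y2, conf, cls) rows for one image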
+
+
+class Classify(nn.Module):
+ # Classification head, i.e. x(b,c1,20,20) to x(b,c2)
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
+ super(Classify, self).__init__()
+ self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
+ self.flat = nn.Flatten()
+
+ def forward(self, x):
+ z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
+ return self.flat(self.conv(z)) # flatten to x(b,c2)
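+
+# e.g. Classify(1024, 1000)(torch.zeros(1, 1024, 20, 20)).shape -> torch.Size([1, 1000])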
diff --git a/models/experimental.py b/models/experimental.py
new file mode 100644
index 0000000..2dbbf7f
--- /dev/null
+++ b/models/experimental.py
@@ -0,0 +1,133 @@
+# This file contains experimental modules
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from models.common import Conv, DWConv
+from utils.google_utils import attempt_download
+
+
+class CrossConv(nn.Module):
+ # Cross Convolution Downsample
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
+ # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
+ super(CrossConv, self).__init__()
+ c_ = int(c2 * e) # hidden channels
+ self.cv1 = Conv(c1, c_, (1, k), (1, s))
+ self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
+ self.add = shortcut and c1 == c2
+
+ def forward(self, x):
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class Sum(nn.Module):
+ # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
+ def __init__(self, n, weight=False): # n: number of inputs
+ super(Sum, self).__init__()
+ self.weight = weight # apply weights boolean
+ self.iter = range(n - 1) # iter object
+ if weight:
+ self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
+
+ def forward(self, x):
+ y = x[0] # no weight
+ if self.weight:
+ w = torch.sigmoid(self.w) * 2
+ for i in self.iter:
+ y = y + x[i + 1] * w[i]
+ else:
+ for i in self.iter:
+ y = y + x[i + 1]
+ return y
+
+
+class GhostConv(nn.Module):
+ # Ghost Convolution https://github.com/huawei-noah/ghostnet
+ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
+ super(GhostConv, self).__init__()
+ c_ = c2 // 2 # hidden channels
+ self.cv1 = Conv(c1, c_, k, s, None, g, act)
+ self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
+
+ def forward(self, x):
+ y = self.cv1(x)
+ return torch.cat([y, self.cv2(y)], 1)
+
+
+class GhostBottleneck(nn.Module):
+ # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
+ def __init__(self, c1, c2, k, s):
+ super(GhostBottleneck, self).__init__()
+ c_ = c2 // 2
+ self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
+ DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
+ GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
+ self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
+ Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
+
+ def forward(self, x):
+ return self.conv(x) + self.shortcut(x)
+
+
+class MixConv2d(nn.Module):
+ # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
+ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
+ super(MixConv2d, self).__init__()
+ groups = len(k)
+ if equal_ch: # equal c_ per group
+ i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
+ c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
+ else: # equal weight.numel() per group
+ b = [c2] + [0] * groups
+ a = np.eye(groups + 1, groups, k=-1)
+ a -= np.roll(a, 1, axis=1)
+ a *= np.array(k) ** 2
+ a[0] = 1
+ c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
+
+ self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
+ self.bn = nn.BatchNorm2d(c2)
+ self.act = nn.LeakyReLU(0.1, inplace=True)
+
+ def forward(self, x):
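+        # note: the residual add below assumes c1 == c2 (and s == 1) so the skip connection shapes match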
+ return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
+
+
+class Ensemble(nn.ModuleList):
+ # Ensemble of models
+ def __init__(self):
+ super(Ensemble, self).__init__()
+
+ def forward(self, x, augment=False):
+ y = []
+ for module in self:
+ y.append(module(x, augment)[0])
+ # y = torch.stack(y).max(0)[0] # max ensemble
+ # y = torch.stack(y).mean(0) # mean ensemble
+ y = torch.cat(y, 1) # nms ensemble
+ return y, None # inference, train output
+
+
+def attempt_load(weights, map_location=None):
+ # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
+ model = Ensemble()
+ for w in weights if isinstance(weights, list) else [weights]:
+ attempt_download(w)
+ model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model
+
+ # Compatibility updates
+ for m in model.modules():
+ if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
+ m.inplace = True # pytorch 1.7.0 compatibility
+ elif type(m) is Conv:
+ m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
+
+ if len(model) == 1:
+ return model[-1] # return model
+ else:
+ print('Ensemble created with %s\n' % weights)
+ for k in ['names', 'stride']:
+ setattr(model, k, getattr(model[-1], k))
+ return model # return ensemble
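+
+
+# Usage sketch (weight filenames hypothetical):
+# model = attempt_load('yolov5s.pt', map_location='cpu') # single model
+# ensemble = attempt_load(['yolov5s.pt', 'yolov5m.pt'], map_location='cpu') # NMS ensemble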
diff --git a/models/export.py b/models/export.py
new file mode 100644
index 0000000..3dcdcab
--- /dev/null
+++ b/models/export.py
@@ -0,0 +1,70 @@
+"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
+
+Usage:
+ $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+"""
+
+import argparse
+import sys
+import time
+
+sys.path.append('./') # to run '$ python *.py' files in subdirectories
+
+import torch
+import torch.nn as nn
+
+import models
+from models.experimental import attempt_load
+from utils.activations import Hardswish, SiLU
+from utils.general import set_logging, check_img_size
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default='./weights/best.pt', help='weights path') # from yolov5/models/
+ parser.add_argument('--img-size', nargs='+', type=int, default=[416, 416], help='image size') # height, width
+ parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+ opt = parser.parse_args()
+ opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
+ print(opt)
+ set_logging()
+ t = time.time()
+
+ # Load PyTorch model
+ model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model
+ labels = model.names
+
+ # Checks
+ gs = int(max(model.stride)) # grid size (max stride)
+ opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples
+
+ # Input
+    img = torch.zeros(opt.batch_size, 3, *opt.img_size[::-1]) # e.g. (1, 3, 416, 416) with the defaults above
+
+ # Update model
+ for k, m in model.named_modules():
+ m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
+ if isinstance(m, models.common.Conv): # assign export-friendly activations
+ if isinstance(m.act, nn.Hardswish):
+ m.act = Hardswish()
+ # elif isinstance(m.act, nn.SiLU):
+ # m.act = SiLU()
+ # elif isinstance(m, models.yolo.Detect):
+ # m.forward = m.forward_export # assign forward (optional)
+ model.model[-1].export = True # set Detect() layer export=True
+ y = model(img) # dry run
+ try:
+ import onnx
+
+ print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
+ f = opt.weights.replace('.pt', f'_{opt.img_size[0]}x{opt.img_size[1]}.onnx') # filename
+ torch.onnx.export(model, img, f, verbose=False, opset_version=10, input_names=['images'],
+ output_names=['classes', 'boxes'] if y is None else ['output'])
+
+ # Checks
+ onnx_model = onnx.load(f) # load onnx model
+ onnx.checker.check_model(onnx_model) # check onnx model
+ # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model
+ print('ONNX export success, saved as %s' % f)
+ except Exception as e:
+ print('ONNX export failure: %s' % e)
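+
+    # Optional TorchScript export (a sketch, not exercised by this script):
+    # try:
+    #     ts = torch.jit.trace(model, img)
+    #     ts.save(opt.weights.replace('.pt', '.torchscript.pt'))
+    # except Exception as e:
+    #     print('TorchScript export failure: %s' % e)
+
+    print('\nExport complete (%.2fs).' % (time.time() - t)) # elapsed since t = time.time() above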
+
diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml
new file mode 100644
index 0000000..a07a4dc
--- /dev/null
+++ b/models/hub/anchors.yaml
@@ -0,0 +1,58 @@
+# Default YOLOv5 anchors for COCO data
+
+
+# P5 -------------------------------------------------------------------------------------------------------------------
+# P5-640:
+anchors_p5_640:
+ - [ 10,13, 16,30, 33,23 ] # P3/8
+ - [ 30,61, 62,45, 59,119 ] # P4/16
+ - [ 116,90, 156,198, 373,326 ] # P5/32
+
+
+# P6 -------------------------------------------------------------------------------------------------------------------
+# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
+anchors_p6_640:
+ - [ 9,11, 21,19, 17,41 ] # P3/8
+ - [ 43,32, 39,70, 86,64 ] # P4/16
+ - [ 65,131, 134,130, 120,265 ] # P5/32
+ - [ 282,180, 247,354, 512,387 ] # P6/64
+
+# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
+anchors_p6_1280:
+ - [ 19,27, 44,40, 38,94 ] # P3/8
+ - [ 96,68, 86,152, 180,137 ] # P4/16
+ - [ 140,301, 303,264, 238,542 ] # P5/32
+ - [ 436,615, 739,380, 925,792 ] # P6/64
+
+# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
+anchors_p6_1920:
+ - [ 28,41, 67,59, 57,141 ] # P3/8
+ - [ 144,103, 129,227, 270,205 ] # P4/16
+ - [ 209,452, 455,396, 358,812 ] # P5/32
+ - [ 653,922, 1109,570, 1387,1187 ] # P6/64
+
+
+# P7 -------------------------------------------------------------------------------------------------------------------
+# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
+anchors_p7_640:
+ - [ 11,11, 13,30, 29,20 ] # P3/8
+ - [ 30,46, 61,38, 39,92 ] # P4/16
+ - [ 78,80, 146,66, 79,163 ] # P5/32
+ - [ 149,150, 321,143, 157,303 ] # P6/64
+ - [ 257,402, 359,290, 524,372 ] # P7/128
+
+# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
+anchors_p7_1280:
+ - [ 19,22, 54,36, 32,77 ] # P3/8
+ - [ 70,83, 138,71, 75,173 ] # P4/16
+ - [ 165,159, 148,334, 375,151 ] # P5/32
+ - [ 334,317, 251,626, 499,474 ] # P6/64
+ - [ 750,326, 534,814, 1079,818 ] # P7/128
+
+# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
+anchors_p7_1920:
+ - [ 29,34, 81,55, 47,115 ] # P3/8
+ - [ 105,124, 207,107, 113,259 ] # P4/16
+ - [ 247,238, 222,500, 563,227 ] # P5/32
+ - [ 501,476, 376,939, 749,711 ] # P6/64
+ - [ 1126,489, 801,1222, 1618,1227 ] # P7/128
diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml
new file mode 100644
index 0000000..38dcc44
--- /dev/null
+++ b/models/hub/yolov3-spp.yaml
@@ -0,0 +1,51 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# darknet53 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [32, 3, 1]], # 0
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
+ [-1, 1, Bottleneck, [64]],
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
+ [-1, 2, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
+ [-1, 8, Bottleneck, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
+ [-1, 8, Bottleneck, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
+ [-1, 4, Bottleneck, [1024]], # 10
+ ]
+
+# YOLOv3-SPP head
+head:
+ [[-1, 1, Bottleneck, [1024, False]],
+ [-1, 1, SPP, [512, [5, 9, 13]]],
+ [-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml
new file mode 100644
index 0000000..ff7638c
--- /dev/null
+++ b/models/hub/yolov3-tiny.yaml
@@ -0,0 +1,41 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,14, 23,27, 37,58] # P4/16
+ - [81,82, 135,169, 344,319] # P5/32
+
+# YOLOv3-tiny backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [16, 3, 1]], # 0
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
+ [-1, 1, Conv, [32, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
+ [-1, 1, Conv, [64, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
+ [-1, 1, Conv, [128, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
+ [-1, 1, Conv, [256, 3, 1]],
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
+ [-1, 1, Conv, [512, 3, 1]],
+ [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
+ [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
+ ]
+
+# YOLOv3-tiny head
+head:
+ [[-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
+
+ [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
+ ]
diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml
new file mode 100644
index 0000000..f2e7613
--- /dev/null
+++ b/models/hub/yolov3.yaml
@@ -0,0 +1,51 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# darknet53 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Conv, [32, 3, 1]], # 0
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
+ [-1, 1, Bottleneck, [64]],
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
+ [-1, 2, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
+ [-1, 8, Bottleneck, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
+ [-1, 8, Bottleneck, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
+ [-1, 4, Bottleneck, [1024]], # 10
+ ]
+
+# YOLOv3 head
+head:
+ [[-1, 1, Bottleneck, [1024, False]],
+ [-1, 1, Conv, [512, [1, 1]]],
+ [-1, 1, Conv, [1024, 3, 1]],
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
+
+ [-2, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Bottleneck, [512, False]],
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
+
+ [-2, 1, Conv, [128, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Bottleneck, [256, False]],
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
+
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml
new file mode 100644
index 0000000..e772bff
--- /dev/null
+++ b/models/hub/yolov5-fpn.yaml
@@ -0,0 +1,42 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, Bottleneck, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, BottleneckCSP, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, BottleneckCSP, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 6, BottleneckCSP, [1024]], # 9
+ ]
+
+# YOLOv5 FPN head
+head:
+ [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large)
+
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 1, Conv, [512, 1, 1]],
+ [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium)
+
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small)
+
+ [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml
new file mode 100644
index 0000000..0633a90
--- /dev/null
+++ b/models/hub/yolov5-p2.yaml
@@ -0,0 +1,54 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors: 3
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
+ [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 9
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 13
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
+
+ [ -1, 1, Conv, [ 128, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2
+ [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall)
+
+ [ -1, 1, Conv, [ 128, 3, 2 ] ],
+ [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3
+ [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large)
+
+ [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5)
+ ]
diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml
new file mode 100644
index 0000000..3728a11
--- /dev/null
+++ b/models/hub/yolov5-p6.yaml
@@ -0,0 +1,56 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors: 3
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+ [ -1, 3, C3, [ 1024, False ] ], # 11
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 15
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 19
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
+    [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)
+
+ [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
+ ]
diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml
new file mode 100644
index 0000000..ca8f849
--- /dev/null
+++ b/models/hub/yolov5-p7.yaml
@@ -0,0 +1,67 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors: 3
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
+ [ -1, 3, C3, [ 128 ] ],
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
+ [ -1, 9, C3, [ 256 ] ],
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
+ [ -1, 9, C3, [ 512 ] ],
+ [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
+ [ -1, 3, C3, [ 768 ] ],
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
+ [ -1, 3, C3, [ 1024 ] ],
+ [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128
+ [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ],
+ [ -1, 3, C3, [ 1280, False ] ], # 13
+ ]
+
+# YOLOv5 head
+head:
+ [ [ -1, 1, Conv, [ 1024, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6
+ [ -1, 3, C3, [ 1024, False ] ], # 17
+
+ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
+ [ -1, 3, C3, [ 768, False ] ], # 21
+
+ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
+ [ -1, 3, C3, [ 512, False ] ], # 25
+
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
+ [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small)
+
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
+ [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4
+ [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium)
+
+ [ -1, 1, Conv, [ 512, 3, 2 ] ],
+ [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5
+ [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large)
+
+ [ -1, 1, Conv, [ 768, 3, 2 ] ],
+ [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6
+ [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge)
+
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ],
+ [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7
+ [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge)
+
+ [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7)
+ ]
diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml
new file mode 100644
index 0000000..340f95a
--- /dev/null
+++ b/models/hub/yolov5-panet.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, BottleneckCSP, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, BottleneckCSP, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, BottleneckCSP, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, BottleneckCSP, [1024, False]], # 9
+ ]
+
+# YOLOv5 PANet head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, BottleneckCSP, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolo.py b/models/yolo.py
new file mode 100644
index 0000000..5dc8b57
--- /dev/null
+++ b/models/yolo.py
@@ -0,0 +1,286 @@
+import argparse
+import logging
+import sys
+from copy import deepcopy
+from pathlib import Path
+
+sys.path.append('./') # to run '$ python *.py' files in subdirectories
+logger = logging.getLogger(__name__)
+
+from models.common import *
+from models.experimental import MixConv2d, CrossConv
+from utils.autoanchor import check_anchor_order
+from utils.general import make_divisible, check_file, set_logging
+from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
+ select_device, copy_attr
+
+try:
+ import thop # for FLOPS computation
+except ImportError:
+ thop = None
+
+
+class Detect(nn.Module):
+ stride = None # strides computed during build
+ export = False # onnx export
+
+ def __init__(self, nc=80, anchors=(), ch=()): # detection layer
+ super(Detect, self).__init__()
+ self.nc = nc # number of classes
+ self.no = nc + 5 # number of outputs per anchor
+ self.nl = len(anchors) # number of detection layers
+ self.na = len(anchors[0]) // 2 # number of anchors
+ self.grid = [torch.zeros(1)] * self.nl # init grid
+ a = torch.tensor(anchors).float().view(self.nl, -1, 2)
+ self.register_buffer('anchors', a) # shape(nl,na,2)
+ self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
+
+ def forward(self, x):
+ # x = x.copy() # for profiling
+ z = [] # inference output
+ self.training |= self.export
+ for i in range(self.nl):
+ x[i] = self.m[i](x[i]) # conv
+ bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
+ x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
+
+ if not self.training: # inference
+ if self.grid[i].shape[2:4] != x[i].shape[2:4]:
+ self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
+
+ y = x[i].sigmoid()
+ y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
+ y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
+ z.append(y.view(bs, -1, self.no))
+
+ return x if self.training else (torch.cat(z, 1), x)
+
+ @staticmethod
+ def _make_grid(nx=20, ny=20):
+ yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
+ return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
+
+
+class Model(nn.Module):
+ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes
+ super(Model, self).__init__()
+ if isinstance(cfg, dict):
+ self.yaml = cfg # model dict
+ else: # is *.yaml
+ import yaml # for torch hub
+ self.yaml_file = Path(cfg).name
+ with open(cfg) as f:
+ self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
+
+ # Define model
+ ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
+ if nc and nc != self.yaml['nc']:
+ logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
+ self.yaml['nc'] = nc # override yaml value
+ self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
+ self.names = [str(i) for i in range(self.yaml['nc'])] # default names
+ # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
+
+ # Build strides, anchors
+ m = self.model[-1] # Detect()
+ if isinstance(m, Detect):
+ s = 256 # 2x min stride
+ m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
+ m.anchors /= m.stride.view(-1, 1, 1)
+ check_anchor_order(m)
+ self.stride = m.stride
+ self._initialize_biases() # only run once
+ # print('Strides: %s' % m.stride.tolist())
+
+ # Init weights, biases
+ initialize_weights(self)
+ self.info()
+ logger.info('')
+
+ def forward(self, x, augment=False, profile=False):
+ if augment:
+ img_size = x.shape[-2:] # height, width
+ s = [1, 0.83, 0.67] # scales
+ f = [None, 3, None] # flips (2-ud, 3-lr)
+ y = [] # outputs
+ for si, fi in zip(s, f):
+ xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+ yi = self.forward_once(xi)[0] # forward
+ # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
+ yi[..., :4] /= si # de-scale
+ if fi == 2:
+ yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
+ elif fi == 3:
+ yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
+ y.append(yi)
+ return torch.cat(y, 1), None # augmented inference, train
+ else:
+ return self.forward_once(x, profile) # single-scale inference, train
+
+ def forward_once(self, x, profile=False):
+ y, dt = [], [] # outputs
+ for m in self.model:
+ if m.f != -1: # if not from previous layer
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
+
+ if profile:
+ o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
+ t = time_synchronized()
+ for _ in range(10):
+ _ = m(x)
+ dt.append((time_synchronized() - t) * 100)
+ print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
+
+ x = m(x) # run
+ y.append(x if m.i in self.save else None) # save output
+
+ if profile:
+ print('%.1fms total' % sum(dt))
+ return x
+
+ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
+ # https://arxiv.org/abs/1708.02002 section 3.3
+ # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+ m = self.model[-1] # Detect() module
+ for mi, s in zip(m.m, m.stride): # from
+ b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
+ b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
+ b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
+ mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+ def _print_biases(self):
+ m = self.model[-1] # Detect() module
+ for mi in m.m: # from
+ b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
+ print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
+
+ # def _print_weights(self):
+ # for m in self.model.modules():
+ # if type(m) is Bottleneck:
+ # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
+
+ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
+ print('Fusing layers... ')
+ for m in self.model.modules():
+ if type(m) is Conv and hasattr(m, 'bn'):
+ m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
+ delattr(m, 'bn') # remove batchnorm
+ m.forward = m.fuseforward # update forward
+ self.info()
+ return self
+
+ def nms(self, mode=True): # add or remove NMS module
+ present = type(self.model[-1]) is NMS # last layer is NMS
+ if mode and not present:
+ print('Adding NMS... ')
+ m = NMS() # module
+ m.f = -1 # from
+ m.i = self.model[-1].i + 1 # index
+ self.model.add_module(name='%s' % m.i, module=m) # add
+ self.eval()
+ elif not mode and present:
+ print('Removing NMS... ')
+ self.model = self.model[:-1] # remove
+ return self
+
+ def autoshape(self): # add autoShape module
+ print('Adding autoShape... ')
+ m = autoShape(self) # wrap model
+ copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
+ return m
+
+ def info(self, verbose=False, img_size=640): # print model information
+ model_info(self, verbose, img_size)
+
+
+def parse_model(d, ch): # model_dict, input_channels(3)
+ logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+ anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+ na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
+ no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
+
+ layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
+ for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
+ m = eval(m) if isinstance(m, str) else m # eval strings
+ for j, a in enumerate(args):
+ try:
+ args[j] = eval(a) if isinstance(a, str) else a # eval strings
+ except:
+ pass
+
+ n = max(round(n * gd), 1) if n > 1 else n # depth gain
+ if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
+ c1, c2 = ch[f], args[0]
+
+ # Normal
+ # if i > 0 and args[0] != no: # channel expansion factor
+ # ex = 1.75 # exponential (default 2.0)
+ # e = math.log(c2 / ch[1]) / math.log(2)
+ # c2 = int(ch[1] * ex ** e)
+ # if m != Focus:
+
+ c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
+
+ # Experimental
+ # if i > 0 and args[0] != no: # channel expansion factor
+ # ex = 1 + gw # exponential (default 2.0)
+ # ch1 = 32 # ch[1]
+ # e = math.log(c2 / ch1) / math.log(2) # level 1-n
+ # c2 = int(ch1 * ex ** e)
+ # if m != Focus:
+ # c2 = make_divisible(c2, 8) if c2 != no else c2
+
+ args = [c1, c2, *args[1:]]
+ if m in [BottleneckCSP, C3]:
+ args.insert(2, n)
+ n = 1
+ elif m is nn.BatchNorm2d:
+ args = [ch[f]]
+ elif m is Concat:
+ c2 = sum([ch[x if x < 0 else x + 1] for x in f])
+ elif m is Detect:
+ args.append([ch[x + 1] for x in f])
+ if isinstance(args[1], int): # number of anchors
+ args[1] = [list(range(args[1] * 2))] * len(f)
+ elif m is Contract:
+ c2 = ch[f if f < 0 else f + 1] * args[0] ** 2
+ elif m is Expand:
+ c2 = ch[f if f < 0 else f + 1] // args[0] ** 2
+ else:
+ c2 = ch[f if f < 0 else f + 1]
+
+ m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
+ t = str(m)[8:-2].replace('__main__.', '') # module type
+ np = sum([x.numel() for x in m_.parameters()]) # number params
+ m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
+ logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
+ save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
+ layers.append(m_)
+ ch.append(c2)
+ return nn.Sequential(*layers), sorted(save)
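+
+
+# parse_model() is normally driven by Model(); a direct-call sketch (config path assumed):
+# import yaml
+# with open('models/yolov5s.yaml') as f:
+#     model_seq, savelist = parse_model(yaml.load(f, Loader=yaml.FullLoader), ch=[3])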
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ opt = parser.parse_args()
+ opt.cfg = check_file(opt.cfg) # check file
+ set_logging()
+ device = select_device(opt.device)
+
+ # Create model
+ model = Model(opt.cfg).to(device)
+ model.train()
+
+ # Profile
+ # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+ # y = model(img, profile=True)
+
+ # Tensorboard
+ # from torch.utils.tensorboard import SummaryWriter
+ # tb_writer = SummaryWriter()
+ # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
+ # tb_writer.add_graph(model.model, img) # add model to tensorboard
+ # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml
new file mode 100644
index 0000000..71ebf86
--- /dev/null
+++ b/models/yolov5l.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.0 # model depth multiple
+width_multiple: 1.0 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, C3, [1024, False]], # 9
+ ]
+
+# YOLOv5 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml
new file mode 100644
index 0000000..3c749c9
--- /dev/null
+++ b/models/yolov5m.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 0.67 # model depth multiple
+width_multiple: 0.75 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, C3, [1024, False]], # 9
+ ]
+
+# YOLOv5 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml
new file mode 100644
index 0000000..aca669d
--- /dev/null
+++ b/models/yolov5s.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 0.33 # model depth multiple
+width_multiple: 0.50 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, C3, [1024, False]], # 9
+ ]
+
+# YOLOv5 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml
new file mode 100644
index 0000000..d3babdf
--- /dev/null
+++ b/models/yolov5x.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80 # number of classes
+depth_multiple: 1.33 # model depth multiple
+width_multiple: 1.25 # layer channel multiple
+
+# anchors
+anchors:
+ - [10,13, 16,30, 33,23] # P3/8
+ - [30,61, 62,45, 59,119] # P4/16
+ - [116,90, 156,198, 373,326] # P5/32
+
+# YOLOv5 backbone
+backbone:
+ # [from, number, module, args]
+ [[-1, 1, Focus, [64, 3]], # 0-P1/2
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+ [-1, 3, C3, [128]],
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+ [-1, 9, C3, [256]],
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+ [-1, 9, C3, [512]],
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+ [-1, 1, SPP, [1024, [5, 9, 13]]],
+ [-1, 3, C3, [1024, False]], # 9
+ ]
+
+# YOLOv5 head
+head:
+ [[-1, 1, Conv, [512, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
+ [-1, 3, C3, [512, False]], # 13
+
+ [-1, 1, Conv, [256, 1, 1]],
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+ [-1, 1, Conv, [256, 3, 2]],
+ [[-1, 14], 1, Concat, [1]], # cat head P4
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+ [-1, 1, Conv, [512, 3, 2]],
+ [[-1, 10], 1, Concat, [1]], # cat head P5
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+ ]
diff --git a/onnx_to_rknn.py b/onnx_to_rknn.py
new file mode 100644
index 0000000..bbba54d
--- /dev/null
+++ b/onnx_to_rknn.py
@@ -0,0 +1,53 @@
+"""
+Convert an ONNX model to an RKNN model
+"""
+
+from rknn.api import RKNN
+
+if __name__ == '__main__':
+ ONNX_MODEL = 'yolov5m_416x416.onnx'
+ RKNN_MODEL = 'yolov5m_416x416.rknn'
+
+ # Create RKNN object
+ rknn = RKNN()
+ print('--> config model')
+ # rknn.config(mean_values=[[123.675, 116.28, 103.53]], std_values=[[58.82, 58.82, 58.82]], reorder_channel='0 1 2')
+ # rknn.config(batch_size=1,target_platform=["rk1806", "rk1808", "rk3399pro"], mean_values='0 0 0 255')
+ rknn.config(channel_mean_value='0 0 0 255', reorder_channel='0 1 2', batch_size=1)
+ # rknn.config(channel_mean_value='0 0 0 1', reorder_channel='0 1 2', batch_size=1)
+ # rknn.config(mean_values=[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], std_values=[[255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0, 255.0]], reorder_channel='0 1 2', batch_size=1)
+ print('done')
+
+    # Load ONNX model
+    print('--> Loading model')
+    ret = rknn.load_onnx(model=ONNX_MODEL)
+    if ret != 0:
+        print('Load ONNX model failed!')
+        exit(ret)
+    print('done')
+
+ # Build model
+ print('--> Building model')
+ ret = rknn.build(do_quantization=True, dataset='./dataset.txt') # pre_compile=True
+ # ret = rknn.build(do_quantization=True) # pre_compile=True
+ if ret != 0:
+        print('Build model failed!')
+ exit(ret)
+ print('done')
+
+ # Export rknn model
+ print('--> Export RKNN model')
+ ret = rknn.export_rknn(RKNN_MODEL)
+ if ret != 0:
+        print('Export RKNN model failed!')
+ exit(ret)
+ print('done')
+ rknn.release()
+
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..3c23f2b
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,30 @@
+# pip install -r requirements.txt
+
+# base ----------------------------------------
+Cython
+matplotlib>=3.2.2
+numpy>=1.18.5
+opencv-python>=4.1.2
+Pillow
+PyYAML>=5.3
+scipy>=1.4.1
+tensorboard>=2.2
+torch>=1.7.0
+torchvision>=0.8.1
+tqdm>=4.41.0
+
+# logging -------------------------------------
+# wandb
+
+# plotting ------------------------------------
+seaborn>=0.11.0
+pandas
+
+# export --------------------------------------
+# coremltools==4.0
+# onnx>=1.8.0
+# scikit-learn==0.19.2 # for coreml quantization
+
+# extras --------------------------------------
+thop # FLOPs computation
+pycocotools>=2.0 # COCO mAP
diff --git a/rknn_detect_yolov5_0.py b/rknn_detect_yolov5_0.py
new file mode 100644
index 0000000..79be8d6
--- /dev/null
+++ b/rknn_detect_yolov5_0.py
@@ -0,0 +1,278 @@
+#from rknn.api import RKNN
+from rknnlite.api import RKNNLite
+import cv2
+import numpy as np
+import time
+import os
+"""
+yolov5 预测脚本 for rknn
+"""
+
+SIZE = (640, 640)
+CLASSES = ("lighting")
+OBJ_THRESH = 0.1
+NMS_THRESH = 0.1
+MASKS = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+ANCHORS = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
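+# MASKS[t] picks which three ANCHORS belong to output head t. The order here
+# assumes the usual YOLOv5 export order (t=0 -> P3/stride 8, t=1 -> P4/stride 16,
+# t=2 -> P5/stride 32); verify the head order against your exported model.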
+
+def sigmoid(x):
+ return 1 / (1 + np.exp(-x))
+
+IMAGE_EXT = [".jpg", "*.JPG", ".jpeg", ".webp", ".bmp", ".png"]
+
+def get_image_list(path):
+ image_names = []
+ for maindir, subdir, file_name_list in os.walk(path):
+ for filename in file_name_list:
+ apath = os.path.join(maindir, filename)
+ ext = os.path.splitext(apath)[1]
+ if ext in IMAGE_EXT:
+ image_names.append(apath)
+ return image_names
+
+def filter_boxes(boxes, box_confidences, box_class_probs) -> (np.ndarray, np.ndarray, np.ndarray):
+    box_scores = box_confidences * box_class_probs  # conditional probability: class score scaled by the cell's objectness
+    box_classes = np.argmax(box_scores, axis=-1)  # index of the highest-scoring class
+    box_class_scores = np.max(box_scores, axis=-1)  # score of that class
+    pos = np.where(box_class_scores >= OBJ_THRESH)  # keep detections above the threshold
+    # pos = box_class_scores >= OBJ_THRESH  # boolean-mask alternative
+ boxes = boxes[pos]
+ classes = box_classes[pos]
+ scores = box_class_scores[pos]
+ return boxes, classes, scores
+
+
+def nms_boxes(boxes, scores):
+ x = boxes[:, 0]
+ y = boxes[:, 1]
+ w = boxes[:, 2]
+ h = boxes[:, 3]
+
+ areas = w * h
+ order = scores.argsort()[::-1]
+
+ keep = []
+ while order.size > 0:
+ i = order[0]
+ keep.append(i)
+
+ xx1 = np.maximum(x[i], x[order[1:]])
+ yy1 = np.maximum(y[i], y[order[1:]])
+ xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
+ yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
+
+ w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
+ h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
+ inter = w1 * h1
+
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= NMS_THRESH)[0]
+ order = order[inds + 1]
+ keep = np.array(keep)
+ return keep
+
+
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ labels = []
+ box_ls = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x + w, y + h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+ labels.append(CLASSES[cl])
+ box_ls.append((top, left, right, bottom))
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return labels, box_ls
+
+
+def load_model0(model_path, npu_id):
+ rknn = RKNNLite()
+ devs = rknn.list_devices()
+ device_id_dict = {}
+ for index, dev_id in enumerate(devs[-1]):
+ if dev_id[:2] != 'TS':
+ device_id_dict[0] = dev_id
+ if dev_id[:2] == 'TS':
+ device_id_dict[1] = dev_id
+
+ print('-->loading model : ' + model_path)
+ rknn.load_rknn(model_path)
+ print('--> Init runtime environment on: ' + device_id_dict[npu_id])
+ ret = rknn.init_runtime(device_id=device_id_dict[npu_id])
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+def load_rknn_model(PATH):
+ # Create RKNN object
+ rknn = RKNNLite()
+    # Load RKNN model
+ print('--> Loading model')
+ ret = rknn.load_rknn(PATH)
+ if ret != 0:
+ print('load rknn model failed')
+ exit(ret)
+ print('done')
+ #ret = rknn.init_runtime(device_id='TS018083200400178', rknn2precompile=True)
+ ret = rknn.init_runtime()
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+
+
+
+def predict(img_src, rknn):
+ img = cv2.resize(img_src, SIZE)
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ t0 = time.time()
+ print("img shape \t:", img.shape)
+ pred_onx = rknn.inference(inputs=[img])
+ print("time: \t", time.time() - t0)
+ boxes, classes, scores = [], [], []
+ for t in range(3):
+ input0_data = sigmoid(pred_onx[t][0])
+ input0_data = np.transpose(input0_data, (1, 2, 0, 3))
+ grid_h, grid_w, channel_n, predict_n = input0_data.shape
+ anchors = [ANCHORS[i] for i in MASKS[t]]
+ box_confidence = input0_data[..., 4]
+ box_confidence = np.expand_dims(box_confidence, axis=-1)
+ box_class_probs = input0_data[..., 5:]
+ box_xy = input0_data[..., :2]
+ box_wh = input0_data[..., 2:4]
+ col = np.tile(np.arange(0, grid_w), grid_h).reshape(-1, grid_w)
+ row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_w)
+ col = col.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ row = row.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ grid = np.concatenate((col, row), axis=-1)
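+        # Standard YOLOv5 v4+ decode (sigmoid was already applied above):
+        #   xy = (2*sig(t_xy) - 0.5 + grid) / grid_size
+        #   wh = (2*sig(t_wh))^2 * anchor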
+ box_xy = box_xy * 2 - 0.5 + grid
+ box_wh = (box_wh * 2) ** 2 * anchors
+        box_xy /= (grid_w, grid_h)  # normalize centers to [0, 1]
+        box_wh /= SIZE  # normalize width/height to [0, 1]
+        box_xy -= (box_wh / 2.)  # shift centers to top-left corners
+ box = np.concatenate((box_xy, box_wh), axis=-1)
+ res = filter_boxes(box, box_confidence, box_class_probs)
+ boxes.append(res[0])
+ classes.append(res[1])
+ scores.append(res[2])
+ boxes, classes, scores = np.concatenate(boxes), np.concatenate(classes), np.concatenate(scores)
+ print("------------------------boxes, classes, scores-----------------------",boxes, classes, scores)
+ nboxes, nclasses, nscores = [], [], []
+ for c in set(classes):
+ inds = np.where(classes == c)
+ b = boxes[inds]
+ c = classes[inds]
+ s = scores[inds]
+        keep = nms_boxes(b, s)
+        # keep = [0, 1, 2]  # earlier debugging shortcut; it crashes with fewer than 3 boxes
+        print("--------------keep-------------", keep)
+ nboxes.append(b[keep])
+ nclasses.append(c[keep])
+ nscores.append(s[keep])
+ if len(nboxes) < 1:
+ return [], [], []
+ boxes = np.concatenate(nboxes)
+ classes = np.concatenate(nclasses)
+ scores = np.concatenate(nscores)
+ return boxes, classes, scores
+ '''
+ label_list = []
+ box_list = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ x *= img_src.shape[1]
+ y *= img_src.shape[0]
+ w *= img_src.shape[1]
+ h *= img_src.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(img_src.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(img_src.shape[0], np.floor(y + h + 0.5).astype(int))
+ label_list.append(CLASSES[cl])
+ box_list.append((top, left, right, bottom))
+ return label_list, np.array(box_list)
+ '''
+
+
+
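+# NOTE: this second draw() shadows the one defined above; Python keeps this
+# later definition, which returns the annotated image instead of (labels, boxes).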
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ #print('class: {}, score: {}'.format(CLASSES[cl], score))
+ #print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x+w, y+h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x + 0.5).astype(int))
+ left = max(0, np.floor(y + 0.5).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+
+ # print('class: {}, score: {}'.format(CLASSES[cl], score))
+ # print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return image
+
+
+if __name__ == '__main__':
+ path = "./imgs/"
+ save_folder = "./result/"
+ #RKNN_MODEL_PATH = r"yolov5s-640-640.rknn"
+ #RKNN_MODEL_PATH = r"best_640x640.rknn"
+ RKNN_MODEL_PATH = r"23best_640x640.rknn"
+ rknn = load_rknn_model(RKNN_MODEL_PATH)
+ files = get_image_list(path)
+ current_time = time.localtime()
+ for image_name in files:
+ img = cv2.imread(image_name)
+        boxes, classes, scores = predict(img, rknn)
+ image = draw(img, boxes, scores, classes)
+ save_file_name = os.path.join(save_folder, os.path.basename(image_name))
+ cv2.imwrite(save_file_name,image)
+ print("--------------------------res-----------------------",boxes, classes, scores)
diff --git a/rknn_detect_yolov5_1.py b/rknn_detect_yolov5_1.py
new file mode 100644
index 0000000..95edbd7
--- /dev/null
+++ b/rknn_detect_yolov5_1.py
@@ -0,0 +1,399 @@
+#from rknn.api import RKNN
+from rknnlite.api import RKNNLite
+import cv2
+import numpy as np
+import time
+import os
+from PIL import Image
+"""
+yolov5 预测脚本 for rknn
+"""
+
+SIZE = (640, 640)
+Width = 640
+Height = 640
+CLASSES = ("lighting")
+OBJ_THRESH = 0.1
+NMS_THRESH = 0.1
+MASKS = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+ANCHORS = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
+
+
+
+IMAGE_EXT = [".jpg", "*.JPG", ".jpeg", ".webp", ".bmp", ".png"]
+
+def sigmoid(x):
+ return 1 / (1 + np.exp(-x))
+
+def letterbox_image(image, size):
+ iw, ih = image.size
+ w, h = size
+ scale = min(w / iw, h / ih)
+ nw = int(iw * scale)
+ nh = int(ih * scale)
+
+ image = np.array(image)
+ image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_LINEAR)
+ image = Image.fromarray(image)
+ new_image = Image.new('RGB', size, (128, 128, 128))
+ new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
+ return new_image
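+# letterbox_image scales an image to fit `size` while keeping its aspect ratio
+# and pads the remainder with gray (128, 128, 128); scale_coords() below undoes
+# this padding when mapping boxes back to the original image.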
+
+
+
+def get_image_list(path):
+ image_names = []
+ for maindir, subdir, file_name_list in os.walk(path):
+ for filename in file_name_list:
+ apath = os.path.join(maindir, filename)
+ ext = os.path.splitext(apath)[1]
+ if ext in IMAGE_EXT:
+ image_names.append(apath)
+ return image_names
+
+def filter_boxes(boxes, box_confidences, box_class_probs) -> (np.ndarray, np.ndarray, np.ndarray):
+    box_scores = box_confidences * box_class_probs  # conditional probability: class score scaled by the cell's objectness
+    box_classes = np.argmax(box_scores, axis=-1)  # index of the highest-scoring class
+    box_class_scores = np.max(box_scores, axis=-1)  # score of that class
+    pos = np.where(box_class_scores >= OBJ_THRESH)  # keep detections above the threshold
+    # pos = box_class_scores >= OBJ_THRESH  # boolean-mask alternative
+ boxes = boxes[pos]
+ classes = box_classes[pos]
+ scores = box_class_scores[pos]
+ return boxes, classes, scores
+
+
+def nms_boxes(boxes, scores):
+ x = boxes[:, 0]
+ y = boxes[:, 1]
+ w = boxes[:, 2]
+ h = boxes[:, 3]
+
+ areas = w * h
+ order = scores.argsort()[::-1]
+
+ keep = []
+ while order.size > 0:
+ i = order[0]
+ keep.append(i)
+
+ xx1 = np.maximum(x[i], x[order[1:]])
+ yy1 = np.maximum(y[i], y[order[1:]])
+ xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
+ yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
+
+ w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
+ h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
+ inter = w1 * h1
+
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= NMS_THRESH)[0]
+ order = order[inds + 1]
+ keep = np.array(keep)
+ return keep
+
+
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ labels = []
+ box_ls = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x + w, y + h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+ labels.append(CLASSES[cl])
+ box_ls.append((top, left, right, bottom))
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return labels, box_ls
+
+
+def load_model0(model_path, npu_id):
+ rknn = RKNNLite()
+ devs = rknn.list_devices()
+ device_id_dict = {}
+ for index, dev_id in enumerate(devs[-1]):
+ if dev_id[:2] != 'TS':
+ device_id_dict[0] = dev_id
+ if dev_id[:2] == 'TS':
+ device_id_dict[1] = dev_id
+
+ print('-->loading model : ' + model_path)
+ rknn.load_rknn(model_path)
+ print('--> Init runtime environment on: ' + device_id_dict[npu_id])
+ ret = rknn.init_runtime(device_id=device_id_dict[npu_id])
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+def load_rknn_model(PATH):
+ # Create RKNN object
+ rknn = RKNNLite()
+    # Load RKNN model
+ print('--> Loading model')
+ ret = rknn.load_rknn(PATH)
+ if ret != 0:
+ print('load rknn model failed')
+ exit(ret)
+ print('done')
+ #ret = rknn.init_runtime(device_id='TS018083200400178', rknn2precompile=True)
+ ret = rknn.init_runtime()
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+
+
+
+def predict(img_src, rknn):
+ img = cv2.resize(img_src, SIZE)
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ # Set inputs
+ #image = Image.open(img_src)
+
+ #img = letterbox_image(img_src, (Width, Height))
+ #img = np.array(img)
+
+ t0 = time.time()
+ print("img shape \t:", img.shape)
+ pred_onx = rknn.inference(inputs=[img])
+ print("time: \t", time.time() - t0)
+ boxes, classes, scores = [], [], []
+ for t in range(3):
+ input0_data = sigmoid(pred_onx[t][0])
+ input0_data = np.transpose(input0_data, (1, 2, 0, 3))
+ grid_h, grid_w, channel_n, predict_n = input0_data.shape
+ print("-------------------input0_data.shape----------------",input0_data.shape)
+ anchors = [ANCHORS[i] for i in MASKS[t]]
+ box_confidence = input0_data[..., 4]
+ box_confidence = np.expand_dims(box_confidence, axis=-1)
+ box_class_probs = input0_data[..., 5:]
+ box_xy = input0_data[..., :2]
+ box_wh = input0_data[..., 2:4]
+ col = np.tile(np.arange(0, grid_w), grid_h).reshape(-1, grid_w)
+ row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_w)
+ col = col.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ row = row.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ grid = np.concatenate((col, row), axis=-1)
+ box_xy = box_xy * 2 - 0.5 + grid
+ box_wh = (box_wh * 2) ** 2 * anchors
+        box_xy /= (grid_w, grid_h)  # normalize centers to [0, 1]
+        box_wh /= SIZE  # normalize width/height to [0, 1]
+        box_xy -= (box_wh / 2.)  # shift centers to top-left corners
+ box = np.concatenate((box_xy, box_wh), axis=-1)
+ res = filter_boxes(box, box_confidence, box_class_probs)
+ boxes.append(res[0])
+ classes.append(res[1])
+ scores.append(res[2])
+ boxes, classes, scores = np.concatenate(boxes), np.concatenate(classes), np.concatenate(scores)
+ #print("------------------------boxes, classes, scores-----------------------",boxes, classes, scores)
+ nboxes, nclasses, nscores = [], [], []
+ for c in set(classes):
+ inds = np.where(classes == c)
+ b = boxes[inds]
+ c = classes[inds]
+ s = scores[inds]
+ keep = nms_boxes(b, s)
+ #keep = [0,1,2]
+ #print("--------------keep-------------",keep)
+ nboxes.append(b[keep])
+ nclasses.append(c[keep])
+ nscores.append(s[keep])
+ if len(nboxes) < 1:
+ return [], [], []
+ boxes = np.concatenate(nboxes)
+ classes = np.concatenate(nclasses)
+ scores = np.concatenate(nscores)
+ print("------------------------boxes, classes, scores-----------------------",boxes, classes, scores)
+ return boxes, classes, scores
+ '''
+ label_list = []
+ box_list = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ x *= img_src.shape[1]
+ y *= img_src.shape[0]
+ w *= img_src.shape[1]
+ h *= img_src.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(img_src.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(img_src.shape[0], np.floor(y + h + 0.5).astype(int))
+ label_list.append(CLASSES[cl])
+ box_list.append((top, left, right, bottom))
+ return label_list, np.array(box_list)
+ '''
+
+
+
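+# NOTE: this second draw() shadows the one defined above; Python keeps this
+# later definition, which returns the annotated image instead of (labels, boxes).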
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x+w, y+h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x + 0.5).astype(int))
+ left = max(0, np.floor(y + 0.5).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+
+
+
+
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return image
+
+
+
+def clip_coords(boxes, img_shape):
+    # Clip xyxy bounding boxes to the image shape (height, width).
+    # np.clip returns a new array, so the result must be assigned back.
+    boxes[:, 0] = np.clip(boxes[:, 0], 0, img_shape[1])  # x1
+    boxes[:, 1] = np.clip(boxes[:, 1], 0, img_shape[0])  # y1
+    boxes[:, 2] = np.clip(boxes[:, 2], 0, img_shape[1])  # x2
+    boxes[:, 3] = np.clip(boxes[:, 3], 0, img_shape[0])  # y2
+    return boxes
+
+def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+
+ # Rescale coords (xyxy) from img1_shape to img0_shape
+ if ratio_pad is None: # calculate from img0_shape
+ gain = min(img1_shape[0]/img0_shape[0], img1_shape[1]/img0_shape[1]) # gain = old / new
+ print("------------gain-----------",gain)
+ pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
+ else:
+ gain = ratio_pad[0][0]
+ pad = ratio_pad[1]
+ print("-----------old-coords-----------",coords)
+ coords[:, [2]] = (coords[:, [0]] + coords[:, [2]]) * img1_shape[1] - pad[0] # x padding
+ coords[:, [3]] = (coords[:, [1]] + coords[:, [3]]) * img1_shape[0] - pad[1] # y padding
+ coords[:, [0]] = coords[:, [0]] * img1_shape[1] - pad[0] # x padding
+ coords[:, [1]] = coords[:, [1]] * img1_shape[0] - pad[1] # y padding
+ print("-----------new-coords-----------",coords)
+ print("------------pad-----------",pad)
+ coords[:, :4] /= gain
+
+ coords = clip_coords(coords, img0_shape)
+ return coords
+
+
+def display(boxes=None, classes=None, scores=None, image_src=None, input_size=(640, 640), line_thickness=None, text_bg_alpha=0.0):
+ labels = classes
+ boxs = boxes
+ confs = scores
+
+ h, w, c = image_src.shape
+ if len(boxes) <= 0:
+ return image_src
+
+
+
+ boxs[:, :] = scale_coords(input_size, boxs[:, :], (h, w)).round()
+
+ tl = line_thickness or round(0.002 * (w + h) / 2) + 1
+ for i, box in enumerate(boxs):
+ x1, y1, x2, y2 = box
+
+ ratio = (y2-y1)/(x2-x1)
+
+ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+ np.random.seed(int(labels[i]) + 2020)
+ color = (np.random.randint(0, 255), 0, np.random.randint(0, 255))
+ cv2.rectangle(image_src, (x1, y1), (x2, y2), color, max(int((w + h) / 600), 1), cv2.LINE_AA)
+ label = '{0:.3f}'.format(confs[i])
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=1)[0]
+ c2 = x1 + t_size[0] + 3, y1 - t_size[1] - 5
+ if text_bg_alpha == 0.0:
+ cv2.rectangle(image_src, (x1 - 1, y1), c2, color, cv2.FILLED, cv2.LINE_AA)
+ else:
+                # translucent text background
+                alphaReserve = text_bg_alpha  # 0: opaque, 1: fully transparent
+ BChannel, GChannel, RChannel = color
+ xMin, yMin = int(x1 - 1), int(y1 - t_size[1] - 3)
+ xMax, yMax = int(x1 + t_size[0]), int(y1)
+ image_src[yMin:yMax, xMin:xMax, 0] = image_src[yMin:yMax, xMin:xMax, 0] * alphaReserve + BChannel * (1 - alphaReserve)
+ image_src[yMin:yMax, xMin:xMax, 1] = image_src[yMin:yMax, xMin:xMax, 1] * alphaReserve + GChannel * (1 - alphaReserve)
+ image_src[yMin:yMax, xMin:xMax, 2] = image_src[yMin:yMax, xMin:xMax, 2] * alphaReserve + RChannel * (1 - alphaReserve)
+ cv2.putText(image_src, label, (x1 + 3, y1 - 4), 0, tl / 3, [255, 255, 255],
+ thickness=1, lineType=cv2.LINE_AA)
+ return image_src
+
+
+
+
+if __name__ == '__main__':
+ path = "./imgs/"
+ save_folder = "./result/"
+ #RKNN_MODEL_PATH = r"yolov5s-640-640.rknn"
+ #RKNN_MODEL_PATH = r"best_640x640.rknn"
+ RKNN_MODEL_PATH = r"23best_640x640.rknn"
+ rknn = load_rknn_model(RKNN_MODEL_PATH)
+ files = get_image_list(path)
+ current_time = time.localtime()
+ for image_name in files:
+ image_src = cv2.imread(image_name)
+ #image_src = Image.open(image_name)
+        boxes, classes, scores = predict(image_src, rknn)
+ '''
+ image = draw(img, boxes, scores, classes)
+ save_file_name = os.path.join(save_folder, os.path.basename(image_name))
+ cv2.imwrite(save_file_name,image)
+ '''
+ image = np.array(image_src)
+ save_image = display(boxes, classes, scores, image)
+        # image_src came from cv2.imread and is already BGR; no channel swap is needed before imwrite
+ save_file_name = os.path.join(save_folder, os.path.basename(image_name))
+ cv2.imwrite(save_file_name,save_image)
+
+ print("--------------------------res-----------------------",boxes, classes, scores)
diff --git a/rknn_detect_yolov5_best.py b/rknn_detect_yolov5_best.py
new file mode 100644
index 0000000..771bbd4
--- /dev/null
+++ b/rknn_detect_yolov5_best.py
@@ -0,0 +1,274 @@
+#from rknn.api import RKNN
+from rknnlite.api import RKNNLite
+import cv2
+import numpy as np
+import time
+import os
+from PIL import Image
+"""
+yolov5 预测脚本 for rknn
+"""
+
+SIZE = (640, 640)
+Width = 640
+Height = 640
+CLASSES = ("lighting")
+OBJ_THRESH = 0.1
+NMS_THRESH = 0.1
+MASKS = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+ANCHORS = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
+
+
+
+IMAGE_EXT = [".jpg", "*.JPG", ".jpeg", ".webp", ".bmp", ".png"]
+
+def sigmoid(x):
+ return 1 / (1 + np.exp(-x))
+
+def letterbox_image(image, size):
+ iw, ih = image.size
+ w, h = size
+ scale = min(w / iw, h / ih)
+ nw = int(iw * scale)
+ nh = int(ih * scale)
+
+ image = np.array(image)
+ image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_LINEAR)
+ image = Image.fromarray(image)
+ new_image = Image.new('RGB', size, (128, 128, 128))
+ new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
+ return new_image
+
+def get_image_list(path):
+ image_names = []
+ for maindir, subdir, file_name_list in os.walk(path):
+ for filename in file_name_list:
+ apath = os.path.join(maindir, filename)
+ ext = os.path.splitext(apath)[1]
+ if ext in IMAGE_EXT:
+ image_names.append(apath)
+ return image_names
+
+def filter_boxes(boxes, box_confidences, box_class_probs) -> (np.ndarray, np.ndarray, np.ndarray):
+    box_scores = box_confidences * box_class_probs  # conditional probability: class score scaled by the cell's objectness
+    box_classes = np.argmax(box_scores, axis=-1)  # index of the highest-scoring class
+    box_class_scores = np.max(box_scores, axis=-1)  # score of that class
+    pos = np.where(box_class_scores >= OBJ_THRESH)  # keep detections above the threshold
+    # pos = box_class_scores >= OBJ_THRESH  # boolean-mask alternative
+ boxes = boxes[pos]
+ classes = box_classes[pos]
+ scores = box_class_scores[pos]
+ return boxes, classes, scores
+
+
+def nms_boxes(boxes, scores):
+ x = boxes[:, 0]
+ y = boxes[:, 1]
+ w = boxes[:, 2]
+ h = boxes[:, 3]
+
+ areas = w * h
+ order = scores.argsort()[::-1]
+
+ keep = []
+ while order.size > 0:
+ i = order[0]
+ keep.append(i)
+
+ xx1 = np.maximum(x[i], x[order[1:]])
+ yy1 = np.maximum(y[i], y[order[1:]])
+ xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
+ yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
+
+ w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
+ h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
+ inter = w1 * h1
+
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= NMS_THRESH)[0]
+ order = order[inds + 1]
+ keep = np.array(keep)
+ return keep
+
+
+
+
+def load_rknn_model(PATH):
+ # Create RKNN object
+ rknn = RKNNLite()
+    # Load RKNN model
+ print('--> Loading model')
+ ret = rknn.load_rknn(PATH)
+ if ret != 0:
+ print('load rknn model failed')
+ exit(ret)
+ print('done')
+ #ret = rknn.init_runtime(device_id='TS018083200400178', rknn2precompile=True)
+ ret = rknn.init_runtime()
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+
+
+
+def predict(img_src, rknn):
+
+ img = letterbox_image(img_src, (Width, Height))
+ img = np.array(img)
+
+ t0 = time.time()
+ #print("img shape \t:", img.shape)
+ pred_onx = rknn.inference(inputs=[img])
+ print("--------------------time: \t", time.time() - t0)
+ boxes, classes, scores = [], [], []
+ for t in range(3):
+ input0_data = sigmoid(pred_onx[t][0])
+ input0_data = np.transpose(input0_data, (1, 2, 0, 3))
+ grid_h, grid_w, channel_n, predict_n = input0_data.shape
+ #print("-------------------input0_data.shape----------------",input0_data.shape)
+ anchors = [ANCHORS[i] for i in MASKS[t]]
+ box_confidence = input0_data[..., 4]
+ box_confidence = np.expand_dims(box_confidence, axis=-1)
+ box_class_probs = input0_data[..., 5:]
+ box_xy = input0_data[..., :2]
+ box_wh = input0_data[..., 2:4]
+ col = np.tile(np.arange(0, grid_w), grid_h).reshape(-1, grid_w)
+ row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_w)
+ col = col.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ row = row.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ grid = np.concatenate((col, row), axis=-1)
+ box_xy = box_xy * 2 - 0.5 + grid
+ box_wh = (box_wh * 2) ** 2 * anchors
+        box_xy /= (grid_w, grid_h)  # normalize centers to [0, 1]
+        box_wh /= SIZE  # normalize width/height to [0, 1]
+        box_xy -= (box_wh / 2.)  # shift centers to top-left corners
+ box = np.concatenate((box_xy, box_wh), axis=-1)
+ res = filter_boxes(box, box_confidence, box_class_probs)
+ boxes.append(res[0])
+ classes.append(res[1])
+ scores.append(res[2])
+ boxes, classes, scores = np.concatenate(boxes), np.concatenate(classes), np.concatenate(scores)
+ #print("------------------------boxes, classes, scores-----------------------",boxes, classes, scores)
+ nboxes, nclasses, nscores = [], [], []
+ for c in set(classes):
+ inds = np.where(classes == c)
+ b = boxes[inds]
+ c = classes[inds]
+ s = scores[inds]
+ keep = nms_boxes(b, s)
+ #keep = [0,1,2]
+ #print("--------------keep-------------",keep)
+ nboxes.append(b[keep])
+ nclasses.append(c[keep])
+ nscores.append(s[keep])
+ if len(nboxes) < 1:
+ return [], [], []
+ boxes = np.concatenate(nboxes)
+ classes = np.concatenate(nclasses)
+ scores = np.concatenate(nscores)
+ #print("------------------------boxes, classes, scores-----------------------",boxes, classes, scores)
+ return boxes, classes, scores
+
+
+
+
+def clip_coords(boxes, img_shape):
+    # Clip xyxy bounding boxes to the image shape (height, width).
+    # np.clip returns a new array, so the result must be assigned back.
+    boxes[:, 0] = np.clip(boxes[:, 0], 0, img_shape[1])  # x1
+    boxes[:, 1] = np.clip(boxes[:, 1], 0, img_shape[0])  # y1
+    boxes[:, 2] = np.clip(boxes[:, 2], 0, img_shape[1])  # x2
+    boxes[:, 3] = np.clip(boxes[:, 3], 0, img_shape[0])  # y2
+    return boxes
+
+def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+
+ # Rescale coords (xyxy) from img1_shape to img0_shape
+ if ratio_pad is None: # calculate from img0_shape
+ gain = min(img1_shape[0]/img0_shape[0], img1_shape[1]/img0_shape[1]) # gain = old / new
+ #print("------------gain-----------",gain)
+ pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
+ else:
+ gain = ratio_pad[0][0]
+ pad = ratio_pad[1]
+ #print("-----------old-coords-----------",coords)
+ coords[:, [2]] = (coords[:, [0]] + coords[:, [2]]) * img1_shape[1] - pad[0] # x padding
+ coords[:, [3]] = (coords[:, [1]] + coords[:, [3]]) * img1_shape[0] - pad[1] # y padding
+ coords[:, [0]] = coords[:, [0]] * img1_shape[1] - pad[0] # x padding
+ coords[:, [1]] = coords[:, [1]] * img1_shape[0] - pad[1] # y padding
+ #print("-----------new-coords-----------",coords)
+ #print("------------pad-----------",pad)
+ coords[:, :4] /= gain
+
+ coords = clip_coords(coords, img0_shape)
+ return coords
+
+
+def display(boxes=None, classes=None, scores=None, image_src=None, input_size=(640, 640), line_thickness=None, text_bg_alpha=0.0):
+ labels = classes
+ boxs = boxes
+ confs = scores
+
+ h, w, c = image_src.shape
+ if len(boxes) <= 0:
+ return image_src
+ boxs[:, :] = scale_coords(input_size, boxs[:, :], (h, w)).round()
+
+ tl = line_thickness or round(0.002 * (w + h) / 2) + 1
+ for i, box in enumerate(boxs):
+ x1, y1, x2, y2 = box
+
+ ratio = (y2-y1)/(x2-x1)
+
+ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
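+        # Seeding with the class id gives each class a stable, reproducible color.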
+ np.random.seed(int(labels[i]) + 2020)
+ color = (np.random.randint(0, 255), 0, np.random.randint(0, 255))
+ cv2.rectangle(image_src, (x1, y1), (x2, y2), color, max(int((w + h) / 600), 1), cv2.LINE_AA)
+ label = '{0:.3f}'.format(confs[i])
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=1)[0]
+ c2 = x1 + t_size[0] + 3, y1 - t_size[1] - 5
+ if text_bg_alpha == 0.0:
+ cv2.rectangle(image_src, (x1 - 1, y1), c2, color, cv2.FILLED, cv2.LINE_AA)
+ else:
+                # translucent text background
+                alphaReserve = text_bg_alpha  # 0: opaque, 1: fully transparent
+ BChannel, GChannel, RChannel = color
+ xMin, yMin = int(x1 - 1), int(y1 - t_size[1] - 3)
+ xMax, yMax = int(x1 + t_size[0]), int(y1)
+ image_src[yMin:yMax, xMin:xMax, 0] = image_src[yMin:yMax, xMin:xMax, 0] * alphaReserve + BChannel * (1 - alphaReserve)
+ image_src[yMin:yMax, xMin:xMax, 1] = image_src[yMin:yMax, xMin:xMax, 1] * alphaReserve + GChannel * (1 - alphaReserve)
+ image_src[yMin:yMax, xMin:xMax, 2] = image_src[yMin:yMax, xMin:xMax, 2] * alphaReserve + RChannel * (1 - alphaReserve)
+ cv2.putText(image_src, label, (x1 + 3, y1 - 4), 0, tl / 3, [255, 255, 255],
+ thickness=1, lineType=cv2.LINE_AA)
+ return image_src
+
+
+
+
+if __name__ == '__main__':
+ path = "./imgs/"
+ save_folder = "./result/"
+ RKNN_MODEL_PATH = r"23best_640x640.rknn"
+ rknn = load_rknn_model(RKNN_MODEL_PATH)
+ files = get_image_list(path)
+ current_time = time.localtime()
+ for image_name in files:
+ print("--------------------------image_name-----------------------", image_name)
+ image_src = Image.open(image_name)
+        boxes, classes, scores = predict(image_src, rknn)
+ image = np.array(image_src)
+ save_image = display(boxes, classes, scores, image)
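+        # The array came from PIL in RGB order; swap channels so cv2.imwrite,
+        # which expects BGR, writes correct colors.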
+ save_image = cv2.cvtColor(save_image, cv2.COLOR_BGR2RGB)
+ save_file_name = os.path.join(save_folder, os.path.basename(image_name))
+ cv2.imwrite(save_file_name,save_image)
+
+ print("--------------------------res-----------------------",boxes, classes, scores)
diff --git a/rknn_detect_yolov5_rtsp.py b/rknn_detect_yolov5_rtsp.py
new file mode 100644
index 0000000..0997f0c
--- /dev/null
+++ b/rknn_detect_yolov5_rtsp.py
@@ -0,0 +1,309 @@
+#from rknn.api import RKNN
+from rknnlite.api import RKNNLite
+import cv2
+import numpy as np
+import time
+import os
+"""
+yolov5 预测脚本 for rknn
+"""
+
+SIZE = (640, 640)
+CLASSES = ("lighting")
+OBJ_THRESH = 0.2
+NMS_THRESH = 0.45
+MASKS = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+ANCHORS = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
+
+def sigmoid(x):
+ return 1 / (1 + np.exp(-x))
+
+IMAGE_EXT = [".jpg", "*.JPG", ".jpeg", ".webp", ".bmp", ".png"]
+
+def get_image_list(path):
+ image_names = []
+ for maindir, subdir, file_name_list in os.walk(path):
+ for filename in file_name_list:
+ apath = os.path.join(maindir, filename)
+ ext = os.path.splitext(apath)[1]
+ if ext in IMAGE_EXT:
+ image_names.append(apath)
+ return image_names
+
+def filter_boxes(boxes, box_confidences, box_class_probs) -> (np.ndarray, np.ndarray, np.ndarray):
+    box_scores = box_confidences * box_class_probs  # conditional probability: class score scaled by the cell's objectness
+    box_classes = np.argmax(box_scores, axis=-1)  # index of the highest-scoring class
+    box_class_scores = np.max(box_scores, axis=-1)  # score of that class
+    pos = np.where(box_class_scores >= OBJ_THRESH)  # keep detections above the threshold
+    # pos = box_class_scores >= OBJ_THRESH  # boolean-mask alternative
+ boxes = boxes[pos]
+ classes = box_classes[pos]
+ scores = box_class_scores[pos]
+ return boxes, classes, scores
+
+
+def nms_boxes(boxes, scores):
+ x = boxes[:, 0]
+ y = boxes[:, 1]
+ w = boxes[:, 2]
+ h = boxes[:, 3]
+
+ areas = w * h
+ order = scores.argsort()[::-1]
+
+ keep = []
+ while order.size > 0:
+ i = order[0]
+ keep.append(i)
+
+ xx1 = np.maximum(x[i], x[order[1:]])
+ yy1 = np.maximum(y[i], y[order[1:]])
+ xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
+ yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
+
+ w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
+ h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
+ inter = w1 * h1
+
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= NMS_THRESH)[0]
+ order = order[inds + 1]
+ keep = np.array(keep)
+ return keep
+
+
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ labels = []
+ box_ls = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x + w, y + h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+ labels.append(CLASSES[cl])
+ box_ls.append((top, left, right, bottom))
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return labels, box_ls
+
+
+def load_model0(model_path, npu_id):
+ rknn = RKNNLite()
+ devs = rknn.list_devices()
+ device_id_dict = {}
+ for index, dev_id in enumerate(devs[-1]):
+ if dev_id[:2] != 'TS':
+ device_id_dict[0] = dev_id
+ if dev_id[:2] == 'TS':
+ device_id_dict[1] = dev_id
+
+ print('-->loading model : ' + model_path)
+ rknn.load_rknn(model_path)
+ print('--> Init runtime environment on: ' + device_id_dict[npu_id])
+ ret = rknn.init_runtime(device_id=device_id_dict[npu_id])
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+def load_rknn_model(PATH):
+ # Create RKNN object
+ rknn = RKNNLite()
+    # Load RKNN model
+ print('--> Loading model')
+ ret = rknn.load_rknn(PATH)
+ if ret != 0:
+ print('load rknn model failed')
+ exit(ret)
+ print('done')
+ #ret = rknn.init_runtime(device_id='TS018083200400178', rknn2precompile=True)
+ ret = rknn.init_runtime()
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+
+
+
+def predict(img_src, rknn):
+ img = cv2.resize(img_src, SIZE)
+ #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ t0 = time.time()
+ print("img shape \t:", img.shape)
+ pred_onx = rknn.inference(inputs=[img])
+ print("time: \t", time.time() - t0)
+ boxes, classes, scores = [], [], []
+ for t in range(3):
+ input0_data = sigmoid(pred_onx[t][0])
+ input0_data = np.transpose(input0_data, (1, 2, 0, 3))
+ grid_h, grid_w, channel_n, predict_n = input0_data.shape
+ anchors = [ANCHORS[i] for i in MASKS[t]]
+ box_confidence = input0_data[..., 4]
+ box_confidence = np.expand_dims(box_confidence, axis=-1)
+ box_class_probs = input0_data[..., 5:]
+ box_xy = input0_data[..., :2]
+ box_wh = input0_data[..., 2:4]
+ col = np.tile(np.arange(0, grid_w), grid_h).reshape(-1, grid_w)
+ row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_w)
+ col = col.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ row = row.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ grid = np.concatenate((col, row), axis=-1)
+ box_xy = box_xy * 2 - 0.5 + grid
+ box_wh = (box_wh * 2) ** 2 * anchors
+        box_xy /= (grid_w, grid_h)  # normalize centers to [0, 1]
+        box_wh /= SIZE  # normalize width/height to [0, 1]
+        box_xy -= (box_wh / 2.)  # shift centers to top-left corners
+ box = np.concatenate((box_xy, box_wh), axis=-1)
+ res = filter_boxes(box, box_confidence, box_class_probs)
+ boxes.append(res[0])
+ classes.append(res[1])
+ scores.append(res[2])
+ boxes, classes, scores = np.concatenate(boxes), np.concatenate(classes), np.concatenate(scores)
+ nboxes, nclasses, nscores = [], [], []
+ for c in set(classes):
+ inds = np.where(classes == c)
+ b = boxes[inds]
+ c = classes[inds]
+ s = scores[inds]
+ keep = nms_boxes(b, s)
+ nboxes.append(b[keep])
+ nclasses.append(c[keep])
+ nscores.append(s[keep])
+ if len(nboxes) < 1:
+ return [], [], []
+ boxes = np.concatenate(nboxes)
+ classes = np.concatenate(nclasses)
+ scores = np.concatenate(nscores)
+ return boxes, classes, scores
+ '''
+ label_list = []
+ box_list = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ x *= img_src.shape[1]
+ y *= img_src.shape[0]
+ w *= img_src.shape[1]
+ h *= img_src.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(img_src.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(img_src.shape[0], np.floor(y + h + 0.5).astype(int))
+ label_list.append(CLASSES[cl])
+ box_list.append((top, left, right, bottom))
+ return label_list, np.array(box_list)
+ '''
+
+
+
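+# NOTE: this second draw() shadows the one defined above; Python keeps this
+# later definition, which returns the annotated image instead of (labels, boxes).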
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ #print('class: {}, score: {}'.format(CLASSES[cl], score))
+ #print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x+w, y+h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x + 0.5).astype(int))
+ left = max(0, np.floor(y + 0.5).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+
+ # print('class: {}, score: {}'.format(CLASSES[cl], score))
+ # print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return image
+
+def cam1():
+ cap1 = cv2.VideoCapture('rtsp://192.168.1.136/live/119')
+    cap1.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # set the capture buffer size to 1
+ ret1, frame1 = cap1.read()
+ # cv2.imshow("frame1", frame1)
+ # cv2.waitKey(10)
+ cv2.imwrite('./imgs1/cam1.jpg', frame1)
+ cap1.release()
+ print('1')
+
+ # cv2.destroyAllWindows()
+ # cap.release()
+
+def cam2():
+ cap2 = cv2.VideoCapture('rtsp://192.168.1.136/live/137')
+    cap2.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # set the capture buffer size to 1
+ ret2, frame2 = cap2.read()
+ # cv2.imshow("frame2", frame2)
+ # cv2.waitKey(10)
+ cv2.imwrite('./imgs1/cam2.jpg', frame2)
+ print('2')
+ cap2.release()
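+# Each cam*() call opens a fresh RTSP connection, grabs a single frame, and
+# releases the capture; this avoids serving stale buffered frames at the cost
+# of one reconnect per capture.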
+
+
+if __name__ == '__main__':
+
+ path = "./imgs1/"
+ save_folder = "./result1/"
+ RKNN_MODEL_PATH = r"yolov5s-640-640.rknn"
+ rknn = load_rknn_model(RKNN_MODEL_PATH)
+
+
+ while True:
+ cam1()
+ cam2()
+        files = get_image_list(path)  # re-scan each round so freshly captured frames are picked up
+        current_time = time.localtime()
+ try:
+ for image_name in files:
+ img = cv2.imread(image_name)
+                boxes, classes, scores = predict(img, rknn)
+ image = draw(img, boxes, scores, classes)
+ save_file_name = os.path.join(save_folder, os.path.basename(image_name))
+ cv2.imwrite(save_file_name,image)
+ print("--------------------------res-----------------------",boxes, classes, scores)
+        except Exception as e:
+            print("continue:", e)
+
+
+
+
diff --git a/rknn_detect_yolov5_video.py b/rknn_detect_yolov5_video.py
new file mode 100644
index 0000000..8c0af66
--- /dev/null
+++ b/rknn_detect_yolov5_video.py
@@ -0,0 +1,291 @@
+#from rknn.api import RKNN
+from rknnlite.api import RKNNLite
+import cv2
+import numpy as np
+import time
+import os
+"""
+yolov5 预测脚本 for rknn
+"""
+
+SIZE = (640, 640)
+CLASSES = ("lighting")
+OBJ_THRESH = 0.2
+NMS_THRESH = 0.45
+MASKS = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+ANCHORS = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
+
+def sigmoid(x):
+ return 1 / (1 + np.exp(-x))
+
+IMAGE_EXT = [".jpg", "*.JPG", ".jpeg", ".webp", ".bmp", ".png"]
+
+def get_image_list(path):
+ image_names = []
+ for maindir, subdir, file_name_list in os.walk(path):
+ for filename in file_name_list:
+ apath = os.path.join(maindir, filename)
+ ext = os.path.splitext(apath)[1]
+ if ext in IMAGE_EXT:
+ image_names.append(apath)
+ return image_names
+
+def filter_boxes(boxes, box_confidences, box_class_probs) -> (np.ndarray, np.ndarray, np.ndarray):
+    box_scores = box_confidences * box_class_probs  # conditional probability: class score scaled by the cell's objectness
+    box_classes = np.argmax(box_scores, axis=-1)  # index of the highest-scoring class
+    box_class_scores = np.max(box_scores, axis=-1)  # score of that class
+    pos = np.where(box_class_scores >= OBJ_THRESH)  # keep detections above the threshold
+    # pos = box_class_scores >= OBJ_THRESH  # boolean-mask alternative
+ boxes = boxes[pos]
+ classes = box_classes[pos]
+ scores = box_class_scores[pos]
+ return boxes, classes, scores
+
+
+def nms_boxes(boxes, scores):
+ x = boxes[:, 0]
+ y = boxes[:, 1]
+ w = boxes[:, 2]
+ h = boxes[:, 3]
+
+ areas = w * h
+ order = scores.argsort()[::-1]
+
+ keep = []
+ while order.size > 0:
+ i = order[0]
+ keep.append(i)
+
+ xx1 = np.maximum(x[i], x[order[1:]])
+ yy1 = np.maximum(y[i], y[order[1:]])
+ xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
+ yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
+
+ w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
+ h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
+ inter = w1 * h1
+
+ ovr = inter / (areas[i] + areas[order[1:]] - inter)
+ inds = np.where(ovr <= NMS_THRESH)[0]
+ order = order[inds + 1]
+ keep = np.array(keep)
+ return keep
+
+
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ labels = []
+ box_ls = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x + w, y + h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+ print('class: {}, score: {}'.format(CLASSES[cl], score))
+ print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+ labels.append(CLASSES[cl])
+ box_ls.append((top, left, right, bottom))
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return labels, box_ls
+
+
+def load_model0(model_path, npu_id):
+ rknn = RKNNLite()
+ devs = rknn.list_devices()
+ device_id_dict = {}
+ for index, dev_id in enumerate(devs[-1]):
+ if dev_id[:2] != 'TS':
+ device_id_dict[0] = dev_id
+ if dev_id[:2] == 'TS':
+ device_id_dict[1] = dev_id
+
+ print('-->loading model : ' + model_path)
+ rknn.load_rknn(model_path)
+ print('--> Init runtime environment on: ' + device_id_dict[npu_id])
+ ret = rknn.init_runtime(device_id=device_id_dict[npu_id])
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+def load_rknn_model(PATH):
+ # Create RKNN object
+ rknn = RKNNLite()
+    # Load RKNN model
+ print('--> Loading model')
+ ret = rknn.load_rknn(PATH)
+ if ret != 0:
+ print('load rknn model failed')
+ exit(ret)
+ print('done')
+ #ret = rknn.init_runtime(device_id='TS018083200400178', rknn2precompile=True)
+ ret = rknn.init_runtime()
+ if ret != 0:
+ print('Init runtime environment failed')
+ exit(ret)
+ print('done')
+ return rknn
+
+
+
+
+
+def predict(img_src, rknn):
+ img = cv2.resize(img_src, SIZE)
+ #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ t0 = time.time()
+ print("img shape \t:", img.shape)
+ pred_onx = rknn.inference(inputs=[img])
+ print("time: \t", time.time() - t0)
+ boxes, classes, scores = [], [], []
+ for t in range(3):
+ input0_data = sigmoid(pred_onx[t][0])
+ input0_data = np.transpose(input0_data, (1, 2, 0, 3))
+ grid_h, grid_w, channel_n, predict_n = input0_data.shape
+ anchors = [ANCHORS[i] for i in MASKS[t]]
+ box_confidence = input0_data[..., 4]
+ box_confidence = np.expand_dims(box_confidence, axis=-1)
+ box_class_probs = input0_data[..., 5:]
+ box_xy = input0_data[..., :2]
+ box_wh = input0_data[..., 2:4]
+ col = np.tile(np.arange(0, grid_w), grid_h).reshape(-1, grid_w)
+ row = np.tile(np.arange(0, grid_h).reshape(-1, 1), grid_w)
+ col = col.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ row = row.reshape((grid_h, grid_w, 1, 1)).repeat(3, axis=-2)
+ grid = np.concatenate((col, row), axis=-1)
+ box_xy = box_xy * 2 - 0.5 + grid
+ box_wh = (box_wh * 2) ** 2 * anchors
+        box_xy /= (grid_w, grid_h)  # normalize centers to [0, 1]
+        box_wh /= SIZE  # normalize width/height to [0, 1]
+        box_xy -= (box_wh / 2.)  # shift centers to top-left corners
+ box = np.concatenate((box_xy, box_wh), axis=-1)
+ res = filter_boxes(box, box_confidence, box_class_probs)
+ boxes.append(res[0])
+ classes.append(res[1])
+ scores.append(res[2])
+ boxes, classes, scores = np.concatenate(boxes), np.concatenate(classes), np.concatenate(scores)
+ nboxes, nclasses, nscores = [], [], []
+ for c in set(classes):
+ inds = np.where(classes == c)
+ b = boxes[inds]
+ c = classes[inds]
+ s = scores[inds]
+ keep = nms_boxes(b, s)
+ nboxes.append(b[keep])
+ nclasses.append(c[keep])
+ nscores.append(s[keep])
+ if len(nboxes) < 1:
+ return [], [], []
+ boxes = np.concatenate(nboxes)
+ classes = np.concatenate(nclasses)
+ scores = np.concatenate(nscores)
+ return boxes, classes, scores
+ '''
+ label_list = []
+ box_list = []
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ x *= img_src.shape[1]
+ y *= img_src.shape[0]
+ w *= img_src.shape[1]
+ h *= img_src.shape[0]
+ top = max(0, np.floor(x).astype(int))
+ left = max(0, np.floor(y).astype(int))
+ right = min(img_src.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(img_src.shape[0], np.floor(y + h + 0.5).astype(int))
+ label_list.append(CLASSES[cl])
+ box_list.append((top, left, right, bottom))
+ return label_list, np.array(box_list)
+ '''
+
+
+
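+# NOTE: this second draw() shadows the one defined above; Python keeps this
+# later definition, which returns the annotated image instead of (labels, boxes).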
+def draw(image, boxes, scores, classes):
+ """Draw the boxes on the image.
+
+ # Argument:
+ image: original image.
+ boxes: ndarray, boxes of objects.
+ classes: ndarray, classes of objects.
+ scores: ndarray, scores of objects.
+ all_classes: all classes name.
+ """
+ for box, score, cl in zip(boxes, scores, classes):
+ x, y, w, h = box
+ #print('class: {}, score: {}'.format(CLASSES[cl], score))
+ #print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(x, y, x+w, y+h))
+ x *= image.shape[1]
+ y *= image.shape[0]
+ w *= image.shape[1]
+ h *= image.shape[0]
+ top = max(0, np.floor(x + 0.5).astype(int))
+ left = max(0, np.floor(y + 0.5).astype(int))
+ right = min(image.shape[1], np.floor(x + w + 0.5).astype(int))
+ bottom = min(image.shape[0], np.floor(y + h + 0.5).astype(int))
+
+ # print('class: {}, score: {}'.format(CLASSES[cl], score))
+ # print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom))
+
+ cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2)
+ cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
+ (top, left - 6),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6, (0, 0, 255), 2)
+ return image
+
+
+
+if __name__ == '__main__':
+
+ path = "./video1/"
+ save_folder = "./result2/"
+ RKNN_MODEL_PATH = r"yolov5s-640-640.rknn"
+ rknn = load_rknn_model(RKNN_MODEL_PATH)
+ files = get_image_list(path)
+
+
+ cap = cv2.VideoCapture(path+'202207120004.mp4')
+    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # set the capture buffer size to 1
+ ret, frame = cap.read()
+ i=1
+ while ret:
+
+ current_time = time.localtime()
+ ret, frame = cap.read()
+        boxes, classes, scores = predict(frame, rknn)
+ if len(classes)!=0:
+ image = draw(frame, boxes, scores, classes)
+ save_file_name = os.path.join(save_folder,'flash'+str(i)+'.jpg')
+ cv2.imwrite(save_file_name, image)
+ print("--------------------------res-----------------------", boxes, classes, scores)
+ print(i)
+ print("----------------闪电时间-----------------: 第",str(0.04*i),'秒')
+ i+=1
+
+
+
+
+
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..c9cc659
--- /dev/null
+++ b/test.py
@@ -0,0 +1,334 @@
+import argparse
+import json
+import os
+from pathlib import Path
+from threading import Thread
+
+import numpy as np
+import torch
+import yaml
+from tqdm import tqdm
+
+from models.experimental import attempt_load
+from utils.datasets import create_dataloader
+from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \
+ non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path
+from utils.loss import compute_loss
+from utils.metrics import ap_per_class, ConfusionMatrix
+from utils.plots import plot_images, output_to_target, plot_study_txt
+from utils.torch_utils import select_device, time_synchronized
+
+
+def test(data,
+ weights=None,
+ batch_size=32,
+ imgsz=640,
+ conf_thres=0.001,
+ iou_thres=0.6, # for NMS
+ save_json=False,
+ single_cls=False,
+ augment=False,
+ verbose=False,
+ model=None,
+ dataloader=None,
+ save_dir=Path(''), # for saving images
+ save_txt=False, # for auto-labelling
+ save_hybrid=False, # for hybrid auto-labelling
+ save_conf=False, # save auto-label confidences
+ plots=True,
+ log_imgs=0): # number of logged images
+
+ # Initialize/load model and set device
+ training = model is not None
+ if training: # called by train.py
+ device = next(model.parameters()).device # get model device
+
+ else: # called directly
+ set_logging()
+ device = select_device(opt.device, batch_size=batch_size)
+
+ # Directories
+ save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
+ (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+
+ # Load model
+ model = attempt_load(weights, map_location=device) # load FP32 model
+ imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
+
+ # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
+ # if device.type != 'cpu' and torch.cuda.device_count() > 1:
+ # model = nn.DataParallel(model)
+
+ # Half
+ half = device.type != 'cpu' # half precision only supported on CUDA
+ if half:
+ model.half()
+
+ # Configure
+ model.eval()
+ is_coco = data.endswith('coco.yaml') # is COCO dataset
+ with open(data) as f:
+        data = yaml.load(f, Loader=yaml.FullLoader)  # data dict
+ check_dataset(data) # check
+ nc = 1 if single_cls else int(data['nc']) # number of classes
+ iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
+ niou = iouv.numel()
+
+ # Logging
+    log_imgs, wandb = min(log_imgs, 100), None  # cap logged images at 100
+ try:
+ import wandb # Weights & Biases
+ except ImportError:
+ log_imgs = 0
+
+ # Dataloader
+ if not training:
+ img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
+ _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once
+ path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
+ dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0]
+
+ seen = 0
+ confusion_matrix = ConfusionMatrix(nc=nc)
+ names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
+ coco91class = coco80_to_coco91_class()
+ s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
+ p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
+ loss = torch.zeros(3, device=device)
+ jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
+ for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
+ img = img.to(device, non_blocking=True)
+ img = img.half() if half else img.float() # uint8 to fp16/32
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
+ targets = targets.to(device)
+ nb, _, height, width = img.shape # batch size, channels, height, width
+
+ with torch.no_grad():
+ # Run model
+ t = time_synchronized()
+ inf_out, train_out = model(img, augment=augment) # inference and training outputs
+ t0 += time_synchronized() - t
+
+ # Compute loss
+ if training:
+ loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls
+
+ # Run NMS
+ targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
+ lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
+ t = time_synchronized()
+ output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb)
+ t1 += time_synchronized() - t
+
+ # Statistics per image
+ for si, pred in enumerate(output):
+ labels = targets[targets[:, 0] == si, 1:]
+ nl = len(labels)
+ tcls = labels[:, 0].tolist() if nl else [] # target class
+ path = Path(paths[si])
+ seen += 1
+
+ if len(pred) == 0:
+ if nl:
+ stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
+ continue
+
+ # Predictions
+ predn = pred.clone()
+ scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
+
+ # Append to text file
+ if save_txt:
+ gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
+ for *xyxy, conf, cls in predn.tolist():
+ xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
+ line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
+ with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
+ f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+ # W&B logging
+ if plots and len(wandb_images) < log_imgs:
+ box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+ "class_id": int(cls),
+ "box_caption": "%s %.3f" % (names[cls], conf),
+ "scores": {"class_score": conf},
+ "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+ boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
+ wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
+
+ # Append to pycocotools JSON dictionary
+ if save_json:
+ # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
+ image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+ box = xyxy2xywh(predn[:, :4]) # xywh
+ box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
+ for d, b in zip(pred.tolist(), box.tolist()): # 'd' avoids shadowing precision 'p' above
+ jdict.append({'image_id': image_id,
+ 'category_id': coco91class[int(d[5])] if is_coco else int(d[5]),
+ 'bbox': [round(x, 3) for x in b],
+ 'score': round(d[4], 5)})
+
+ # Assign all predictions as incorrect
+ correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
+ if nl:
+ detected = [] # target indices
+ tcls_tensor = labels[:, 0]
+
+ # target boxes
+ tbox = xywh2xyxy(labels[:, 1:5])
+ scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
+ if plots:
+ confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1))
+
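+ # Greedy matching: per ground-truth class, IoU is computed between same-class
+ # predictions and targets; each prediction takes its highest-IoU target, matches with
+ # IoU > 0.5 are kept, and each target may be claimed at most once per image (detected_set)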
+ # Per target class
+ for cls in torch.unique(tcls_tensor):
+ ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices
+ pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices
+
+ # Search for detections
+ if pi.shape[0]:
+ # Prediction to target ious
+ ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
+
+ # Append detections
+ detected_set = set()
+ for j in (ious > iouv[0]).nonzero(as_tuple=False):
+ d = ti[i[j]] # detected target
+ if d.item() not in detected_set:
+ detected_set.add(d.item())
+ detected.append(d)
+ correct[pi[j]] = ious[j] > iouv # boolean row over the 10 IoU thresholds
+ if len(detected) == nl: # all targets already located in image
+ break
+
+ # Append statistics (correct, conf, pcls, tcls)
+ stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
+
+ # Plot images
+ if plots and batch_i < 3:
+ f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
+ Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
+ f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
+ Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start()
+
+ # Compute statistics
+ stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
+ if len(stats) and stats[0].any():
+ p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
+ p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95]
+ mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
+ nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
+ else:
+ nt = torch.zeros(1)
+
+ # Print results
+ pf = '%20s' + '%12.3g' * 6 # print format
+ print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
+
+ # Print results per class
+ if verbose and nc > 1 and len(stats):
+ for i, c in enumerate(ap_class):
+ print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
+
+ # Print speeds
+ t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # per-image speeds (ms)
+ if not training:
+ print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
+
+ # Plots
+ if plots:
+ confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
+ if wandb and wandb.run:
+ wandb.log({"Images": wandb_images})
+ wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]})
+
+ # Save JSON
+ if save_json and len(jdict):
+ w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
+ anno_json = '../coco/annotations/instances_val2017.json' # annotations json
+ pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
+ print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
+ with open(pred_json, 'w') as f:
+ json.dump(jdict, f)
+
+ try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+ from pycocotools.coco import COCO
+ from pycocotools.cocoeval import COCOeval
+
+ anno = COCO(anno_json) # init annotations api
+ pred = anno.loadRes(pred_json) # init predictions api
+ eval = COCOeval(anno, pred, 'bbox')
+ if is_coco:
+ eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
+ eval.evaluate()
+ eval.accumulate()
+ eval.summarize()
+ map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
+ except Exception as e:
+ print(f'pycocotools unable to run: {e}')
+
+ # Return results
+ if not training:
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+ print(f"Results saved to {save_dir}{s}")
+ model.float() # for training
+ maps = np.zeros(nc) + map
+ for i, c in enumerate(ap_class):
+ maps[c] = ap[i]
+ return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(prog='test.py')
+ parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
+ parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
+ parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
+ parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
+ parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
+ parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
+ parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
+ parser.add_argument('--augment', action='store_true', help='augmented inference')
+ parser.add_argument('--verbose', action='store_true', help='report mAP by class')
+ parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+ parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
+ parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+ parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
+ parser.add_argument('--project', default='runs/test', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ opt = parser.parse_args()
+ opt.save_json |= opt.data.endswith('coco.yaml')
+ opt.data = check_file(opt.data) # check file
+ print(opt)
+
+ if opt.task in ['val', 'test']: # run normally
+ test(opt.data,
+ opt.weights,
+ opt.batch_size,
+ opt.img_size,
+ opt.conf_thres,
+ opt.iou_thres,
+ opt.save_json,
+ opt.single_cls,
+ opt.augment,
+ opt.verbose,
+ save_txt=opt.save_txt | opt.save_hybrid,
+ save_hybrid=opt.save_hybrid,
+ save_conf=opt.save_conf,
+ )
+
+ elif opt.task == 'study': # run over a range of settings and save/plot
+ for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
+ f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to
+ x = list(range(320, 800, 64)) # x axis
+ y = [] # y axis
+ for i in x: # img-size
+ print('\nRunning %s point %s...' % (f, i))
+ r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
+ plots=False)
+ y.append(r + t) # results and times
+ np.savetxt(f, y, fmt='%10.4g') # save
+ os.system('zip -r study.zip study_*.txt')
+ plot_study_txt(f, x) # plot
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..b4d32b2
--- /dev/null
+++ b/train.py
@@ -0,0 +1,605 @@
+import argparse
+import logging
+import math
+import os
+import random
+import time
+from pathlib import Path
+from threading import Thread
+from warnings import warn
+
+import numpy as np
+import torch.distributed as dist
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+import torch.optim.lr_scheduler as lr_scheduler
+import torch.utils.data
+import yaml
+from torch.cuda import amp
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+from tqdm import tqdm
+
+import test # import test.py to get mAP after each epoch
+from models.experimental import attempt_load
+from models.yolo import Model
+from utils.autoanchor import check_anchors
+from utils.datasets import create_dataloader
+from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
+ fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
+ print_mutation, set_logging, one_cycle
+from utils.google_utils import attempt_download
+from utils.loss import compute_loss
+from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
+from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first
+
+logger = logging.getLogger(__name__)
+
+try:
+ import wandb
+except ImportError:
+ wandb = None
+ logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)")
+
+
+def train(hyp, opt, device, tb_writer=None, wandb=None):
+ logger.info(f'Hyperparameters {hyp}')
+ save_dir, epochs, batch_size, total_batch_size, weights, rank = \
+ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
+
+ # Directories
+ wdir = save_dir / 'weights'
+ wdir.mkdir(parents=True, exist_ok=True) # make dir
+ last = wdir / 'last.pt'
+ best = wdir / 'best.pt'
+ results_file = save_dir / 'results.txt'
+
+ # Save run settings
+ with open(save_dir / 'hyp.yaml', 'w') as f:
+ yaml.dump(hyp, f, sort_keys=False)
+ with open(save_dir / 'opt.yaml', 'w') as f:
+ yaml.dump(vars(opt), f, sort_keys=False)
+
+ # Configure
+ plots = not opt.evolve # create plots
+ cuda = device.type != 'cpu'
+ init_seeds(2 + rank)
+ with open(opt.data) as f:
+ data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict
+ with torch_distributed_zero_first(rank):
+ check_dataset(data_dict) # check
+ train_path = data_dict['train']
+ test_path = data_dict['val']
+ nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
+ names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
+ assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
+
+ # Model
+ pretrained = weights.endswith('.pt')
+ if pretrained:
+ with torch_distributed_zero_first(rank):
+ attempt_download(weights) # download if not found locally
+ ckpt = torch.load(weights, map_location=device) # load checkpoint
+ if hyp.get('anchors'):
+ ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor
+ model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create
+ exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys
+ state_dict = ckpt['model'].float().state_dict() # to FP32
+ state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
+ model.load_state_dict(state_dict, strict=False) # load
+ logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
+ else:
+ model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
+
+ # Freeze
+ freeze = [] # parameter names to freeze (full or partial)
+ for k, v in model.named_parameters():
+ v.requires_grad = True # train all layers
+ if any(x in k for x in freeze):
+ print('freezing %s' % k)
+ v.requires_grad = False
+
+ # Optimizer
+ nbs = 64 # nominal batch size
+ accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
+ hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
+ logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
+
+ pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
+ for k, v in model.named_modules():
+ if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
+ pg2.append(v.bias) # biases
+ if isinstance(v, nn.BatchNorm2d):
+ pg0.append(v.weight) # no decay
+ elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
+ pg1.append(v.weight) # apply decay
+
+ if opt.adam:
+ optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
+ else:
+ optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
+
+ optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
+ optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
+ logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
+ del pg0, pg1, pg2
+
+ # Scheduler https://arxiv.org/pdf/1812.01187.pdf
+ # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
+ lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
+ scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
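+ # assuming the cosine form in utils.general, lf(x) = (1 - cos(pi * x / epochs)) / 2 * (lrf - 1) + 1,
+ # so lr anneals smoothly from lr0 at epoch 0 to lr0 * hyp['lrf'] at the final epoch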
+ # plot_lr_scheduler(optimizer, scheduler, epochs)
+
+ # Logging
+ if rank in [-1, 0] and wandb and wandb.run is None:
+ opt.hyp = hyp # add hyperparameters
+ wandb_run = wandb.init(config=opt, resume="allow",
+ project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
+ name=save_dir.stem,
+ id=ckpt.get('wandb_id') if 'ckpt' in locals() else None)
+ loggers = {'wandb': wandb} # loggers dict
+
+ # Resume
+ start_epoch, best_fitness = 0, 0.0
+ if pretrained:
+ # Optimizer
+ if ckpt['optimizer'] is not None:
+ optimizer.load_state_dict(ckpt['optimizer'])
+ best_fitness = ckpt['best_fitness']
+
+ # Results
+ if ckpt.get('training_results') is not None:
+ with open(results_file, 'w') as file:
+ file.write(ckpt['training_results']) # write results.txt
+
+ # Epochs
+ start_epoch = ckpt['epoch'] + 1
+ if opt.resume:
+ assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
+ if epochs < start_epoch:
+ logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
+ (weights, ckpt['epoch'], epochs))
+ epochs += ckpt['epoch'] # finetune additional epochs
+
+ del ckpt, state_dict
+
+ # Image sizes
+ gs = int(model.stride.max()) # grid size (max stride)
+ nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
+ imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
+
+ # DP mode
+ if cuda and rank == -1 and torch.cuda.device_count() > 1:
+ model = torch.nn.DataParallel(model)
+
+ # SyncBatchNorm
+ if opt.sync_bn and cuda and rank != -1:
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
+ logger.info('Using SyncBatchNorm()')
+
+ # EMA
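+ # ModelEMA keeps an exponentially-decaying average of the model weights; the averaged
+ # copy (ema.ema) is what gets validated and checkpointed, and usually generalizes better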
+ ema = ModelEMA(model) if rank in [-1, 0] else None
+
+ # DDP mode
+ if cuda and rank != -1:
+ model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)
+
+ # Trainloader
+ dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
+ hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
+ world_size=opt.world_size, workers=opt.workers,
+ image_weights=opt.image_weights, quad=opt.quad)
+ mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
+ nb = len(dataloader) # number of batches
+ assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
+
+ # Process 0
+ if rank in [-1, 0]:
+ ema.updates = start_epoch * nb // accumulate # set EMA updates
+ testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, # testloader
+ hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True,
+ rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0]
+
+ if not opt.resume:
+ labels = np.concatenate(dataset.labels, 0)
+ c = torch.tensor(labels[:, 0]) # classes
+ # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
+ # model._initialize_biases(cf.to(device))
+ if plots:
+ plot_labels(labels, save_dir, loggers)
+ if tb_writer:
+ tb_writer.add_histogram('classes', c, 0)
+
+ # Anchors
+ if not opt.noautoanchor:
+ check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
+
+ # Model parameters
+ hyp['cls'] *= nc / 80. # scale hyp['cls'] to class count
+ hyp['obj'] *= imgsz ** 2 / 640. ** 2 * 3. / nl # scale hyp['obj'] to image size and output layers
+ model.nc = nc # attach number of classes to model
+ model.hyp = hyp # attach hyperparameters to model
+ model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
+ model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
+ model.names = names
+
+ # Start training
+ t0 = time.time()
+ nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(hyp['warmup_epochs'] epochs, 1k iterations)
+ # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
+ maps = np.zeros(nc) # mAP per class
+ results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
+ scheduler.last_epoch = start_epoch - 1 # do not move
+ scaler = amp.GradScaler(enabled=cuda)
+ logger.info('Image sizes %g train, %g test\n'
+ 'Using %g dataloader workers\nLogging results to %s\n'
+ 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs))
+ for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
+ model.train()
+
+ # Update image weights (optional)
+ if opt.image_weights:
+ # Generate indices
+ if rank in [-1, 0]:
+ cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
+ iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
+ dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
+ # Broadcast if DDP
+ if rank != -1:
+ indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
+ dist.broadcast(indices, 0)
+ if rank != 0:
+ dataset.indices = indices.cpu().numpy()
+
+ # Update mosaic border
+ # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
+ # dataset.mosaic_border = [b - imgsz, -b] # height, width borders
+
+ mloss = torch.zeros(4, device=device) # mean losses
+ if rank != -1:
+ dataloader.sampler.set_epoch(epoch)
+ pbar = enumerate(dataloader)
+ logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size'))
+ if rank in [-1, 0]:
+ pbar = tqdm(pbar, total=nb) # progress bar
+ optimizer.zero_grad()
+ for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
+ ni = i + nb * epoch # number integrated batches (since train start)
+ imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
+
+ # Warmup
+ if ni <= nw:
+ xi = [0, nw] # x interp
+ # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
+ accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
+ for j, x in enumerate(optimizer.param_groups):
+ # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
+ x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
+ if 'momentum' in x:
+ x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
+
+ # Multi-scale
+ if opt.multi_scale:
+ sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs # size (int args; randrange rejects floats)
+ sf = sz / max(imgs.shape[2:]) # scale factor
+ if sf != 1:
+ ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
+ imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
+
+ # Forward
+ with amp.autocast(enabled=cuda):
+ pred = model(imgs) # forward
+ loss, loss_items = compute_loss(pred, targets.to(device), model) # loss scaled by batch_size
+ if rank != -1:
+ loss *= opt.world_size # gradient averaged between devices in DDP mode
+ if opt.quad:
+ loss *= 4.
+
+ # Backward
+ scaler.scale(loss).backward()
+
+ # Optimize
+ if ni % accumulate == 0:
+ scaler.step(optimizer) # optimizer.step
+ scaler.update()
+ optimizer.zero_grad()
+ if ema:
+ ema.update(model)
+
+ # Print
+ if rank in [-1, 0]:
+ mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
+ mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
+ s = ('%10s' * 2 + '%10.4g' * 6) % (
+ '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
+ pbar.set_description(s)
+
+ # Plot
+ if plots and ni < 3:
+ f = save_dir / f'train_batch{ni}.jpg' # filename
+ Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
+ # if tb_writer:
+ # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
+ # tb_writer.add_graph(model, imgs) # add model to tensorboard
+ elif plots and ni == 3 and wandb:
+ wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]})
+
+ # end batch ------------------------------------------------------------------------------------------------
+ # end epoch ----------------------------------------------------------------------------------------------------
+
+ # Scheduler
+ lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
+ scheduler.step()
+
+ # DDP process 0 or single-GPU
+ if rank in [-1, 0]:
+ # mAP
+ if ema:
+ ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
+ final_epoch = epoch + 1 == epochs
+ if not opt.notest or final_epoch: # Calculate mAP
+ results, maps, times = test.test(opt.data,
+ batch_size=total_batch_size,
+ imgsz=imgsz_test,
+ model=ema.ema,
+ single_cls=opt.single_cls,
+ dataloader=testloader,
+ save_dir=save_dir,
+ plots=plots and final_epoch,
+ log_imgs=opt.log_imgs if wandb else 0)
+
+ # Write
+ with open(results_file, 'a') as f:
+ f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
+ if len(opt.name) and opt.bucket:
+ os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
+
+ # Log
+ tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
+ 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+ 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
+ 'x/lr0', 'x/lr1', 'x/lr2'] # params
+ for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
+ if tb_writer:
+ tb_writer.add_scalar(tag, x, epoch) # tensorboard
+ if wandb:
+ wandb.log({tag: x}) # W&B
+
+ # Update best mAP
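+ # fitness() collapses (P, R, mAP@.5, mAP@.5:.95) to one scalar; the weighting is assumed
+ # here to favor mAP@.5:.95 most heavily (roughly [0.0, 0.0, 0.1, 0.9])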
+ fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
+ if fi > best_fitness:
+ best_fitness = fi
+
+ # Save model
+ save = (not opt.nosave) or (final_epoch and not opt.evolve)
+ if save:
+ with open(results_file, 'r') as f: # create checkpoint
+ ckpt = {'epoch': epoch,
+ 'best_fitness': best_fitness,
+ 'training_results': f.read(),
+ 'model': ema.ema,
+ 'optimizer': None if final_epoch else optimizer.state_dict(),
+ 'wandb_id': wandb.run.id if wandb and wandb.run else None}
+
+ # Save last, best and delete
+ torch.save(ckpt, last)
+ if best_fitness == fi:
+ torch.save(ckpt, best)
+ del ckpt
+ # end epoch ----------------------------------------------------------------------------------------------------
+ # end training
+
+ if rank in [-1, 0]:
+ # Strip optimizers
+ final = best if best.exists() else last # final model
+ for f in [last, best]:
+ if f.exists():
+ strip_optimizer(f) # strip optimizers
+ if opt.bucket:
+ os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload
+
+ # Plots
+ if plots:
+ plot_results(save_dir=save_dir) # save as results.png
+ if wandb:
+ files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png']
+ wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files
+ if (save_dir / f).exists()]})
+ if opt.log_artifacts:
+ wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem)
+
+ # Test best.pt
+ logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
+ if opt.data.endswith('coco.yaml') and nc == 80: # if COCO
+ for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests
+ results, _, _ = test.test(opt.data,
+ batch_size=total_batch_size,
+ imgsz=imgsz_test,
+ conf_thres=conf,
+ iou_thres=iou,
+ model=attempt_load(final, device).half(),
+ single_cls=opt.single_cls,
+ dataloader=testloader,
+ save_dir=save_dir,
+ save_json=save_json,
+ plots=False)
+
+ else:
+ dist.destroy_process_group()
+
+ wandb.run.finish() if wandb and wandb.run else None
+ torch.cuda.empty_cache()
+ return results
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--weights', type=str, default='', help='initial weights path')
+ parser.add_argument('--cfg', type=str, default='models/yolov5m.yaml', help='model.yaml path')
+ parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
+ parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path')
+ parser.add_argument('--epochs', type=int, default=120)
+ parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
+ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
+ parser.add_argument('--rect', action='store_true', help='rectangular training')
+ parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+ parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+ parser.add_argument('--notest', action='store_true', help='only test final epoch')
+ parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
+ parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
+ parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+ parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
+ parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+ parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+ parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+ parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+ parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
+ parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+ parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
+ parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100')
+ parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model')
+ parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
+ parser.add_argument('--project', default='runs/train', help='save to project/name')
+ parser.add_argument('--name', default='exp', help='save to project/name')
+ parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+ parser.add_argument('--quad', action='store_true', help='quad dataloader')
+ opt = parser.parse_args()
+
+ # opt.image_weights = True
+ # opt.cache_images = True
+ # opt.notest = True
+
+ # Set DDP variables
+ opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
+ opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
+ set_logging(opt.global_rank)
+ if opt.global_rank in [-1, 0]:
+ check_git_status()
+
+ # Resume
+ if opt.resume: # resume an interrupted run
+ ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
+ assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
+ apriori = opt.global_rank, opt.local_rank
+ with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
+ opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
+ opt.cfg, opt.weights, opt.resume, opt.global_rank, opt.local_rank = '', ckpt, True, *apriori # reinstate
+ logger.info('Resuming training from %s' % ckpt)
+ else:
+ # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
+ opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
+ assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+ opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
+ opt.name = 'evolve' if opt.evolve else opt.name
+ opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run
+
+ # DDP mode
+ opt.total_batch_size = opt.batch_size
+ device = select_device(opt.device, batch_size=opt.batch_size)
+ if opt.local_rank != -1:
+ assert torch.cuda.device_count() > opt.local_rank
+ torch.cuda.set_device(opt.local_rank)
+ device = torch.device('cuda', opt.local_rank)
+ dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
+ assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
+ opt.batch_size = opt.total_batch_size // opt.world_size
+
+ # Hyperparameters
+ with open(opt.hyp) as f:
+ hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
+ if 'box' not in hyp:
+ warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' %
+ (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120'))
+ hyp['box'] = hyp.pop('giou')
+
+ # Train
+ logger.info(opt)
+ if not opt.evolve:
+ tb_writer = None # init loggers
+ if opt.global_rank in [-1, 0]:
+ logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/')
+ tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
+ train(hyp, opt, device, tb_writer, wandb)
+
+ # Evolve hyperparameters (optional)
+ else:
+ # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+ meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
+ 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
+ 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
+ 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
+ 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
+ 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
+ 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
+ 'box': (1, 0.02, 0.2), # box loss gain
+ 'cls': (1, 0.2, 4.0), # cls loss gain
+ 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
+ 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
+ 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
+ 'iou_t': (0, 0.1, 0.7), # IoU training threshold
+ 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
+ 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
+ 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
+ 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
+ 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
+ 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
+ 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
+ 'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
+ 'scale': (1, 0.0, 0.9), # image scale (+/- gain)
+ 'shear': (1, 0.0, 10.0), # image shear (+/- deg)
+ 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
+ 'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
+ 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
+ 'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
+ 'mixup': (1, 0.0, 1.0)} # image mixup (probability)
+
+ assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
+ opt.notest, opt.nosave = True, True # only test/save final epoch
+ # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
+ yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
+ if opt.bucket:
+ os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
+
+ for _ in range(300): # generations to evolve
+ if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
+ # Select parent(s)
+ parent = 'single' # parent selection method: 'single' or 'weighted'
+ x = np.loadtxt('evolve.txt', ndmin=2)
+ n = min(5, len(x)) # number of previous results to consider
+ x = x[np.argsort(-fitness(x))][:n] # top n mutations
+ w = fitness(x) - fitness(x).min() # weights
+ if parent == 'single' or len(x) == 1:
+ # x = x[random.randint(0, n - 1)] # random selection
+ x = x[random.choices(range(n), weights=w)[0]] # weighted selection
+ elif parent == 'weighted':
+ x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
+
+ # Mutate
+ mp, s = 0.8, 0.2 # mutation probability, sigma
+ npr = np.random
+ npr.seed(int(time.time()))
+ g = np.array([x[0] for x in meta.values()]) # gains 0-1
+ ng = len(meta)
+ v = np.ones(ng)
+ while all(v == 1): # mutate until a change occurs (prevent duplicates)
+ v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+ for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
+ hyp[k] = float(x[i + 7] * v[i]) # mutate (columns 0-6 of evolve.txt hold results; hyps start at column 7)
+
+ # Constrain to limits
+ for k, v in meta.items():
+ hyp[k] = max(hyp[k], v[1]) # lower limit
+ hyp[k] = min(hyp[k], v[2]) # upper limit
+ hyp[k] = round(hyp[k], 5) # significant digits
+
+ # Train mutation
+ results = train(hyp.copy(), opt, device, wandb=wandb)
+
+ # Write mutation results
+ print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
+
+ # Plot results
+ plot_evolution(yaml_file)
+ print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
+ f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/utils/activations.py b/utils/activations.py
new file mode 100644
index 0000000..954d2e1
--- /dev/null
+++ b/utils/activations.py
@@ -0,0 +1,72 @@
+# Activation functions
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+# SiLU (Swish) and Hardswish https://arxiv.org/pdf/1905.02244.pdf -----------------------------------------------------
+class SiLU(nn.Module): # export-friendly version of nn.SiLU()
+ @staticmethod
+ def forward(x):
+ return x * torch.sigmoid(x)
+
+
+class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
+ @staticmethod
+ def forward(x):
+ # return x * F.hardsigmoid(x) # for torchscript and CoreML
+ return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX
+
+
+class MemoryEfficientSwish(nn.Module):
+ class F(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x):
+ ctx.save_for_backward(x)
+ return x * torch.sigmoid(x)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ x = ctx.saved_tensors[0]
+ sx = torch.sigmoid(x)
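+ # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))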
+ return grad_output * (sx * (1 + x * (1 - sx)))
+
+ def forward(self, x):
+ return self.F.apply(x)
+
+
+# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
+class Mish(nn.Module):
+ @staticmethod
+ def forward(x):
+ return x * F.softplus(x).tanh()
+
+
+class MemoryEfficientMish(nn.Module):
+ class F(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, x):
+ ctx.save_for_backward(x)
+ return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x)))
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ x = ctx.saved_tensors[0]
+ sx = torch.sigmoid(x)
+ fx = F.softplus(x).tanh()
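+ # d/dx [x * tanh(softplus(x))] = tanh(sp(x)) + x * sigmoid(x) * (1 - tanh(sp(x))^2)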
+ return grad_output * (fx + x * sx * (1 - fx * fx))
+
+ def forward(self, x):
+ return self.F.apply(x)
+
+
+# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
+class FReLU(nn.Module):
+ def __init__(self, c1, k=3): # ch_in, kernel
+ super().__init__()
+ self.conv = nn.Conv2d(c1, c1, k, 1, k // 2, groups=c1, bias=False) # depthwise conv with 'same' padding for any odd k
+ self.bn = nn.BatchNorm2d(c1)
+
+ def forward(self, x):
+ return torch.max(x, self.bn(self.conv(x)))
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
new file mode 100644
index 0000000..badefc1
--- /dev/null
+++ b/utils/autoanchor.py
@@ -0,0 +1,152 @@
+# Auto-anchor utils
+
+import numpy as np
+import torch
+import yaml
+from scipy.cluster.vq import kmeans
+from tqdm import tqdm
+
+
+def check_anchor_order(m):
+ # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
+ a = m.anchor_grid.prod(-1).view(-1) # anchor area
+ da = a[-1] - a[0] # delta a
+ ds = m.stride[-1] - m.stride[0] # delta s
+ if da.sign() != ds.sign(): # anchor order does not match stride order
+ print('Reversing anchor order')
+ m.anchors[:] = m.anchors.flip(0)
+ m.anchor_grid[:] = m.anchor_grid.flip(0)
+
+
+def check_anchors(dataset, model, thr=4.0, imgsz=640):
+ # Check anchor fit to data, recompute if necessary
+ print('\nAnalyzing anchors... ', end='')
+ m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
+ shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+ scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
+ wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
+
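+ # Ratio metric: for each label wh, r = wh / anchor_wh; min(r, 1/r).min(2) is the worst-axis
+ # fit, and a label counts as recallable if its best anchor exceeds 1/thr (thr=4 -> within 4x)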
+ def metric(k): # compute metric
+ r = wh[:, None] / k[None]
+ x = torch.min(r, 1. / r).min(2)[0] # ratio metric
+ best = x.max(1)[0] # best_x
+ aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
+ bpr = (best > 1. / thr).float().mean() # best possible recall
+ return bpr, aat
+
+ bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
+ print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
+ if bpr < 0.98: # threshold to recompute
+ print('. Attempting to improve anchors, please wait...')
+ na = m.anchor_grid.numel() // 2 # number of anchors
+ new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
+ new_bpr = metric(new_anchors.reshape(-1, 2))[0]
+ if new_bpr > bpr: # replace anchors
+ new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
+ m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
+ m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
+ check_anchor_order(m)
+ print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
+ else:
+ print('Original anchors better than new anchors. Proceeding with original anchors.')
+ print('') # newline
+
+
+def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+ """ Creates kmeans-evolved anchors from training dataset
+
+ Arguments:
+ path: path to dataset *.yaml, or a loaded dataset
+ n: number of anchors
+ img_size: image size used for training
+ thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
+ gen: generations to evolve anchors using genetic algorithm
+ verbose: print all results
+
+ Return:
+ k: kmeans evolved anchors
+
+ Usage:
+ from utils.autoanchor import *; _ = kmean_anchors()
+ """
+ thr = 1. / thr
+
+ def metric(k, wh): # compute metrics
+ r = wh[:, None] / k[None]
+ x = torch.min(r, 1. / r).min(2)[0] # ratio metric
+ # x = wh_iou(wh, torch.tensor(k)) # iou metric
+ return x, x.max(1)[0] # x, best_x
+
+ def anchor_fitness(k): # mutation fitness
+ _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
+ return (best * (best > thr).float()).mean() # fitness
+
+ def print_results(k):
+ k = k[np.argsort(k.prod(1))] # sort small to large
+ x, best = metric(k, wh0)
+ bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
+ print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
+ print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
+ (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
+ for i, x in enumerate(k):
+ print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
+ return k
+
+ if isinstance(path, str): # *.yaml file
+ with open(path) as f:
+ data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
+ from utils.datasets import LoadImagesAndLabels
+ dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
+ else:
+ dataset = path # dataset
+
+ # Get label wh
+ shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+ wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
+
+ # Filter
+ i = (wh0 < 3.0).any(1).sum()
+ if i:
+ print('WARNING: Extremely small objects found. '
+ '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
+ wh = wh0[(wh0 >= 2.0).any(1)] # keep labels with width or height >= 2 pixels
+ # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
+
+ # Kmeans calculation
+ print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
+ s = wh.std(0) # sigmas for whitening
+ k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
+ k *= s
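+ # Whitening (dividing by per-dimension std) stops kmeans from being dominated by the
+ # higher-variance dimension; anchors are rescaled back to pixels afterwards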
+ wh = torch.tensor(wh, dtype=torch.float32) # filtered
+ wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
+ k = print_results(k)
+
+ # Plot
+ # k, d = [None] * 20, [None] * 20
+ # for i in tqdm(range(1, 21)):
+ # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
+ # ax = ax.ravel()
+ # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
+ # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
+ # ax[0].hist(wh[wh[:, 0]<100, 0],400)
+ # ax[1].hist(wh[wh[:, 1]<100, 1],400)
+ # fig.savefig('wh.png', dpi=200)
+
+ # Evolve
+ npr = np.random
+ f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, anchor shape, mutation probability, sigma
+ pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
+ for _ in pbar:
+ v = np.ones(sh)
+ while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
+ v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
+ kg = (k.copy() * v).clip(min=2.0)
+ fg = anchor_fitness(kg)
+ if fg > f:
+ f, k = fg, kg.copy()
+ pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
+ if verbose:
+ print_results(k)
+
+ return print_results(k)
diff --git a/utils/datasets.py b/utils/datasets.py
new file mode 100644
index 0000000..9001832
--- /dev/null
+++ b/utils/datasets.py
@@ -0,0 +1,1034 @@
+# Dataset utils and dataloaders
+
+import glob
+import logging
+import math
+import os
+import random
+import shutil
+import time
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+from threading import Thread
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+from PIL import Image, ExifTags
+from torch.utils.data import Dataset
+from tqdm import tqdm
+
+from utils.general import xyxy2xywh, xywh2xyxy, clean_str
+from utils.torch_utils import torch_distributed_zero_first
+
+# Parameters
+help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
+img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng'] # acceptable image suffixes
+vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
+logger = logging.getLogger(__name__)
+
+# Get orientation exif tag
+for orientation in ExifTags.TAGS.keys():
+ if ExifTags.TAGS[orientation] == 'Orientation':
+ break
+
+
+def get_hash(files):
+ # Returns a single hash value for a list of files (sum of file sizes; cheap change detection, not cryptographic)
+ return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
+
+
+def exif_size(img):
+ # Returns exif-corrected PIL size
+ s = img.size # (width, height)
+ try:
+ rotation = dict(img._getexif().items())[orientation]
+ if rotation == 6: # rotation 270
+ s = (s[1], s[0])
+ elif rotation == 8: # rotation 90
+ s = (s[1], s[0])
+ except Exception: # EXIF data missing or malformed
+ pass
+
+ return s
+
+
+def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
+ rank=-1, world_size=1, workers=8, image_weights=False, quad=False):
+ # Ensure only the first DDP process scans the dataset, so the others can reuse its cache
+ with torch_distributed_zero_first(rank):
+ dataset = LoadImagesAndLabels(path, imgsz, batch_size,
+ augment=augment, # augment images
+ hyp=hyp, # augmentation hyperparameters
+ rect=rect, # rectangular training
+ cache_images=cache,
+ single_cls=opt.single_cls,
+ stride=int(stride),
+ pad=pad,
+ rank=rank,
+ image_weights=image_weights)
+
+ batch_size = min(batch_size, len(dataset))
+ nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
+ sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
+ loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
+ # Use torch.utils.data.DataLoader() when dataset attributes change during training (e.g. image weights), else InfiniteDataLoader() to reuse workers
+ dataloader = loader(dataset,
+ batch_size=batch_size,
+ num_workers=nw,
+ sampler=sampler,
+ pin_memory=True,
+ collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
+ return dataloader, dataset
+
+
+class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
+ """ Dataloader that reuses workers
+
+ Uses same syntax as vanilla DataLoader
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
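+ # DataLoader forbids reassigning attributes after __init__, hence object.__setattr__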
+ self.iterator = super().__iter__()
+
+ def __len__(self):
+ return len(self.batch_sampler.sampler)
+
+ def __iter__(self):
+ for i in range(len(self)):
+ yield next(self.iterator)
+
+
+class _RepeatSampler(object):
+ """ Sampler that repeats forever
+
+ Args:
+ sampler (Sampler)
+ """
+
+ def __init__(self, sampler):
+ self.sampler = sampler
+
+ def __iter__(self):
+ while True:
+ yield from iter(self.sampler)
+
+
+class LoadImages: # for inference
+ def __init__(self, path, img_size=640):
+ p = str(Path(path)) # os-agnostic
+ p = os.path.abspath(p) # absolute path
+ if '*' in p:
+ files = sorted(glob.glob(p, recursive=True)) # glob
+ elif os.path.isdir(p):
+ files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
+ elif os.path.isfile(p):
+ files = [p] # files
+ else:
+ raise Exception('ERROR: %s does not exist' % p)
+
+ images = [x for x in files if x.split('.')[-1].lower() in img_formats]
+ videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
+ ni, nv = len(images), len(videos)
+
+ self.img_size = img_size
+ self.files = images + videos
+ self.nf = ni + nv # number of files
+ self.video_flag = [False] * ni + [True] * nv
+ self.mode = 'image'
+ if any(videos):
+ self.new_video(videos[0]) # new video
+ else:
+ self.cap = None
+ assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
+ (p, img_formats, vid_formats)
+
+ def __iter__(self):
+ self.count = 0
+ return self
+
+ def __next__(self):
+ if self.count == self.nf:
+ raise StopIteration
+ path = self.files[self.count]
+
+ if self.video_flag[self.count]:
+ # Read video
+ self.mode = 'video'
+ ret_val, img0 = self.cap.read()
+ if not ret_val:
+ self.count += 1
+ self.cap.release()
+ if self.count == self.nf: # last video
+ raise StopIteration
+ else:
+ path = self.files[self.count]
+ self.new_video(path)
+ ret_val, img0 = self.cap.read()
+
+ self.frame += 1
+ print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
+
+ else:
+ # Read image
+ self.count += 1
+ img0 = cv2.imread(path) # BGR
+ assert img0 is not None, 'Image Not Found ' + path
+ print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
+
+ # Padded resize
+ img = letterbox(img0, new_shape=self.img_size)[0]
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, HWC to CHW
+ img = np.ascontiguousarray(img)
+
+ return path, img, img0, self.cap
+
+ def new_video(self, path):
+ self.frame = 0
+ self.cap = cv2.VideoCapture(path)
+ self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+ def __len__(self):
+ return self.nf # number of files
+
+
+class LoadWebcam: # for inference
+ def __init__(self, pipe='0', img_size=640):
+ self.img_size = img_size
+
+ if pipe.isnumeric():
+ pipe = int(pipe) # local camera index (int() is safer than eval())
+ # pipe = 'rtsp://192.168.1.64/1' # IP camera
+ # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
+ # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
+
+ self.pipe = pipe
+ self.cap = cv2.VideoCapture(pipe) # video capture object
+ self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
+
+ def __iter__(self):
+ self.count = -1
+ return self
+
+ def __next__(self):
+ self.count += 1
+ if cv2.waitKey(1) == ord('q'): # q to quit
+ self.cap.release()
+ cv2.destroyAllWindows()
+ raise StopIteration
+
+ # Read frame
+ if self.pipe == 0: # local camera
+ ret_val, img0 = self.cap.read()
+ img0 = cv2.flip(img0, 1) # flip left-right
+ else: # IP camera
+ n = 0
+ while True:
+ n += 1
+ self.cap.grab()
+ if n % 30 == 0: # skip frames
+ ret_val, img0 = self.cap.retrieve()
+ if ret_val:
+ break
+
+ # Print
+ assert ret_val, 'Camera Error %s' % self.pipe
+ img_path = 'webcam.jpg'
+ print('webcam %g: ' % self.count, end='')
+
+ # Padded resize
+ img = letterbox(img0, new_shape=self.img_size)[0]
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, HWC to CHW
+ img = np.ascontiguousarray(img)
+
+ return img_path, img, img0, None
+
+ def __len__(self):
+ return 0
+
+
+class LoadStreams: # multiple IP or RTSP cameras
+ def __init__(self, sources='streams.txt', img_size=640):
+ self.mode = 'stream'
+ self.img_size = img_size
+
+ if os.path.isfile(sources):
+ with open(sources, 'r') as f:
+ sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
+ else:
+ sources = [sources]
+
+ n = len(sources)
+ self.imgs = [None] * n
+ self.sources = [clean_str(x) for x in sources] # clean source names for later
+ for i, s in enumerate(sources):
+ # Start the thread to read frames from the video stream
+ print('%g/%g: %s... ' % (i + 1, n, s), end='')
+ cap = cv2.VideoCapture(int(s) if s.isnumeric() else s) # int for local camera index
+ assert cap.isOpened(), 'Failed to open %s' % s
+ w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+ h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+ fps = cap.get(cv2.CAP_PROP_FPS) % 100
+ _, self.imgs[i] = cap.read() # guarantee first frame
+ thread = Thread(target=self.update, args=([i, cap]), daemon=True)
+ print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
+ thread.start()
+ print('') # newline
+
+ # check for common shapes
+ s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
+ self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
+ if not self.rect:
+ print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
+
+ def update(self, index, cap):
+ # Read next stream frame in a daemon thread
+ n = 0
+ while cap.isOpened():
+ n += 1
+ # _, self.imgs[index] = cap.read()
+ cap.grab()
+ if n == 4: # read every 4th frame
+ _, self.imgs[index] = cap.retrieve()
+ n = 0
+ time.sleep(0.01) # wait time
+
+ def __iter__(self):
+ self.count = -1
+ return self
+
+ def __next__(self):
+ self.count += 1
+ img0 = self.imgs.copy()
+ if cv2.waitKey(1) == ord('q'): # q to quit
+ cv2.destroyAllWindows()
+ raise StopIteration
+
+ # Letterbox
+ img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
+
+ # Stack
+ img = np.stack(img, 0)
+
+ # Convert
+ img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, BHWC to BCHW
+ img = np.ascontiguousarray(img)
+
+ return self.sources, img, img0, None
+
+ def __len__(self):
+ return 0 # streams are unbounded; __len__ is unused
+
+
+def img2label_paths(img_paths):
+ # Define label paths as a function of image paths
+ sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
+ return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
+
+
+class LoadImagesAndLabels(Dataset): # for training/testing
+ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
+ cache_images=False, single_cls=False, stride=32, pad=0.0, rank=-1):
+ self.img_size = img_size
+ self.augment = augment
+ self.hyp = hyp
+ self.image_weights = image_weights
+ self.rect = False if image_weights else rect
+ self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
+ self.mosaic_border = [-img_size // 2, -img_size // 2]
+ self.stride = stride
+
+ try:
+ f = [] # image files
+ for p in path if isinstance(path, list) else [path]:
+ p = Path(p) # os-agnostic
+ if p.is_dir(): # dir
+ f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+ elif p.is_file(): # file
+ with open(p, 'r') as t:
+ t = t.read().strip().splitlines()
+ parent = str(p.parent) + os.sep
+ f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
+ else:
+ raise Exception('%s does not exist' % p)
+ self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
+ assert self.img_files, 'No images found'
+ except Exception as e:
+ raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
+
+ # Check cache
+ self.label_files = img2label_paths(self.img_files) # labels
+ cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') # cached labels
+ if cache_path.is_file():
+ cache = torch.load(cache_path) # load
+ if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed
+ cache = self.cache_labels(cache_path) # re-cache
+ else:
+ cache = self.cache_labels(cache_path) # cache
+
+ # Display cache
+ [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total
+ desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+ tqdm(None, desc=desc, total=n, initial=n)
+ assert nf > 0 or not augment, f'No labels found in {cache_path}. Can not train without labels. See {help_url}'
+
+ # Read cache
+ cache.pop('hash') # remove hash
+ labels, shapes = zip(*cache.values())
+ self.labels = list(labels)
+ self.shapes = np.array(shapes, dtype=np.float64)
+ self.img_files = list(cache.keys()) # update
+ self.label_files = img2label_paths(cache.keys()) # update
+ if single_cls:
+ for x in self.labels:
+ x[:, 0] = 0
+
+ n = len(shapes) # number of images
+ bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index (np.int is removed in newer NumPy)
+ nb = bi[-1] + 1 # number of batches
+ self.batch = bi # batch index of image
+ self.n = n
+ self.indices = range(n)
+
+ # Rectangular Training
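+ # Images are sorted by aspect ratio and each batch is letterboxed to the smallest
+ # stride-multiple shape that fits its images, minimizing padding versus square training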
+ if self.rect:
+ # Sort by aspect ratio
+ s = self.shapes # wh
+ ar = s[:, 1] / s[:, 0] # aspect ratio
+ irect = ar.argsort()
+ self.img_files = [self.img_files[i] for i in irect]
+ self.label_files = [self.label_files[i] for i in irect]
+ self.labels = [self.labels[i] for i in irect]
+ self.shapes = s[irect] # wh
+ ar = ar[irect]
+
+ # Set training image shapes
+ shapes = [[1, 1]] * nb
+ for i in range(nb):
+ ari = ar[bi == i]
+ mini, maxi = ari.min(), ari.max()
+ if maxi < 1:
+ shapes[i] = [maxi, 1]
+ elif mini > 1:
+ shapes[i] = [1, 1 / mini]
+
+ self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
+
+ # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
+ self.imgs = [None] * n
+ if cache_images:
+ gb = 0 # Gigabytes of cached images
+ self.img_hw0, self.img_hw = [None] * n, [None] * n
+ results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
+ pbar = tqdm(enumerate(results), total=n)
+ for i, x in pbar:
+ self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
+ gb += self.imgs[i].nbytes
+ pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
+
+ def cache_labels(self, path=Path('./labels.cache')):
+ # Cache dataset labels, check images and read shapes
+ x = {} # dict
+ nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupted
+ pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
+ for i, (im_file, lb_file) in enumerate(pbar):
+ try:
+ # verify images
+ im = Image.open(im_file)
+ im.verify() # PIL verify
+ shape = exif_size(im) # image size
+ assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
+
+ # verify labels
+ if os.path.isfile(lb_file):
+ nf += 1 # label found
+ with open(lb_file, 'r') as f:
+ l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
+ if len(l):
+ assert l.shape[1] == 5, 'labels require 5 columns each'
+ assert (l >= 0).all(), 'negative labels'
+ assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
+ assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
+ else:
+ ne += 1 # label empty
+ l = np.zeros((0, 5), dtype=np.float32)
+ else:
+ nm += 1 # label missing
+ l = np.zeros((0, 5), dtype=np.float32)
+ x[im_file] = [l, shape]
+ except Exception as e:
+ nc += 1
+ print('WARNING: Ignoring corrupted image and/or label %s: %s' % (im_file, e))
+
+ pbar.desc = f"Scanning '{path.parent / path.stem}' for images and labels... " \
+ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+
+ if nf == 0:
+ print(f'WARNING: No labels found in {path}. See {help_url}')
+
+ x['hash'] = get_hash(self.label_files + self.img_files)
+ x['results'] = [nf, nm, ne, nc, i + 1]
+ torch.save(x, path) # save for next time
+ logging.info(f"New cache created: {path}")
+ return x
+
+ def __len__(self):
+ return len(self.img_files)
+
+ # def __iter__(self):
+ # self.count = -1
+ # print('ran dataset iter')
+ # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
+ # return self
+
+ def __getitem__(self, index):
+ index = self.indices[index] # linear, shuffled, or image_weights
+
+ hyp = self.hyp
+ mosaic = self.mosaic and random.random() < hyp['mosaic']
+ if mosaic:
+ # Load mosaic
+ img, labels = load_mosaic(self, index)
+ shapes = None
+
+ # MixUp https://arxiv.org/pdf/1710.09412.pdf
+ if random.random() < hyp['mixup']:
+ img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
+ r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
+ img = (img * r + img2 * (1 - r)).astype(np.uint8)
+ labels = np.concatenate((labels, labels2), 0)
+
+ else:
+ # Load image
+ img, (h0, w0), (h, w) = load_image(self, index)
+
+ # Letterbox
+ shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
+ img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
+ shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
+
+ # Load labels
+ labels = []
+ x = self.labels[index]
+ if x.size > 0:
+ # Normalized xywh to pixel xyxy format
+ labels = x.copy()
+ labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
+ labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
+ labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
+ labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
+
+ if self.augment:
+ # Augment imagespace
+ if not mosaic:
+ img, labels = random_perspective(img, labels,
+ degrees=hyp['degrees'],
+ translate=hyp['translate'],
+ scale=hyp['scale'],
+ shear=hyp['shear'],
+ perspective=hyp['perspective'])
+
+ # Augment colorspace
+ augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
+
+ # Apply cutouts
+ # if random.random() < 0.9:
+ # labels = cutout(img, labels)
+
+ nL = len(labels) # number of labels
+ if nL:
+ labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
+ labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
+ labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
+
+ if self.augment:
+ # flip up-down
+ if random.random() < hyp['flipud']:
+ img = np.flipud(img)
+ if nL:
+ labels[:, 2] = 1 - labels[:, 2]
+
+ # flip left-right
+ if random.random() < hyp['fliplr']:
+ img = np.fliplr(img)
+ if nL:
+ labels[:, 1] = 1 - labels[:, 1]
+
+ labels_out = torch.zeros((nL, 6))
+ if nL:
+ labels_out[:, 1:] = torch.from_numpy(labels)
+
+ # Convert
+ img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, HWC to CHW
+ img = np.ascontiguousarray(img)
+
+ return torch.from_numpy(img), labels_out, self.img_files[index], shapes
+
+ @staticmethod
+ def collate_fn(batch):
+ img, label, path, shapes = zip(*batch) # transposed
+ for i, l in enumerate(label):
+ l[:, 0] = i # add target image index for build_targets()
+ return torch.stack(img, 0), torch.cat(label, 0), path, shapes
+
+ @staticmethod
+ def collate_fn4(batch):
+ img, label, path, shapes = zip(*batch) # transposed
+ n = len(shapes) // 4
+ img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
+
+ ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
+ wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
+ s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
+ for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
+ i *= 4
+ if random.random() < 0.5:
+ im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear',
+ align_corners=False)[0].type(img[i].type())
+ l = label[i]
+ else:
+ im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
+ l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
+ img4.append(im)
+ label4.append(l)
+
+ for i, l in enumerate(label4):
+ l[:, 0] = i # add target image index for build_targets()
+
+ return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
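+
+ # Usage sketch (illustrative only): pass these as the DataLoader collate_fn so the per-image label
+ # tensors are concatenated with an image-index column for build_targets(), e.g.
+ # loader = torch.utils.data.DataLoader(dataset, batch_size=16, collate_fn=dataset.collate_fn)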
+
+
+# Ancillary functions --------------------------------------------------------------------------------------------------
+def load_image(self, index):
+ # loads 1 image from dataset, returns img, original hw, resized hw
+ img = self.imgs[index]
+ if img is None: # not cached
+ path = self.img_files[index]
+ img = cv2.imread(path) # BGR
+ assert img is not None, 'Image Not Found ' + path
+ h0, w0 = img.shape[:2] # orig hw
+ r = self.img_size / max(h0, w0) # resize image to img_size
+ if r != 1: # always resize down, only resize up if training with augmentation
+ interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
+ img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
+ return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
+ else:
+ return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
+
+
+def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
+ r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
+ hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
+ dtype = img.dtype # uint8
+
+ x = np.arange(0, 256, dtype=np.int16)
+ lut_hue = ((x * r[0]) % 180).astype(dtype)
+ lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
+ lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
+
+ img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
+ cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
+
+ # Histogram equalization
+ # if random.random() < 0.2:
+ # for i in range(3):
+ # img[:, :, i] = cv2.equalizeHist(img[:, :, i])
+
+
+def load_mosaic(self, index):
+ # loads images in a 4-mosaic
+
+ labels4 = []
+ s = self.img_size
+ yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
+ indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
+ for i, index in enumerate(indices):
+ # Load image
+ img, _, (h, w) = load_image(self, index)
+
+ # place img in img4
+ if i == 0: # top left
+ img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
+ x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
+ elif i == 1: # top right
+ x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
+ x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
+ elif i == 2: # bottom left
+ x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
+ x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
+ elif i == 3: # bottom right
+ x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
+ x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
+
+ img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
+ padw = x1a - x1b
+ padh = y1a - y1b
+
+ # Labels
+ x = self.labels[index]
+ labels = x.copy()
+ if x.size > 0: # Normalized xywh to pixel xyxy format
+ labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
+ labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
+ labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
+ labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
+ labels4.append(labels)
+
+ # Concat/clip labels
+ if len(labels4):
+ labels4 = np.concatenate(labels4, 0)
+ np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective
+ # img4, labels4 = replicate(img4, labels4) # replicate
+
+ # Augment
+ img4, labels4 = random_perspective(img4, labels4,
+ degrees=self.hyp['degrees'],
+ translate=self.hyp['translate'],
+ scale=self.hyp['scale'],
+ shear=self.hyp['shear'],
+ perspective=self.hyp['perspective'],
+ border=self.mosaic_border) # border to remove
+
+ return img4, labels4
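+
+# Note (geometry sketch): with mosaic_border = [-s // 2, -s // 2] the mosaic center (xc, yc) is sampled
+# from [s // 2, 2 * s - s // 2], and random_perspective(..., border=self.mosaic_border) crops the
+# 2s x 2s canvas back down to s x s, so the returned mosaic matches self.img_size.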
+
+
+def load_mosaic9(self, index):
+ # loads images in a 9-mosaic
+
+ labels9 = []
+ s = self.img_size
+ indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
+ for i, index in enumerate(indices):
+ # Load image
+ img, _, (h, w) = load_image(self, index)
+
+ # place img in img9
+ if i == 0: # center
+ img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 9 tiles
+ h0, w0 = h, w
+ c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
+ elif i == 1: # top
+ c = s, s - h, s + w, s
+ elif i == 2: # top right
+ c = s + wp, s - h, s + wp + w, s
+ elif i == 3: # right
+ c = s + w0, s, s + w0 + w, s + h
+ elif i == 4: # bottom right
+ c = s + w0, s + hp, s + w0 + w, s + hp + h
+ elif i == 5: # bottom
+ c = s + w0 - w, s + h0, s + w0, s + h0 + h
+ elif i == 6: # bottom left
+ c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
+ elif i == 7: # left
+ c = s - w, s + h0 - h, s, s + h0
+ elif i == 8: # top left
+ c = s - w, s + h0 - hp - h, s, s + h0 - hp
+
+ padx, pady = c[:2]
+ x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
+
+ # Labels
+ x = self.labels[index]
+ labels = x.copy()
+ if x.size > 0: # Normalized xywh to pixel xyxy format
+ labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padx
+ labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + pady
+ labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padx
+ labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + pady
+ labels9.append(labels)
+
+ # Image
+ img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
+ hp, wp = h, w # height, width previous
+
+ # Offset
+ yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y
+ img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
+
+ # Concat/clip labels
+ if len(labels9):
+ labels9 = np.concatenate(labels9, 0)
+ labels9[:, [1, 3]] -= xc
+ labels9[:, [2, 4]] -= yc
+
+ np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective
+ # img9, labels9 = replicate(img9, labels9) # replicate
+
+ # Augment
+ img9, labels9 = random_perspective(img9, labels9,
+ degrees=self.hyp['degrees'],
+ translate=self.hyp['translate'],
+ scale=self.hyp['scale'],
+ shear=self.hyp['shear'],
+ perspective=self.hyp['perspective'],
+ border=self.mosaic_border) # border to remove
+
+ return img9, labels9
+
+
+def replicate(img, labels):
+ # Replicate labels
+ h, w = img.shape[:2]
+ boxes = labels[:, 1:].astype(int)
+ x1, y1, x2, y2 = boxes.T
+ s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
+ for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
+ x1b, y1b, x2b, y2b = boxes[i]
+ bh, bw = y2b - y1b, x2b - x1b
+ yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
+ x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
+ img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
+ labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
+
+ return img, labels
+
+
+def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
+ # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
+ shape = img.shape[:2] # current shape [height, width]
+ if isinstance(new_shape, int):
+ new_shape = (new_shape, new_shape)
+
+ # Scale ratio (new / old)
+ r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+ if not scaleup: # only scale down, do not scale up (for better test mAP)
+ r = min(r, 1.0)
+
+ # Compute padding
+ ratio = r, r # width, height ratios
+ new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+ dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
+ if auto: # minimum rectangle
+ dw, dh = np.mod(dw, 32), np.mod(dh, 32) # wh padding
+ elif scaleFill: # stretch
+ dw, dh = 0.0, 0.0
+ new_unpad = (new_shape[1], new_shape[0])
+ ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
+
+ dw /= 2 # divide padding into 2 sides
+ dh /= 2
+
+ if shape[::-1] != new_unpad: # resize
+ img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+ top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+ left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+ img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
+ return img, ratio, (dw, dh)
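+
+# Usage sketch (illustrative, assumes a BGR image 'im' from cv2.imread and pixel xyxy boxes 'xyxy'):
+# im_lb, ratio, (dw, dh) = letterbox(im, new_shape=640) # pad to a 32-multiple rectangle
+# xyxy_lb = xyxy * ratio[0] + np.array([dw, dh, dw, dh]) # map boxes into the letterboxed frame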
+
+
+def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
+ # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
+ # targets = [cls, xyxy]
+
+ height = img.shape[0] + border[0] * 2 # shape(h,w,c)
+ width = img.shape[1] + border[1] * 2
+
+ # Center
+ C = np.eye(3)
+ C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
+ C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
+
+ # Perspective
+ P = np.eye(3)
+ P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
+ P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
+
+ # Rotation and Scale
+ R = np.eye(3)
+ a = random.uniform(-degrees, degrees)
+ # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
+ s = random.uniform(1 - scale, 1 + scale)
+ # s = 2 ** random.uniform(-scale, scale)
+ R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
+
+ # Shear
+ S = np.eye(3)
+ S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
+ S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
+
+ # Translation
+ T = np.eye(3)
+ T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
+ T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
+
+ # Combined rotation matrix
+ M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
+ if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
+ if perspective:
+ img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
+ else: # affine
+ img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
+
+ # Visualize
+ # import matplotlib.pyplot as plt
+ # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
+ # ax[0].imshow(img[:, :, ::-1]) # base
+ # ax[1].imshow(img2[:, :, ::-1]) # warped
+
+ # Transform label coordinates
+ n = len(targets)
+ if n:
+ # warp points
+ xy = np.ones((n * 4, 3))
+ xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
+ xy = xy @ M.T # transform
+ if perspective:
+ xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
+ else: # affine
+ xy = xy[:, :2].reshape(n, 8)
+
+ # create new boxes
+ x = xy[:, [0, 2, 4, 6]]
+ y = xy[:, [1, 3, 5, 7]]
+ xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+
+ # # apply angle-based reduction of bounding boxes
+ # radians = a * math.pi / 180
+ # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
+ # x = (xy[:, 2] + xy[:, 0]) / 2
+ # y = (xy[:, 3] + xy[:, 1]) / 2
+ # w = (xy[:, 2] - xy[:, 0]) * reduction
+ # h = (xy[:, 3] - xy[:, 1]) * reduction
+ # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
+
+ # clip boxes
+ xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
+ xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
+
+ # filter candidates
+ i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
+ targets = targets[i]
+ targets[:, 1:5] = xy[i]
+
+ return img, targets
+
+
+def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
+ # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+ w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
+ w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
+ ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
+ return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
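+
+# Example (illustrative): a box warped from 100x100 to 50x6 passes the wh and aspect-ratio checks
+# (6 > 2 px, ar = 50 / 6 ~ 8.3 < 20) but is rejected on area: 50 * 6 / (100 * 100) = 0.03 < 0.1.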
+
+
+def cutout(image, labels):
+ # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
+ h, w = image.shape[:2]
+
+ def bbox_ioa(box1, box2):
+ # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
+ box2 = box2.transpose()
+
+ # Get the coordinates of bounding boxes
+ b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+ b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+
+ # Intersection area
+ inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
+ (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
+
+ # box2 area
+ box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
+
+ # Intersection over box2 area
+ return inter_area / box2_area
+
+ # create random masks
+ scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
+ for s in scales:
+ mask_h = random.randint(1, int(h * s))
+ mask_w = random.randint(1, int(w * s))
+
+ # box
+ xmin = max(0, random.randint(0, w) - mask_w // 2)
+ ymin = max(0, random.randint(0, h) - mask_h // 2)
+ xmax = min(w, xmin + mask_w)
+ ymax = min(h, ymin + mask_h)
+
+ # apply random color mask
+ image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
+
+ # return unobscured labels
+ if len(labels) and s > 0.03:
+ box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
+ ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
+ labels = labels[ioa < 0.60] # remove >60% obscured labels
+
+ return labels
+
+
+def create_folder(path='./new'):
+ # Create folder
+ if os.path.exists(path):
+ shutil.rmtree(path) # delete output folder
+ os.makedirs(path) # make new output folder
+
+
+def flatten_recursive(path='../coco128'):
+ # Flatten a recursive directory by bringing all files to top level
+ new_path = Path(path + '_flat')
+ create_folder(new_path)
+ for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
+ shutil.copyfile(file, new_path / Path(file).name)
+
+
+def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
+ # Convert detection dataset into classification dataset, with one directory per class
+
+ path = Path(path) # images dir
+ shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
+ files = list(path.rglob('*.*'))
+ n = len(files) # number of files
+ for im_file in tqdm(files, total=n):
+ if im_file.suffix[1:] in img_formats:
+ # image
+ im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
+ h, w = im.shape[:2]
+
+ # labels
+ lb_file = Path(img2label_paths([str(im_file)])[0])
+ if Path(lb_file).exists():
+ with open(lb_file, 'r') as f:
+ lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
+
+ for j, x in enumerate(lb):
+ c = int(x[0]) # class
+ f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
+ if not f.parent.is_dir():
+ f.parent.mkdir(parents=True)
+
+ b = x[1:] * [w, h, w, h] # box
+ # b[2:] = b[2:].max() # rectangle to square
+ b[2:] = b[2:] * 1.2 + 3 # pad
+ b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
+
+ b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
+ b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
+ assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
+
+
+def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
+ """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
+ # Arguments
+ path: Path to images directory
+ weights: Train, val, test weights (list)
+ """
+ path = Path(path) # images dir
+ files = list(path.rglob('*.*'))
+ n = len(files) # number of files
+ indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
+ txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
+ [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
+ for i, img in tqdm(zip(indices, files), total=n):
+ if img.suffix[1:] in img_formats:
+ with open(path / txt[i], 'a') as f:
+ f.write(str(img) + '\n') # add image to txt file
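+
+# Usage sketch (illustrative): autosplit('../coco128', weights=(0.8, 0.2, 0.0)) writes
+# autosplit_train.txt and autosplit_val.txt next to the images, one image path per line,
+# ready to be referenced as train/val paths in a dataset yaml.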
diff --git a/utils/general.py b/utils/general.py
new file mode 100644
index 0000000..797587b
--- /dev/null
+++ b/utils/general.py
@@ -0,0 +1,451 @@
+# General utils
+
+import glob
+import logging
+import math
+import os
+import platform
+import random
+import re
+import subprocess
+import time
+from pathlib import Path
+
+import cv2
+import numpy as np
+import torch
+import torchvision
+import yaml
+
+from utils.google_utils import gsutil_getsize
+from utils.metrics import fitness
+from utils.torch_utils import init_torch_seeds
+
+# Settings
+torch.set_printoptions(linewidth=320, precision=5, profile='long')
+np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
+cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
+
+
+def set_logging(rank=-1):
+ logging.basicConfig(
+ format="%(message)s",
+ level=logging.INFO if rank in [-1, 0] else logging.WARN)
+
+
+def init_seeds(seed=0):
+ random.seed(seed)
+ np.random.seed(seed)
+ init_torch_seeds(seed)
+
+
+def get_latest_run(search_dir='.'):
+ # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
+ last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
+ return max(last_list, key=os.path.getctime) if last_list else ''
+
+
+def check_git_status():
+ # Suggest 'git pull' if repo is out of date
+ if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
+ s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
+ if 'Your branch is behind' in s:
+ print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
+
+
+def check_img_size(img_size, s=32):
+ # Verify img_size is a multiple of stride s
+ new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
+ if new_size != img_size:
+ print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
+ return new_size
+
+
+def check_file(file):
+ # Search for file if not found
+ if os.path.isfile(file) or file == '':
+ return file
+ else:
+ files = glob.glob('./**/' + file, recursive=True) # find file
+ assert len(files), 'File Not Found: %s' % file # assert file was found
+ assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
+ return files[0] # return file
+
+
+def check_dataset(data):
+ # Download dataset if not found locally
+ val, s = data.get('val'), data.get('download')
+ if val and len(val):
+ val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path
+ if not all(x.exists() for x in val):
+ print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
+ if s and len(s): # download script
+ print('Downloading %s ...' % s)
+ if s.startswith('http') and s.endswith('.zip'): # URL
+ f = Path(s).name # filename
+ torch.hub.download_url_to_file(s, f)
+ r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
+ else: # bash script
+ r = os.system(s)
+ print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
+ else:
+ raise Exception('Dataset not found.')
+
+
+def make_divisible(x, divisor):
+ # Returns x evenly divisible by divisor
+ return math.ceil(x / divisor) * divisor
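+
+# Example (illustrative): make_divisible(100, 32) -> 128, i.e. it rounds *up* to the nearest multiple.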
+
+
+def clean_str(s):
+ # Cleans a string by replacing special characters with underscore _
+ return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
+
+
+def one_cycle(y1=0.0, y2=1.0, steps=100):
+ # lambda function for sinusoidal ramp from y1 to y2
+ return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
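+
+# Usage sketch (illustrative): the returned lambda is intended for a LambdaLR scheduler, e.g.
+# lf = one_cycle(1.0, 0.2, epochs) # cosine ramp from 1.0 to 0.2 over 'epochs' steps
+# scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)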
+
+
+def labels_to_class_weights(labels, nc=80):
+ # Get class weights (inverse frequency) from training labels
+ if labels[0] is None: # no labels loaded
+ return torch.Tensor()
+
+ labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
+ classes = labels[:, 0].astype(int) # labels = [class xywh]
+ weights = np.bincount(classes, minlength=nc) # occurrences per class
+
+ # Prepend gridpoint count (for uCE training)
+ # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
+ # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
+
+ weights[weights == 0] = 1 # replace empty bins with 1
+ weights = 1 / weights # number of targets per class
+ weights /= weights.sum() # normalize
+ return torch.from_numpy(weights)
+
+
+def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
+ # Produces image weights based on class_weights and image contents
+ class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
+ image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
+ # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
+ return image_weights
+
+
+def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
+ # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
+ # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
+ # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
+ # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
+ # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
+ x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
+ return x
+
+
+def xyxy2xywh(x):
+ # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+ y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
+ y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
+ y[:, 2] = x[:, 2] - x[:, 0] # width
+ y[:, 3] = x[:, 3] - x[:, 1] # height
+ return y
+
+
+def xywh2xyxy(x):
+ # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+ y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+ y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
+ y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
+ y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
+ y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
+ return y
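+
+# Example (illustrative): the two conversions are inverses; for a 20x40 box centered at (50, 50):
+# xywh2xyxy(np.array([[50., 50., 20., 40.]])) # -> [[40., 30., 60., 70.]]
+# xyxy2xywh(np.array([[40., 30., 60., 70.]])) # -> [[50., 50., 20., 40.]]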
+
+
+def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+ # Rescale coords (xyxy) from img1_shape to img0_shape
+ if ratio_pad is None: # calculate from img0_shape
+ gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
+ pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
+ else:
+ gain = ratio_pad[0][0]
+ pad = ratio_pad[1]
+
+ coords[:, [0, 2]] -= pad[0] # x padding
+ coords[:, [1, 3]] -= pad[1] # y padding
+ coords[:, :4] /= gain
+ clip_coords(coords, img0_shape)
+ return coords
+
+
+def clip_coords(boxes, img_shape):
+ # Clip bounding xyxy bounding boxes to image shape (height, width)
+ boxes[:, 0].clamp_(0, img_shape[1]) # x1
+ boxes[:, 1].clamp_(0, img_shape[0]) # y1
+ boxes[:, 2].clamp_(0, img_shape[1]) # x2
+ boxes[:, 3].clamp_(0, img_shape[0]) # y2
+
+
+def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
+ # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
+ box2 = box2.T
+
+ # Get the coordinates of bounding boxes
+ if x1y1x2y2: # x1, y1, x2, y2 = box1
+ b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
+ b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
+ else: # transform from xywh to xyxy
+ b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
+ b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
+ b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
+ b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
+
+ # Intersection area
+ inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
+ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
+
+ # Union Area
+ w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
+ w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
+ union = w1 * h1 + w2 * h2 - inter + eps
+
+ iou = inter / union
+ if GIoU or DIoU or CIoU:
+ cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
+ ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
+ if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
+ c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
+ rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
+ (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
+ if DIoU:
+ return iou - rho2 / c2 # DIoU
+ elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
+ v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
+ with torch.no_grad():
+ alpha = v / ((1 + eps) - iou + v)
+ return iou - (rho2 / c2 + v * alpha) # CIoU
+ else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
+ c_area = cw * ch + eps # convex area
+ return iou - (c_area - union) / c_area # GIoU
+ else:
+ return iou # IoU
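+
+# Example (illustrative): plain IoU of two overlapping xyxy boxes:
+# bbox_iou(torch.tensor([0., 0., 10., 10.]), torch.tensor([[5., 5., 15., 15.]]))
+# inter = 5 * 5 = 25, union = 100 + 100 - 25 = 175, so IoU ~ 0.143 (eps terms aside)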
+
+
+def box_iou(box1, box2):
+ # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+ """
+ Return intersection-over-union (Jaccard index) of boxes.
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+ Arguments:
+ box1 (Tensor[N, 4])
+ box2 (Tensor[M, 4])
+ Returns:
+ iou (Tensor[N, M]): the NxM matrix containing the pairwise
+ IoU values for every element in boxes1 and boxes2
+ """
+
+ def box_area(box):
+ # box = 4xn
+ return (box[2] - box[0]) * (box[3] - box[1])
+
+ area1 = box_area(box1.T)
+ area2 = box_area(box2.T)
+
+ # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
+ inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
+ return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
+
+
+def wh_iou(wh1, wh2):
+ # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
+ wh1 = wh1[:, None] # [N,1,2]
+ wh2 = wh2[None] # [1,M,2]
+ inter = torch.min(wh1, wh2).prod(2) # [N,M]
+ return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
+
+
+def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
+ """Performs Non-Maximum Suppression (NMS) on inference results
+
+ Returns:
+ detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
+ """
+
+ nc = prediction.shape[2] - 5 # number of classes
+ xc = prediction[..., 4] > conf_thres # candidates
+
+ # Settings
+ min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
+ max_det = 300 # maximum number of detections per image
+ max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
+ time_limit = 10.0 # seconds to quit after
+ redundant = True # require redundant detections
+ multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
+ merge = False # use merge-NMS
+
+ t = time.time()
+ output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
+ for xi, x in enumerate(prediction): # image index, image inference
+ # Apply constraints
+ # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
+ x = x[xc[xi]] # confidence
+
+ # Cat apriori labels if autolabelling
+ if labels and len(labels[xi]):
+ l = labels[xi]
+ v = torch.zeros((len(l), nc + 5), device=x.device)
+ v[:, :4] = l[:, 1:5] # box
+ v[:, 4] = 1.0 # conf
+ v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
+ x = torch.cat((x, v), 0)
+
+ # If none remain process next image
+ if not x.shape[0]:
+ continue
+
+ # Compute conf
+ x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
+
+ # Box (center x, center y, width, height) to (x1, y1, x2, y2)
+ box = xywh2xyxy(x[:, :4])
+
+ # Detections matrix nx6 (xyxy, conf, cls)
+ if multi_label:
+ i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
+ x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+ else: # best class only
+ conf, j = x[:, 5:].max(1, keepdim=True)
+ x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+
+ # Filter by class
+ if classes is not None:
+ x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
+
+ # Apply finite constraint
+ # if not torch.isfinite(x).all():
+ # x = x[torch.isfinite(x).all(1)]
+
+ # Check shape
+ n = x.shape[0] # number of boxes
+ if not n: # no boxes
+ continue
+ elif n > max_nms: # excess boxes
+ x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
+
+ # Batched NMS
+ c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
+ boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
+ i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
+ if i.shape[0] > max_det: # limit detections
+ i = i[:max_det]
+ if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
+ # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
+ iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
+ weights = iou * scores[None] # box weights
+ x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
+ if redundant:
+ i = i[iou.sum(1) > 1] # require redundancy
+
+ output[xi] = x[i]
+ if (time.time() - t) > time_limit:
+ print(f'WARNING: NMS time limit {time_limit}s exceeded')
+ break # time limit exceeded
+
+ return output
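+
+# Usage sketch (illustrative, assumes 'model' and a preprocessed batch 'img' exist):
+# pred = model(img)[0] # raw predictions, shape (batch, n_boxes, 5 + nc)
+# det = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0] # nx6 (xyxy, conf, cls) for image 0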
+
+
+def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer()
+ # Strip optimizer from 'f' to finalize training, optionally save as 's'
+ x = torch.load(f, map_location=torch.device('cpu'))
+ x['optimizer'] = None
+ x['training_results'] = None
+ x['epoch'] = -1
+ x['model'].half() # to FP16
+ for p in x['model'].parameters():
+ p.requires_grad = False
+ torch.save(x, s or f)
+ mb = os.path.getsize(s or f) / 1E6 # filesize
+ print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
+
+
+def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
+ # Print mutation results to evolve.txt (for use with train.py --evolve)
+ a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
+ b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
+ c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
+ print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
+
+ if bucket:
+ url = 'gs://%s/evolve.txt' % bucket
+ if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
+ os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
+
+ with open('evolve.txt', 'a') as f: # append result
+ f.write(c + b + '\n')
+ x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
+ x = x[np.argsort(-fitness(x))] # sort
+ np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
+
+ # Save yaml
+ for i, k in enumerate(hyp.keys()):
+ hyp[k] = float(x[0, i + 7])
+ with open(yaml_file, 'w') as f:
+ results = tuple(x[0, :7])
+ c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
+ f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
+ yaml.dump(hyp, f, sort_keys=False)
+
+ if bucket:
+ os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
+
+
+def apply_classifier(x, model, img, im0):
+ # applies a second stage classifier to yolo outputs
+ im0 = [im0] if isinstance(im0, np.ndarray) else im0
+ for i, d in enumerate(x): # per image
+ if d is not None and len(d):
+ d = d.clone()
+
+ # Reshape and pad cutouts
+ b = xyxy2xywh(d[:, :4]) # boxes
+ b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
+ b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
+ d[:, :4] = xywh2xyxy(b).long()
+
+ # Rescale boxes from img_size to im0 size
+ scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
+
+ # Classes
+ pred_cls1 = d[:, 5].long()
+ ims = []
+ for j, a in enumerate(d): # per item
+ cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
+ im = cv2.resize(cutout, (224, 224)) # BGR
+ # cv2.imwrite('test%i.jpg' % j, cutout)
+
+ im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, HWC to CHW
+ im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
+ im /= 255.0 # 0 - 255 to 0.0 - 1.0
+ ims.append(im)
+
+ pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
+ x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
+
+ return x
+
+
+def increment_path(path, exist_ok=True, sep=''):
+ # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
+ path = Path(path) # os-agnostic
+ if (path.exists() and exist_ok) or (not path.exists()):
+ return str(path)
+ else:
+ dirs = glob.glob(f"{path}{sep}*") # similar paths
+ matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
+ i = [int(m.groups()[0]) for m in matches if m] # indices
+ n = max(i) + 1 if i else 2 # increment number
+ return f"{path}{sep}{n}" # update path
diff --git a/utils/google_app_engine/Dockerfile b/utils/google_app_engine/Dockerfile
new file mode 100644
index 0000000..0155618
--- /dev/null
+++ b/utils/google_app_engine/Dockerfile
@@ -0,0 +1,25 @@
+FROM gcr.io/google-appengine/python
+
+# Create a virtualenv for dependencies. This isolates these packages from
+# system-level packages.
+# Use -p python3 or -p python3.7 to select the Python version (the default is Python 2).
+RUN virtualenv /env -p python3
+
+# Setting these environment variables is the same as running
+# 'source /env/bin/activate'.
+ENV VIRTUAL_ENV /env
+ENV PATH /env/bin:$PATH
+
+RUN apt-get update && apt-get install -y python-opencv
+
+# Copy the application's requirements.txt and run pip to install all
+# dependencies into the virtualenv.
+ADD requirements.txt /app/requirements.txt
+RUN pip install -r /app/requirements.txt
+
+# Add the application source code.
+ADD . /app
+
+# Run a WSGI server to serve the application. gunicorn must be declared as
+# a dependency in requirements.txt.
+CMD gunicorn -b :$PORT main:app
diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt
new file mode 100644
index 0000000..5fcc305
--- /dev/null
+++ b/utils/google_app_engine/additional_requirements.txt
@@ -0,0 +1,4 @@
+# Add these requirements to your app on top of the existing ones
+pip==18.1
+Flask==1.0.2
+gunicorn==19.9.0
diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml
new file mode 100644
index 0000000..ac29d10
--- /dev/null
+++ b/utils/google_app_engine/app.yaml
@@ -0,0 +1,14 @@
+runtime: custom
+env: flex
+
+service: yolov5app
+
+liveness_check:
+ initial_delay_sec: 600
+
+manual_scaling:
+ instances: 1
+resources:
+ cpu: 1
+ memory_gb: 4
+ disk_size_gb: 20
\ No newline at end of file
diff --git a/utils/google_utils.py b/utils/google_utils.py
new file mode 100644
index 0000000..242270c
--- /dev/null
+++ b/utils/google_utils.py
@@ -0,0 +1,115 @@
+# Google utils: https://cloud.google.com/storage/docs/reference/libraries
+
+import os
+import platform
+import subprocess
+import time
+from pathlib import Path
+
+import requests
+import torch
+
+
+def gsutil_getsize(url=''):
+ # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du
+ s = subprocess.check_output('gsutil du %s' % url, shell=True).decode('utf-8')
+ return int(s.split(' ')[0]) if len(s) else 0 # bytes
+
+
+def attempt_download(weights):
+ # Attempt to download pretrained weights if not found locally
+ weights = str(weights).strip().replace("'", '')
+ file = Path(weights).name.lower()
+
+ msg = weights + ' missing, try downloading from https://github.com/ultralytics/yolov5/releases/'
+ response = requests.get('https://api.github.com/repos/ultralytics/yolov5/releases/latest').json() # github api
+ assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
+ redundant = False # second download option
+
+ if file in assets and not os.path.isfile(weights):
+ try: # GitHub
+ tag = response['tag_name'] # i.e. 'v1.0'
+ url = f'https://github.com/ultralytics/yolov5/releases/download/{tag}/{file}'
+ print('Downloading %s to %s...' % (url, weights))
+ torch.hub.download_url_to_file(url, weights)
+ assert os.path.exists(weights) and os.path.getsize(weights) > 1E6 # check
+ except Exception as e: # GCP
+ print('Download error: %s' % e)
+ assert redundant, 'No secondary mirror'
+ url = 'https://storage.googleapis.com/ultralytics/yolov5/ckpt/' + file
+ print('Downloading %s to %s...' % (url, weights))
+ r = os.system('curl -L %s -o %s' % (url, weights)) # torch.hub.download_url_to_file(url, weights)
+ finally:
+ if not (os.path.exists(weights) and os.path.getsize(weights) > 1E6): # check
+ os.remove(weights) if os.path.exists(weights) else None # remove partial downloads
+ print('ERROR: Download failure: %s' % msg)
+ print('')
+ return
+
+
+def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', name='tmp.zip'):
+ # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
+ t = time.time()
+ print('Downloading https://drive.google.com/uc?export=download&id=%s as %s... ' % (id, name), end='')
+ os.remove(name) if os.path.exists(name) else None # remove existing
+ os.remove('cookie') if os.path.exists('cookie') else None
+
+ # Attempt file download
+ out = "NUL" if platform.system() == "Windows" else "/dev/null"
+ os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out))
+ if os.path.exists('cookie'): # large file
+ s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
+ else: # small file
+ s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
+ r = os.system(s) # execute, capture return
+ os.remove('cookie') if os.path.exists('cookie') else None
+
+ # Error check
+ if r != 0:
+ os.remove(name) if os.path.exists(name) else None # remove partial
+ print('Download error ') # raise Exception('Download error')
+ return r
+
+ # Unzip if archive
+ if name.endswith('.zip'):
+ print('unzipping... ', end='')
+ os.system('unzip -q %s' % name) # unzip
+ os.remove(name) # remove zip to free space
+
+ print('Done (%.1fs)' % (time.time() - t))
+ return r
+
+
+def get_token(cookie="./cookie"):
+ with open(cookie) as f:
+ for line in f:
+ if "download" in line:
+ return line.split()[-1]
+ return ""
+
+# def upload_blob(bucket_name, source_file_name, destination_blob_name):
+# # Uploads a file to a bucket
+# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
+#
+# storage_client = storage.Client()
+# bucket = storage_client.get_bucket(bucket_name)
+# blob = bucket.blob(destination_blob_name)
+#
+# blob.upload_from_filename(source_file_name)
+#
+# print('File {} uploaded to {}.'.format(
+# source_file_name,
+# destination_blob_name))
+#
+#
+# def download_blob(bucket_name, source_blob_name, destination_file_name):
+# # Uploads a blob from a bucket
+# storage_client = storage.Client()
+# bucket = storage_client.get_bucket(bucket_name)
+# blob = bucket.blob(source_blob_name)
+#
+# blob.download_to_filename(destination_file_name)
+#
+# print('Blob {} downloaded to {}.'.format(
+# source_blob_name,
+# destination_file_name))
diff --git a/utils/loss.py b/utils/loss.py
new file mode 100644
index 0000000..46051f2
--- /dev/null
+++ b/utils/loss.py
@@ -0,0 +1,205 @@
+# Loss functions
+
+import torch
+import torch.nn as nn
+
+from utils.general import bbox_iou
+from utils.torch_utils import is_parallel
+
+
+def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
+ # return positive, negative label smoothing BCE targets
+ return 1.0 - 0.5 * eps, 0.5 * eps
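+
+# Example (illustrative): smooth_BCE(eps=0.1) -> (0.95, 0.05), i.e. positive targets are softened to
+# 0.95 and negatives raised to 0.05 instead of hard 1/0 labels.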
+
+
+class BCEBlurWithLogitsLoss(nn.Module):
+ # BCEwithLogitLoss() with reduced missing label effects.
+ def __init__(self, alpha=0.05):
+ super(BCEBlurWithLogitsLoss, self).__init__()
+ self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
+ self.alpha = alpha
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+ pred = torch.sigmoid(pred) # prob from logits
+ dx = pred - true # reduce only missing label effects
+ # dx = (pred - true).abs() # reduce missing label and false label effects
+ alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
+ loss *= alpha_factor
+ return loss.mean()
+
+
+class FocalLoss(nn.Module):
+ # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
+ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
+ super(FocalLoss, self).__init__()
+ self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
+ self.gamma = gamma
+ self.alpha = alpha
+ self.reduction = loss_fcn.reduction
+ self.loss_fcn.reduction = 'none' # required to apply FL to each element
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+ # p_t = torch.exp(-loss)
+ # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
+
+ # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
+ pred_prob = torch.sigmoid(pred) # prob from logits
+ p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
+ alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
+ modulating_factor = (1.0 - p_t) ** self.gamma
+ loss *= alpha_factor * modulating_factor
+
+ if self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else: # 'none'
+ return loss
+
+
+class QFocalLoss(nn.Module):
+ # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
+ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
+ super(QFocalLoss, self).__init__()
+ self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
+ self.gamma = gamma
+ self.alpha = alpha
+ self.reduction = loss_fcn.reduction
+ self.loss_fcn.reduction = 'none' # required to apply FL to each element
+
+ def forward(self, pred, true):
+ loss = self.loss_fcn(pred, true)
+
+ pred_prob = torch.sigmoid(pred) # prob from logits
+ alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
+ modulating_factor = torch.abs(true - pred_prob) ** self.gamma
+ loss *= alpha_factor * modulating_factor
+
+ if self.reduction == 'mean':
+ return loss.mean()
+ elif self.reduction == 'sum':
+ return loss.sum()
+ else: # 'none'
+ return loss
+
+
+def compute_loss(p, targets, model): # predictions, targets, model
+ device = targets.device
+ lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+ tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
+ h = model.hyp # hyperparameters
+
+ # Define criteria
+ BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) # weight=model.class_weights)
+ BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
+ # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+ cp, cn = smooth_BCE(eps=0.0)
+
+ # Focal loss
+ g = h['fl_gamma'] # focal loss gamma
+ if g > 0:
+ BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+ # Losses
+ nt = 0 # number of targets
+ no = len(p) # number of outputs
+ balance = [4.0, 1.0, 0.3, 0.1, 0.03] # P3-P7
+ for i, pi in enumerate(p): # layer index, layer predictions
+ b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
+ tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
+
+ n = b.shape[0] # number of targets
+ if n:
+ nt += n # cumulative targets
+ ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
+
+ # Regression
+ pxy = ps[:, :2].sigmoid() * 2. - 0.5
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+ pbox = torch.cat((pxy, pwh), 1) # predicted box
+ iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
+ lbox += (1.0 - iou).mean() # iou loss
+
+ # Objectness
+ tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
+
+ # Classification
+ if model.nc > 1: # cls loss (only if multiple classes)
+ t = torch.full_like(ps[:, 5:], cn, device=device) # targets
+ t[range(n), tcls[i]] = cp
+ lcls += BCEcls(ps[:, 5:], t) # BCE
+
+ # Append targets to text file
+ # with open('targets.txt', 'a') as file:
+ # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
+
+ lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
+
+ s = 3 / no # output count scaling
+ lbox *= h['box'] * s
+ lobj *= h['obj']
+ lcls *= h['cls'] * s
+ bs = tobj.shape[0] # batch size
+
+ loss = lbox + lobj + lcls
+ return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
+
+
+def build_targets(p, targets, model):
+ # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
+ det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
+ na, nt = det.na, targets.shape[0] # number of anchors, targets
+ tcls, tbox, indices, anch = [], [], [], []
+ gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
+ ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
+ targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
+
+ g = 0.5 # bias
+ off = torch.tensor([[0, 0],
+ [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
+ # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
+ ], device=targets.device).float() * g # offsets
+
+ for i in range(det.nl):
+ anchors = det.anchors[i]
+ gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
+
+ # Match targets to anchors
+ t = targets * gain
+ if nt:
+ # Matches
+ r = t[:, :, 4:6] / anchors[:, None] # wh ratio
+ j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
+ # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
+ t = t[j] # filter
+
+ # Offsets
+ gxy = t[:, 2:4] # grid xy
+ gxi = gain[[2, 3]] - gxy # inverse
+ j, k = ((gxy % 1. < g) & (gxy > 1.)).T
+ l, m = ((gxi % 1. < g) & (gxi > 1.)).T
+ j = torch.stack((torch.ones_like(j), j, k, l, m))
+ t = t.repeat((5, 1, 1))[j]
+ offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
+ else:
+ t = targets[0]
+ offsets = 0
+
+ # Define
+ b, c = t[:, :2].long().T # image, class
+ gxy = t[:, 2:4] # grid xy
+ gwh = t[:, 4:6] # grid wh
+ gij = (gxy - offsets).long()
+ gi, gj = gij.T # grid xy indices
+
+ # Append
+ a = t[:, 6].long() # anchor indices
+ indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices
+ tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
+ anch.append(anchors[a]) # anchors
+ tcls.append(c) # class
+
+ return tcls, tbox, indices, anch
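+
+# Note (sketch): the g = 0.5 bias assigns each target to its own grid cell plus up to two neighbouring
+# cells whose centers lie within 0.5 grid units, so each label can match up to 3 (cell, anchor) pairs.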
diff --git a/utils/metrics.py b/utils/metrics.py
new file mode 100644
index 0000000..99d5bcf
--- /dev/null
+++ b/utils/metrics.py
@@ -0,0 +1,200 @@
+# Model validation metrics
+
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+
+from . import general
+
+
+def fitness(x):
+ # Model fitness as a weighted combination of metrics
+ w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+ return (x[:, :4] * w).sum(1)
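+
+# Example (illustrative): only the two mAP columns contribute, e.g.
+# fitness(np.array([[0.7, 0.6, 0.5, 0.4]])) # -> 0.1 * 0.5 + 0.9 * 0.4 = 0.41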
+
+
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
+ """ Compute the average precision, given the recall and precision curves.
+ Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
+ # Arguments
+ tp: True positives (nparray, nx1 or nx10).
+ conf: Objectness value from 0-1 (nparray).
+ pred_cls: Predicted object classes (nparray).
+ target_cls: True object classes (nparray).
+ plot: Plot precision-recall curve at mAP@0.5
+ save_dir: Plot save directory
+ # Returns
+ The average precision as computed in py-faster-rcnn.
+ """
+
+ # Sort by objectness
+ i = np.argsort(-conf)
+ tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
+
+ # Find unique classes
+ unique_classes = np.unique(target_cls)
+
+ # Create Precision-Recall curve and compute AP for each class
+ px, py = np.linspace(0, 1, 1000), [] # for plotting
+ pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
+ s = [unique_classes.shape[0], tp.shape[1]] # number of classes, number of IoU thresholds (i.e. 10 for mAP0.5...0.95)
+ ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
+ for ci, c in enumerate(unique_classes):
+ i = pred_cls == c
+ n_l = (target_cls == c).sum() # number of labels
+ n_p = i.sum() # number of predictions
+
+ if n_p == 0 or n_l == 0:
+ continue
+ else:
+ # Accumulate FPs and TPs
+ fpc = (1 - tp[i]).cumsum(0)
+ tpc = tp[i].cumsum(0)
+
+ # Recall
+ recall = tpc / (n_l + 1e-16) # recall curve
+ r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
+
+ # Precision
+ precision = tpc / (tpc + fpc) # precision curve
+ p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
+
+ # AP from recall-precision curve
+ for j in range(tp.shape[1]):
+ ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
+ if plot and (j == 0):
+ py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
+
+ # Compute F1 score (harmonic mean of precision and recall)
+ f1 = 2 * p * r / (p + r + 1e-16)
+
+ if plot:
+ plot_pr_curve(px, py, ap, save_dir, names)
+
+ return p, r, ap, f1, unique_classes.astype('int32')
+
+
+def compute_ap(recall, precision):
+ """ Compute the average precision, given the recall and precision curves
+ # Arguments
+ recall: The recall curve (list)
+ precision: The precision curve (list)
+ # Returns
+ Average precision, precision curve, recall curve
+ """
+
+ # Append sentinel values to beginning and end
+ mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
+ mpre = np.concatenate(([1.], precision, [0.]))
+
+ # Compute the precision envelope
+ mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
+
+ # Integrate area under curve
+ method = 'interp' # methods: 'continuous', 'interp'
+ if method == 'interp':
+ x = np.linspace(0, 1, 101) # 101-point interp (COCO)
+ ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
+ else: # 'continuous'
+ i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
+
+ return ap, mpre, mrec
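+
+# Example (illustrative): a perfect detector yields AP ~ 1.0, e.g.
+# compute_ap(np.array([0., 1.]), np.array([1., 1.]))[0] # -> ~1.0 (precision envelope is 1.0 everywhere)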
+
+
+class ConfusionMatrix:
+ # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
+ def __init__(self, nc, conf=0.25, iou_thres=0.45):
+ self.matrix = np.zeros((nc + 1, nc + 1))
+ self.nc = nc # number of classes
+ self.conf = conf
+ self.iou_thres = iou_thres
+
+ def process_batch(self, detections, labels):
+ """
+ Update the confusion matrix with one batch of detections and labels (boxes are matched by IoU).
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+ Arguments:
+ detections (Array[N, 6]), x1, y1, x2, y2, conf, class
+ labels (Array[M, 5]), class, x1, y1, x2, y2
+ Returns:
+ None, updates confusion matrix accordingly
+ """
+ detections = detections[detections[:, 4] > self.conf]
+ gt_classes = labels[:, 0].int()
+ detection_classes = detections[:, 5].int()
+ iou = general.box_iou(labels[:, 1:], detections[:, :4])
+
+ x = torch.where(iou > self.iou_thres)
+ if x[0].shape[0]:
+ matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
+ if x[0].shape[0] > 1:
+ matches = matches[matches[:, 2].argsort()[::-1]] # sort by IoU, descending
+ matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # keep best match per detection
+ matches = matches[matches[:, 2].argsort()[::-1]]
+ matches = matches[np.unique(matches[:, 0], return_index=True)[1]] # keep best match per label
+ else:
+ matches = np.zeros((0, 3))
+
+ n = matches.shape[0] > 0
+ m0, m1, _ = matches.transpose().astype(np.int16)
+ for i, gc in enumerate(gt_classes):
+ j = m0 == i
+ if n and sum(j) == 1:
+ self.matrix[gc, detection_classes[m1[j]]] += 1 # correct
+ else:
+ self.matrix[gc, self.nc] += 1 # true label missed, counted in the background FN column
+
+ if n:
+ for i, dc in enumerate(detection_classes):
+ if not any(m1 == i):
+ self.matrix[self.nc, dc] += 1 # unmatched detection, counted in the background FP row
+
+ def get_matrix(self):
+ # note: a method named matrix() would be shadowed by the self.matrix ndarray set in __init__
+ return self.matrix
+
+ def plot(self, save_dir='', names=()):
+ try:
+ import seaborn as sn
+
+ array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize columns
+ array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)
+
+ fig = plt.figure(figsize=(12, 9), tight_layout=True)
+ sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size
+ labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels
+ sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
+ xticklabels=names + ['background FN'] if labels else "auto",
+ yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1))
+ fig.axes[0].set_xlabel('Predicted')
+ fig.axes[0].set_ylabel('True')
+ fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
+ except Exception as e:
+ print(f'WARNING: ConfusionMatrix plot failure: {e}')
+
+ def print(self):
+ for i in range(self.nc + 1):
+ print(' '.join(map(str, self.matrix[i])))
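+
+# Usage sketch (shapes as documented above): accumulate over a validation set, then plot.
+# cm = ConfusionMatrix(nc=80)
+# for detections, labels in batches:
+#     cm.process_batch(detections, labels) # detections [N, 6] xyxy+conf+cls, labels [M, 5] cls+xyxy
+# cm.plot(save_dir='runs/test', names=class_names) # class_names: your list of nc class names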
+
+
+# Plots ----------------------------------------------------------------------------------------------------------------
+
+def plot_pr_curve(px, py, ap, save_dir='.', names=()):
+ fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
+ py = np.stack(py, axis=1)
+
+ if 0 < len(names) < 21: # show mAP in legend if < 21 classes
+ for i, y in enumerate(py.T):
+ ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision)
+ else:
+ ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
+
+ ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
+ ax.set_xlabel('Recall')
+ ax.set_ylabel('Precision')
+ ax.set_xlim(0, 1)
+ ax.set_ylim(0, 1)
+ plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+ fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250)
diff --git a/utils/plots.py b/utils/plots.py
new file mode 100644
index 0000000..c883ea2
--- /dev/null
+++ b/utils/plots.py
@@ -0,0 +1,413 @@
+# Plotting utils
+
+import glob
+import math
+import os
+import random
+from copy import copy
+from pathlib import Path
+
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import seaborn as sns
+import torch
+import yaml
+from PIL import Image, ImageDraw
+from scipy.signal import butter, filtfilt
+
+from utils.general import xywh2xyxy, xyxy2xywh
+from utils.metrics import fitness
+
+# Settings
+matplotlib.rc('font', **{'size': 11})
+matplotlib.use('Agg') # for writing to files only
+
+
+def color_list():
+ # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
+ def hex2rgb(h):
+ return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
+
+ return [hex2rgb(h) for h in plt.rcParams['axes.prop_cycle'].by_key()['color']]
+
+
+def hist2d(x, y, n=100):
+ # 2d histogram used in labels.png and evolve.png
+ xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
+ hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
+ xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
+ yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
+ return np.log(hist[xidx, yidx])
+
+
+def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
+ # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
+ def butter_lowpass(cutoff, fs, order):
+ nyq = 0.5 * fs
+ normal_cutoff = cutoff / nyq
+ return butter(order, normal_cutoff, btype='low', analog=False)
+
+ b, a = butter_lowpass(cutoff, fs, order=order)
+ return filtfilt(b, a, data) # forward-backward filter
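+
+# Example: zero-phase smoothing of a noisy 1-D signal sampled at fs Hz, keeping content below cutoff Hz:
+# y_smooth = butter_lowpass_filtfilt(y, cutoff=1500, fs=50000, order=5)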
+
+
+def plot_one_box(x, img, color=None, label=None, line_thickness=None):
+ # Plots one bounding box on image img
+ tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
+ color = color or [random.randint(0, 255) for _ in range(3)]
+ c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+ cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+ if label:
+ tf = max(tl - 1, 1) # font thickness
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+ c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+ cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
+ cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
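+
+# Example (illustrative coordinates): draw an xyxy box with a filled label on a BGR numpy image, in place:
+# plot_one_box([50, 30, 200, 180], img, color=(0, 0, 255), label='person 0.92')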
+
+
+def plot_wh_methods(): # from utils.plots import *; plot_wh_methods()
+ # Compares the two methods for width-height anchor multiplication
+ # https://github.com/ultralytics/yolov3/issues/168
+ x = np.arange(-4.0, 4.0, .1)
+ ya = np.exp(x)
+ yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
+
+ fig = plt.figure(figsize=(6, 3), tight_layout=True)
+ plt.plot(x, ya, '.-', label='YOLOv3')
+ plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
+ plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
+ plt.xlim(left=-4, right=4)
+ plt.ylim(bottom=0, top=6)
+ plt.xlabel('input')
+ plt.ylabel('output')
+ plt.grid()
+ plt.legend()
+ fig.savefig('comparison.png', dpi=200)
+
+
+def output_to_target(output):
+ # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
+ targets = []
+ for i, o in enumerate(output):
+ for *box, conf, cls in o.cpu().numpy():
+ targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
+ return np.array(targets)
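+
+# Example: with per-image output rows of [x1, y1, x2, y2, conf, cls], a batch of 2 images becomes
+# an (n, 7) array of [batch_id, class_id, x_center, y_center, w, h, conf] rows for plot_images().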
+
+
+def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
+ # Plot image grid with labels
+
+ if isinstance(images, torch.Tensor):
+ images = images.cpu().float().numpy()
+ if isinstance(targets, torch.Tensor):
+ targets = targets.cpu().numpy()
+
+ # un-normalise
+ if np.max(images[0]) <= 1:
+ images *= 255
+
+ tl = 3 # line thickness
+ tf = max(tl - 1, 1) # font thickness
+ bs, _, h, w = images.shape # batch size, _, height, width
+ bs = min(bs, max_subplots) # limit plot images
+ ns = np.ceil(bs ** 0.5) # number of subplots (square)
+
+ # Check if we should resize
+ scale_factor = max_size / max(h, w)
+ if scale_factor < 1:
+ h = math.ceil(scale_factor * h)
+ w = math.ceil(scale_factor * w)
+
+ colors = color_list() # list of colors
+ mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
+ for i, img in enumerate(images):
+ if i == max_subplots: # limit plotted images to max_subplots
+ break
+
+ block_x = int(w * (i // ns))
+ block_y = int(h * (i % ns))
+
+ img = img.transpose(1, 2, 0)
+ if scale_factor < 1:
+ img = cv2.resize(img, (w, h))
+
+ mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
+ if len(targets) > 0:
+ image_targets = targets[targets[:, 0] == i]
+ boxes = xywh2xyxy(image_targets[:, 2:6]).T
+ classes = image_targets[:, 1].astype('int')
+ labels = image_targets.shape[1] == 6 # labels if no conf column
+ conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)
+
+ if boxes.shape[1]:
+ if boxes.max() <= 1.01: # if normalized with tolerance 0.01
+ boxes[[0, 2]] *= w # scale to pixels
+ boxes[[1, 3]] *= h
+ elif scale_factor < 1: # absolute coords need scale if image scales
+ boxes *= scale_factor
+ boxes[[0, 2]] += block_x
+ boxes[[1, 3]] += block_y
+ for j, box in enumerate(boxes.T):
+ cls = int(classes[j])
+ color = colors[cls % len(colors)]
+ cls = names[cls] if names else cls
+ if labels or conf[j] > 0.25: # 0.25 conf thresh
+ label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
+ plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
+
+ # Draw image filename labels
+ if paths:
+ label = Path(paths[i]).name[:40] # trim to 40 char
+ t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+ cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
+ lineType=cv2.LINE_AA)
+
+ # Image border
+ cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
+
+ if fname:
+ r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size
+ mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
+ # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
+ Image.fromarray(mosaic).save(fname) # PIL save
+ return mosaic
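+
+# Usage sketch: plot_images(imgs, targets, paths, fname='train_batch0.jpg', names=class_names)
+# where imgs is a (bs, 3, h, w) tensor and targets follows the output_to_target() row format above.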
+
+
+def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
+ # Plot LR simulating training for full epochs
+ optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
+ y = []
+ for _ in range(epochs):
+ scheduler.step()
+ y.append(optimizer.param_groups[0]['lr'])
+ plt.plot(y, '.-', label='LR')
+ plt.xlabel('epoch')
+ plt.ylabel('LR')
+ plt.grid()
+ plt.xlim(0, epochs)
+ plt.ylim(0)
+ plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
+ plt.close()
+
+
+def plot_test_txt(): # from utils.plots import *; plot_test()
+ # Plot test.txt histograms
+ x = np.loadtxt('test.txt', dtype=np.float32)
+ box = xyxy2xywh(x[:, :4])
+ cx, cy = box[:, 0], box[:, 1]
+
+ fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
+ ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
+ ax.set_aspect('equal')
+ plt.savefig('hist2d.png', dpi=300)
+
+ fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
+ ax[0].hist(cx, bins=600)
+ ax[1].hist(cy, bins=600)
+ plt.savefig('hist1d.png', dpi=200)
+
+
+def plot_targets_txt(): # from utils.plots import *; plot_targets_txt()
+ # Plot targets.txt histograms
+ x = np.loadtxt('targets.txt', dtype=np.float32).T
+ s = ['x targets', 'y targets', 'width targets', 'height targets']
+ fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
+ ax = ax.ravel()
+ for i in range(4):
+ ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
+ ax[i].legend()
+ ax[i].set_title(s[i])
+ plt.savefig('targets.jpg', dpi=200)
+
+
+def plot_study_txt(path='study/', x=None): # from utils.plots import *; plot_study_txt()
+ # Plot study.txt generated by test.py
+ fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
+ ax = ax.ravel()
+
+ fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
+ for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]:
+ y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
+ x = np.arange(y.shape[1]) if x is None else np.array(x)
+ s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
+ for i in range(7):
+ ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
+ ax[i].set_title(s[i])
+
+ j = y[3].argmax() + 1
+ ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
+ label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
+
+ ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
+ 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
+
+ ax2.grid()
+ ax2.set_xlim(0, 30)
+ ax2.set_ylim(29, 51)
+ ax2.set_yticks(np.arange(30, 55, 5))
+ ax2.set_xlabel('GPU Speed (ms/img)')
+ ax2.set_ylabel('COCO AP val')
+ ax2.legend(loc='lower right')
+ plt.savefig('test_study.png', dpi=300)
+
+
+def plot_labels(labels, save_dir=Path(''), loggers=None):
+ # plot dataset labels
+ print('Plotting labels... ')
+ c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
+ nc = int(c.max() + 1) # number of classes
+ colors = color_list()
+ x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
+
+ # seaborn correlogram
+ sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
+ plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
+ plt.close()
+
+ # matplotlib labels
+ matplotlib.use('svg') # faster
+ ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
+ ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
+ ax[0].set_xlabel('classes')
+ sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
+ sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)
+
+ # rectangles
+ labels[:, 1:3] = 0.5 # center
+ labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
+ img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
+ for cls, *box in labels[:1000]:
+ ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot
+ ax[1].imshow(img)
+ ax[1].axis('off')
+
+ for a in [0, 1, 2, 3]:
+ for s in ['top', 'right', 'left', 'bottom']:
+ ax[a].spines[s].set_visible(False)
+
+ plt.savefig(save_dir / 'labels.jpg', dpi=200)
+ matplotlib.use('Agg')
+ plt.close()
+
+ # loggers
+ for k, v in (loggers or {}).items(): # loggers may be None
+ if k == 'wandb' and v:
+ v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]})
+
+
+def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()
+ # Plot hyperparameter evolution results in evolve.txt
+ with open(yaml_file) as f:
+ hyp = yaml.load(f, Loader=yaml.FullLoader)
+ x = np.loadtxt('evolve.txt', ndmin=2)
+ f = fitness(x)
+ # weights = (f - f.min()) ** 2 # for weighted results
+ plt.figure(figsize=(10, 12), tight_layout=True)
+ matplotlib.rc('font', **{'size': 8})
+ for i, (k, v) in enumerate(hyp.items()):
+ y = x[:, i + 7]
+ # mu = (y * weights).sum() / weights.sum() # best weighted result
+ mu = y[f.argmax()] # best single result
+ plt.subplot(6, 5, i + 1)
+ plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
+ plt.plot(mu, f.max(), 'k+', markersize=15)
+ plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})
+ if i % 5 != 0:
+ plt.yticks([])
+ print('%15s: %.3g' % (k, mu))
+ plt.savefig('evolve.png', dpi=200)
+ print('\nPlot saved as evolve.png')
+
+
+def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
+ # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
+ ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
+ s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
+ files = list(Path(save_dir).glob('frames*.txt'))
+ for fi, f in enumerate(files):
+ try:
+ results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first 90 and last 30 frames
+ n = results.shape[1] # number of frames
+ x = np.arange(start, min(stop, n) if stop else n)
+ results = results[:, x]
+ t = (results[0] - results[0].min()) # set t0=0s
+ results[0] = x
+ for i, a in enumerate(ax):
+ if i < len(results):
+ label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
+ a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
+ a.set_title(s[i])
+ a.set_xlabel('time (s)')
+ # if fi == len(files) - 1:
+ # a.set_ylim(bottom=0)
+ for side in ['top', 'right']:
+ a.spines[side].set_visible(False)
+ else:
+ a.remove()
+ except Exception as e:
+ print('Warning: Plotting error for %s; %s' % (f, e))
+
+ ax[1].legend()
+ plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)
+
+
+def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay()
+ # Plot training 'results*.txt', overlaying train and val losses
+ s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends
+ t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
+ for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
+ results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
+ n = results.shape[1] # number of epochs
+ x = range(start, min(stop, n) if stop else n)
+ fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
+ ax = ax.ravel()
+ for i in range(5):
+ for j in [i, i + 5]:
+ y = results[j, x]
+ ax[i].plot(x, y, marker='.', label=s[j])
+ # y_smooth = butter_lowpass_filtfilt(y)
+ # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
+
+ ax[i].set_title(t[i])
+ ax[i].legend()
+ ax[i].set_ylabel(f) if i == 0 else None # add filename
+ fig.savefig(f.replace('.txt', '.png'), dpi=200)
+
+
+def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
+ # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
+ fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
+ ax = ax.ravel()
+ s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
+ 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
+ if bucket:
+ # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
+ files = ['results%g.txt' % x for x in id]
+ c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
+ os.system(c)
+ else:
+ files = list(Path(save_dir).glob('results*.txt'))
+ assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
+ for fi, f in enumerate(files):
+ try:
+ results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
+ n = results.shape[1] # number of epochs
+ x = range(start, min(stop, n) if stop else n)
+ for i in range(10):
+ y = results[i, x]
+ if i in [0, 1, 2, 5, 6, 7]:
+ y[y == 0] = np.nan # don't show zero loss values
+ # y /= y[0] # normalize
+ label = labels[fi] if len(labels) else f.stem
+ ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
+ ax[i].set_title(s[i])
+ # if i in [5, 6, 7]: # share train and val loss y axes
+ # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+ except Exception as e:
+ print('Warning: Plotting error for %s; %s' % (f, e))
+
+ ax[1].legend()
+ fig.savefig(Path(save_dir) / 'results.png', dpi=200)
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
new file mode 100644
index 0000000..75bcb7f
--- /dev/null
+++ b/utils/torch_utils.py
@@ -0,0 +1,284 @@
+# PyTorch utils
+
+import logging
+import math
+import os
+import time
+from contextlib import contextmanager
+from copy import deepcopy
+
+import torch
+import torch.backends.cudnn as cudnn
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision
+
+try:
+ import thop # for FLOPS computation
+except ImportError:
+ thop = None
+logger = logging.getLogger(__name__)
+
+
+@contextmanager
+def torch_distributed_zero_first(local_rank: int):
+ """
+ Context manager that makes all processes in distributed training wait for the local master to finish a task.
+ """
+ if local_rank not in [-1, 0]:
+ torch.distributed.barrier()
+ yield
+ if local_rank == 0:
+ torch.distributed.barrier()
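+
+# Usage sketch (create_dataset is a hypothetical helper): rank 0 runs the body first, other ranks wait:
+# with torch_distributed_zero_first(local_rank):
+#     dataset = create_dataset(...) # e.g. download/cache once, then all ranks read the result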
+
+
+def init_torch_seeds(seed=0):
+ # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
+ torch.manual_seed(seed)
+ if seed == 0: # slower, more reproducible
+ cudnn.benchmark, cudnn.deterministic = False, True
+ else: # faster, less reproducible
+ cudnn.benchmark, cudnn.deterministic = True, False
+
+
+def select_device(device='', batch_size=None):
+ # device = 'cpu' or '0' or '0,1,2,3'
+ s = f'Using torch {torch.__version__} ' # string
+ cpu = device.lower() == 'cpu'
+ if cpu:
+ os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
+ elif device: # non-cpu device requested
+ os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
+ assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
+
+ cuda = torch.cuda.is_available() and not cpu
+ if cuda:
+ n = torch.cuda.device_count()
+ if n > 1 and batch_size: # check that batch_size is compatible with device_count
+ assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
+ space = ' ' * len(s)
+ for i, d in enumerate(device.split(',') if device else range(n)):
+ p = torch.cuda.get_device_properties(i)
+ s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
+ else:
+ s += 'CPU'
+
+ logger.info(f'{s}\n') # skip a line
+ return torch.device('cuda:0' if cuda else 'cpu')
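+
+# Example: select_device('0,1', batch_size=16) returns cuda:0 and asserts batch_size % 2 == 0;
+# select_device('cpu') forces CPU by hiding CUDA devices.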
+
+
+def time_synchronized():
+ # pytorch-accurate time
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ return time.time()
+
+
+def profile(x, ops, n=100, device=None):
+ # profile a pytorch module or list of modules. Example usage:
+ # x = torch.randn(16, 3, 640, 640) # input
+ # m1 = lambda x: x * torch.sigmoid(x)
+ # m2 = nn.SiLU()
+ # profile(x, [m1, m2], n=100) # profile speed over 100 iterations
+
+ device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+ x = x.to(device)
+ x.requires_grad = True
+ print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
+ print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
+ for m in ops if isinstance(ops, list) else [ops]:
+ m = m.to(device) if hasattr(m, 'to') else m # device
+ m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type
+ dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward
+ try:
+ flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS
+ except Exception: # thop missing or op unsupported
+ flops = 0
+
+ for _ in range(n):
+ t[0] = time_synchronized()
+ y = m(x)
+ t[1] = time_synchronized()
+ try:
+ _ = y.sum().backward()
+ t[2] = time_synchronized()
+ except Exception: # no backward method
+ t[2] = float('nan')
+ dtf += (t[1] - t[0]) * 1000 / n # ms per op forward
+ dtb += (t[2] - t[1]) * 1000 / n # ms per op backward
+
+ s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
+ s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
+ p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
+ print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
+
+
+def is_parallel(model):
+ return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
+
+
+def intersect_dicts(da, db, exclude=()):
+ # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
+ return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
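+
+# Usage sketch (ckpt is a hypothetical checkpoint dict): load only pretrained weights whose names and shapes match:
+# csd = intersect_dicts(ckpt['model'].state_dict(), model.state_dict(), exclude=['anchor'])
+# model.load_state_dict(csd, strict=False)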
+
+
+def initialize_weights(model):
+ for m in model.modules():
+ t = type(m)
+ if t is nn.Conv2d:
+ pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ elif t is nn.BatchNorm2d:
+ m.eps = 1e-3
+ m.momentum = 0.03
+ elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
+ m.inplace = True
+
+
+def find_modules(model, mclass=nn.Conv2d):
+ # Finds layer indices matching module class 'mclass'
+ return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
+
+
+def sparsity(model):
+ # Return global model sparsity
+ a, b = 0., 0.
+ for p in model.parameters():
+ a += p.numel()
+ b += (p == 0).sum()
+ return b / a
+
+
+def prune(model, amount=0.3):
+ # Prune model to requested global sparsity
+ import torch.nn.utils.prune as prune
+ print('Pruning model... ', end='')
+ for name, m in model.named_modules():
+ if isinstance(m, nn.Conv2d):
+ prune.l1_unstructured(m, name='weight', amount=amount) # prune
+ prune.remove(m, 'weight') # make permanent
+ print(' %.3g global sparsity' % sparsity(model))
+
+
+def fuse_conv_and_bn(conv, bn):
+ # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
+ fusedconv = nn.Conv2d(conv.in_channels,
+ conv.out_channels,
+ kernel_size=conv.kernel_size,
+ stride=conv.stride,
+ padding=conv.padding,
+ groups=conv.groups,
+ bias=True).requires_grad_(False).to(conv.weight.device)
+
+ # prepare filters
+ w_conv = conv.weight.clone().view(conv.out_channels, -1)
+ w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
+ fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
+
+ # prepare spatial bias
+ b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
+ b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
+ fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
+
+ return fusedconv
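+
+# The identity behind the fusion above: for y = bn(conv(x)) at inference time,
+# w_fused = diag(gamma / sqrt(running_var + eps)) @ w_conv
+# b_fused = gamma * (b_conv - running_mean) / sqrt(running_var + eps) + beta
+# so a single Conv2d with (w_fused, b_fused) reproduces the conv+bn pair exactly.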
+
+
+def model_info(model, verbose=False, img_size=640):
+ # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
+ n_p = sum(x.numel() for x in model.parameters()) # number parameters
+ n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
+ if verbose:
+ print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
+ for i, (name, p) in enumerate(model.named_parameters()):
+ name = name.replace('module_list.', '')
+ print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
+ (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
+
+ try: # FLOPS
+ from thop import profile
+ stride = int(model.stride.max()) if hasattr(model, 'stride') else 32
+ img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
+ flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS
+ img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
+ fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS
+ except Exception: # thop unavailable or model incompatible
+ fs = ''
+
+ logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
+
+
+def load_classifier(name='resnet101', n=2):
+ # Loads a pretrained model reshaped to n-class output
+ model = torchvision.models.__dict__[name](pretrained=True)
+
+ # ResNet model properties
+ # input_size = [3, 224, 224]
+ # input_space = 'RGB'
+ # input_range = [0, 1]
+ # mean = [0.485, 0.456, 0.406]
+ # std = [0.229, 0.224, 0.225]
+
+ # Reshape output to n classes
+ filters = model.fc.weight.shape[1]
+ model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
+ model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
+ model.fc.out_features = n
+ return model
+
+
+def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
+ # scales img(bs,3,y,x) by ratio constrained to gs-multiple
+ if ratio == 1.0:
+ return img
+ else:
+ h, w = img.shape[2:]
+ s = (int(h * ratio), int(w * ratio)) # new size
+ img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
+ if not same_shape: # pad/crop img
+ h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
+ return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
+
+
+def copy_attr(a, b, include=(), exclude=()):
+ # Copy attributes from b to a, options to only include [...] and to exclude [...]
+ for k, v in b.__dict__.items():
+ if (len(include) and k not in include) or k.startswith('_') or k in exclude:
+ continue
+ else:
+ setattr(a, k, v)
+
+
+class ModelEMA:
+ """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
+ Keep a moving average of everything in the model state_dict (parameters and buffers).
+ This is intended to allow functionality like
+ https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+ A smoothed version of the weights is necessary for some training schemes to perform well.
+ This class is sensitive where it is initialized in the sequence of model init,
+ GPU assignment and distributed training wrappers.
+ """
+
+ def __init__(self, model, decay=0.9999, updates=0):
+ # Create EMA
+ self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
+ # if next(model.parameters()).device.type != 'cpu':
+ # self.ema.half() # FP16 EMA
+ self.updates = updates # number of EMA updates
+ self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
+ for p in self.ema.parameters():
+ p.requires_grad_(False)
+
+ def update(self, model):
+ # Update EMA parameters
+ with torch.no_grad():
+ self.updates += 1
+ d = self.decay(self.updates)
+
+ msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
+ for k, v in self.ema.state_dict().items():
+ if v.dtype.is_floating_point:
+ v *= d
+ v += (1. - d) * msd[k].detach()
+
+ def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
+ # Update EMA attributes
+ copy_attr(self.ema, model, include, exclude)
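+
+# Typical training-loop usage (sketch):
+# ema = ModelEMA(model)
+# after each optimizer step: ema.update(model)
+# at epoch end: ema.update_attr(model); evaluate and checkpoint ema.ema instead of the raw model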
diff --git a/weights/download_weights.sh b/weights/download_weights.sh
new file mode 100644
index 0000000..43c8e31
--- /dev/null
+++ b/weights/download_weights.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Download latest models from https://github.com/ultralytics/yolov5/releases
+# Usage:
+# $ bash weights/download_weights.sh
+
+python - <