@@ -1,6 +1,6 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="PublishConfigData" serverName="th@192.168.11.8:32178" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<component name="PublishConfigData" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<serverData> | |||
<paths name="10.21"> | |||
<serverdata> | |||
@@ -46,13 +46,6 @@ | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="th@192.168.11.8:32178"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/th/tuo_heng/dev/tuoheng_alg" local="$PROJECT_DIR$" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
</serverData> | |||
</component> | |||
</project> |
@@ -6,7 +6,6 @@ | |||
<sshConfig authType="PASSWORD" host="192.168.10.21" id="adf5e1da-4910-4668-bfbb-432f4e2ae77c" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" host="192.168.10.22" id="ac18a75e-ff42-4875-a5da-ad98d2d695ea" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" connectionConfig="{"serverAliveInterval":300}" host="192.168.10.66" id="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" port="32782" nameFormat="DESCRIPTIVE" username="root" /> | |||
<sshConfig authType="PASSWORD" host="192.168.11.8" id="34e9c3c2-edbc-42f0-8c89-cb75bfdf55e1" port="32178" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" host="192.168.11.7" id="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
</configs> | |||
</component> |
@@ -31,9 +31,9 @@ | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="b761b5c5-5f66-4c6a-ad49-4783ff5df619" name="192.168.11.8"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.8" port="32178" sshConfigId="34e9c3c2-edbc-42f0-8c89-cb75bfdf55e1" sshConfig="th@192.168.11.8:32178 password"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.8" port="32178" sshConfigId="080a8ea2-04ef-404c-8202-a30cad7668a2" sshConfig="th@192.168.11.8:32178 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> |
@@ -5,15 +5,13 @@ | |||
</component> | |||
<component name="ChangeListManager"> | |||
<list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes"> | |||
<change afterPath="$PROJECT_DIR$/test/__init__.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/deployment.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/deployment.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/sshConfigs.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/sshConfigs.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/webServers.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/webServers.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/common/Constant.py" beforeDir="false" afterPath="$PROJECT_DIR$/common/Constant.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FeedbackThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FeedbackThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FileUpdateThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FileUploadThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FileUploadThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FileUploadThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/PullStreamThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/PullStreamThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/dsp_application.yml" beforeDir="false" afterPath="$PROJECT_DIR$/dsp_application.yml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/dsp_master.py" beforeDir="false" afterPath="$PROJECT_DIR$/dsp_master.py" afterDir="false" /> | |||
@@ -21,20 +19,14 @@ | |||
<change beforePath="$PROJECT_DIR$/enums/BaiduSdkEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/BaiduSdkEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/ExceptionEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ExceptionEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ModelTypeEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/exception/CustomerException.py" beforeDir="false" afterPath="$PROJECT_DIR$/exception/CustomerException.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/service/Dispatcher.py" beforeDir="false" afterPath="$PROJECT_DIR$/service/Dispatcher.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/test/ffmpeg11/ffmpeg2.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/ffmpeg11/ffmpeg2.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/test/gpu/gputest.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/gpu/gputest.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/test/路径/Test.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/路径/Test.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/test/集合/test.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/集合/test.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/AliyunSdk.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/AliyunSdk.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/Cv2Utils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/Cv2Utils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/FileUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/FileUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/GPUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/GPUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ImageUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ImageUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ImgBaiduSdk.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ImgBaiduSdk.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/KafkaUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/KafkaUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/LogUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/LogUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/OcrBaiduSdk.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/OcrBaiduSdk.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/YmlUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/YmlUtils.py" afterDir="false" /> | |||
</list> | |||
<option name="SHOW_DIALOG" value="false" /> | |||
<option name="HIGHLIGHT_CONFLICTS" value="true" /> | |||
@@ -163,7 +155,7 @@ | |||
"WebServerToolWindowPanel.toolwindow.show.date": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.permissions": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.size": "false", | |||
"last_opened_file_path": "D:/tuoheng/codenew/tuoheng_alg", | |||
"last_opened_file_path": "D:/tuoheng/codenew/tuoheng_dsp", | |||
"node.js.detected.package.eslint": "true", | |||
"node.js.detected.package.tslint": "true", | |||
"node.js.selected.package.eslint": "(autodetect)", | |||
@@ -177,20 +169,21 @@ | |||
}]]></component> | |||
<component name="RecentsManager"> | |||
<key name="CopyFile.RECENT_KEYS"> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\cuda" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\util" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\aliyun" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\enums" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\.idea" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\demo" /> | |||
<recent name="D:\tuoheng\code\tuoheng_alg" /> | |||
<recent name="D:\tuoheng\code\tuoheng_alg\concurrency" /> | |||
</key> | |||
<key name="MoveFile.RECENT_KEYS"> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\font" /> | |||
<recent name="D:\work\alg_new\tuoheng_alg\test\image" /> | |||
<recent name="D:\work\alg\tuoheng_alg\test\水印" /> | |||
<recent name="D:\work\alg\tuoheng_alg\image" /> | |||
</key> | |||
</component> | |||
<component name="RunManager" selected="Python.Test (2)"> | |||
<configuration name="协程笔记" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<component name="RunManager" selected="Python.test"> | |||
<configuration name="Test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
@@ -198,12 +191,12 @@ | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/协程" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/序列化" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/协程/协程笔记.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/序列化/Test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -212,20 +205,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="Test (1)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/线程" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/线程/Test.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -234,7 +227,7 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="Test (2)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="gputest" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
@@ -242,12 +235,12 @@ | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/路径" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/gpu" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/路径/Test.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/gpu/gputest.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -256,7 +249,7 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="Test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="mysqltest" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
@@ -264,12 +257,12 @@ | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/validate" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/validate/Test.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/mysqltest.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -278,20 +271,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<configuration name="test (1)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/cuda" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/cuda/test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -300,7 +293,7 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="mysqltest" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
@@ -308,12 +301,12 @@ | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/集合" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/mysqltest.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/集合/test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -322,7 +315,7 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="test1" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
@@ -330,12 +323,12 @@ | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/集合" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/cuda" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/集合/test.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/cuda/test1.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -347,18 +340,18 @@ | |||
<list> | |||
<item itemvalue="Python.editImage" /> | |||
<item itemvalue="Python.mysqltest" /> | |||
<item itemvalue="Python.Test (2)" /> | |||
<item itemvalue="Python.test" /> | |||
<item itemvalue="Python.Test (1)" /> | |||
<item itemvalue="Python.协程笔记" /> | |||
<item itemvalue="Python.gputest" /> | |||
<item itemvalue="Python.test1" /> | |||
<item itemvalue="Python.test (1)" /> | |||
<item itemvalue="Python.Test" /> | |||
</list> | |||
<recent_temporary> | |||
<list> | |||
<item itemvalue="Python.Test (2)" /> | |||
<item itemvalue="Python.test" /> | |||
<item itemvalue="Python.Test (1)" /> | |||
<item itemvalue="Python.协程笔记" /> | |||
<item itemvalue="Python.test1" /> | |||
<item itemvalue="Python.test (1)" /> | |||
<item itemvalue="Python.gputest" /> | |||
<item itemvalue="Python.Test" /> | |||
</list> | |||
</recent_temporary> | |||
@@ -500,7 +493,15 @@ | |||
<workItem from="1681301257655" duration="429000" /> | |||
<workItem from="1681344786746" duration="5993000" /> | |||
<workItem from="1681363389283" duration="5626000" /> | |||
<workItem from="1681431288218" duration="1010000" /> | |||
<workItem from="1681431288218" duration="25974000" /> | |||
<workItem from="1681690599771" duration="2894000" /> | |||
<workItem from="1681696465772" duration="30396000" /> | |||
<workItem from="1681826261843" duration="1474000" /> | |||
<workItem from="1681863254347" duration="13207000" /> | |||
<workItem from="1681950317514" duration="23460000" /> | |||
<workItem from="1682036333722" duration="651000" /> | |||
<workItem from="1682405963588" duration="37651000" /> | |||
<workItem from="1682554149580" duration="33819000" /> | |||
</task> | |||
<servers /> | |||
</component> | |||
@@ -532,9 +533,14 @@ | |||
<option name="timeStamp" value="2" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py</url> | |||
<line>341</line> | |||
<option name="timeStamp" value="3" /> | |||
<url>file://$PROJECT_DIR$/test/aliyun/ossdemo.py</url> | |||
<line>4</line> | |||
<option name="timeStamp" value="4" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/util/Cv2Utils.py</url> | |||
<line>1</line> | |||
<option name="timeStamp" value="5" /> | |||
</line-breakpoint> | |||
</breakpoints> | |||
</breakpoint-manager> | |||
@@ -544,6 +550,7 @@ | |||
<select /> | |||
</component> | |||
<component name="com.intellij.coverage.CoverageDataManagerImpl"> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$voddemo.coverage" NAME="voddemo 覆盖结果" MODIFIED="1681722102430" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1668522825199" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo1.coverage" NAME="demo1 覆盖结果" MODIFIED="1680162882599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/demo" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg33.coverage" NAME="ffmpeg33 覆盖结果" MODIFIED="1670489109246" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
@@ -554,10 +561,14 @@ | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$ffmpeg11.coverage" NAME="ffmpeg11 覆盖结果" MODIFIED="1668410004435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg12.coverage" NAME="ffmpeg12 覆盖结果" MODIFIED="1675391366890" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1670999187123" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__2_.coverage" NAME="Test (2) 覆盖结果" MODIFIED="1681431899512" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/路径" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__2_.coverage" NAME="Test (2) 覆盖结果" MODIFIED="1681796501563" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/路径" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test1.coverage" NAME="test1 覆盖结果" MODIFIED="1681988279624" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/cuda" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ossdemo.coverage" NAME="ossdemo 覆盖结果" MODIFIED="1681715255761" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test__1_.coverage" NAME="test (1) 覆盖结果" MODIFIED="1681969578447" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/cuda" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="协程笔记 覆盖结果" MODIFIED="1680926972744" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$gputest.coverage" NAME="gputest 覆盖结果" MODIFIED="1681950938970" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/gpu" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="协程1 覆盖结果" MODIFIED="1667866542122" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$3.coverage" NAME="协程3 覆盖结果" MODIFIED="1668147029048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
@@ -574,8 +585,8 @@ | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_stop.coverage" NAME="producer_stop 覆盖结果" MODIFIED="1668522920533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master 覆盖结果" MODIFIED="1680503755624" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__1_.coverage" NAME="Test (1) 覆盖结果" MODIFIED="1681199611277" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1681199625806" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/集合" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test.coverage" NAME="Test 覆盖结果" MODIFIED="1680847539455" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/validate" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1682582986112" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/集合" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test.coverage" NAME="Test 覆盖结果" MODIFIED="1681810213173" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/序列化" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" /> |
@@ -1,5 +1,5 @@ | |||
import asyncio | |||
from concurrent.futures import ThreadPoolExecutor | |||
import copy | |||
from concurrent.futures import ThreadPoolExecutor, as_completed | |||
from threading import Thread | |||
from loguru import logger | |||
import cv2 | |||
@@ -7,6 +7,7 @@ from util.AliyunSdk import AliyunOssSdk | |||
from util import TimeUtils, ImageUtils | |||
from entity import FeedBack | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from util.PlotsUtils import draw_painting_joint | |||
class FileUpload(Thread): | |||
@@ -19,16 +20,19 @@ class FileUpload(Thread): | |||
self.msg = msg | |||
self.similarity = self.content["service"]["filter"]["similarity"] | |||
self.picture_similarity = self.content["service"]["filter"]["picture_similarity"] | |||
self.frame_score = float(self.content["service"]["frame_score"]) | |||
self.frame_step = int(self.content["service"]["filter"]["frame_step"]) | |||
# 推送执行结果 | |||
def sendResult(self, result): | |||
self.fbQueue.put(result) | |||
def build_image_name(self, current_frame, last_frame, mode_type, image_type): | |||
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \ | |||
"-{requestId}_{image_type}.jpg" | |||
''' | |||
比如原图检测目标等信息,target用O表示 | |||
''' | |||
def build_image_name(self, current_frame, last_frame, mode_type, image_type, target): | |||
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}" \ | |||
"-{target}-{requestId}_{image_type}.jpg" | |||
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF) | |||
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S") | |||
image_name = image_format.format( | |||
@@ -38,6 +42,7 @@ class FileUpload(Thread): | |||
last_frame=last_frame, | |||
random_num=random_num, | |||
mode_type=mode_type, | |||
target=target, | |||
requestId=self.msg.get('request_id'), | |||
image_type=image_type) | |||
return image_name | |||
@@ -64,38 +69,43 @@ class ImageFileUpload(FileUpload): | |||
if diff_frame_num < self.frame_step: | |||
flag = False | |||
det_result = frame_all.get("det_xywh") | |||
model_info = [] | |||
if flag and det_result is not None and len(det_result) > 0: | |||
model_info = [] | |||
# 封装当前图片信息 | |||
num = 0 | |||
for c in list(det_result.keys()): | |||
mode_code_info = {"code": c, "target": []} | |||
det_xywh = det_result.get(c) | |||
det_num = 0 | |||
if det_xywh is not None and len(det_xywh) > 0: | |||
# [float(cls_c), xc,yc,w,h, float(conf_c),code] | |||
for d in det_xywh: | |||
score = d[5] # 得分 | |||
target = str(int(d[0])) # 检测目标 | |||
mode_code_info["target"].append(target) | |||
if score < self.frame_score: | |||
det_num += 1 | |||
model_info.append(mode_code_info) | |||
if det_num != len(det_xywh): | |||
num += 1 | |||
if num == 0: | |||
return None | |||
''' | |||
det_xywh:{ | |||
'code':{ | |||
1: [[detect_targets_code, box, score, label_array, color]] | |||
} | |||
} | |||
模型编号:modeCode | |||
检测目标:detectTargetCode | |||
''' | |||
# 根据模型编码解析数据 | |||
for modelCode in list(det_result.keys()): | |||
# 模型编号下面的检测目标对象 | |||
det_info = det_result.get(modelCode) | |||
if det_info is not None and len(det_info) > 0: | |||
for detectTargetCode in list(det_info.keys()): | |||
target_list = det_info.get(detectTargetCode) | |||
if target_list is not None and len(target_list) > 0: | |||
orFrame = copy.deepcopy(frame_all.get("frame")) | |||
for target in target_list: | |||
draw_painting_joint(target[1], orFrame, target[3], target[2], target[4], "leftTop") | |||
model_info.append({ | |||
"modelCode": str(modelCode), | |||
"detectTargetCode": str(detectTargetCode), | |||
"frame": orFrame | |||
}) | |||
if len(model_info) > 0: | |||
high_score_image["or_frame"] = frame_all.get("frame") | |||
high_score_image["current_frame"] = frame_all.get("cct_frame") | |||
image_result = { | |||
"or_frame": frame_all.get("frame"), | |||
"ai_frame": frame_all.get("ai_frame"), | |||
"model_info": model_info, | |||
"current_frame": frame_all.get("cct_frame"), | |||
"last_frame": frame_all.get("cct_frame") + self.frame_step, | |||
"progress": "", | |||
"mode_service": self.analyse_type, | |||
"model_info": model_info | |||
} | |||
return image_result | |||
return None | |||
@@ -106,7 +116,7 @@ class ImageFileUpload(FileUpload): | |||
aliyunOssSdk = AliyunOssSdk(self.content, logger, self.msg.get("request_id")) | |||
aliyunOssSdk.get_oss_bucket() | |||
high_score_image = {} | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
with ThreadPoolExecutor(max_workers=5) as t: | |||
try: | |||
while True: | |||
try: | |||
@@ -118,40 +128,46 @@ class ImageFileUpload(FileUpload): | |||
if command == 'stop': | |||
break | |||
if image_dict is not None and len(image_dict) > 0: | |||
image_resut = self.handle_image(high_score_image, image_dict) | |||
if image_resut is not None: | |||
image_result = self.handle_image(high_score_image, image_dict) | |||
if image_result is not None: | |||
# 图片帧数编码 | |||
or_result, or_image = cv2.imencode(".jpg", image_resut.get("or_frame")) | |||
ai_result, ai_image = cv2.imencode(".jpg", image_resut.get("ai_frame")) | |||
# 图片名称待后期修改 | |||
or_image_name = self.build_image_name(str(image_resut.get("current_frame")), | |||
str(image_resut.get("last_frame")), | |||
image_resut.get("mode_service"), | |||
"OR") | |||
ai_image_name = self.build_image_name(str(image_resut.get("current_frame")), | |||
str(image_resut.get("last_frame")), | |||
image_resut.get("mode_service"), | |||
"AI") | |||
task = [] | |||
or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame")) | |||
or_image_name = self.build_image_name(str(image_result.get("current_frame")), | |||
str(image_result.get("last_frame")), | |||
image_result.get("mode_service"), | |||
"OR", "O") | |||
or_future = t.submit(aliyunOssSdk.sync_upload_file, or_image_name, | |||
or_image.tobytes()) | |||
ai_future = t.submit(aliyunOssSdk.sync_upload_file, ai_image_name, | |||
ai_image.tobytes()) | |||
or_future.result() | |||
ai_future.result() | |||
# 发送kafka消息 | |||
self.sendResult({"feedback": FeedBack.message_feedback(self.msg.get('request_id'), | |||
AnalysisStatus.RUNNING.value, | |||
self.analyse_type, "", "", | |||
image_resut.get("progress"), | |||
or_image_name, | |||
ai_image_name, | |||
image_resut.get( | |||
"model_info"), | |||
TimeUtils.now_date_to_str())}) | |||
task.append(or_future) | |||
model_info_list = image_result.get("model_info") | |||
msg_list = [] | |||
for model_info in model_info_list: | |||
ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame")) | |||
ai_image_name = self.build_image_name(str(image_result.get("current_frame")), | |||
str(image_result.get("last_frame")), | |||
image_result.get("mode_service"), | |||
"AI", model_info.get("detectTargetCode")) | |||
ai_future = t.submit(aliyunOssSdk.sync_upload_file, ai_image_name, | |||
ai_image.tobytes()) | |||
task.append(ai_future) | |||
msg_list.append( | |||
{"feedback": FeedBack.message_feedback(self.msg.get('request_id'), | |||
AnalysisStatus.RUNNING.value, | |||
self.analyse_type, "", "", | |||
image_result.get("progress"), | |||
or_image_name, | |||
ai_image_name, | |||
model_info.get('modelCode'), | |||
model_info.get('detectTargetCode'), | |||
TimeUtils.now_date_to_str())}) | |||
for thread_result in as_completed(task): | |||
thread_result.result() | |||
for msg in msg_list: | |||
self.sendResult(msg) | |||
except Exception as e: | |||
logger.exception("图片上传异常:{}, requestId:{}", e, self.msg.get("request_id")) | |||
finally: | |||
high_score_image.clear() | |||
t.shutdown(wait=True) # wait=True等待所有线程完成 | |||
logger.info("停止图片上传线程, requestId:{}", self.msg.get("request_id")) |
@@ -118,9 +118,6 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
if stop_pull_stream_step: | |||
time.sleep(1) | |||
continue | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
# 检测视频信息是否存在或拉流对象是否存在 | |||
if cv2tool.checkconfig(): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
@@ -152,6 +149,9 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
continue | |||
init_pull_num = 1 | |||
pull_stream_read_start_time = time.time() | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
self.sendPullQueue({"status": "4", | |||
"frame": frame, | |||
"cct_frame": concurrent_frame, |
@@ -132,7 +132,7 @@ service: | |||
picture_similarity: True | |||
# 相似度阈值 | |||
similarity: 0.65 | |||
frame_step: 80 | |||
frame_step: 160 | |||
timeout: 21600 # 一次识别任务超时时间,单位秒,默认6个小时 | |||
cv2_pull_stream_timeout: 3600 # 直播开始视频未推流超时时间 | |||
cv2_read_stream_timeout: 1800 # 直播读流中超时时间 |
@@ -3,7 +3,7 @@ import os | |||
import sys | |||
from service import Dispatcher | |||
# import torch | |||
import torch | |||
''' | |||
dsp主程序入口 | |||
@@ -12,5 +12,5 @@ if __name__ == '__main__': | |||
print("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙") | |||
# 获取主程序执行根路径 | |||
base_dir = os.path.dirname(os.path.realpath(sys.argv[0])) | |||
# torch.multiprocessing.set_start_method('spawn') | |||
torch.multiprocessing.set_start_method('spawn') | |||
Dispatcher.DispatcherService(base_dir).start_service() |
@@ -1,5 +1,5 @@ | |||
def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="", | |||
model_info=[], analyse_time="", analyse_results=""): | |||
modelCode="", detectTargetCode="", analyse_time="", analyse_results=""): | |||
taskfb = {} | |||
results = [] | |||
result_msg = {} | |||
@@ -12,7 +12,8 @@ def message_feedback(requestId, status, type, error_code="", error_msg="", progr | |||
result_msg["original_url"] = original_url | |||
result_msg["sign_url"] = sign_url | |||
result_msg["analyse_results"] = analyse_results | |||
result_msg["model_info"] = model_info | |||
result_msg["model_code"] = modelCode | |||
result_msg["detect_targets_code"] = detectTargetCode | |||
result_msg["analyse_time"] = analyse_time | |||
results.append(result_msg) | |||
taskfb["results"] = results |
@@ -170,12 +170,12 @@ BAIDUERRORDATA = { | |||
@unique | |||
class VehicleEnum(Enum): | |||
CAR = ("car", "car") | |||
TRICYCLE = ("tricycle", "tricycle") | |||
MOTORBIKE = ("motorbike", "motorbike") | |||
CARPLATE = ("carplate", "carplate") | |||
TRUCK = ("truck", "truck") | |||
BUS = ("bus", "bus") | |||
CAR = ("car", "小汽车", 0) | |||
TRICYCLE = ("tricycle", "三轮车", 1) | |||
MOTORBIKE = ("motorbike", "摩托车", 2) | |||
CARPLATE = ("carplate", "车牌", 3) | |||
TRUCK = ("truck", "卡车", 4) | |||
BUS = ("bus", "巴士", 5) | |||
VehicleEnumVALUE={ |
@@ -13,7 +13,7 @@ class ExceptionType(Enum): | |||
READSTREAM_TIMEOUT_EXCEPTION = ("SP003", "原视频读取视频流超时!") | |||
GET_VIDEO_URL_EXCEPTION = ("SP004", "获取原视频播放地址失败!") | |||
GET_VIDEO_URL_EXCEPTION = ("SP004", "获取视频播放地址失败!") | |||
GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("SP005", "获取原视频播放地址超时!") | |||
@@ -12,14 +12,13 @@ from enum import Enum, unique | |||
2: 只支持图片模型之间的组合 | |||
''' | |||
@unique | |||
class ModelType(Enum): | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', 0) | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', 0) | |||
# TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'road') | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', 0) | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, 2) | |||
@@ -36,7 +35,7 @@ class ModelType(Enum): | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', 0) | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship', 0) | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship2', 0) | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, 2) | |||
@@ -58,16 +57,17 @@ class ModelType(Enum): | |||
@unique | |||
class BaiduModelTarget(Enum): | |||
VEHICLE_DETECTION = ("车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id)) | |||
VEHICLE_DETECTION = ( | |||
"车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id)) | |||
HUMAN_DETECTION = ("人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id)) | |||
HUMAN_DETECTION = ( | |||
"人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id)) | |||
PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id)) | |||
BAIDU_MODEL_TARGET_CONFIG = { | |||
BaiduModelTarget.VEHICLE_DETECTION.value[1]: BaiduModelTarget.VEHICLE_DETECTION, | |||
BaiduModelTarget.HUMAN_DETECTION.value[1]: BaiduModelTarget.HUMAN_DETECTION, | |||
BaiduModelTarget.PEOPLE_COUNTING.value[1]: BaiduModelTarget.PEOPLE_COUNTING | |||
} | |||
@@ -1,5 +1,7 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
import torch | |||
from cerberus import Validator | |||
from concurrency.FeedbackThread import FeedbackThread | |||
from entity.FeedBack import message_feedback, recording_feedback | |||
@@ -14,6 +16,7 @@ from multiprocessing import Queue | |||
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, \ | |||
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess, ScreenRecordingProcess | |||
from util import GPUtils | |||
from util.GPUtils import get_all_gpu_ids, get_first_gpu_name | |||
''' | |||
分发服务 | |||
@@ -21,20 +24,22 @@ from util import GPUtils | |||
class DispatcherService: | |||
''' | |||
初始化 | |||
''' | |||
# 初始化 | |||
def __init__(self, base_dir): | |||
##################################### 初始化alg相关配置 ##################################### | |||
self.base_dir = base_dir | |||
self.content = YmlUtils.getConfigs(base_dir) | |||
self.content['base_dir'] = base_dir | |||
self.feedbackThread = None | |||
self.base_dir = base_dir # 根路径 | |||
self.content = YmlUtils.getConfigs(base_dir) # 获取alg需要使用的配置 | |||
self.content['base_dir'] = base_dir # 将根路径设置到上下文中 | |||
self.feedbackThread = None # 初始化反馈线程对象 | |||
###################################### 初始化日志框架 ###################################### | |||
LogUtils.init_log(self.content) | |||
LogUtils.init_log(self.content) # 初始化日志框架 | |||
#################################### 初始化视频保存文件夹 #################################### | |||
FileUtils.create_dir_not_exist(self.content["video"]["file_path"]) | |||
FileUtils.create_dir_not_exist(self.content["video"]["file_path"]) # 创建文件夹 | |||
###################################### 创建任务记录字典 ###################################### | |||
self.onlineProcesses = {} # 记录当前正在执行的实时流分析任务 | |||
@@ -55,21 +60,36 @@ class DispatcherService: | |||
self.topics = [self.online_topic, self.offline_topic, self.image_topic, self.recording_task_topic] | |||
self.analysisType = { | |||
self.online_topic: (AnalysisType.ONLINE.value, | |||
lambda x: self.online(x), | |||
lambda x, y, z, t: self.identify_method(x, y, z, t)), | |||
lambda x, y: self.online(x, y), | |||
lambda x, y, z: self.identify_method(x, y, z)), | |||
self.offline_topic: (AnalysisType.OFFLINE.value, | |||
lambda x: self.offline(x), | |||
lambda x, y, z, t: self.identify_method(x, y, z, t)), | |||
lambda x, y: self.offline(x, y), | |||
lambda x, y, z: self.identify_method(x, y, z)), | |||
self.image_topic: (AnalysisType.IMAGE.value, | |||
lambda x: self.image(x), | |||
lambda x, y, z, t: self.identify_method(x, y, z, t)), | |||
lambda x, y: self.image(x, y), | |||
lambda x, y, z: self.identify_method(x, y, z)), | |||
self.recording_task_topic: (AnalysisType.RECORDING.value, | |||
lambda x: self.recording(x), | |||
lambda x, y, z, t: self.recording_method(x, y, z, t)) | |||
lambda x, y: self.recording(x, y), | |||
lambda x, y, z: self.recording_method(x, y, z)) | |||
} | |||
####################################### 获取当前显卡设备名称 ###################################### | |||
gpu_name = get_first_gpu_name() | |||
gpu_codes = ['3090', '2080', '4090', 'A10'] | |||
gpu_array = [g for g in gpu_codes if g in gpu_name] | |||
if len(gpu_array) > 0: | |||
self.content['gpu_name'] = gpu_array[0] | |||
if gpu_array[0] == '2080': | |||
self.content['gpu_name'] = '2080Ti' | |||
else: | |||
raise Exception("GPU资源不在提供的模型所支持的范围内!请先提供对应的GPU模型!") | |||
# 服务调用启动方法 | |||
def start_service(self): | |||
if not torch.cuda.is_available(): | |||
raise Exception("cuda不在活动状态, 请检测显卡驱动是否正常!!!!") | |||
# 初始化kafka监听者 | |||
customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.content, topics=self.topics) | |||
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙") | |||
@@ -84,69 +104,94 @@ class DispatcherService: | |||
for k, v in msg.items(): | |||
for m in v: | |||
message = m.value | |||
analysisType = self.analysisType.get(m.topic)[0] | |||
self.analysisType.get(m.topic)[2](customerKafkaConsumer, m, message, analysisType) | |||
# 提交offset | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, | |||
self.getRequestId(message.get("request_id"))) | |||
topic_method = self.analysisType.get(m.topic) | |||
topic_method[2](m, message, topic_method[0]) | |||
else: | |||
time.sleep(1) | |||
except Exception as e: | |||
logger.exception("主线程异常:", e) | |||
def identify_method(self, customerKafkaConsumer, m, message, analysisType): | |||
''' | |||
考虑到requestId为空的场景 | |||
''' | |||
def getRequestId(self, request_id): | |||
if request_id is None or len(request_id) == 0: | |||
return '' | |||
return request_id | |||
''' | |||
实时、离线、图片识别逻辑 | |||
1. m kafka消息体 | |||
2. 请求消息体 | |||
3. 分析类型:实时、离线、图片 | |||
''' | |||
def identify_method(self, m, message, analysisType): | |||
try: | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info( | |||
"当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, message.get("request_id")) | |||
self.analysisType.get(m.topic)[1](message) | |||
# 校验参数 | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
self.analysisType.get(m.topic)[1](message, analysisType) | |||
except ServiceException as s: | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
s.code, | |||
s.msg, | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
s.code, | |||
s.msg, | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
except Exception as e: | |||
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
def recording_method(self, customerKafkaConsumer, m, message, analysisType): | |||
logger.exception("消息监听异常:{}, requestId: {}", e, self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
def recording_method(self, m, message, analysisType): | |||
try: | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info( | |||
"当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, message.get("request_id")) | |||
self.analysisType.get(m.topic)[1](message) | |||
# 校验参数 | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
self.analysisType.get(m.topic)[1](message, analysisType) | |||
except ServiceException as s: | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
error_code=s.code, | |||
error_msg=s.msg)}) | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
error_code=s.code, | |||
error_msg=s.msg)}) | |||
except Exception as e: | |||
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])}) | |||
logger.exception("消息监听异常:{}, requestId: {}", e, self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])}) | |||
# 开启实时进程 | |||
def startOnlineProcess(self, msg, gpu_ids): | |||
def startOnlineProcess(self, msg, gpu_ids, analysisType): | |||
# 相同的requestId不再执行 | |||
if self.onlineProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids, | |||
"analyse_type": AnalysisType.ONLINE.value} | |||
"analyse_type": analysisType} | |||
# 创建在线识别进程并启动 | |||
oirp = OnlineIntelligentRecognitionProcess(cfg) | |||
oirp.start() | |||
@@ -161,7 +206,9 @@ class DispatcherService: | |||
return | |||
ps.sendEvent({'command': 'stop'}) | |||
# 检查实时、离线进程任务运行情况,去除不活动的任务 | |||
''' | |||
检查实时、离线进程任务运行情况,去除不活动的任务 | |||
''' | |||
def check_process_task(self): | |||
for process in self.listeningProcesses: | |||
for requestId in list(process.keys()): | |||
@@ -169,13 +216,13 @@ class DispatcherService: | |||
del process[requestId] | |||
# 开启离线进程 | |||
def startOfflineProcess(self, msg, gpu_ids): | |||
def startOfflineProcess(self, msg, gpu_ids, analysisType): | |||
# 相同的requestId不再执行 | |||
if self.offlineProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids, | |||
"analyse_type": AnalysisType.OFFLINE.value} | |||
"analyse_type": analysisType} | |||
# 创建在线识别进程并启动 | |||
ofirp = OfflineIntelligentRecognitionProcess(cfg) | |||
ofirp.start() | |||
@@ -190,21 +237,21 @@ class DispatcherService: | |||
ps.sendEvent({'command': 'stop'}) | |||
# 开启图片分析进程 | |||
def startImageProcess(self, msg, gpu_ids): | |||
def startImageProcess(self, msg, gpu_ids, analysisType): | |||
# 相同的requestId不再执行 | |||
pp = self.photoProcesses.get(msg.get("request_id")) | |||
if pp is not None: | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids, | |||
"analyse_type": AnalysisType.IMAGE.value} | |||
"analyse_type": analysisType} | |||
# 创建在线识别进程并启动 | |||
imagep = PhotosIntelligentRecognitionProcess(cfg) | |||
imagep.start() | |||
self.photoProcesses[msg.get("request_id")] = imagep | |||
''' | |||
校验kafka消息 | |||
校验kafka消息 | |||
''' | |||
def check_msg(self, msg): | |||
@@ -223,72 +270,62 @@ class DispatcherService: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
''' | |||
开启反馈线程,用于发送消息 | |||
''' | |||
def start_feedback_thread(self): | |||
# 如果反馈线程为空,启动反馈线程,如果反馈线程意外停止,再次启动反馈线程 | |||
if self.feedbackThread is None or not self.feedbackThread.is_alive(): | |||
self.feedbackThread = FeedbackThread(self.fbQueue, self.content) | |||
self.feedbackThread.setDaemon(True) | |||
self.feedbackThread.start() | |||
''' | |||
在线分析逻辑 | |||
''' | |||
def online(self, message): | |||
# 参数校验 | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
def online(self, message, analysisType): | |||
if 'start' == message.get("command"): | |||
gpu_ids = GPUtils.check_gpu_resource(self.content) | |||
self.startOnlineProcess(message, gpu_ids) | |||
self.startOnlineProcess(message, gpu_ids, analysisType) | |||
elif 'stop' == message.get("command"): | |||
self.stopOnlineProcess(message) | |||
else: | |||
pass | |||
def offline(self, message): | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
def offline(self, message, analysisType): | |||
if 'start' == message.get("command"): | |||
gpu_ids = GPUtils.check_gpu_resource(self.content) | |||
self.startOfflineProcess(message, gpu_ids) | |||
self.startOfflineProcess(message, gpu_ids, analysisType) | |||
elif 'stop' == message.get("command"): | |||
self.stopOfflineProcess(message) | |||
else: | |||
pass | |||
def image(self, message): | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
def image(self, message, analysisType): | |||
if 'start' == message.get("command"): | |||
gpu_ids = GPUtils.check_gpu_resource(self.content) | |||
self.startImageProcess(message, gpu_ids) | |||
self.startImageProcess(message, gpu_ids, analysisType) | |||
else: | |||
pass | |||
def recording(self, message): | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
def recording(self, message, analysisType): | |||
if 'start' == message.get("command"): | |||
logger.info("开始录屏") | |||
self.startRecordingProcess(message) | |||
self.startRecordingProcess(message, analysisType) | |||
elif 'stop' == message.get("command"): | |||
self.stopRecordingProcess(message) | |||
else: | |||
pass | |||
# 开启录屏进程 | |||
def startRecordingProcess(self, msg): | |||
def startRecordingProcess(self, msg, analysisType): | |||
if self.recordingProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, | |||
"analyse_type": AnalysisType.RECORDING.value} | |||
"analyse_type": analysisType} | |||
srp = ScreenRecordingProcess(cfg) | |||
srp.start() | |||
self.recordingProcesses[msg.get("request_id")] = srp |
@@ -0,0 +1,119 @@ | |||
# -*- coding: utf-8 -*- | |||
import datetime | |||
import cv2 | |||
import oss2 | |||
import time | |||
from loguru import logger | |||
''' | |||
图片上传使用OSS | |||
1. 阿里云对象存储OSS官网地址:https://help.aliyun.com/product/31815.html?spm=a2c4g.32006.0.0.8c546cf0BpkAQ2 | |||
2. 阿里云对象存储OSS SDK示例地址:https://help.aliyun.com/document_detail/32006.html?spm=a2c4g.32006.0.0.66874b78q1pwLa | |||
3. python安装SDK地址: https://help.aliyun.com/document_detail/85288.html?spm=a2c4g.32026.0.0.3f24417coCphWj | |||
4. 安装SDK: pip install oss2 | |||
5. 安装python-devel | |||
安装python-devel | |||
由于SDK需要crcmod库计算CRC校验码,而crcmod依赖Python.h文件,如果系统缺少这个头文件,安装SDK不会失败,但crcmod的C扩展模式安装会失败,因此导致上传、下载等操作效率非常低下。 | |||
如果python-devel包不存在,则首先要安装这个包。 | |||
对于Windows系统和Mac OS X系统,由于安装Python的时候会将Python依赖的头文件一并安装,因此您无需安装python-devel。 | |||
对于CentOS、RHEL、Fedora系统,请执行以下命令安装python-devel。 | |||
sudo yum install python-devel | |||
对于Debian,Ubuntu系统,请执行以下命令安装python-devel。 | |||
sudo apt-get install python-dev | |||
6、图片域名地址:https://image.t-aaron.com/ | |||
''' | |||
class AliyunOssSdk: | |||
def __init__(self): | |||
self.__client = None | |||
self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9' | |||
self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk' | |||
self.__endpoint = 'http://oss-cn-shanghai.aliyuncs.com' | |||
self.__bucket = 'ta-tech-image' | |||
def get_oss_bucket(self): | |||
if not self.__client: | |||
auth = oss2.Auth(self.__access_key, self.__access_secret) | |||
self.__client = oss2.Bucket(auth, self.__endpoint, self.__bucket, connect_timeout=30) | |||
def upload_file(self, updatePath, fileByte): | |||
logger.info("开始上传文件到oss!") | |||
MAX_RETRIES = 3 | |||
retry_count = 0 | |||
while True: | |||
try: | |||
self.get_oss_bucket() | |||
result = self.__client.put_object(updatePath, fileByte) | |||
return result | |||
logger.info("上传文件到oss成功!") | |||
break | |||
except Exception as e: | |||
self.__client = None | |||
retry_count += 1 | |||
time.sleep(1) | |||
logger.info("上传文件到oss失败, 重试次数:{}", retry_count) | |||
if retry_count > MAX_RETRIES: | |||
logger.exception("上传文件到oss重试失败:{}", e) | |||
raise e | |||
YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S" | |||
YMDHMSF = "%Y%m%d%H%M%S%f" | |||
def generate_timestamp(): | |||
"""根据当前时间获取时间戳,返回整数""" | |||
return int(time.time()) | |||
def now_date_to_str(fmt=None): | |||
if fmt is None: | |||
fmt = YY_MM_DD_HH_MM_SS | |||
return datetime.datetime.now().strftime(fmt) | |||
if __name__ == "__main__": | |||
# 初始化oss对象 | |||
ossClient = AliyunOssSdk() | |||
# 读取本地图片 | |||
image_frame = cv2.imread('aaa.jpeg') | |||
or_result, or_image = cv2.imencode(".jpg", image_frame) | |||
# 图片名称命名规则 | |||
# 1、base_dir 基本文件夹名称,由拓恒公司传参 | |||
# 2、time_now 现在的时间 | |||
# 3、current_frame 当前视频的帧数 | |||
# 4、last_frame 如果有跳帧操作, 填写跳帧的步长,如果没有,和current_frame参数保持一致 | |||
# 5、random_num 随机时间字符串 | |||
# 6、mode_type 类型:实时视频直播的方式用(online) 离线视频直播(填写视频地址识别)用(offline) | |||
# 7、requestId 请求id, 拓恒公司传参 | |||
# 8、image_type 原图用(OR) AI识别后的图片用(AI) | |||
random_num = now_date_to_str(YMDHMSF) | |||
time_now = now_date_to_str("%Y-%m-%d-%H-%M-%S") | |||
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \ | |||
"-{requestId}_{image_type}.jpg" | |||
image_name = image_format.format( | |||
base_dir='PWL202304141639429276', | |||
time_now=time_now, | |||
current_frame='0', | |||
last_frame='0', | |||
random_num=random_num, | |||
mode_type='offline', | |||
requestId='111111111111111111', | |||
image_type='OR') | |||
result = ossClient.upload_file(image_name, or_image.tobytes()) | |||
# print('http status: {0}'.format(result.status)) | |||
# # 请求ID。请求ID是本次请求的唯一标识,强烈建议在程序日志中添加此参数。 | |||
# print('request_id: {0}'.format(result.request_id)) | |||
# # ETag是put_object方法返回值特有的属性,用于标识一个Object的内容。 | |||
# print('ETag: {0}'.format(result.etag)) | |||
# # HTTP响应头部。 | |||
# print('date: {0}'.format(result.headers['date'])) | |||
# print(result.__reduce__()) | |||
# 对于图片上传, 上传成功后,直接将image_name给拓恒公司就可以了 | |||
# 如果测试查看图片是否上传成功 | |||
# 可以使用域名拼接 | |||
image_url = 'https://image.t-aaron.com/' + image_name | |||
print(image_url) | |||
# 拓恒公司只需要image_name | |||
@@ -0,0 +1,130 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
import json | |||
from aliyunsdkcore.client import AcsClient | |||
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest | |||
from voduploadsdk.AliyunVodUtils import * | |||
from voduploadsdk.AliyunVodUploader import AliyunVodUploader | |||
from voduploadsdk.UploadVideoRequest import UploadVideoRequest | |||
''' | |||
视频上传使用vod | |||
1. 阿里云VOD文档地址:https://help.aliyun.com/product/29932.html?spm=5176.8413026.J_3895079540.5.1b4a1029mXvncc | |||
2. 阿里云对象存储OSS SDK示例地址:https://help.aliyun.com/document_detail/64148.html?spm=a2c4g.64148.0.0.5ae54150jUecEU | |||
4. 安装SDK: | |||
python -m pip install aliyun-python-sdk-core -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install aliyun-python-sdk-live -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install aliyun-python-sdk-core-v3 -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install aliyun-python-sdk-vod -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install alibabacloud_vod20170321 -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install oss2 -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install voduploadsdk -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
5. 视频域名地址:https://vod.play.t-aaron.com/ | |||
''' | |||
class AliyunVodSdk:
    """Thin wrapper around Aliyun VOD: upload a local video, then resolve its play URL."""

    def __init__(self):
        self.__client = None
        # SECURITY(review): hard-coded AccessKey credentials — move to config/env vars.
        self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9'
        self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk'
        self.__regionId = "cn-shanghai"
        self.__cateId = '1000468340'

    def init_vod_client(self):
        """Build a fresh AcsClient with built-in retry (3 tries, 5s timeout)."""
        return AcsClient(self.__access_key, self.__access_secret, self.__regionId, auto_retry=True, max_retry_time=3,
                         timeout=5)

    def get_play_info(self, videoId):
        """Poll VOD for the playable URL of videoId.

        Retries every 5s; re-raises on non-403 errors, on upload/transcode
        failures, or after 2 hours of polling.
        """
        logger.info("开始获取视频地址,videoId:{}", videoId)
        start = time.time()
        while True:
            try:
                clt = self.init_vod_client()
                request = GetPlayInfoRequest.GetPlayInfoRequest()
                request.set_accept_format('JSON')
                request.set_VideoId(videoId)
                request.set_AuthTimeout(3600 * 5)
                response = json.loads(clt.do_action_with_exception(request))
                play_url = response["PlayInfoList"]["PlayInfo"][0]["PlayURL"]
                logger.info("获取视频地址成功,视频地址: {}", play_url)
                return play_url
            except Exception as e:
                # BUG FIX: the message carried an unused "{}" placeholder; log the videoId.
                logger.error("获取视频地址失败,5秒后重试, videoId: {}", videoId)
                time.sleep(5)
                current_time = time.time()
                if "HTTP Status: 403" not in str(e):
                    logger.exception("获取视频地址失败: {}", e)
                    raise e
                if "HTTP Status: 403" in str(e) and ("UploadFail" in str(e) or "TranscodeFail" in str(e)):
                    # BUG FIX: was `self.logger` — this class never defines a logger attribute,
                    # so that path raised AttributeError instead of logging.
                    logger.exception("获取视频地址失败: {}", e)
                    raise e
                diff_time = current_time - start
                if diff_time > 60 * 60 * 2:
                    logger.exception("获取视频地址失败超时异常: {},超时时间:{}", e, diff_time)
                    raise e

    def upload_local_video(self, filePath, file_title, storageLocation=None):
        """Upload a local video file to VOD and return its VideoId.

        Retries up to 3 times (3s apart) on AliyunVodException before re-raising.
        """
        logger.info("开始执行vod视频上传, filePath: {}", filePath)
        uploader = AliyunVodUploader(self.__access_key, self.__access_secret)
        uploadVideoRequest = UploadVideoRequest(filePath, file_title)
        uploadVideoRequest.setCateId(self.__cateId)
        if storageLocation:
            uploadVideoRequest.setStorageLocation(storageLocation)
        MAX_RETRIES = 3
        retry_count = 0
        while True:
            try:
                result = uploader.uploadLocalVideo(uploadVideoRequest)
                logger.info("vod视频上传成功, videoId:{}", result.get("VideoId"))
                return result.get("VideoId")
            except AliyunVodException as e:
                retry_count += 1
                time.sleep(3)
                logger.error("vod视频上传失败,重试次数:{}", retry_count)
                if retry_count >= MAX_RETRIES:
                    # BUG FIX: was `self.logger` — no such attribute; see get_play_info.
                    logger.exception("vod视频上传重试失败: {}", e)
                    raise e
YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
YMDHMSF = "%Y%m%d%H%M%S%f"


def generate_timestamp():
    """Current Unix time, truncated to whole seconds, as an int."""
    now = time.time()
    return int(now)
def now_date_to_str(fmt=None):
    """Format the current local time; fmt defaults to "%Y-%m-%d %H:%M:%S"."""
    # BUG FIX: this module never imported datetime, so the call below raised
    # NameError at runtime; import locally to keep the function self-contained.
    import datetime
    if fmt is None:
        fmt = YY_MM_DD_HH_MM_SS
    return datetime.datetime.now().strftime(fmt)
if __name__ == "__main__":
    # Manual smoke test: upload a local file to VOD and print its play URL.
    # Naming for the local original video.
    random_time = now_date_to_str(YMDHMSF)
    # # For offline video, replace _on_or_ with _off_or_
    # orFilePath = "%s%s%s%s%s" % ('本地路径', random_time, "_on_or_", 'requestId', ".mp4")
    # # Naming for the local AI-annotated video.
    # # For offline video, replace _on_ai_ with _off_ai_
    # aiFilePath = "%s%s%s%s%s" % ('本地路径', random_time, "_on_ai_", 'requestId', ".mp4")
    # filePath = "%s%s%s%s%s" % ('D:\\shipin\\', random_time, "_on_ai_", '11111111', ".mp4")
    filePath = 'D:\\shipin\\777.mp4'
    codClinet = AliyunVodSdk()  # NOTE(review): name looks like a typo for "vodClient"
    result = codClinet.upload_local_video(filePath, 'aiOnLineVideo1')
    print(result)
    url = codClinet.get_play_info(result)
    print(url)
@@ -0,0 +1,88 @@ | |||
import time | |||
from pathlib import Path | |||
import GPUtil | |||
import cv2 | |||
import numpy as np | |||
import torch | |||
from PIL import ImageFont, Image, ImageDraw | |||
# print(Path(__file__)) # 表示当前脚本文件的路径 | |||
# print(Path(__file__).parent) # 表示当前路径的父级目录 | |||
# import time | |||
# from contextlib import contextmanager | |||
# | |||
# @contextmanager | |||
# def timer(): | |||
# start_time = time.time() | |||
# yield | |||
# end_time = time.time() | |||
# print('Time elapsed:', end_time - start_time) | |||
# # 使用上下文管理器 | |||
# with timer(): | |||
# time.sleep(1) | |||
# print(torch.cuda.is_available()) | |||
# print(GPUtil.getGPUs()[0].name) | |||
# def get_first_gpu_name(): | |||
# gps = GPUtil.getGPUs() | |||
# if gps is None or len(gps) == 0: | |||
# raise Exception("未获取到gpu资源, 先检测服务器是否已经配置GPU资源!") | |||
# return gps[0].name | |||
# gpu_name = get_first_gpu_name() | |||
# aa = [g for g in ['3090', '2080', '4090', 'A10'] if g in gpu_name] | |||
# print(aa) | |||
# | |||
# import tensorrt as trt | |||
# # 定义反序列化引擎文件的函数 | |||
# def deserialize_engine_from_file(engine_file_path): | |||
# runtime = trt.Runtime(trt.Logger()) | |||
# engine = None | |||
# with open(engine_file_path, "rb") as f: | |||
# while True: | |||
# data = f.read(1024 * 1024) | |||
# if not data: | |||
# break | |||
# tmp_engine = runtime.deserialize_cuda_engine(data) | |||
# if engine is None: | |||
# engine = tmp_engine | |||
# else: | |||
# for i in range(tmp_engine.num_bindings): | |||
# engine.set_binding_shape(i, tmp_engine.get_binding_shape(i)) | |||
# return engine | |||
# engine_file_path = "/path/to/engine_file.engine" | |||
# s = time.time() | |||
# engine = deserialize_engine_from_file(engine_file_path) | |||
# print("1 加载trt文件时间", time.time() - s) | |||
# s1 = time.time() | |||
# with open(engine_file_path, "rb") as f1, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
# model = runtime.deserialize_cuda_engine(f1.read()) | |||
# print("2 加载trt文件时间", time.time() - s1) | |||
def get_label_array(color=None, label=None, outfontsize=None, fontpath="conf/platech.ttf"):
    """Render `label` text on a solid `color` background and return it as an ndarray.

    NOTE(review): font.getbbox() returns (left, top, right, bottom); the code
    uses right/bottom directly as width/height, which is only correct when
    left/top are ~0 — confirm for this font.
    """
    # Plots one bounding box on image 'im' using PIL
    fontsize = outfontsize
    font = ImageFont.truetype(fontpath, fontsize, encoding='utf-8')
    x,y,txt_width, txt_height = font.getbbox(label)
    print(x,y,txt_width, txt_height)
    im = np.zeros((txt_height, txt_width, 3), dtype=np.uint8)
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    # Fill the whole patch with the class colour, then draw white text on top.
    draw.rectangle([0, 0, txt_width, txt_height], fill=tuple(color))
    draw.text((0, -3), label, fill=(255, 255, 255), font=font)
    im_array = np.asarray(im)
    # if outfontsize:
    #     scaley = outfontsize / txt_height
    #     im_array = cv2.resize(im_array, (0, 0), fx=scaley, fy=scaley)
    return im_array
# Manual check: render a label patch, time the call, and display it in a window.
aaa = time.time()
im_array = get_label_array(color=(0, 255, 0), label="排口", outfontsize=40, fontpath="platech.ttf")
print(time.time() - aaa)
cv2.imshow("frame", im_array)
cv2.waitKey(0)
@@ -0,0 +1,27 @@ | |||
from sklearn import linear_model
# Scratch script: the logistic-regression example below is kept commented out;
# the active part only verifies that the TensorFlow installation works.
# x = [[20, 3],
#      [23, 7],
#      [31, 10],
#      [42, 13],
#      [50, 7],
#      [60, 5]]
# y = [0, 1, 1, 1, 0, 0]
# lr = linear_model.LogisticRegression()
# lr.fit(x, y)
# testX = [[28, 8]]
# label = lr.predict(testX)
# print("predicted label = ", label)
#
# prob = lr.predict_proba(testX)
# print("probability = ", prob)
import tensorflow as tf
# TF2: fall back to graph mode so tf.compat.v1.Session is usable.
tf.compat.v1.disable_eager_execution()
hello = tf.constant("hello, world!")
sess = tf.compat.v1.Session()
result = sess.run(hello)
sess.close()
print(result)
@@ -0,0 +1,7 @@ | |||
2023-04-18 13:41:42.066 [ERROR][MainProcess-25160-MainThread-30168][16] Test-<module> - 异常信息:division by zero | |||
Traceback (most recent call last): | |||
> File "D:\tuoheng\codenew\tuoheng_alg\test\路径\Test.py", line 14, in <module> | |||
2/0 | |||
ZeroDivisionError: division by zero |
@@ -1,34 +1,7 @@ | |||
import os | |||
import pynvml | |||
pynvml.nvmlInit() | |||
# Requires: pip install nvidia-ml-py3
def usegpu(need_gpu_count=1):
    """Select GPUs whose memory usage is below 80% and expose them via CUDA_VISIBLE_DEVICES.

    NOTE(review): the return type is inconsistent — a list of indices when enough
    GPUs are free, an int count when only some are, and 0 when none are. The
    caller below only truth-tests the result; confirm before relying on the value.
    """
    nouse=[]
    for index in range(pynvml.nvmlDeviceGetCount()):
        # index is the GPU id
        handle = pynvml.nvmlDeviceGetHandleByIndex(index)
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        used= meminfo.used/meminfo.total
        print(meminfo.used)
        print(meminfo.total)
        if used < 0.8:
            nouse.append(index)
    if len(nouse) >= need_gpu_count:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, nouse[:need_gpu_count]))
        return nouse[:need_gpu_count]
    elif len(nouse)>0:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, nouse))
        return len(nouse)
    else:
        return 0
import tensorflow as tf | |||
import torch | |||
if __name__ == '__main__':
    # Manual check: claim up to 2 idle GPUs, then print the first device's name.
    gpus=usegpu(need_gpu_count=2)
    print(gpus)
    if gpus:
        print("use gpu ok")
    else:
        print("no gpu is valid")
    gpu_name = torch.cuda.get_device_name(0)
    print(gpu_name)
@@ -0,0 +1,21 @@ | |||
from loguru import logger | |||
import pickle | |||
# A minimal value object used by the serialization experiment below.
class Person:
    """Simple holder for a person's name and age."""

    def __init__(self, name, age):
        self.name, self.age = name, age
# Create a Person instance
person = Person("Alice", 25)
# NOTE(review): loguru's logger does not document a serialize() method for
# arbitrary objects — this line likely raises AttributeError; verify before use.
serialized_person = logger.serialize(person)
print(serialized_person)
# Deserialize the byte string back into a Python object with pickle
deserialized_person = pickle.loads(serialized_person)
# Print the attributes of the deserialized object
print(deserialized_person.name)  # Alice
print(deserialized_person.age)  # 25
@@ -1,5 +1,16 @@ | |||
import os | |||
import sys | |||
from util import YmlUtils, LogUtils | |||
from loguru import logger | |||
# Scratch check of working directory vs. script-relative paths.
print(os.getcwd())
print(os.path.relpath(__file__))
print(os.path.relpath(__file__))  # NOTE(review): duplicated line — intentional?
# Load config relative to the executable's directory and initialise logging.
base_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
content = YmlUtils.getConfigs(base_dir + "/../../")
LogUtils.init_log(content)
try:
    2/0
except Exception as e:
    # Deliberate ZeroDivisionError to exercise the exception-logging setup.
    logger.exception("异常信息:{}", e)
@@ -1,7 +1,23 @@ | |||
list1 = [1, 2, 3, 4] | |||
list2 = [1,2,4] | |||
if set(list2) == set(list1): | |||
print("1111111") | |||
else: | |||
print("222222") | |||
# list1 = [1, 2, 3, 4] | |||
# list2 = [1,2,4] | |||
# if set(list2) == set(list1): | |||
# print("1111111") | |||
# else: | |||
# print("222222") | |||
import numpy as np | |||
# list1 = [1, 2, 3, 4] | |||
# tl = np.asarray([1, 2], np.float32) | |||
# box = np.asarray([tl], np.int32) | |||
# print(tl)c | |||
# print(box[0][1]) | |||
import cv2
# Scratch check of VideoWriter property APIs (mp4v @ 25 fps, 1080p).
ai_video_file = cv2.VideoWriter(r"C:\Users\chenyukun\Desktop\fsdownload\aa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 25,
                                (1920,1080))
# ai_video_file.set(cv2.VIDEOWRITER_PROP_BITRATE, 4000)
# NOTE(review): CAP_PROP_BITRATE is a capture property — confirm it has any effect on VideoWriter.
ai_video_file.set(cv2.CAP_PROP_BITRATE, 4000)
ai_video_file.set(cv2.VIDEOWRITER_PROP_QUALITY, 80)
print(help(cv2.VideoWriter.set))
print(dir(cv2))
print(help(cv2))
@@ -55,7 +55,7 @@ class ThAliyunVodSdk(): | |||
def init_vod_client(self, accessKeyId, accessKeySecret): | |||
regionId = self.content["aliyun"]["vod"]["ecsRegionId"] | |||
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=5) | |||
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=30) | |||
def get_play_info(self, videoId): | |||
self.logger.info("开始获取视频地址,videoId:{}, requestId:{}", videoId, self.requestId) | |||
@@ -116,14 +116,13 @@ class ThAliyunVodSdk(): | |||
time.sleep(3) | |||
self.logger.error("vod视频上传失败,重试次数:{}, requestId:{}", retry_count, self.requestId) | |||
if retry_count >= MAX_RETRIES: | |||
self.logger.exception("vod视频上传重试失败: {}, requestId:{}", e, self.requestId) | |||
self.logger.exception("vod视频上传重试失败: {}, requestId:{}", e.message, self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
def get_play_url(self, filePath, file_title): | |||
videoId = self.upload_local_video(filePath, file_title) | |||
if videoId is None or len(videoId) == 0: | |||
return None | |||
return self.get_play_info(videoId) | |||
def get_play_url(args):
    """Upload a local video via ThAliyunVodSdk and return its VOD play URL.

    args: sequence where args[0]=filePath, args[1]=file_title and args[2:5] are
    the ThAliyunVodSdk constructor arguments — presumably (content, logger,
    requestId); TODO confirm against callers.
    Returns None if the upload yields no videoId.
    """
    thAliyunVodSdk = ThAliyunVodSdk(args[2], args[3], args[4])
    videoId = thAliyunVodSdk.upload_local_video(args[0], args[1])
    if videoId is None or len(videoId) == 0:
        return None
    return thAliyunVodSdk.get_play_info(videoId)
@@ -113,7 +113,7 @@ class Cv2Util(): | |||
self.clear_video_info() | |||
raise s | |||
except Exception as e: | |||
logger.exception("获取视频信息异常:{}, requestId:{}", e, self.requestId) | |||
logger.error("获取视频信息异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
''' | |||
@@ -285,7 +285,7 @@ class Cv2Util(): | |||
in_bytes = self.pull_p.stdout.read(self.wh) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
try: | |||
result = (np.frombuffer(in_bytes, np.uint8).reshape([int(self.height), int(self.width), 3])) | |||
result = (np.frombuffer(in_bytes, np.uint8).reshape([self.height, self.width, 3])) | |||
# img = (np.frombuffer(in_bytes, np.uint8)).reshape((self.h, self.w)) | |||
except Exception as ei: | |||
logger.exception("视频格式异常:{}, requestId:{}", ei, self.requestId) | |||
@@ -294,7 +294,7 @@ class Cv2Util(): | |||
# result = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12) | |||
# result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR) | |||
if self.width > Constant.width: | |||
result = cv2.resize(result, self.w, self.h, interpolation=cv2.INTER_LINEAR) | |||
result = cv2.resize(result, (self.w, self.h), interpolation=cv2.INTER_LINEAR) | |||
except ServiceException as s: | |||
raise s | |||
except Exception as e: | |||
@@ -420,7 +420,8 @@ class Cv2Util(): | |||
"-an", | |||
# '-flvflags', 'no_duration_filesize', | |||
# '-preset', 'fast', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
'-preset', 'llhq', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
'-preset', 'p6', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
'-tune', 'll', | |||
'-f', 'flv', | |||
self.pushUrl] | |||
logger.info("fps:{}|height:{}|width:{}|requestId:{}", self.fps, self.height, self.width, self.requestId) | |||
@@ -479,13 +480,9 @@ class Cv2Util(): | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
if self.or_video_file is not None: | |||
self.or_video_file.release() | |||
logger.exception("构建OR文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
if self.or_video_file is not None: | |||
self.or_video_file.release() | |||
logger.exception("构建OR文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
@@ -499,13 +496,9 @@ class Cv2Util(): | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
if self.ai_video_file is not None: | |||
self.ai_video_file.release() | |||
logger.exception("构建AI文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
if self.ai_video_file is not None: | |||
self.ai_video_file.release() | |||
logger.exception("构建AI文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
@@ -522,12 +515,13 @@ class Cv2Util(): | |||
try: | |||
ai_retry_num += 1 | |||
if ai_retry_num > 3: | |||
logger.exception("重新写入原视频视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
logger.exception("重新写入原视频视频到本地,重试失败, requestId: {}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.or_video_file.write(frame) | |||
logger.info("重新写入原视频视到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.requestId) | |||
break | |||
except Exception as e: | |||
logger.exception("重新写入原视频视到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
ai_retry_num, self.requestId) | |||
@@ -545,12 +539,13 @@ class Cv2Util(): | |||
try: | |||
ai_retry_num += 1 | |||
if ai_retry_num > 3: | |||
logger.exception("重新写入分析后的视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
logger.exception("重新写入分析后的视频到本地,重试失败, requestId: {}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.ai_video_file.write(frame) | |||
logger.info("重新写入分析后的视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.requestId) | |||
break | |||
except Exception as e: | |||
logger.exception("重新写入分析后的视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
ai_retry_num, self.requestId) |
@@ -33,6 +33,13 @@ def get_all_gpu_ids(): | |||
return GPUtil.getGPUs() | |||
def get_first_gpu_name():
    """Return the model name of GPU 0; raise if no GPU is visible on this host."""
    gpus = GPUtil.getGPUs()
    if not gpus:
        raise Exception("未获取到gpu资源, 先检测服务器是否已经配置GPU资源!")
    return gpus[0].name
def check_gpu_resource(content): | |||
gpu_ids = get_gpu_ids(content) | |||
if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids): |
@@ -0,0 +1,224 @@ | |||
import cv2 | |||
import numpy as np | |||
from PIL import Image, ImageDraw, ImageFont | |||
from PIL.Image import Image | |||
def get_label_array(color=None, label=None, font=None, fontSize=None):
    """Render `label` on a solid `color` patch, scaled so its height equals fontSize.

    color: colour tuple for the background; font: a PIL.ImageFont instance;
    returns the patch as an ndarray.
    """
    # BUG FIX: the module-level `from PIL.Image import Image` shadows the
    # PIL.Image *module* with the Image *class*, which has no fromarray(),
    # so `Image.fromarray` raised AttributeError. Import the module locally
    # under an unambiguous name instead.
    from PIL import Image as PILImage
    # getbbox returns (left, top, right, bottom); right/bottom are used as the
    # patch size here (left/top are ~0 for this usage).
    x, y, width, height = font.getbbox(label)
    text_image = np.zeros((height, width, 3), dtype=np.uint8)
    text_image = PILImage.fromarray(text_image)
    draw = ImageDraw.Draw(text_image)
    draw.rectangle((0, 0, width, height), fill=tuple(color))
    draw.text((0, -3), label, fill=(255, 255, 255), font=font)
    im_array = np.asarray(text_image)
    # Scale so the rendered patch height equals the requested fontSize.
    scale = fontSize / height
    im_array = cv2.resize(im_array, (0, 0), fx=scale, fy=scale)
    return im_array
def get_label_arrays(labelNames, colors, fontSize=40, fontPath="platech.ttf"):
    """Pre-render one label patch per class name, cycling through 20 colours."""
    font = ImageFont.truetype(fontPath, fontSize, encoding='utf-8')
    return [
        get_label_array(colors[idx % 20], name, font, fontSize)
        for idx, name in enumerate(labelNames)
    ]
def draw_painting_joint(box, img, label_array, score=0.5, color=None, score_location="leftTop"):
    """Draw one detection on img: class-label patch, box outline, and score text.

    box: either flat [xc, yc, w, h] or four corner points [tl, tr, br, bl];
    img: ndarray image, modified in place; label_array: patch from get_label_array;
    score_location: 'leftTop' or 'leftBottom' anchor for the label patch
    (anything else falls back to 'leftTop').
    Returns (img, box) with box in the four-corner form.
    """
    # Height/width of the rendered label patch
    lh, lw = label_array.shape[0:2]
    # Height and width of the target image
    imh, imw = img.shape[0:2]
    if not isinstance(box[0], (list, tuple, np.ndarray)):
        # Flat [xc, yc, w, h] box: convert to the four-corner form.
        xc = int(box[0])
        yc = int(box[1])
        w = int(box[2])
        h = int(box[3])
        bw = int(w/2)
        bh = int(h/2)
        tl = (xc - bw, yc - bh)
        tr = (xc + bw, yc - bh)
        br = (xc + bw, yc + bh)
        bl = (xc - bw, yc + bh)
        box = [tl, tr, br, bl]
    # Anchor at the box's top-left corner
    if score_location == 'leftTop':
        x0, y1 = box[0][0], box[0][1]
    # Anchor at the box's bottom-left corner
    elif score_location == 'leftBottom':
        x0, y1 = box[3][0], box[3][1]
    else:
        x0, y1 = box[0][0], box[0][1]
    # x1 = anchor x + patch width; y0 = anchor y - patch height
    x1, y0 = x0 + lw, y1 - lh
    # If y0 < 0 the patch sticks out above the image
    if y0 < 0:
        y0 = 0
        # y1 becomes the patch height
        y1 = y0 + lh
    # If y1 exceeds the image height
    if y1 > imh:
        # Clamp y1 to the image height
        y1 = imh
        # y0 is y1 minus the patch height
        y0 = y1 - lh
    # Clamp horizontally as well
    if x0 < 0:
        x0 = 0
        x1 = x0 + lw
    if x1 > imw:
        x1 = imw
        x0 = x1 - lw
    img[y0:y1, x0:x1, :] = label_array
    pts_cls = [(x0, y0), (x1, y1)]
    # Draw the quadrilateral box outline
    # box_tl = font['boxLine_thickness'] or round(0.002 * (imh + imw) / 2) + 1
    box_tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1
    '''
    1. img(array) 为ndarray类型(可以为cv.imread)直接读取的数据
    2. box(array):为所画多边形的顶点坐标
    3. 所画四边形是否闭合,通常为True
    4. color(tuple):BGR三个通道的值
    5. thickness(int):画线的粗细
    6. shift:顶点坐标中小数的位数
    '''
    box1 = np.asarray(box, np.int32)
    cv2.polylines(img, [box1], True, color, box_tl)
    # Draw the numeric score (ASCII) next to the class label
    tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1
    label = ' %.2f' % score
    tf = max(tl, 1)
    fontScale = float(format(imw / 1920 * 1.1, '.2f')) or tl * 0.33
    '''
    1. text:要计算大小的文本内容,类型为字符串。
    2. fontFace:字体类型,例如cv2.FONT_HERSHEY_SIMPLEX等。
    3. fontScale:字体大小的缩放因子,例如1.2表示字体大小增加20%。
    4. thickness:文本线条的粗细,以像素为单位。
    5. (text_width, text_height):给定文本在指定字体、字体大小、线条粗细下所占用的像素宽度和高度。
    '''
    t_size = cv2.getTextSize(label, 0, fontScale=fontScale, thickness=tf)[0]
    # if socre_location=='leftTop':
    p1, p2 = (pts_cls[1][0], pts_cls[0][1]), (pts_cls[1][0] + t_size[0], pts_cls[1][1])
    '''
    1. img:要绘制矩形的图像
    2. pt1:矩形框的左上角坐标,可以是一个包含两个整数的元组或列表,例如(x1, y1)或[x1, y1]。
    3. pt2:矩形框的右下角坐标,可以是一个包含两个整数的元组或列表,例如(x2, y2)或[x2, y2]。
    4. color:矩形框的颜色,可以是一个包含三个整数的元组或列表,例如(255, 0, 0)表示蓝色,或一个标量值,例如255表示白色。颜色顺序为BGR。
    5. thickness:线条的粗细,以像素为单位。如果为负值,则表示要绘制填充矩形。默认值为1。
    6. lineType:线条的类型,可以是cv2.LINE_AA表示抗锯齿线条,或cv2.LINE_4表示4连通线条,或cv2.LINE_8表示8连通线条。默认值为cv2.LINE_8。
    7. shift:坐标点小数点位数。默认值为0。
    '''
    cv2.rectangle(img, p1, p2, color, -1, cv2.LINE_AA)
    p3 = pts_cls[1][0], pts_cls[1][1] - (lh - t_size[1]) // 2
    '''
    1. img:要在其上绘制文本的图像
    2. text:要绘制的文本内容,类型为字符串
    3. org:文本起始位置的坐标,可以是一个包含两个整数的元组或列表,例如(x, y)或[x, y]。
    4. fontFace:字体类型,例如cv2.FONT_HERSHEY_SIMPLEX等。
    5. fontScale:字体大小的缩放因子,例如1.2表示字体大小增加20%。
    6. color:文本的颜色,可以是一个包含三个整数的元组或列表,例如(255, 0, 0)表示蓝色,或一个标量值,例如255表示白色。颜色顺序为BGR。
    7. thickness:文本线条的粗细,以像素为单位。默认值为1。
    8. lineType:线条的类型,可以是cv2.LINE_AA表示抗锯齿线条,或cv2.LINE_4表示4连通线条,或cv2.LINE_8表示8连通线条。默认值为cv2.LINE_8。
    9. bottomLeftOrigin:文本起始位置是否为左下角。如果为True,则文本起始位置为左下角,否则为左上角。默认值为False。
    '''
    cv2.putText(img, label, p3, 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
    return img, box
# def draw_painting_joint(box,img,label_array,score=0.5,color=None,font={ 'line_thickness':None,'boxLine_thickness':None, 'fontSize':None},socre_location="leftTop"): | |||
# #如果box[0]不是list or 元组,则box是[ (x0,y0),(x1,y1),(x2,y2),(x3,y3)]四点格式 | |||
# if isinstance(box[0], (list, tuple,np.ndarray ) ): | |||
# ###先把中文类别字体赋值到img中 | |||
# lh, lw, lc = label_array.shape | |||
# imh, imw, imc = img.shape | |||
# if socre_location=='leftTop': | |||
# x0 , y1 = box[0][0],box[0][1] | |||
# elif socre_location=='leftBottom': | |||
# x0,y1=box[3][0],box[3][1] | |||
# else: | |||
# print('plot.py line217 ,label_location:%s not implemented '%( socre_location )) | |||
# sys.exit(0) | |||
# | |||
# x1 , y0 = x0 + lw , y1 - lh | |||
# if y0<0:y0=0;y1=y0+lh | |||
# if y1>imh: y1=imh;y0=y1-lh | |||
# if x0<0:x0=0;x1=x0+lw | |||
# if x1>imw:x1=imw;x0=x1-lw | |||
# img[y0:y1,x0:x1,:] = label_array | |||
# pts_cls=[(x0,y0),(x1,y1) ] | |||
# | |||
# #把四边形的框画上 | |||
# box_tl= font['boxLine_thickness'] or round(0.002 * (imh + imw) / 2) + 1 | |||
# cv2.polylines(img, [box], True,color , box_tl) | |||
# | |||
# ####把英文字符score画到类别旁边 | |||
# tl = font['line_thickness'] or round(0.002*(imh+imw)/2)+1#line/font thickness | |||
# label = ' %.2f'%(score) | |||
# tf = max(tl , 1) # font thickness | |||
# fontScale = font['fontSize'] or tl * 0.33 | |||
# t_size = cv2.getTextSize(label, 0, fontScale=fontScale , thickness=tf)[0] | |||
# | |||
# | |||
# #if socre_location=='leftTop': | |||
# p1,p2= (pts_cls[1][0], pts_cls[0][1]),(pts_cls[1][0]+t_size[0],pts_cls[1][1]) | |||
# cv2.rectangle(img, p1 , p2, color, -1, cv2.LINE_AA) | |||
# p3 = pts_cls[1][0],pts_cls[1][1]-(lh-t_size[1])//2 | |||
# | |||
# cv2.putText(img, label,p3, 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) | |||
# return img | |||
# else:####两点格式[x0,y0,x1,y1] | |||
# try: | |||
# box = [int(xx.cpu()) for xx in box] | |||
# except: | |||
# box=[ int(x) for x in box] | |||
# ###先把中文类别字体赋值到img中 | |||
# lh, lw, lc = label_array.shape | |||
# imh, imw, imc = img.shape | |||
# if socre_location=='leftTop': | |||
# x0 , y1 = box[0:2] | |||
# elif socre_location=='leftBottom': | |||
# x0,y1=box[0],box[3] | |||
# else: | |||
# print('plot.py line217 ,socre_location:%s not implemented '%( socre_location )) | |||
# sys.exit(0) | |||
# x1 , y0 = x0 + lw , y1 - lh | |||
# if y0<0:y0=0;y1=y0+lh | |||
# if y1>imh: y1=imh;y0=y1-lh | |||
# if x0<0:x0=0;x1=x0+lw | |||
# if x1>imw:x1=imw;x0=x1-lw | |||
# img[y0:y1,x0:x1,:] = label_array | |||
# | |||
# | |||
# | |||
# ###把矩形框画上,指定颜色和线宽 | |||
# tl = font['line_thickness'] or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness | |||
# box_tl= font['boxLine_thickness'] or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 | |||
# c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) | |||
# cv2.rectangle(img, c1, c2, color, thickness=box_tl, lineType=cv2.LINE_AA) | |||
# | |||
# ###把英文字符score画到类别旁边 | |||
# label = ' %.2f'%(score) | |||
# tf = max(tl , 1) # font thickness | |||
# fontScale = font['fontSize'] or tl * 0.33 | |||
# t_size = cv2.getTextSize(label, 0, fontScale=fontScale , thickness=tf)[0] | |||
# | |||
# if socre_location=='leftTop': | |||
# c2 = c1[0]+ lw + t_size[0], c1[1] - lh | |||
# cv2.rectangle(img, (int(box[0])+lw,int(box[1])) , c2, color, -1, cv2.LINE_AA) # filled | |||
# cv2.putText(img, label, (c1[0]+lw, c1[1] - (lh-t_size[1])//2 ), 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) | |||
# elif socre_location=='leftBottom': | |||
# c2 = box[0]+ lw + t_size[0], box[3] - lh | |||
# cv2.rectangle(img, (int(box[0])+lw,int(box[3])) , c2, color, -1, cv2.LINE_AA) # filled | |||
# cv2.putText(img, label, ( box[0] + lw, box[3] - (lh-t_size[1])//2 ), 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) | |||
# | |||
# #print('#####line224 fontScale:',fontScale,' thickness:',tf,' line_thickness:',font['line_thickness'],' boxLine thickness:',box_tl) | |||
# return img | |||
@@ -0,0 +1,346 @@ | |||
from kafka import KafkaProducer, KafkaConsumer | |||
from kafka.errors import kafka_errors | |||
import traceback | |||
import json, base64,os | |||
import numpy as np | |||
from multiprocessing import Process,Queue | |||
import time,cv2,string,random | |||
import subprocess as sp | |||
import matplotlib.pyplot as plt | |||
from utils.datasets import LoadStreams, LoadImages | |||
from models.experimental import attempt_load | |||
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path | |||
import torch,sys | |||
#from segutils.segmodel import SegModel,get_largest_contours | |||
#sys.path.extend(['../yolov5/segutils']) | |||
from segutils.segWaterBuilding import SegModel,get_largest_contours,illBuildings | |||
#from segutils.core.models.bisenet import BiSeNet | |||
from segutils.core.models.bisenet import BiSeNet_MultiOutput | |||
from utils.plots import plot_one_box,plot_one_box_PIL,draw_painting_joint,get_label_arrays,get_websource | |||
from collections import Counter | |||
#import matplotlib | |||
import matplotlib.pyplot as plt | |||
# get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str | |||
#FP_DEBUG=open('debut.txt','w') | |||
def bsJpgCode(image_ori):
    """JPEG-encode an image array and return the payload as a base64 string."""
    jpg_bytes = cv2.imencode('.jpg', image_ori)[-1]
    encoded = base64.b64encode(jpg_bytes)
    # Strip the leading "b'" and trailing "'" of the bytes repr (original behaviour).
    return str(encoded)[2:-1]
def bsJpgDecode(bsCode):
    """Decode a base64 JPEG string back into a colour image array."""
    raw = base64.b64decode(bsCode)
    buf = np.frombuffer(raw, np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)
def get_ms(time0, time1):
    """Format the interval time1 - time0 as a millisecond string, e.g. '12.34 ms'."""
    elapsed_ms = (time1 - time0) * 1000
    return '%.2f ms' % elapsed_ms
# 20-entry colour palette used to distinguish detection classes —
# presumably BGR (OpenCV channel order); confirm against draw_painting_joint usage.
rainbows=[
    (0,0,255),(0,255,0),(255,0,0),(255,0,255),(255,255,0),(255,127,0),(255,0,127),
    (127,255,0),(0,255,127),(0,127,255),(127,0,255),(255,127,255),(255,255,127),
    (127,255,255),(0,255,255),(255,127,255),(127,255,255),
    (0,127,0),(0,0,127),(0,255,255)
]
def get_labelnames(labelnames):
    """Load class label names from a JSON file of the form {"labelnames": [...]}.

    labelnames: path to the JSON file. Returns the list of names.
    """
    # Explicit UTF-8: label names may contain non-ASCII (e.g. Chinese) text and
    # the platform default encoding is not guaranteed to be UTF-8 (e.g. Windows).
    with open(labelnames, 'r', encoding='utf-8') as fp:
        namesjson = json.load(fp)
    return namesjson['labelnames']
def check_stream(stream):
    """Return True if the given video source/URL can be opened by OpenCV."""
    cap = cv2.VideoCapture(stream)
    try:
        return cap.isOpened()
    finally:
        # BUG FIX: the capture handle was never released, leaking the underlying
        # FFmpeg/device resources on every probe.
        cap.release()
##### | |||
def drawWater(pred,image_array0,river={'color':(0,255,255),'line_width':3,'segRegionCnt':2}):  # pred is the segmentation output (water-only task)
    """Outline the largest water regions on image_array0 and build a water mask.

    Returns (annotated image, binary mask of pred's shape: 1 inside kept water regions).
    NOTE(review): mutable default argument `river` — only safe because it is
    never mutated here; confirm before changing.
    """
    # Find contours of the predicted water area and keep the largest ones.
    contours, hierarchy = cv2.findContours(pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    water = pred.copy(); water[:,:] = 0
    if len(contours)==0:
        return image_array0,water
    max_ids = get_largest_contours(contours,river['segRegionCnt']);
    for max_id in max_ids:
        # Fill the mask and draw the contour outline on the image.
        cv2.fillPoly(water, [contours[max_id][:,0,:]], 1)
        cv2.drawContours(image_array0,contours,max_id,river['color'],river['line_width'] )
    return image_array0,water
def scale_back(boxes, padInfos):
    """Map boxes from padded/resized coordinates back to the original frame (in place).

    padInfos: (top, left, r) as produced by img_pad; boxes columns are x0, y0, x1, y1.
    Returns the same (mutated) boxes array.
    """
    top, left, r = padInfos[0:3]
    # x columns shift by the left pad, y columns by the top pad; both rescale by r.
    for x_col in (0, 2):
        boxes[:, x_col] = (boxes[:, x_col] - left) * r
    for y_col in (1, 3):
        boxes[:, y_col] = (boxes[:, y_col] - top) * r
    return boxes
def img_pad(img, size, pad_value=[114,114,114]):
    """Letterbox img to the fixed (H, W) `size`, padding borders with pad_value.

    Returns (padded image, (top, left, r)) where r is the downscale factor used;
    (top, left, r) feeds scale_back to undo the transform on box coordinates.
    """
    # Pad to a fixed size while preserving aspect ratio.
    H,W,_ = img.shape
    r = max(H/size[0], W/size[1])
    img_r = cv2.resize(img, (int(W/r), int(H/r)))
    # Split the remaining space evenly between top/bottom and left/right.
    tb = size[0] - img_r.shape[0]
    lr = size[1] - img_r.shape[1]
    top = int(tb/2)
    bottom = tb - top
    left = int(lr/2)
    right = lr - left
    pad_image = cv2.copyMakeBorder(img_r, top, bottom, left, right, cv2.BORDER_CONSTANT,value=pad_value)
    return pad_image,(top, left,r)
def post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,ObjectPar={ 'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7] ,'segmodel':True,'segRegionCnt':1 },font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},padInfos=None ):
    """NMS + coordinate restore + drawing for one frame.

    datas: [path, img, im0s, vid_cap, pred, seg_pred] from the dataset/model stages.
    Returns ([original frame, annotated frame, det_xywh, iframe], timing string).
    NOTE(review): mutable default args ObjectPar/font are read-only here —
    confirm before mutating them.
    """
    object_config,slopeIndex,segmodel,segRegionCnt=ObjectPar['object_config'],ObjectPar['slopeIndex'],ObjectPar['segmodel'],ObjectPar['segRegionCnt']
    # Input: dataset-generated data, model prediction `pred`, NMS parameters.
    # Pipeline: NMS ---> coordinate transform ---> drawing.
    # Output: original image, AI-annotated image, detection results.
    time0=time.time()
    path, img, im0s, vid_cap ,pred,seg_pred= datas[0:6];
    #segmodel=True
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    time1=time.time()
    i=0;det=pred[0]  # one image is processed per call
    time1_1 = time.time()
    #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
    p, s, im0 = path[i], '%g: ' % i, im0s[i]
    time1_2 = time.time()
    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
    time1_3 = time.time()
    det_xywh=[];
    #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR);
    if segmodel:
        if len(seg_pred)==2:
            im0,water = illBuildings(seg_pred,im0)
        else:
            river={ 'color':font['waterLineColor'],'line_width':font['waterLineWidth'],'segRegionCnt':segRegionCnt }
            im0,water = drawWater(seg_pred,im0,river)
    time2=time.time()
    #plt.imshow(im0);plt.show()
    if len(det)>0:
        # Rescale boxes from img_size to im0 size
        if not padInfos:
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4],im0.shape).round()
        else:
            #print('####line131:',det[:, :])
            det[:, :4] = scale_back( det[:, :4],padInfos).round()
            #print('####line133:',det[:, :])
        # Use the seg model to keep only valid boxes near the river contour
        if segmodel:
            cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32)
            # Decide which targets belong to the bank slope
            slope_flag = np.array([x in slopeIndex for x in cls_indexs ] )
            det_c = det.clone(); det_c=det_c.cpu().numpy()
            try:
                area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c] )
            except:
                # NOTE(review): if the above fails, area_factors is undefined on the
                # next line — NameError risk; bare except also hides the cause.
                print('*****************************line143: error:',det_c)
            water_flag = np.array(area_factors>0.1)
            det = det[water_flag|slope_flag]  # on-water targets need >0.1 overlap with water; slope targets are kept as-is
        # Draw the surviving detection boxes
        for *xyxy, conf, cls in reversed(det):
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            cls_c = cls.cpu().numpy()
            if int(cls_c) not in object_config:  # skip classes not requested
                continue
            conf_c = conf.cpu().numpy()
            line = [float(cls_c), *xywh, float(conf_c)] # label format
            det_xywh.append(line)
            label = f'{names[int(cls)]} {conf:.2f}'
            im0 = draw_painting_joint(xyxy,im0,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font)
    time3=time.time()
    strout='nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2),get_ms(time1_1,time1_2),get_ms(time1_2,time1_3), get_ms(time2,time3) )
    return [im0s[0],im0,det_xywh,iframe],strout
def preprocess(par):
    """Reader worker: open the video source par['source'], iterate frames via
    LoadStreams and push [path, img, im0s, vid_cap, iframe] onto par['queOut'].

    Runs forever; on any read failure it sleeps 10s and re-opens the source.
    `img` is the network-sized array, `im0s` the original frames (numpy).
    """
    print('#####process:',par['name'])
    # Responsible for reading the video and producing both the original frame
    # and the detection-sized frame, in numpy format.
    #source='rtmp://liveplay.yunhengzhizao.cn/live/demo_HD5M'
    #img_size=640; stride=32
    while True:
        # Probe the source first; LoadStreams does the actual frame iteration.
        cap = cv2.VideoCapture(par['source'])
        iframe = 0
        if cap.isOpened():
            print( '#### read %s success!'%(par['source']))
            try:
                dataset = LoadStreams(par['source'], img_size=640, stride=32)
                for path, img, im0s, vid_cap in dataset:
                    datas=[path, img, im0s, vid_cap,iframe]
                    par['queOut'].put(datas)
                    iframe +=1
            except Exception as e:
                print('###read error:%s '%(par['source']))
                time.sleep(10)
                iframe = 0
        else:
            print('###read error:%s '%(par['source'] ))
            time.sleep(10)
            iframe = 0
        # Fix: release the probe capture before retrying; previously the
        # VideoCapture handle leaked on every loop iteration.
        cap.release()
def gpu_process(par):
    """GPU worker: pull [path, img, im0s, vid_cap, iframe] from par['queIn'],
    run the FP16 detector and the segmentation model, and push
    [path, img, im0s, vid_cap, pred, seg_pred, iframe] onto par['queOut'].
    """
    print('#####process:',par['name'])
    half=True  # run detector weights/inputs in FP16
    ## GPU computation: detection model
    weights = par['weights']
    device = par['device']
    print('###line127:',par['device'])
    model = attempt_load(par['weights'], map_location=par['device'])  # load FP32 model
    if half:
        model.half()
    ## GPU computation: segmentation model
    seg_nclass = par['seg_nclass']
    seg_weights = par['seg_weights']
    #segmodel = SegModel(nclass=seg_nclass,weights=seg_weights,device=device)
    # Two-headed BiSeNet (water + building); class count per head.
    nclass = [2,2]
    Segmodel = BiSeNet_MultiOutput(nclass)
    # Fix: use the configured seg weights and device; they were read from `par`
    # above but then shadowed by a hard-coded path and 'cuda:0'.
    segmodel = SegModel(model=Segmodel,nclass=nclass,weights=seg_weights,device=device,multiOutput=True)
    while True:
        if not par['queIn'].empty():
            time0=time.time()
            datas = par['queIn'].get()
            path, img, im0s, vid_cap,iframe = datas[0:5]
            time1=time.time()
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            time2 = time.time()
            pred = model(img,augment=False)[0]
            time3 = time.time()
            seg_pred = segmodel.eval(im0s[0],outsize=None,smooth_kernel=20)
            time4 = time.time()
            fpStr= 'process:%s ,iframe:%d,getdata:%s,copygpu:%s,dettime:%s,segtime:%s , time:%s, queLen:%d '%( par['name'],iframe,get_ms(time0,time1) ,get_ms(time1,time2) ,get_ms(time2,time3) ,get_ms(time3,time4),get_ms(time0,time4) ,par['queIn'].qsize() )
            #FP_DEBUG.write( fpStr+'\n' )
            datasOut = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
            par['queOut'].put(datasOut)
            if par['debug']:
                print('#####process:',par['name'],' line107')
        else:
            time.sleep(1/300)  # avoid busy-spinning on an empty queue
def get_cls(array):
    """Return the most frequent value in `array` as an int.

    Ties are broken by first appearance (Counter.most_common is stable
    with respect to insertion order).
    """
    counts = Counter(array)
    most_common_value, _ = counts.most_common(1)[0]
    return int(most_common_value)
def save_problem_images(post_results,iimage_cnt,names,streamName='live-THSAHD5M',outImaDir='problems/images_tmp',imageTxtFile=False):
    """Pick the frame with the highest mean detection confidence from a batch
    and save its AI-annotated and original images (plus optional .txt sidecar
    files) into outImaDir.

    post_results: list of entries where [0]=original image, [1]=AI-drawn image,
        [2]=detections (rows of [cls, x, y, w, h, conf]), [3]=absolute frame no.
    iimage_cnt: counter embedded into the output filename.
    names: class index -> label name.
    Returns a dict with the saved image names, uid, timestamp and type label.
    """
    ## [cls, x,y,w,h, conf]
    problem_image=[[] for i in range(6)]  # NOTE(review): built but never used below
    dets_list = [x[2] for x in post_results]
    mean_scores=[ np.array(x)[:,5].mean() for x in dets_list ]  ### mean conf per buffered frame
    best_index = mean_scores.index(max(mean_scores))  ## index of the "problem" frame within this batch
    best_frame = post_results[ best_index][3]  ## absolute frame number
    img_send = post_results[best_index][1]  ## AI-processed (annotated) image
    img_bak = post_results[best_index][0]  ## original image
    # Majority class among that frame's detections.
    cls_max = get_cls( x[0] for x in dets_list[best_index] )
    # NOTE(review): this local shadows the module-level time_str() function.
    time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    uid=''.join(random.sample(string.ascii_letters + string.digits, 16))
    #ori_name = '2022-01-20-15-57-36_frame-368-720_type-漂浮物_qVh4zI08ZlwJN9on_s-live-THSAHD5M_OR.jpg'
    #2022-01-13-15-07-57_frame-9999-9999_type-结束_9999999999999999_s-off-XJRW20220110115904_AI.jpg
    # NOTE(review): the suffixes look swapped relative to the variable names:
    # outnameOR ends in _AI.jpg and receives img_send (the AI image), while
    # outnameAR ends in _OR.jpg and receives img_bak (the original). Confirm
    # against the downstream filename parser before changing.
    outnameOR= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_AI.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName)
    outnameAR= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_OR.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName)
    cv2.imwrite(outnameOR,img_send)
    cv2.imwrite(outnameAR,img_bak)
    if imageTxtFile:
        # Optional sidecar .txt files containing the image path (one per image).
        outnameOR_txt = outnameOR.replace('.jpg','.txt')
        fp=open(outnameOR_txt,'w');fp.write(outnameOR+'\n');fp.close()
        outnameAI_txt = outnameAR.replace('.jpg','.txt')
        fp=open(outnameAI_txt,'w');fp.write(outnameAR+'\n');fp.close()
    # NOTE(review): both 'imgOR' and 'imgAR' are set to img_send; 'imgOR' was
    # presumably meant to be img_bak — verify with the consumer of parOut.
    parOut = {}; parOut['imgOR'] = img_send; parOut['imgAR'] = img_send; parOut['uid']=uid
    parOut['imgORname']=os.path.basename(outnameOR);parOut['imgARname']=os.path.basename(outnameAR);
    parOut['time_str'] = time_str;parOut['type'] = names[cls_max]
    return parOut
def post_process(par):
    """Post-processing worker: pull detection/segmentation batches from
    par['queIn'], run NMS + drawing via post_process_, push the rendered result
    to par['queOut'], and every par['fpsample'] frames flush the buffer of
    frames-with-detections to disk via save_problem_images.
    """
    print('#####process:',par['name'])
    ### post-process parameters
    conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
    labelnames=par['labelnames']
    rainbows=par['rainbows']
    fpsample = par['fpsample']
    names=get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
    iimage_cnt = 0
    post_results=[]  # frames with detections buffered since the last sample point
    while True:
        if not par['queIn'].empty():
            time0=time.time()
            datas = par['queIn'].get()
            iframe = datas[6]  # absolute frame number produced by the reader
            if par['debug']:
                print('#####process:',par['name'],' line129')
            p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
            par['queOut'].put(p_result)
            ## Output handling:
            ## every fpsample frames, flush the buffer — if any buffered frame had
            ## detections, save a problem image, then reset the buffer.
            if (iframe % fpsample == 0) and (len(post_results)>0) :
                #print('####line204:',iframe,post_results)
                save_problem_images(post_results,iframe,names)
                post_results=[]
            if len(p_result[2] )>0: ## only buffer frames that actually contain detections
                #post_list = p_result.append(iframe)
                post_results.append(p_result)
                #print('####line201:',type(p_result))
            time1=time.time()
            outstr='process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() )
            #FP_DEBUG.write(outstr +'\n')
            #print( 'process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() ) )
        else:
            time.sleep(1/300)  # avoid busy-spinning on an empty queue
def save_logfile(name,txt):
    """Append a timestamped line '<YYYY-mm-dd HH:MM:SS> <txt>' to logfile `name`,
    creating the file if it does not exist.
    """
    # Fix: the previous implementation opened existing files with 'r+', which
    # positions the write cursor at offset 0 and OVERWRITES earlier log lines.
    # Mode 'a' both creates the file and always appends at the end.
    with open(name, 'a') as fp:
        fp.write('%s %s \n'%(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),txt))
def time_str():
    """Current local time as 'YYYY-mm-dd HH:MM:SS'."""
    # strftime without an explicit struct_time uses localtime() by default.
    return time.strftime("%Y-%m-%d %H:%M:%S")
if __name__=='__main__':
    # Entry point: launch the multi-process river-detection pipeline using the
    # queue-based configuration file.
    jsonfile='config/queRiver.json'
    #image_encode_decode()
    work_stream(jsonfile)
    #par={'name':'preprocess'}
    #preprocess(par)
# @@ -0,0 +1,315 @@  (stray diff hunk header left in the file; the YOLOv5 torch-utils section starts below)
# YOLOv5 PyTorch utils | |||
import datetime | |||
import math | |||
import os | |||
import platform | |||
import subprocess | |||
import time | |||
from contextlib import contextmanager | |||
from copy import deepcopy | |||
from pathlib import Path | |||
from loguru import logger | |||
import torch | |||
import torch.backends.cudnn as cudnn | |||
import torch.nn as nn | |||
import torch.nn.functional as F | |||
import torchvision | |||
try: | |||
import thop # for FLOPS computation | |||
except ImportError: | |||
thop = None | |||
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Context manager that makes all non-master processes in distributed training
    wait (via a barrier) until the local master (rank -1 or 0) has executed the
    wrapped block; the master then waits for everyone at the exit barrier.
    """
    is_master = local_rank in (-1, 0)
    if not is_master:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()
def init_torch_seeds(seed=0):
    """Seed the torch RNG; seed==0 trades speed for reproducibility via cudnn.

    See https://pytorch.org/docs/stable/notes/randomness.html
    """
    torch.manual_seed(seed)
    reproducible = (seed == 0)
    # reproducible -> deterministic kernels, no autotuner; otherwise the reverse.
    cudnn.benchmark = not reproducible
    cudnn.deterministic = reproducible
# Last-modification date of a file.
def date_modified(path=__file__):
    """Human-readable modification date of `path`, e.g. '2021-3-26' (no zero padding)."""
    mtime = Path(path).stat().st_mtime
    d = datetime.datetime.fromtimestamp(mtime)
    return '-'.join(str(part) for part in (d.year, d.month, d.day))
def git_describe(path=Path(__file__).parent):  # path must be a directory
    """Human-readable git description of the repo at `path`, e.g. v5.0-5-g3e25f1e.

    Runs `git -C <path> describe --tags --long --always`; returns '' when the
    command fails (not a repo, git missing). https://git-scm.com/docs/git-describe
    """
    cmd = f'git -C {path} describe --tags --long --always'
    try:
        out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return out.decode()[:-1]  # strip trailing newline
    except subprocess.CalledProcessError:
        return ''
def select_device(device='0'):
    """Restrict visible GPUs to `device` (a CUDA index string) and return the
    matching torch.device. Raises AssertionError if CUDA is unavailable.
    """
    logger.info("当前torch版本: {}", torch.__version__)
    # Pin the visible device before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = device
    assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'
    return torch.device(f'cuda:{device}')
# def select_device(device='', batch_size=None): | |||
# # device = 'cpu' or '0' or '0,1,2,3' | |||
# s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string | |||
# cpu = device.lower() == 'cpu' | |||
# if cpu: | |||
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False | |||
# elif device: # non-cpu device requested | |||
# os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable | |||
# assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability | |||
# | |||
# cuda = not cpu and torch.cuda.is_available() | |||
# if cuda: | |||
# n = torch.cuda.device_count() | |||
# if n > 1 and batch_size: # check that batch_size is compatible with device_count | |||
# assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' | |||
# space = ' ' * len(s) | |||
# for i, d in enumerate(device.split(',') if device else range(n)): | |||
# p = torch.cuda.get_device_properties(i) | |||
# s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB | |||
# else: | |||
# s += 'CPU\n' | |||
# logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe | |||
# return torch.device('cuda:0' if cuda else 'cpu') | |||
def time_synchronized():
    """Wall-clock time accurate for GPU timing: flush pending CUDA work first."""
    cuda_available = torch.cuda.is_available()
    if cuda_available:
        torch.cuda.synchronize()
    return time.time()
def profile(x, ops, n=100, device=None):
    """Benchmark forward/backward time (ms) and GFLOPS of module(s) `ops` on input x.

    Example usage:
        x = torch.randn(16, 3, 640, 640)  # input
        m1 = lambda x: x * torch.sigmoid(x)
        m2 = nn.SiLU()
        profile(x, [m1, m2], n=100)  # profile speed over 100 iterations

    Prints one row per op: parameter count, GFLOPS, forward ms, backward ms,
    input shape, output shape. Results go to stdout; nothing is returned.
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True  # so backward timing is possible
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # move op to device (lambdas have no .to)
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # match fp16 input
        dtf, dtb, t = 0., 0., [0., 0., 0.]  # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except:  # thop missing (module-level import may have failed) or op unsupported
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except:  # no backward method
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
def is_parallel(model):
    """True iff `model` is exactly a DataParallel/DistributedDataParallel wrapper
    (exact type match, subclasses intentionally excluded)."""
    parallel_types = (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    return type(model) in parallel_types
def intersect_dicts(da, db, exclude=()):
    """Intersect state dicts da/db on keys present in both with equal .shape,
    skipping any key containing an `exclude` substring; values come from da."""
    out = {}
    for key, value in da.items():
        if key not in db:
            continue
        if any(token in key for token in exclude):
            continue
        if value.shape != db[key].shape:
            continue
        out[key] = value
    return out
def initialize_weights(model):
    """Apply default layer settings across `model`: BatchNorm2d eps/momentum
    and inplace activations. Conv2d weights are intentionally left untouched."""
    for module in model.modules():
        kind = type(module)
        if kind is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif kind is nn.BatchNorm2d:
            module.eps = 1e-3
            module.momentum = 0.03
        elif kind in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6):
            module.inplace = True
def find_modules(model, mclass=nn.Conv2d):
    """Indices of entries in model.module_list that are instances of `mclass`."""
    return [idx for idx, layer in enumerate(model.module_list) if isinstance(layer, mclass)]
def sparsity(model):
    """Global model sparsity: fraction of parameters that are exactly zero
    (returned as a 0-d tensor, since the zero count is a tensor)."""
    total, zeros = 0., 0.
    for p in model.parameters():
        total += p.numel()
        zeros += (p == 0).sum()
    return zeros / total
def prune(model, amount=0.3):
    """L1-unstructured prune every Conv2d weight so the model reaches the
    requested global sparsity, then make the pruning permanent."""
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for _, module in model.named_modules():
        if isinstance(module, nn.Conv2d):
            prune.l1_unstructured(module, name='weight', amount=amount)
            prune.remove(module, 'weight')  # bake the mask into the weight permanently
    print(' %.3g global sparsity' % sparsity(model))
def fuse_conv_and_bn(conv, bn):
    """Fold a BatchNorm2d into the preceding Conv2d, returning one fused Conv2d.

    https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    """
    fused = nn.Conv2d(conv.in_channels,
                      conv.out_channels,
                      kernel_size=conv.kernel_size,
                      stride=conv.stride,
                      padding=conv.padding,
                      groups=conv.groups,
                      bias=True).requires_grad_(False).to(conv.weight.device)

    # Fused weight: diag(gamma / sqrt(var + eps)) @ W_conv (flattened per out-channel).
    weight_flat = conv.weight.clone().view(conv.out_channels, -1)
    scale = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fused.weight.copy_(torch.mm(scale, weight_flat).view(fused.weight.shape))

    # Fused bias: scale the conv bias (zero when absent) and add the BN shift.
    conv_bias = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    bn_shift = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fused.bias.copy_(torch.mm(scale, conv_bias.reshape(-1, 1)).reshape(-1) + bn_shift)
    return fused
def model_info(model, verbose=False, img_size=640):
    """Log a model summary: layer/parameter/gradient counts and, when thop is
    installed, an estimated GFLOPS figure at `img_size`.

    img_size may be an int or a [h, w] list, i.e. img_size=640 or [640, 320].
    verbose=True additionally prints one row per named parameter.
    """
    n_p = sum(x.numel() for x in model.parameters())  # number parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # number gradients
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:  # FLOPS estimate; thop is optional and the model may not expose stride/yaml
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)  # input
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]  # expand if int/float
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)  # 640x640 GFLOPS
    except Exception:  # fix: '(ImportError, Exception)' was redundant — Exception already covers ImportError
        fs = ''
    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
def load_classifier(name='resnet101', n=2):
    """Load a pretrained torchvision classifier and reshape its fc head to `n`
    zero-initialized output classes."""
    model = torchvision.models.__dict__[name](pretrained=True)
    # Torchvision ResNet conventions:
    #   input_size = [3, 224, 224], input_space = 'RGB', input_range = [0, 1]
    #   mean = [0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225]
    in_features = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, in_features), requires_grad=True)
    model.fc.out_features = n
    return model
def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(16,3,256,416)
    """Bilinearly scale an NCHW image batch by `ratio`; unless same_shape,
    pad/crop the result back to a gs-multiple (pad value 0.447 = imagenet mean).
    Returns img unchanged (same object) when ratio == 1.0."""
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    out = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)
    if not same_shape:
        # grow the target dims to the nearest gs multiple of the scaled size
        h, w = (math.ceil(dim * ratio / gs) * gs for dim in (h, w))
    return F.pad(out, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # value = imagenet mean
def copy_attr(a, b, include=(), exclude=()):
    """Copy instance attributes from b onto a.

    If `include` is non-empty, only those names are copied; names in `exclude`
    and names starting with '_' are always skipped.
    """
    for name, value in b.__dict__.items():
        if len(include) and name not in include:
            continue
        if name.startswith('_') or name in exclude:
            continue
        setattr(a, name, value)
class ModelEMA:
    """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """
    def __init__(self, model, decay=0.9999, updates=0):
        # Create EMA: a deep copy of the (unwrapped) model, held in eval mode.
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()  # FP32 EMA
        # if next(model.parameters()).device.type != 'cpu':
        #     self.ema.half()  # FP16 EMA
        self.updates = updates  # number of EMA updates
        # Effective decay ramps from 0 toward `decay` as updates grow (helps early epochs).
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))  # decay exponential ramp (to help early epochs)
        for p in self.ema.parameters():
            p.requires_grad_(False)  # EMA weights are never trained directly

    def update(self, model):
        # Update EMA parameters: ema = d * ema + (1 - d) * model, per state_dict tensor.
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()  # model state_dict
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:  # skip integer buffers (e.g. num_batches_tracked)
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        # Update EMA attributes (plain object attributes, not weights).
        copy_attr(self.ema, model, include, exclude)