@@ -1,6 +1,6 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="PublishConfigData" serverName="192.168.11.7" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<component name="PublishConfigData" serverName="th@192.168.11.8:32178" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<serverData> | |||
<paths name="10.21"> | |||
<serverdata> | |||
@@ -46,45 +46,10 @@ | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="dell@192.168.10.12:22"> | |||
<paths name="th@192.168.11.8:32178"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/chenyukun/algSch" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="root@212.129.223.66:20653"> | |||
<serverdata> | |||
<mappings> | |||
<mapping local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw2@192.168.10.66:22"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/chenyukun/dev/algSch" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw2@212.129.223.66:6500"> | |||
<serverdata> | |||
<mappings> | |||
<mapping local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw@192.168.10.11:22"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/thsw/chenyukun/algSch/" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw@212.129.223.66:6000"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/thsw/chenyukun/algSch" local="$PROJECT_DIR$" web="/" /> | |||
<mapping deploy="/home/th/tuo_heng/dev/tuoheng_alg" local="$PROJECT_DIR$" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> |
@@ -1,4 +1,4 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Python 3.8 (test)" project-jdk-type="Python SDK" /> | |||
<component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Remote Python 3.8.15 (sftp://th@192.168.11.8:32178/home/th/anaconda3/envs/chenyukun/bin/python3.8)" project-jdk-type="Python SDK" /> | |||
</project> |
@@ -5,8 +5,36 @@ | |||
</component> | |||
<component name="ChangeListManager"> | |||
<list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes"> | |||
<change beforePath="$PROJECT_DIR$/.idea/deployment.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/deployment.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/common/Constant.py" beforeDir="false" afterPath="$PROJECT_DIR$/common/Constant.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FeedbackThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FeedbackThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FileUpdateThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FileUploadThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/PullStreamThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/PullStreamThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/dsp_application.yml" beforeDir="false" afterPath="$PROJECT_DIR$/dsp_application.yml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/dsp_master.py" beforeDir="false" afterPath="$PROJECT_DIR$/dsp_master.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/entity/FeedBack.py" beforeDir="false" afterPath="$PROJECT_DIR$/entity/FeedBack.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/BaiduSdkEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/BaiduSdkEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/ExceptionEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ExceptionEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ModelTypeEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/exception/CustomerException.py" beforeDir="false" afterPath="$PROJECT_DIR$/exception/CustomerException.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/service/Dispatcher.py" beforeDir="false" afterPath="$PROJECT_DIR$/service/Dispatcher.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/test/ffmpeg11/ffmpeg2.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/ffmpeg11/ffmpeg2.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/AliyunSdk.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/AliyunSdk.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/Cv2Utils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/Cv2Utils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/FileUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/FileUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/GPUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/GPUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ImageUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ImageUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ImgBaiduSdk.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ImgBaiduSdk.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/KafkaUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/KafkaUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/LogUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/LogUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/OcrBaiduSdk.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/OcrBaiduSdk.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/YmlUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/YmlUtils.py" afterDir="false" /> | |||
</list> | |||
<option name="SHOW_DIALOG" value="false" /> | |||
<option name="HIGHLIGHT_CONFLICTS" value="true" /> | |||
@@ -135,23 +163,25 @@ | |||
"WebServerToolWindowPanel.toolwindow.show.date": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.permissions": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.size": "false", | |||
"last_opened_file_path": "D:/tuoheng/code/AIlib2", | |||
"last_opened_file_path": "D:/tuoheng/codenew/tuoheng_alg", | |||
"node.js.detected.package.eslint": "true", | |||
"node.js.detected.package.tslint": "true", | |||
"node.js.selected.package.eslint": "(autodetect)", | |||
"node.js.selected.package.tslint": "(autodetect)", | |||
"project.structure.last.edited": "模块", | |||
"project.structure.last.edited": "SDK", | |||
"project.structure.proportion": "0.15", | |||
"project.structure.side.proportion": "0.2816092" | |||
"project.structure.side.proportion": "0.2816092", | |||
"settings.editor.selected.configurable": "preferences.pluginManager", | |||
"vue.rearranger.settings.migration": "true" | |||
} | |||
}]]></component> | |||
<component name="RecentsManager"> | |||
<key name="CopyFile.RECENT_KEYS"> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\enums" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\.idea" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\demo" /> | |||
<recent name="D:\tuoheng\code\tuoheng_alg" /> | |||
<recent name="D:\tuoheng\code\tuoheng_alg\concurrency" /> | |||
<recent name="D:\tuoheng\code\tuoheng_alg\enums" /> | |||
<recent name="D:\work\alg_new\tuoheng_alg\enums" /> | |||
<recent name="D:\work\alg_new\tuoheng_alg\util" /> | |||
</key> | |||
<key name="MoveFile.RECENT_KEYS"> | |||
<recent name="D:\work\alg_new\tuoheng_alg\test\image" /> | |||
@@ -159,21 +189,21 @@ | |||
<recent name="D:\work\alg\tuoheng_alg\image" /> | |||
</key> | |||
</component> | |||
<component name="RunManager" selected="Python.ImgBaiduSdk"> | |||
<configuration name="ImgBaiduSdk" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<component name="RunManager" selected="Python.Test (2)"> | |||
<configuration name="协程笔记" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/util" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/协程" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/util/ImgBaiduSdk.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/协程/协程笔记.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -182,20 +212,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<configuration name="Test (1)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/线程" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/线程/Test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -204,20 +234,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="ffmpeg12" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="Test (2)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="D:\software\anaconda\envs\chenyukun\python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/路径" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/ffmpeg11/ffmpeg12.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/路径/Test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -226,20 +256,42 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="ffmpeg13" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="Test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="D:\software\anaconda\envs\chenyukun\python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/validate" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/validate/Test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/ffmpeg11/ffmpeg13.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -277,13 +329,13 @@ | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="D:\software\anaconda\envs\chenyukun\python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/str" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/集合" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/str/test.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/集合/test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -295,17 +347,19 @@ | |||
<list> | |||
<item itemvalue="Python.editImage" /> | |||
<item itemvalue="Python.mysqltest" /> | |||
<item itemvalue="Python.Test (2)" /> | |||
<item itemvalue="Python.test" /> | |||
<item itemvalue="Python.ffmpeg12" /> | |||
<item itemvalue="Python.ffmpeg13" /> | |||
<item itemvalue="Python.ImgBaiduSdk" /> | |||
<item itemvalue="Python.Test (1)" /> | |||
<item itemvalue="Python.协程笔记" /> | |||
<item itemvalue="Python.Test" /> | |||
</list> | |||
<recent_temporary> | |||
<list> | |||
<item itemvalue="Python.Test (2)" /> | |||
<item itemvalue="Python.test" /> | |||
<item itemvalue="Python.ffmpeg13" /> | |||
<item itemvalue="Python.ffmpeg12" /> | |||
<item itemvalue="Python.ImgBaiduSdk" /> | |||
<item itemvalue="Python.Test (1)" /> | |||
<item itemvalue="Python.协程笔记" /> | |||
<item itemvalue="Python.Test" /> | |||
</list> | |||
</recent_temporary> | |||
</component> | |||
@@ -423,7 +477,30 @@ | |||
<workItem from="1679013228398" duration="17427000" /> | |||
<workItem from="1679039229464" duration="9832000" /> | |||
<workItem from="1679118299629" duration="17688000" /> | |||
<workItem from="1679289612196" duration="5380000" /> | |||
<workItem from="1679289612196" duration="5820000" /> | |||
<workItem from="1679297557058" duration="1333000" /> | |||
<workItem from="1679359163976" duration="1997000" /> | |||
<workItem from="1679444345433" duration="1190000" /> | |||
<workItem from="1679633582926" duration="1979000" /> | |||
<workItem from="1679876991879" duration="1396000" /> | |||
<workItem from="1680136325711" duration="24199000" /> | |||
<workItem from="1680250415691" duration="1353000" /> | |||
<workItem from="1680486532876" duration="8132000" /> | |||
<workItem from="1680502907387" duration="10960000" /> | |||
<workItem from="1680527121128" duration="3411000" /> | |||
<workItem from="1680577929248" duration="5512000" /> | |||
<workItem from="1680741123267" duration="14728000" /> | |||
<workItem from="1680826640176" duration="21580000" /> | |||
<workItem from="1680914030055" duration="14971000" /> | |||
<workItem from="1680952718810" duration="967000" /> | |||
<workItem from="1681086404430" duration="27714000" /> | |||
<workItem from="1681170492379" duration="39568000" /> | |||
<workItem from="1681220684404" duration="2140000" /> | |||
<workItem from="1681258113350" duration="32577000" /> | |||
<workItem from="1681301257655" duration="429000" /> | |||
<workItem from="1681344786746" duration="5993000" /> | |||
<workItem from="1681363389283" duration="5626000" /> | |||
<workItem from="1681431288218" duration="1010000" /> | |||
</task> | |||
<servers /> | |||
</component> | |||
@@ -454,6 +531,11 @@ | |||
<line>2</line> | |||
<option name="timeStamp" value="2" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py</url> | |||
<line>341</line> | |||
<option name="timeStamp" value="3" /> | |||
</line-breakpoint> | |||
</breakpoints> | |||
</breakpoint-manager> | |||
</component> | |||
@@ -462,42 +544,46 @@ | |||
<select /> | |||
</component> | |||
<component name="com.intellij.coverage.CoverageDataManagerImpl"> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="字典 覆盖结果" MODIFIED="1668089121018" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/字典" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1668522825199" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo1.coverage" NAME="demo1 覆盖结果" MODIFIED="1680162882599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/demo" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg33.coverage" NAME="ffmpeg33 覆盖结果" MODIFIED="1670489109246" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1668437822632" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$minio.coverage" NAME="minio 覆盖结果" MODIFIED="1667465702864" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/minio1" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_stop.coverage" NAME="producer_stop 覆盖结果" MODIFIED="1668522920533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1675048794635" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start__1_.coverage" NAME="producer_start (1) 覆盖结果" MODIFIED="1665832569996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$ffmpeg11.coverage" NAME="ffmpeg11 覆盖结果" MODIFIED="1668410004435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg12.coverage" NAME="ffmpeg12 覆盖结果" MODIFIED="1675391366890" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa.coverage" NAME="aa 覆盖结果" MODIFIED="1670490313339" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/chenyukun/algSch/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$editImage.coverage" NAME="editImage 覆盖结果" MODIFIED="1678348350574" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/editimage" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1670999187123" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="协程1 覆盖结果" MODIFIED="1667866542122" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1671428635702" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__2_.coverage" NAME="Test (2) 覆盖结果" MODIFIED="1681431899512" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/路径" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg13.coverage" NAME="ffmpeg13 覆盖结果" MODIFIED="1675394160900" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1668522825199" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$read.coverage" NAME="read Coverage Results" MODIFIED="1663640070956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg33.coverage" NAME="ffmpeg33 覆盖结果" MODIFIED="1670489109246" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master Coverage Results" MODIFIED="1663403978477" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1__1_.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665820653649" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="协程笔记 覆盖结果" MODIFIED="1680926972744" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="协程1 覆盖结果" MODIFIED="1667866542122" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$3.coverage" NAME="协程3 覆盖结果" MODIFIED="1668147029048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start__1_.coverage" NAME="producer_start (1) 覆盖结果" MODIFIED="1665832569996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImgBaiduSdk.coverage" NAME="ImgBaiduSdk 覆盖结果" MODIFIED="1678355024003" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$ffmpeg11.coverage" NAME="ffmpeg11 覆盖结果" MODIFIED="1668410004435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$5.coverage" NAME="视频添加图片水印5 Coverage Results" MODIFIED="1661905982885" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$editImage.coverage" NAME="editImage 覆盖结果" MODIFIED="1678348350574" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/editimage" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$read.coverage" NAME="read Coverage Results" MODIFIED="1663640070956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1__1_.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665820653649" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$TimeUtils.coverage" NAME="TimeUtils Coverage Results" MODIFIED="1661222768678" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$2.coverage" NAME="协程2 覆盖结果" MODIFIED="1668066168428" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng/algSch/test/协程/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1671428635702" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImgBaiduSdk.coverage" NAME="ImgBaiduSdk 覆盖结果" MODIFIED="1678355024003" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_stop.coverage" NAME="producer_stop 覆盖结果" MODIFIED="1668522920533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master 覆盖结果" MODIFIED="1680503755624" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__1_.coverage" NAME="Test (1) 覆盖结果" MODIFIED="1681199611277" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1681199625806" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/集合" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test.coverage" NAME="Test 覆盖结果" MODIFIED="1680847539455" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/validate" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test2.coverage" NAME="test2 覆盖结果" MODIFIED="1669178077956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa.coverage" NAME="aa 覆盖结果" MODIFIED="1670490313339" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/chenyukun/algSch/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils__1_.coverage" NAME="KafkaUtils (1) Coverage Results" MODIFIED="1663464961001" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1668437822632" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$2.coverage" NAME="协程2 覆盖结果" MODIFIED="1668066168428" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng/algSch/test/协程/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg13.coverage" NAME="ffmpeg13 覆盖结果" MODIFIED="1675394160900" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
</component> | |||
</project> |
@@ -1,11 +1,13 @@ | |||
# Configuration file name
APPLICATION_CONFIG="dsp_application.yml" | |||
APPLICATION_CONFIG = "dsp_application.yml" | |||
# Character encoding
UTF_8="utf-8" | |||
UTF_8 = "utf-8" | |||
# File read mode
R='r' | |||
R = 'r' | |||
# Progress 100%
success_progess="1.0000" | |||
success_progess = "1.0000" | |||
# Width limit for downscaling each pulled frame: frames wider than 1400 px are halved, 1400 px or narrower are left unchanged
width = 1400 |
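
For context, a minimal sketch of how these module-level constants are typically consumed when loading the service configuration. The flat loader below is an assumption for illustration (the project's own YAML handling lives in util/YmlUtils.py and may differ); only the constant names come from the file above:

import yaml  # PyYAML, assumed to be the YAML backend

from common import Constant


def load_application_config(path: str = Constant.APPLICATION_CONFIG) -> dict:
    # Open dsp_application.yml using the shared read-mode and encoding constants.
    with open(path, Constant.R, encoding=Constant.UTF_8) as config_file:
        return yaml.safe_load(config_file)
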
@@ -16,6 +16,9 @@ class FeedbackThread(Thread): | |||
self.fbQueue = fbQueue | |||
self.content = content | |||
''' | |||
    Blocking get of a feedback message from the queue
''' | |||
def getFeedback(self): | |||
return self.fbQueue.get() | |||
@@ -40,4 +43,4 @@ class FeedbackThread(Thread): | |||
time.sleep(1) | |||
except Exception as e: | |||
logger.exception("问题反馈异常:{}, requestId:{}", e, feedback.get("request_id")) | |||
logger.info("问题反馈进程执行完成") | |||
logger.info("问题反馈线程执行完成") |
@@ -1,108 +0,0 @@ | |||
import asyncio | |||
import time | |||
from threading import Thread | |||
from loguru import logger | |||
import cv2 | |||
from util.AliyunSdk import AliyunOssSdk | |||
from util import TimeUtils, ImageUtils | |||
from entity import FeedBack | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
import numpy as np | |||
from PIL import Image | |||
class FileUpdate(Thread): | |||
def __init__(self, fbQueue, content, msg, imageQueue, mode_service): | |||
super().__init__() | |||
self.fbQueue = fbQueue | |||
self.content = content | |||
self.imageQueue = imageQueue | |||
self.mode_service = mode_service | |||
self.msg = msg | |||
    # # Get the next event from the image queue
def getImageQueue(self): | |||
eBody = None | |||
try: | |||
eBody = self.imageQueue.get() | |||
except Exception as e: | |||
pass | |||
return eBody | |||
    # Push the execution result onto the feedback queue
def sendResult(self, result): | |||
self.fbQueue.put(result) | |||
def build_image_name(base_dir, time_now, current_frame, last_frame, random_num, mode_type, requestId, image_type): | |||
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \ | |||
"-{requestId}_{image_type}.jpg" | |||
image_name = image_format.format( | |||
base_dir=base_dir, | |||
time_now=time_now, | |||
current_frame=current_frame, | |||
last_frame=last_frame, | |||
random_num=random_num, | |||
mode_type=mode_type, | |||
requestId=requestId, | |||
image_type=image_type) | |||
return image_name | |||
class ImageFileUpdate(FileUpdate): | |||
def run(self): | |||
logger.info("开始启动图片上传线程, requestId:{}", self.msg.get("request_id")) | |||
aliyunOssSdk = AliyunOssSdk(self.content, logger, self.msg.get("request_id")) | |||
aliyunOssSdk.get_oss_bucket() | |||
loop = asyncio.new_event_loop() | |||
asyncio.set_event_loop(loop) | |||
while True: | |||
try: | |||
image_msg = self.getImageQueue() | |||
if image_msg is not None and len(image_msg) > 0: | |||
image_dict = image_msg.get("image") | |||
command = image_msg.get("command") | |||
if command == 'stop': | |||
logger.info("触发文件上传停止指令!") | |||
break | |||
if image_dict is not None and len(image_dict) > 0: | |||
                        # Encode the original and AI frames as JPEG
or_result, or_image = cv2.imencode(".jpg", image_dict.get("or_frame")) | |||
ai_result, ai_image = cv2.imencode(".jpg", image_dict.get("ai_frame")) | |||
                        # Build the upload image names
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF) | |||
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S") | |||
                        # Image naming scheme to be revised later
or_image_name = build_image_name(self.msg.get('results_base_dir'), time_now, | |||
str(image_dict.get("current_frame")), | |||
str(image_dict.get("last_frame")), | |||
random_num, | |||
image_dict.get("mode_service"), | |||
self.msg.get('request_id'), "OR") | |||
ai_image_name = build_image_name(self.msg.get('results_base_dir'), time_now, | |||
str(image_dict.get("current_frame")), | |||
str(image_dict.get("last_frame")), | |||
random_num, | |||
image_dict.get("mode_service"), | |||
self.msg.get('request_id'), "AI") | |||
task = loop.create_task(aliyunOssSdk.upload_file(or_image_name, or_image.tobytes())) | |||
task1 = loop.create_task(aliyunOssSdk.upload_file(ai_image_name, ai_image.tobytes())) | |||
loop.run_until_complete(asyncio.wait([task, task1])) | |||
                        # Upload the original image
# aliyunOssSdk.upload_file(or_image_name, Image.fromarray(np.uint8(or_image)).tobytes()) | |||
# aliyunOssSdk.upload_file(ai_image_name, Image.fromarray(np.uint8(ai_image)).tobytes()) | |||
                        # Send the Kafka feedback message
self.sendResult({"feedback": FeedBack.message_feedback(self.msg.get('request_id'), | |||
AnalysisStatus.RUNNING.value, | |||
self.mode_service, "", "", | |||
image_dict.get("progress"), | |||
or_image_name, | |||
ai_image_name, | |||
image_dict.get("model_type_code"), | |||
image_dict.get("model_detection_code"), | |||
TimeUtils.now_date_to_str())}) | |||
except Exception as e: | |||
logger.exception("图片上传异常:{}, requestId:{}", e, self.msg.get("request_id")) | |||
loop.close() | |||
logger.info("结束图片上传线程, requestId:{}", self.msg.get("request_id")) |
@@ -0,0 +1,157 @@ | |||
import asyncio | |||
from concurrent.futures import ThreadPoolExecutor | |||
from threading import Thread | |||
from loguru import logger | |||
import cv2 | |||
from util.AliyunSdk import AliyunOssSdk | |||
from util import TimeUtils, ImageUtils | |||
from entity import FeedBack | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
class FileUpload(Thread): | |||
def __init__(self, fbQueue, content, msg, imageQueue, analyse_type): | |||
super().__init__() | |||
self.fbQueue = fbQueue | |||
self.content = content | |||
self.imageQueue = imageQueue | |||
self.analyse_type = analyse_type | |||
self.msg = msg | |||
self.similarity = self.content["service"]["filter"]["similarity"] | |||
self.picture_similarity = self.content["service"]["filter"]["picture_similarity"] | |||
self.frame_score = float(self.content["service"]["frame_score"]) | |||
self.frame_step = int(self.content["service"]["filter"]["frame_step"]) | |||
    # Push the execution result onto the feedback queue
def sendResult(self, result): | |||
self.fbQueue.put(result) | |||
def build_image_name(self, current_frame, last_frame, mode_type, image_type): | |||
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \ | |||
"-{requestId}_{image_type}.jpg" | |||
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF) | |||
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S") | |||
image_name = image_format.format( | |||
base_dir=self.msg.get('results_base_dir'), | |||
time_now=time_now, | |||
current_frame=current_frame, | |||
last_frame=last_frame, | |||
random_num=random_num, | |||
mode_type=mode_type, | |||
requestId=self.msg.get('request_id'), | |||
image_type=image_type) | |||
return image_name | |||
''' | |||
Image upload thread
''' | |||
class ImageFileUpload(FileUpload): | |||
def handle_image(self, high_score_image, frame_all): | |||
flag = True | |||
if self.picture_similarity and len(high_score_image) > 0: | |||
hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
hash2 = ImageUtils.dHash(frame_all.get("frame")) | |||
dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
similarity = 1 - dist * 1.0 / 64 | |||
if similarity >= self.similarity: | |||
flag = False | |||
if len(high_score_image) > 0: | |||
diff_frame_num = frame_all.get("cct_frame") - high_score_image.get("current_frame") | |||
if diff_frame_num < self.frame_step: | |||
flag = False | |||
det_result = frame_all.get("det_xywh") | |||
if flag and det_result is not None and len(det_result) > 0: | |||
model_info = [] | |||
            # Assemble detection info for the current image
num = 0 | |||
for c in list(det_result.keys()): | |||
mode_code_info = {"code": c, "target": []} | |||
det_xywh = det_result.get(c) | |||
det_num = 0 | |||
if det_xywh is not None and len(det_xywh) > 0: | |||
# [float(cls_c), xc,yc,w,h, float(conf_c),code] | |||
for d in det_xywh: | |||
                        score = d[5]  # confidence score
                        target = str(int(d[0]))  # detected target class
mode_code_info["target"].append(target) | |||
if score < self.frame_score: | |||
det_num += 1 | |||
model_info.append(mode_code_info) | |||
if det_num != len(det_xywh): | |||
num += 1 | |||
if num == 0: | |||
return None | |||
if len(model_info) > 0: | |||
high_score_image["or_frame"] = frame_all.get("frame") | |||
high_score_image["current_frame"] = frame_all.get("cct_frame") | |||
image_result = { | |||
"or_frame": frame_all.get("frame"), | |||
"ai_frame": frame_all.get("ai_frame"), | |||
"current_frame": frame_all.get("cct_frame"), | |||
"last_frame": frame_all.get("cct_frame") + self.frame_step, | |||
"progress": "", | |||
"mode_service": self.analyse_type, | |||
"model_info": model_info | |||
} | |||
return image_result | |||
return None | |||
def run(self): | |||
logger.info("启动图片上传线程, requestId:{}", self.msg.get("request_id")) | |||
        # Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(self.content, logger, self.msg.get("request_id")) | |||
aliyunOssSdk.get_oss_bucket() | |||
high_score_image = {} | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
try: | |||
while True: | |||
try: | |||
                        # Get the next message from the image queue
image_msg = self.imageQueue.get() | |||
if image_msg is not None and len(image_msg) > 0: | |||
image_dict = image_msg.get("image") | |||
command = image_msg.get("command") | |||
if command == 'stop': | |||
break | |||
if image_dict is not None and len(image_dict) > 0: | |||
image_resut = self.handle_image(high_score_image, image_dict) | |||
if image_resut is not None: | |||
                                    # Encode the original and AI frames as JPEG
or_result, or_image = cv2.imencode(".jpg", image_resut.get("or_frame")) | |||
ai_result, ai_image = cv2.imencode(".jpg", image_resut.get("ai_frame")) | |||
                                    # Image naming scheme to be revised later
or_image_name = self.build_image_name(str(image_resut.get("current_frame")), | |||
str(image_resut.get("last_frame")), | |||
image_resut.get("mode_service"), | |||
"OR") | |||
ai_image_name = self.build_image_name(str(image_resut.get("current_frame")), | |||
str(image_resut.get("last_frame")), | |||
image_resut.get("mode_service"), | |||
"AI") | |||
or_future = t.submit(aliyunOssSdk.sync_upload_file, or_image_name, | |||
or_image.tobytes()) | |||
ai_future = t.submit(aliyunOssSdk.sync_upload_file, ai_image_name, | |||
ai_image.tobytes()) | |||
or_future.result() | |||
ai_future.result() | |||
                                    # Send the Kafka feedback message
self.sendResult({"feedback": FeedBack.message_feedback(self.msg.get('request_id'), | |||
AnalysisStatus.RUNNING.value, | |||
self.analyse_type, "", "", | |||
image_resut.get("progress"), | |||
or_image_name, | |||
ai_image_name, | |||
image_resut.get( | |||
"model_info"), | |||
TimeUtils.now_date_to_str())}) | |||
except Exception as e: | |||
logger.exception("图片上传异常:{}, requestId:{}", e, self.msg.get("request_id")) | |||
finally: | |||
high_score_image.clear() | |||
                t.shutdown(wait=True)  # wait=True blocks until all submitted upload tasks finish
logger.info("停止图片上传线程, requestId:{}", self.msg.get("request_id")) |
@@ -9,12 +9,12 @@ from entity.FeedBack import message_feedback | |||
class Heartbeat(Thread): | |||
def __init__(self, fbQueue, hbQueue, request_id, mode_service): | |||
def __init__(self, fbQueue, hbQueue, request_id, analyse_type): | |||
super().__init__() | |||
self.fbQueue = fbQueue | |||
self.hbQueue = hbQueue | |||
self.request_id = request_id | |||
self.mode_service = mode_service | |||
self.analyse_type = analyse_type | |||
self.progress = "0.0000" | |||
def getHbQueue(self): | |||
@@ -35,7 +35,7 @@ class Heartbeat(Thread): | |||
def sendhbMessage(self, analysisStatus): | |||
self.sendResult({"feedback": message_feedback(self.request_id, | |||
analysisStatus, | |||
self.mode_service, | |||
self.analyse_type, | |||
progress=self.progress, | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
@@ -3,23 +3,24 @@ import time | |||
from queue import Queue | |||
from threading import Thread | |||
import GPUtil | |||
from loguru import logger | |||
from entity.FeedBack import recording_feedback | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.RecordingStatusEnum import RecordingStatus | |||
from exception.CustomerException import ServiceException | |||
from util import GPUtils | |||
from util.Cv2Utils import Cv2Util | |||
class PullStreamThread(Thread): | |||
def __init__(self, msg, content, pullQueue, fbQueue, gpu_ids): | |||
def __init__(self, msg, content, pullQueue, fbQueue): | |||
super().__init__() | |||
self.command = Queue() | |||
self.msg = msg | |||
self.content = content | |||
self.pullQueue = pullQueue | |||
self.gpu_ids = gpu_ids | |||
self.fbQueue = fbQueue | |||
self.recording_pull_stream_timeout = int(self.content["service"]["recording_pull_stream_timeout"]) | |||
@@ -44,14 +45,15 @@ class RecordingPullStreamThread(PullStreamThread): | |||
cv2tool = None | |||
try: | |||
logger.info("录屏任务, 开启拉流, requestId:{}", self.msg.get("request_id")) | |||
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"), content=self.content, gpu_ids=self.gpu_ids) | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"), content=self.content, | |||
gpu_ids=gpu_ids) | |||
cv2_init_num = 1 | |||
init_pull_num = 1 | |||
start_time = time.time() | |||
start_time_2 = time.time() | |||
concurrent_frame = 1 | |||
cv2tool.get_recording_video_info() | |||
cv2tool.recording_pull_p() | |||
while True: | |||
body = self.getCommand() | |||
if body is not None and len(body) > 0: | |||
@@ -60,17 +62,11 @@ class RecordingPullStreamThread(PullStreamThread): | |||
self.sendPullQueue({"status": "2"}) | |||
break | |||
if self.pullQueue.full(): | |||
time.sleep(0.05) | |||
time.sleep(0.1) | |||
continue | |||
                # Check whether the video info and the pull-stream handle exist
if cv2tool.checkconfig(): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
                    # If no stream has been pulled yet, send a notice roughly every 30 seconds telling the user the pull URL cannot be read
if cv2_init_num % 60 == 0: | |||
self.fbQueue.put({"recording": recording_feedback(self.msg.get("request_id"), | |||
RecordingStatus.RECORDING_RUNNING.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])}) | |||
pull_stream_init_timeout = time.time() - start_time | |||
if pull_stream_init_timeout > self.recording_pull_stream_timeout: | |||
logger.info("录屏拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, | |||
@@ -87,16 +83,19 @@ class RecordingPullStreamThread(PullStreamThread): | |||
if frame is None: | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id")) | |||
pull_stream_read_timeout = time.time() - start_time_2 | |||
if init_pull_num % 60 == 0: | |||
self.fbQueue.put({"recording": recording_feedback(self.msg.get("request_id"), | |||
RecordingStatus.RECORDING_RUNNING.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])}) | |||
if pull_stream_read_timeout > self.recording_pull_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
if cv2tool.all_frames is not None and len(cv2tool.all_frames) > 0: | |||
if concurrent_frame < cv2tool.all_frames - 100: | |||
logger.info("流异常结束:requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "3"}) | |||
break | |||
logger.info("拉流线程结束, requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "2"}) | |||
break | |||
init_pull_num += 1 | |||
time.sleep(0.5) | |||
cv2tool.recording_pull_p() |
@@ -4,9 +4,8 @@ from multiprocessing import Process, Queue | |||
from loguru import logger | |||
from concurrency.FileUpdateThread import ImageFileUpdate | |||
from concurrency.FileUploadThread import ImageFileUpload | |||
from concurrency.HeartbeatThread import Heartbeat | |||
from enums.AnalysisTypeEnum import AnalysisType | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util import LogUtils | |||
@@ -14,7 +13,7 @@ from util.Cv2Utils import Cv2Util | |||
class PullVideoStreamProcess(Process): | |||
def __init__(self, msg, content, pullQueue, fbQueue, hbQueue, imageQueue): | |||
def __init__(self, msg, content, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type): | |||
super().__init__() | |||
self.command = Queue() | |||
self.msg = msg | |||
@@ -23,12 +22,11 @@ class PullVideoStreamProcess(Process): | |||
self.fbQueue = fbQueue | |||
self.hbQueue = hbQueue | |||
self.imageQueue = imageQueue | |||
self.step = int(self.content["service"]["frame_step"]) | |||
self.analyse_type = analyse_type | |||
self.pull_stream_timeout = int(self.content["service"]["cv2_pull_stream_timeout"]) | |||
self.read_stream_timeout = int(self.content["service"]["cv2_read_stream_timeout"]) | |||
self.service_timeout = int(self.content["service"]["timeout"]) | |||
def getCommand(self): | |||
eBody = None | |||
try: | |||
@@ -46,80 +44,87 @@ class PullVideoStreamProcess(Process): | |||
def sendImageResult(self, result): | |||
self.imageQueue.put(result) | |||
def start_File_upload(self): | |||
imageFileUpload = ImageFileUpload(self.fbQueue, self.content, self.msg, self.imageQueue, self.analyse_type) | |||
imageFileUpload.setDaemon(True) | |||
imageFileUpload.start() | |||
return imageFileUpload | |||
def start_heartbeat(self): | |||
hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), self.analyse_type) | |||
hb.setDaemon(True) | |||
hb.start() | |||
return hb | |||
def check(self, start_time, imageFileUpload, hb): | |||
create_task_time = time.time() - start_time | |||
if create_task_time > self.service_timeout: | |||
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1]) | |||
# 检测图片上传线程是否正常运行 | |||
if not imageFileUpload.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
# 检测心跳线程是否正常运行 | |||
if not hb.is_alive(): | |||
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!") | |||
class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
def run(self): | |||
LogUtils.init_log(self.content) | |||
cv2tool = None | |||
imageFileUpdate = None | |||
imageFileUpload = None | |||
hb = None | |||
try: | |||
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, AnalysisType.ONLINE.value) | |||
imageFileUpdate.setDaemon(True) | |||
imageFileUpdate.start() | |||
hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), AnalysisType.ONLINE.value) | |||
hb.setDaemon(True) | |||
hb.start() | |||
logger.info("开启视频拉流线程, requestId:{}", self.msg.get("request_id")) | |||
# 加载日志框架 | |||
LogUtils.init_log(self.content) | |||
logger.info("开启视频拉流进程, requestId:{}", self.msg.get("request_id")) | |||
# 开启图片上传线程 | |||
imageFileUpload = self.start_File_upload() | |||
# 开启心跳线程 | |||
hb = self.start_heartbeat() | |||
# 初始化拉流工具类 | |||
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id")) | |||
cv2_init_num = 1 | |||
init_pull_num = 1 | |||
start_time = time.time() | |||
start_time_1 = time.time() | |||
start_time_2 = time.time() | |||
pull_stream_start_time = time.time() | |||
pull_stream_read_start_time = time.time() | |||
concurrent_frame = 1 | |||
stop_imageFile = False | |||
stop_pull_stream_step = False | |||
while True: | |||
end_time = time.time() | |||
create_task_time = int(end_time - start_time) | |||
# 检测任务执行是否超时 | |||
if create_task_time > self.service_timeout: | |||
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1]) | |||
# 检测图片上传线程是否正常运行 | |||
if not imageFileUpdate.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
# 检测心跳线程是否正常运行 | |||
if not hb.is_alive(): | |||
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!") | |||
body = self.getCommand() | |||
if body is not None and len(body) > 0: | |||
if 'stop_pull_stream' == body.get("command"): | |||
# 检测任务执行是否超时、心跳线程是否正常、图片上传线程是否正常 | |||
self.check(start_time, imageFileUpload, hb) | |||
# 获取指令信息 | |||
command = self.getCommand() | |||
if command is not None and len(command) > 0: | |||
# 停止拉流 | |||
if 'stop_pull_stream' == command.get("command"): | |||
self.sendPullQueue({"status": "9"}) # 9 停止拉流 | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
if 'stop_image' == body.get("command"): | |||
time.sleep(5) | |||
# 停止图片上传线程 | |||
if 'stop_image_hb' == command.get("command"): | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
hb.join(60 * 3) | |||
logger.error("图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if 'stop_ex' == body.get("command"): | |||
time.sleep(5) | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
# self.pullQueue.cancel_join_thread() | |||
logger.error("拉流、图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if stop_imageFile: | |||
if stop_pull_stream_step: | |||
time.sleep(1) | |||
continue | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
# 检测视频信息是否存在或拉流对象是否存在 | |||
if cv2tool.checkconfig() or cv2tool.pull_p is None: | |||
if cv2tool.checkconfig(): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
pull_stream_init_timeout = time.time() - start_time_1 | |||
pull_stream_init_timeout = time.time() - pull_stream_start_time | |||
if pull_stream_init_timeout > self.pull_stream_timeout: | |||
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, | |||
self.msg.get("request_id")) | |||
@@ -128,27 +133,25 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
cv2_init_num += 1 | |||
time.sleep(0.5) | |||
cv2tool.get_video_info() | |||
cv2tool.build_pull_p() | |||
continue | |||
start_time_1 = time.time() | |||
pull_stream_start_time = time.time() | |||
cv2_init_num = 1 | |||
frame = cv2tool.read() | |||
if frame is None: | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id")) | |||
pull_stream_read_timeout = time.time() - start_time_2 | |||
pull_stream_read_timeout = time.time() - pull_stream_read_start_time | |||
if pull_stream_read_timeout > self.read_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "3"}) # 3 超时 | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
init_pull_num += 1 | |||
time.sleep(0.1) | |||
cv2tool.build_pull_p() | |||
continue | |||
init_pull_num = 1 | |||
start_time_2 = time.time() | |||
pull_stream_read_start_time = time.time() | |||
self.sendPullQueue({"status": "4", | |||
"frame": frame, | |||
"cct_frame": concurrent_frame, | |||
@@ -166,86 +169,66 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
finally: | |||
if cv2tool: | |||
cv2tool.close() | |||
if imageFileUpdate: | |||
if imageFileUpload: | |||
self.sendImageResult({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
if hb: | |||
self.hbQueue.put({"command": "stop"}) | |||
hb.join(60*3) | |||
hb.join(60 * 3) | |||
logger.info("实时拉流线程结束, requestId: {}", self.msg.get("request_id")) | |||
class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
def run(self): | |||
LogUtils.init_log(self.content) | |||
cv2tool = None | |||
imageFileUpdate = None | |||
imageFileUpload = None | |||
hb = None | |||
try: | |||
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, AnalysisType.OFFLINE.value) | |||
imageFileUpdate.setDaemon(True) | |||
imageFileUpdate.start() | |||
hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), AnalysisType.OFFLINE.value) | |||
hb.setDaemon(True) | |||
hb.start() | |||
# 初始化日志 | |||
LogUtils.init_log(self.content) | |||
# 开启图片上传线程 | |||
imageFileUpload = self.start_File_upload() | |||
# 开启心跳线程 | |||
hb = self.start_heartbeat() | |||
cv2tool = Cv2Util(pullUrl=self.msg.get('original_url'), requestId=self.msg.get("request_id")) | |||
cv2_init_num = 1 | |||
start_time = time.time() | |||
cv2tool.get_video_info() | |||
concurrent_frame = 1 | |||
stop_imageFile = False | |||
stop_pull_stream_step = False | |||
while True: | |||
end_time = time.time() | |||
create_task_time = int(end_time - start_time) | |||
if create_task_time > self.service_timeout: | |||
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1]) | |||
if not imageFileUpdate.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
if not hb.is_alive(): | |||
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!") | |||
self.check(start_time, imageFileUpload, hb) | |||
body = self.getCommand() | |||
if body is not None and len(body) > 0: | |||
if 'stop_pull_stream' == body.get("command"): | |||
self.sendPullQueue({"status": "9"}) # 9 停止拉流 | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
if 'stop_image' == body.get("command"): | |||
if 'stop_image_hb' == body.get("command"): | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
hb.join(60 * 3) | |||
logger.info("图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if 'stop_ex' == body.get("command"): | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
# self.pullQueue.cancel_join_thread() | |||
logger.info("图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if stop_imageFile: | |||
if stop_pull_stream_step: | |||
time.sleep(1) | |||
continue | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
if cv2tool.checkconfig() or cv2tool.pull_p is None: | |||
if cv2tool.checkconfig(): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
if cv2_init_num > 3: | |||
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, | |||
self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
time.sleep(1) | |||
cv2tool.get_video_info() | |||
cv2tool.build_pull_p() | |||
continue | |||
frame = cv2tool.read() | |||
if frame is None: | |||
@@ -255,13 +238,13 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
if concurrent_frame < cv2tool.all_frames - 100: | |||
logger.info("离线拉流异常结束:requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "3"}) | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
logger.info("离线拉流线程结束, requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "2"}) | |||
cv2tool.close() | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
continue | |||
self.sendPullQueue({"status": "4", | |||
"frame": frame, | |||
@@ -280,10 +263,10 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
finally: | |||
if cv2tool is not None: | |||
cv2tool.close() | |||
if imageFileUpdate: | |||
if imageFileUpload: | |||
self.sendImageResult({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
if hb: | |||
self.hbQueue.put({"command": "stop"}) | |||
hb.join(60*3) | |||
hb.join(60 * 3) | |||
logger.info("离线拉流线程结束, requestId: {}", self.msg.get("request_id")) |
@@ -126,13 +126,13 @@ aliyun: | |||
prod: | |||
CateId: 1000468340 | |||
service: | |||
frame_step: 300 # 多少帧数步长之间获取一次分析图片 | |||
frame_score: 0.4 # 获取最低得分以上的图片 | |||
filter: | |||
# 识别相似度是否开启 | |||
picture_similarity: True | |||
# 相似度阈值
similarity: 0.65 | |||
frame_step: 80 | |||
timeout: 21600 # 一次识别任务超时时间,单位秒,默认6个小时 | |||
cv2_pull_stream_timeout: 3600 # 直播开始视频未推流超时时间 | |||
cv2_read_stream_timeout: 1800 # 直播读流中超时时间 | |||
@@ -153,7 +153,9 @@ baidu: | |||
APP_ID: 31096755 | |||
API_KEY: CiWrt4iyxOly36n3kR7utiAG | |||
SECRET_KEY: K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v | |||
# 模型相关配置 | |||
model: | |||
limit: 5 # 模型组合个数限制 | |||
# 日志设置 | |||
log: | |||
# 是否开启文件输出 True:开启 False:关闭 |
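A minimal sketch (not part of the diff) of how these service keys are consumed once YmlUtils.getConfigs() has loaded the YAML into the `content` dict; the variable names below are illustrative only.

frame_step = int(content["service"]["frame_step"])                      # sample one frame every N frames
frame_score = float(content["service"]["frame_score"])                  # keep only detections scoring above this
picture_similarity = content["service"]["filter"]["picture_similarity"] # True -> de-duplicate similar frames
similarity_threshold = float(content["service"]["filter"]["similarity"])
task_timeout = int(content["service"]["timeout"])                       # 21600 s = 6 h per analysis task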
@@ -1,4 +1,7 @@ | |||
# -*- coding: utf-8 -*- | |||
import os | |||
import sys | |||
from service import Dispatcher | |||
# import torch | |||
@@ -7,5 +10,7 @@ from service import Dispatcher | |||
''' | |||
if __name__ == '__main__': | |||
print("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙") | |||
# 获取主程序执行根路径 | |||
base_dir = os.path.dirname(os.path.realpath(sys.argv[0])) | |||
# torch.multiprocessing.set_start_method('spawn') | |||
Dispatcher.DispatcherService().start_service() | |||
Dispatcher.DispatcherService(base_dir).start_service() |
@@ -1,5 +1,5 @@ | |||
def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="", | |||
model_type_code="", model_detection_code='', analyse_time="", analyse_results=""): | |||
model_info=[], analyse_time="", analyse_results=""): | |||
taskfb = {} | |||
results = [] | |||
result_msg = {} | |||
@@ -12,8 +12,7 @@ def message_feedback(requestId, status, type, error_code="", error_msg="", progr | |||
result_msg["original_url"] = original_url | |||
result_msg["sign_url"] = sign_url | |||
result_msg["analyse_results"] = analyse_results | |||
result_msg["model_type_code"] = model_type_code | |||
result_msg["model_detection_code"] = model_detection_code | |||
result_msg["model_info"] = model_info | |||
result_msg["analyse_time"] = analyse_time | |||
results.append(result_msg) | |||
taskfb["results"] = results |
@@ -1,6 +1,7 @@ | |||
from enum import Enum, unique | |||
''' | |||
ocr官方文档: https://ai.baidu.com/ai-doc/OCR/zkibizyhz | |||
官方文档: https://ai.baidu.com/ai-doc/VEHICLE/rk3inf9tj | |||
参数1: 异常编号 | |||
参数2: 异常英文描述 | |||
@@ -28,25 +29,25 @@ class BaiduSdkErrorEnum(Enum): | |||
GET_SERVICE_TOKEN_FAILED = (13, "Get service token failed", "获取token失败", 0, 2) | |||
IAM_CERTIFICATION_FAILED = (14, "IAM Certification failed", "IAM 鉴权失败", 0, 2) | |||
IAM_CERTIFICATION_FAILED = (14, "IAM Certification failed", "IAM 鉴权失败", 0, 1) | |||
APP_NOT_EXSITS_OR_CREATE_FAILED = (15, "app not exsits or create failed", "应用不存在或者创建失败", 0, 0) | |||
API_DAILY_REQUEST_LIMIT_REACHED = (17, "Open api daily request limit reached", "每天请求量超限额!", 1, 5) | |||
API_DAILY_REQUEST_LIMIT_REACHED = (17, "Open api daily request limit reached", "每天请求量超限额!", 1, 2) | |||
API_QPS_REQUEST_LIMIT_REACHED = (18, "Open api qps request limit reached", "QPS超限额!", 1, 5) | |||
API_QPS_REQUEST_LIMIT_REACHED = (18, "Open api qps request limit reached", "QPS超限额!", 1, 10) | |||
API_TOTAL_REQUEST_LIMIT_REACHED = (19, "Open api total request limit reached", "请求总量超限额!", 1, 2) | |||
INVALID_TOKEN = (100, "Invalid parameter", "无效的access_token参数,token拉取失败", 0, 2) | |||
INVALID_TOKEN = (100, "Invalid parameter", "无效的access_token参数,token拉取失败", 0, 1) | |||
ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID = (110, "Access token invalid or no longer valid", "access_token无效,token有效期为30天", 0, 2) | |||
ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID = (110, "Access token invalid or no longer valid", "access_token无效,token有效期为30天", 0, 1) | |||
ACCESS_TOKEN_EXPIRED = (111, "Access token expired", "access token过期,token有效期为30天", 0, 2) | |||
ACCESS_TOKEN_EXPIRED = (111, "Access token expired", "access token过期,token有效期为30天", 0, 1) | |||
INTERNAL_ERROR = (282000, "internal error", "服务器内部错误", 0, 1) | |||
INVALID_PARAM = (216100, "invalid param", "请求中包含非法参数!", 0, 0) | |||
INVALID_PARAM = (216100, "invalid param", "请求中包含非法参数!", 0, 1) | |||
NOT_ENOUGH_PARAM = (216101, "not enough param", "缺少必须的参数!", 0, 0) | |||
@@ -64,9 +65,9 @@ class BaiduSdkErrorEnum(Enum): | |||
IMAGE_SIZE_BASE_ERROR = (216203, "image size error", "上传的图片编码有误", 1, 0) | |||
RECOGNIZE_ERROR = (216630, "recognize error", "识别错误", 2, 3) | |||
RECOGNIZE_ERROR = (216630, "recognize error", "识别错误", 2, 2) | |||
DETECT_ERROR = (216634, "detect error", "检测错误", 2, 3) | |||
DETECT_ERROR = (216634, "detect error", "检测错误", 2, 2) | |||
MISSING_PARAMETERS = (282003, "missing parameters: {参数名}", "请求参数缺失", 0, 0) | |||
@@ -82,6 +83,14 @@ class BaiduSdkErrorEnum(Enum): | |||
TARGET_RECOGNIZE_ERROR = (282103, "target recognize error", "图片目标识别错误!", 2, 1) | |||
URLS_NOT_EXIT = (282110, "urls not exit", "URL参数不存在,请核对URL后再次提交!", 1, 0) | |||
URL_FORMAT_ILLEGAL = (282111, "url format illegal", "URL格式非法!", 1, 0) | |||
URL_DOWNLOAD_TIMEOUT = (282112, "url download timeout", "URL下载超时!", 1, 0)
URL_RESPONSE_INVALID = (282113, "url response invalid", "URL返回无效参数!", 1, 0) | |||
URL_SIZE_ERROR = (282114, "url size error", "URL长度超过1024字节或为0!", 1, 0) | |||
REQUEST_ID_NOT_EXIST = (282808, "request id: xxxxx not exist", "request id xxxxx 不存在", 0, 0) | |||
@@ -153,6 +162,10 @@ BAIDUERRORDATA = { | |||
BaiduSdkErrorEnum.SDK_READ_IMAGE_FILE_ERROR.value[0]: BaiduSdkErrorEnum.SDK_READ_IMAGE_FILE_ERROR, | |||
BaiduSdkErrorEnum.SDK_CONNECTION_OR_READ_DATA_TIME_OUT.value[0]: BaiduSdkErrorEnum.SDK_CONNECTION_OR_READ_DATA_TIME_OUT, | |||
BaiduSdkErrorEnum.SDK_UNSUPPORTED_IMAGE_FORMAT.value[0]: BaiduSdkErrorEnum.SDK_UNSUPPORTED_IMAGE_FORMAT, | |||
BaiduSdkErrorEnum.URLS_NOT_EXIT.value[0]: BaiduSdkErrorEnum.URLS_NOT_EXIT, | |||
BaiduSdkErrorEnum.URL_FORMAT_ILLEGAL.value[0]: BaiduSdkErrorEnum.URL_FORMAT_ILLEGAL, | |||
BaiduSdkErrorEnum.URL_DOWNLOAD_TIMEOUT.value[0]: BaiduSdkErrorEnum.URL_DOWNLOAD_TIMEOUT, | |||
BaiduSdkErrorEnum.URL_RESPONSE_INVALID.value[0]: BaiduSdkErrorEnum.URL_RESPONSE_INVALID | |||
} | |||
@unique |
@@ -27,7 +27,7 @@ class ExceptionType(Enum): | |||
ILLEGAL_PARAMETER_FORMAT = ("SP010", "非法参数格式!") | |||
PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP011", "推流通道被占用, 请稍后再试!") | |||
PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP011", "推流通道可能被占用, 请稍后再试!") | |||
VIDEO_RESOLUTION_EXCEPTION = ("SP012", "不支持该分辨率类型的视频,请切换分辨率再试!") | |||
@@ -39,6 +39,30 @@ class ExceptionType(Enum): | |||
OR_VIDEO_DO_NOT_EXEIST_EXCEPTION = ("SP016", "原视频不存在!") | |||
MODEL_LOADING_EXCEPTION = ("SP017", "模型加载异常!") | |||
MODEL_ANALYSE_EXCEPTION = ("SP018", "算法模型分析异常!") | |||
AI_MODEL_CONFIG_EXCEPTION = ("SP019", "模型配置不能为空!") | |||
AI_MODEL_GET_CONFIG_EXCEPTION = ("SP020", "获取模型配置异常, 请检查模型配置是否正确!") | |||
MODEL_GROUP_LIMIT_EXCEPTION = ("SP021", "模型组合个数超过限制!") | |||
MODEL_NOT_SUPPORT_VIDEO_EXCEPTION = ("SP022", "%s不支持视频识别!") | |||
MODEL_NOT_SUPPORT_IMAGE_EXCEPTION = ("SP023", "%s不支持图片识别!") | |||
THE_DETECTION_TARGET_CANNOT_BE_EMPTY = ("SP024", "检测目标不能为空!") | |||
URL_ADDRESS_ACCESS_FAILED = ("SP025", "URL地址访问失败, 请检测URL地址是否正确!") | |||
UNIVERSAL_TEXT_RECOGNITION_FAILED = ("SP026", "识别失败!") | |||
COORDINATE_ACQUISITION_FAILED = ("SP027", "飞行坐标识别异常!") | |||
SERVICE_COMMON_EXCEPTION = ("SP997", "公共服务异常!") | |||
NO_GPU_RESOURCES = ("SP998", "暂无GPU资源可以使用,请稍后再试!") | |||
SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常!") |
@@ -1,32 +1,46 @@ | |||
from enum import Enum, unique | |||
# 异常枚举 | |||
''' | |||
参数说明 | |||
1. 编号 | |||
2. 模型编号 | |||
3. 模型名称 | |||
4. 选用的模型名称 | |||
5. 是否可以参与多个模型组合调用 | |||
0: 视频、图片模型组合都支持 | |||
1: 只支持视频模型之间的组合 | |||
2: 只支持图片模型之间的组合 | |||
''' | |||
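# Illustrative reading of one entry (matching the parameter list documented above):
# WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', 0)
#   value[0] -> id "1", value[1] -> model code "001", value[2] -> display name,
#   value[3] -> algorithm package name 'river', value[4] -> 0 (can be combined with both video and image models)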
@unique | |||
class ModelType(Enum): | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river') | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', 0) | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', 0) | |||
# TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'road') | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2') | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', 0) | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'road') | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, 2) | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None) | |||
PLATE_MODEL = ("5", "005", "车牌模型", None, 2) | |||
PLATE_MODEL = ("5", "005", "车牌模型", None) | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', 0) | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle') | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', 0) | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian') | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', 0) | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire') | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', 0) | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer') | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', 0) | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad') | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship', 0) | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship') | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, 2) | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None) | |||
# TRAFFICACCIDENT_MODEL = ("13", "013", "交通事故模型", 'trafficAccident', 0) | |||
def checkCode(code): | |||
for model in ModelType: |
@@ -8,9 +8,12 @@ from loguru import logger | |||
class ServiceException(Exception): # 继承异常类 | |||
def __init__(self, code, msg): | |||
def __init__(self, code, msg, desc=None): | |||
self.code = code | |||
self.msg = msg | |||
if desc is None: | |||
self.msg = msg | |||
else: | |||
self.msg = msg % desc | |||
def __str__(self): | |||
logger.error("异常编码:{}, 异常描述:{}", self.code, self.msg) |
@@ -1,7 +1,6 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
import GPUtil | |||
from cerberus import Validator | |||
from concurrency.FeedbackThread import FeedbackThread | |||
from entity.FeedBack import message_feedback, recording_feedback | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
@@ -24,127 +23,121 @@ from util import GPUtils | |||
class DispatcherService: | |||
# 初始化 | |||
def __init__(self): | |||
# 获取DSP环境所需要的配置 | |||
self.content = YmlUtils.getConfigs() | |||
# 初始化日志 | |||
def __init__(self, base_dir): | |||
##################################### 初始化alg相关配置 ##################################### | |||
self.base_dir = base_dir | |||
self.content = YmlUtils.getConfigs(base_dir) | |||
self.content['base_dir'] = base_dir | |||
self.feedbackThread = None | |||
###################################### 初始化日志框架 ###################################### | |||
LogUtils.init_log(self.content) | |||
# 检查视频保存地址,不存在创建文件夹,迁移初始化 | |||
#################################### 初始化视频保存文件夹 #################################### | |||
FileUtils.create_dir_not_exist(self.content["video"]["file_path"]) | |||
# 记录当前正在执行的实时流分析任务 | |||
self.onlineProcesses = {} | |||
# 记录当前正在执行的离线视频分析任务 | |||
self.offlineProcesses = {} | |||
# 记录当前正在执行的图片分析任务 | |||
self.photoProcesses = {} | |||
# 记录当前录屏任务 | |||
self.recordingProcesses = {} | |||
# 算法反馈队列 | |||
###################################### 创建任务记录字典 ###################################### | |||
self.onlineProcesses = {} # 记录当前正在执行的实时流分析任务 | |||
self.offlineProcesses = {} # 记录当前正在执行的离线视频分析任务 | |||
self.photoProcesses = {} # 记录当前正在执行的图片分析任务 | |||
self.recordingProcesses = {} # 记录当前录屏任务 | |||
self.listeningProcesses = [self.onlineProcesses, self.offlineProcesses, self.photoProcesses, | |||
self.recordingProcesses] | |||
######################################## 反馈队列 ######################################## | |||
self.fbQueue = Queue() | |||
###################################### 监听topic信息 ###################################### | |||
self.online_topic = self.content["kafka"]["topic"]["dsp-alg-online-tasks-topic"] | |||
self.offline_topic = self.content["kafka"]["topic"]["dsp-alg-offline-tasks-topic"] | |||
self.image_topic = self.content["kafka"]["topic"]["dsp-alg-image-tasks-topic"] | |||
self.recording_task_topic = self.content["kafka"]["topic"]["dsp-recording-task-topic"] | |||
self.topics = [self.online_topic, self.offline_topic, self.image_topic, self.recording_task_topic] | |||
self.analysisType = { | |||
self.online_topic: (AnalysisType.ONLINE.value, lambda x: self.online(x)), | |||
self.offline_topic: (AnalysisType.OFFLINE.value, lambda x: self.offline(x)), | |||
self.image_topic: (AnalysisType.IMAGE.value, lambda x: self.image(x)), | |||
self.recording_task_topic: (AnalysisType.RECORDING.value, lambda x: self.recording(x)) | |||
self.online_topic: (AnalysisType.ONLINE.value, | |||
lambda x: self.online(x), | |||
lambda x, y, z, t: self.identify_method(x, y, z, t)), | |||
self.offline_topic: (AnalysisType.OFFLINE.value, | |||
lambda x: self.offline(x), | |||
lambda x, y, z, t: self.identify_method(x, y, z, t)), | |||
self.image_topic: (AnalysisType.IMAGE.value, | |||
lambda x: self.image(x), | |||
lambda x, y, z, t: self.identify_method(x, y, z, t)), | |||
self.recording_task_topic: (AnalysisType.RECORDING.value, | |||
lambda x: self.recording(x), | |||
lambda x, y, z, t: self.recording_method(x, y, z, t)) | |||
} | |||
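# Each topic maps to a 3-tuple: (analysis type constant, task handler, consume wrapper).
# The consume wrapper (identify_method / recording_method below) commits the Kafka offset and
# converts any exception raised by the handler into a feedback or recording message on fbQueue.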
# 服务调用启动方法 | |||
def start_service(self): | |||
# 启动问题反馈线程 | |||
feedbackThread = self.start_feedback_thread() | |||
# 初始化kafka监听者 | |||
customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.content, topics=self.topics) | |||
print("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙") | |||
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙") | |||
# 循环消息处理 | |||
while True: | |||
try: | |||
# 检查任务进程运行情况,去除活动的任务 | |||
self.check_process_task() | |||
# 校验问题反馈线程是否正常 | |||
if not feedbackThread.is_alive(): | |||
logger.error("======================问题反馈线程异常停止======================") | |||
break | |||
self.start_feedback_thread() | |||
msg = customerKafkaConsumer.poll() | |||
if msg is not None and len(msg) > 0: | |||
for k, v in msg.items(): | |||
for m in v: | |||
message = m.value | |||
analysisType = self.analysisType.get(m.topic)[0] | |||
# 录屏逻辑 | |||
print("aaaaaaaaaaaaaaaaaaaaaaaaaa", analysisType) | |||
if analysisType == AnalysisType.RECORDING.value: | |||
try: | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, message.get("request_id")) | |||
self.analysisType.get(m.topic)[1](message) | |||
except ServiceException as s: | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id")) | |||
if analysisType is not None: | |||
recording = {"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
error_code=s.code, | |||
error_msg=s.msg)} | |||
self.fbQueue.put(recording) | |||
except Exception as e: | |||
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id")) | |||
recording = { | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])} | |||
self.fbQueue.put(recording) | |||
else: | |||
try: | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, message.get("request_id")) | |||
self.analysisType.get(m.topic)[1](message) | |||
except ServiceException as s: | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id")) | |||
feedback = { | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
s.code, | |||
s.msg, | |||
analyse_time=TimeUtils.now_date_to_str())} | |||
self.fbQueue.put(feedback) | |||
except Exception as e: | |||
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id")) | |||
feedback = { | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())} | |||
self.fbQueue.put(feedback) | |||
self.analysisType.get(m.topic)[2](customerKafkaConsumer, m, message, analysisType) | |||
else: | |||
time.sleep(1) | |||
except Exception as e: | |||
logger.exception("主线程异常:", e) | |||
def checkGPU(self, msgId): | |||
gpu_ids = None | |||
while True: | |||
GPUtil.showUtilization() | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
if gpu_ids is None or len(gpu_ids) == 0: | |||
logger.warning("暂无可用GPU资源,5秒后重试, 可用gpu数: {}, msgId: {}", len(gpu_ids), msgId) | |||
time.sleep(5) | |||
continue | |||
else: | |||
break | |||
return gpu_ids | |||
def identify_method(self, customerKafkaConsumer, m, message, analysisType): | |||
try: | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info( | |||
"当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, message.get("request_id")) | |||
self.analysisType.get(m.topic)[1](message) | |||
except ServiceException as s: | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
s.code, | |||
s.msg, | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
except Exception as e: | |||
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
def recording_method(self, customerKafkaConsumer, m, message, analysisType): | |||
try: | |||
customerKafkaConsumer.commit_offset(m) | |||
logger.info( | |||
"当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, message.get("request_id")) | |||
self.analysisType.get(m.topic)[1](message) | |||
except ServiceException as s: | |||
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
error_code=s.code, | |||
error_msg=s.msg)}) | |||
except Exception as e: | |||
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id")) | |||
self.fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])}) | |||
# 开启实时进程 | |||
def startOnlineProcess(self, msg, gpu_ids): | |||
@@ -152,7 +145,8 @@ class DispatcherService: | |||
if self.onlineProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids} | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids, | |||
"analyse_type": AnalysisType.ONLINE.value} | |||
# 创建在线识别进程并启动 | |||
oirp = OnlineIntelligentRecognitionProcess(cfg) | |||
oirp.start() | |||
@@ -169,18 +163,10 @@ class DispatcherService: | |||
# 检查实时、离线进程任务运行情况,去除不活动的任务 | |||
def check_process_task(self): | |||
for requestId in list(self.onlineProcesses.keys()): | |||
if not self.onlineProcesses[requestId].is_alive(): | |||
del self.onlineProcesses[requestId] | |||
for requestId in list(self.offlineProcesses.keys()): | |||
if not self.offlineProcesses[requestId].is_alive(): | |||
del self.offlineProcesses[requestId] | |||
for requestId in list(self.photoProcesses.keys()): | |||
if not self.photoProcesses[requestId].is_alive(): | |||
del self.photoProcesses[requestId] | |||
for requestId in list(self.recordingProcesses.keys()): | |||
if not self.recordingProcesses[requestId].is_alive(): | |||
del self.recordingProcesses[requestId] | |||
for process in self.listeningProcesses: | |||
for requestId in list(process.keys()): | |||
if not process[requestId].is_alive(): | |||
del process[requestId] | |||
# 开启离线进程 | |||
def startOfflineProcess(self, msg, gpu_ids): | |||
@@ -188,7 +174,8 @@ class DispatcherService: | |||
if self.offlineProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids} | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids, | |||
"analyse_type": AnalysisType.OFFLINE.value} | |||
# 创建在线识别进程并启动 | |||
ofirp = OfflineIntelligentRecognitionProcess(cfg) | |||
ofirp.start() | |||
@@ -205,128 +192,56 @@ class DispatcherService: | |||
# 开启图片分析进程 | |||
def startImageProcess(self, msg, gpu_ids): | |||
# 相同的requestId不在执行 | |||
if self.photoProcesses.get(msg.get("request_id")): | |||
pp = self.photoProcesses.get(msg.get("request_id")) | |||
if pp is not None: | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids} | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids, | |||
"analyse_type": AnalysisType.IMAGE.value} | |||
# 创建在线识别进程并启动 | |||
imagep = PhotosIntelligentRecognitionProcess(cfg) | |||
imagep.start() | |||
self.photoProcesses[msg.get("request_id")] = imagep | |||
# 校验实时kafka消息 | |||
def check_online_msg(self, msg): | |||
requestId = msg.get("request_id") | |||
command = msg.get("command") | |||
models = msg.get("models") | |||
pull_url = msg.get("pull_url") | |||
push_url = msg.get("push_url") | |||
results_base_dir = msg.get("results_base_dir") | |||
if command is None: | |||
return False | |||
if requestId is None: | |||
return False | |||
if command == "start" and models is None: | |||
return False | |||
if models is not None: | |||
for model in models: | |||
if model.get("code") is None: | |||
return False | |||
if model.get("categories") is None: | |||
return False | |||
if command == "start" and pull_url is None: | |||
return False | |||
if command == "start" and push_url is None: | |||
return False | |||
if command == "start" and results_base_dir is None: | |||
return False | |||
return True | |||
# 校验实时kafka消息 | |||
def check_offline_msg(self, msg): | |||
requestId = msg.get("request_id") | |||
models = msg.get("models") | |||
command = msg.get("command") | |||
original_url = msg.get("original_url") | |||
original_type = msg.get("original_type") | |||
push_url = msg.get("push_url") | |||
results_base_dir = msg.get("results_base_dir") | |||
if command is None: | |||
return False | |||
if requestId is None: | |||
return False | |||
if command == 'start' and models is None: | |||
return False | |||
if models is not None: | |||
for model in models: | |||
if model.get("code") is None: | |||
return False | |||
if model.get("categories") is None: | |||
return False | |||
if command == 'start' and original_url is None: | |||
return False | |||
if command == 'start' and push_url is None: | |||
return False | |||
if command == 'start' and original_type is None: | |||
return False | |||
if command == 'start' and results_base_dir is None: | |||
return False | |||
return True | |||
# 校验图片kafka消息 | |||
def check_image_msg(self, msg): | |||
requestId = msg.get("request_id") | |||
models = msg.get("models") | |||
command = msg.get("command") | |||
image_urls = msg.get("image_urls") | |||
results_base_dir = msg.get("results_base_dir") | |||
if command is None: | |||
return False | |||
if requestId is None: | |||
return False | |||
if command == 'start' and models is None: | |||
return False | |||
if models is not None: | |||
for model in models: | |||
if model.get("code") is None: | |||
return False | |||
if model.get("categories") is None: | |||
return False | |||
if command == 'start' and image_urls is None: | |||
return False | |||
if command == 'start' and results_base_dir is None: | |||
return False | |||
return True | |||
''' | |||
开启问题反馈线程 | |||
校验kafka消息 | |||
''' | |||
def check_msg(self, msg): | |||
try: | |||
v = Validator(schema, allow_unknown=True) | |||
result = v.validate(msg) | |||
if not result: | |||
logger.error("参数校验异常: {}", v.errors) | |||
if msg.get("request_id") is not None and len(msg.get("request_id")) > 0: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], v.errors) | |||
return result | |||
except ServiceException as s: | |||
raise s | |||
except Exception as e: | |||
logger.exception("参数校验异常: {}", e) | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
def start_feedback_thread(self): | |||
feedbackThread = FeedbackThread(self.fbQueue, self.content) | |||
feedbackThread.setDaemon(True) | |||
feedbackThread.start() | |||
return feedbackThread | |||
if self.feedbackThread is None or not self.feedbackThread.is_alive(): | |||
self.feedbackThread = FeedbackThread(self.fbQueue, self.content) | |||
self.feedbackThread.setDaemon(True) | |||
self.feedbackThread.start() | |||
def online(self, message): | |||
check_result = self.check_online_msg(message) | |||
''' | |||
在线分析逻辑 | |||
''' | |||
def online(self, message): | |||
# 参数校验 | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
return | |||
if 'start' == message.get("command"): | |||
logger.info("开始实时分析") | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids): | |||
feedback = { | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
AnalysisType.ONLINE.value, | |||
ExceptionType.NO_GPU_RESOURCES.value[0], | |||
ExceptionType.NO_GPU_RESOURCES.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())} | |||
self.fbQueue.put(feedback) | |||
return | |||
gpu_ids = GPUtils.check_gpu_resource(self.content) | |||
self.startOnlineProcess(message, gpu_ids) | |||
elif 'stop' == message.get("command"): | |||
self.stopOnlineProcess(message) | |||
@@ -334,82 +249,46 @@ class DispatcherService: | |||
pass | |||
def offline(self, message): | |||
check_result = self.check_offline_msg(message) | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
return | |||
if 'start' == message.get("command"): | |||
logger.info("开始离线分析") | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids): | |||
feedback = { | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
AnalysisType.OFFLINE.value, | |||
ExceptionType.NO_GPU_RESOURCES.value[0], | |||
ExceptionType.NO_GPU_RESOURCES.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())} | |||
self.fbQueue.put(feedback) | |||
return | |||
gpu_ids = GPUtils.check_gpu_resource(self.content) | |||
self.startOfflineProcess(message, gpu_ids) | |||
time.sleep(3) | |||
elif 'stop' == message.get("command"): | |||
self.stopOfflineProcess(message) | |||
else: | |||
pass | |||
def image(self, message): | |||
check_result = self.check_image_msg(message) | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
return | |||
if 'start' == message.get("command"): | |||
logger.info("开始图片分析") | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
if gpu_ids is None or len(gpu_ids) == 0: | |||
raise ServiceException(ExceptionType.NO_GPU_RESOURCES.value[0], | |||
ExceptionType.NO_GPU_RESOURCES.value[1]) | |||
gpu_ids = GPUtils.check_gpu_resource(self.content) | |||
self.startImageProcess(message, gpu_ids) | |||
# elif 'stop' == message.get("command"): | |||
# self.stopImageProcess(message) | |||
else: | |||
pass | |||
# 校验录屏参数 | |||
def check_recording_msg(self, msg): | |||
requestId = msg.get("request_id") | |||
command = msg.get("command") | |||
pullUrl = msg.get("pull_url") | |||
pushUrl = msg.get("push_url") | |||
if not command or command not in ["start", "stop"]: | |||
return False | |||
if not requestId: | |||
return False | |||
if command == 'start' and not pullUrl: | |||
return False | |||
return True | |||
def recording(self, message): | |||
# 校验入参 | |||
check_result = self.check_recording_msg(message) | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
return | |||
if 'start' == message.get("command"): | |||
logger.info("开始实时分析") | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
self.startRecordingProcess(message, gpu_ids) | |||
logger.info("开始录屏") | |||
self.startRecordingProcess(message) | |||
elif 'stop' == message.get("command"): | |||
self.stopRecordingProcess(message) | |||
else: | |||
pass | |||
# 开启录屏进程 | |||
def startRecordingProcess(self, msg, gpu_ids): | |||
def startRecordingProcess(self, msg): | |||
if self.recordingProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
return | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids} | |||
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, | |||
"analyse_type": AnalysisType.RECORDING.value} | |||
srp = ScreenRecordingProcess(cfg) | |||
srp.start() | |||
self.recordingProcesses[msg.get("request_id")] = srp | |||
@@ -421,3 +300,97 @@ class DispatcherService: | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id")) | |||
return | |||
rdp.sendEvent({'command': 'stop'}) | |||
# 校验schema规则定义 | |||
schema = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ['start', 'stop'] | |||
}, | |||
'pull_url': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'push_url': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'original_url': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'original_type': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'image_urls': { | |||
'type': 'list', | |||
'required': False, | |||
'schema': { | |||
'type': 'string', | |||
'empty': False, | |||
'maxlength': 5000 | |||
} | |||
}, | |||
'results_base_dir': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'regex': r'^[a-zA-Z0-9]{0,36}$' | |||
}, | |||
'models': { | |||
'type': 'list', | |||
'required': False, | |||
'nullable': True, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
'code': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': 'categories', | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
'categories': { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': 'code', | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
'id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{0,255}$'}, | |||
'config': { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': 'id', | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} |
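A minimal validation sketch (the message body below is illustrative, not taken from the diff), showing how check_msg applies this cerberus schema:

from cerberus import Validator

sample = {
    "request_id": "abc123",
    "command": "start",
    "pull_url": "rtmp://example/live/in",
    "push_url": "rtmp://example/live/out",
    "results_base_dir": "task001",
    "models": [{"code": "001", "categories": [{"id": "0", "config": {}}]}]
}
v = Validator(schema, allow_unknown=True)
print(v.validate(sample))  # True when the message matches the schema
print(v.errors)            # {} on success, otherwise field -> error details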
@@ -0,0 +1,223 @@ | |||
import copy | |||
import json | |||
import os | |||
import time | |||
from concurrent.futures import ThreadPoolExecutor | |||
from multiprocessing import Queue, Process | |||
from loguru import logger | |||
import subprocess as sp | |||
import cv2 | |||
import numpy as np | |||
from aip import AipImageClassify | |||
import sys | |||
from enums.BaiduSdkEnum import BAIDUERRORDATA, VehicleEnumVALUE | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.ModelTypeEnum import ModelType | |||
from exception.CustomerException import ServiceException | |||
from util.ModelUtils import Model | |||
def get_recording_video_info(url): | |||
try: | |||
video_info = 'ffprobe -show_format -show_streams -of json %s' % url | |||
p = sp.Popen(video_info, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) | |||
out, err = p.communicate(timeout=17) | |||
if p.returncode != 0: | |||
raise Exception("未获取视频信息!!!!!") | |||
probe = json.loads(out.decode('utf-8')) | |||
if probe is None or probe.get("streams") is None: | |||
raise Exception("未获取视频信息!!!!!:") | |||
video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None) | |||
if video_stream is None: | |||
raise Exception("未获取视频信息!!!!!") | |||
width = video_stream.get('width') | |||
height = video_stream.get('height') | |||
nb_frames = video_stream.get('nb_frames') | |||
fps = video_stream.get('r_frame_rate') | |||
up, down = str(fps).split('/') | |||
fps = int(int(up) / int(down))
return (width, height, nb_frames, fps) | |||
except Exception as e: | |||
raise e | |||
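# Example call (the path is a placeholder): width, height, nb_frames, fps = get_recording_video_info("/path/to/video.mp4")
# Note: ffprobe reports nb_frames as a string, so cast with int(nb_frames) before doing arithmetic on it.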
client = AipImageClassify(str(31096670), 'Dam3O4tgPRN3qh4OYE82dbg7', '1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa') | |||
def vehicleDetect(client, image, options={}):
reply_num = 0 | |||
reply_value = None | |||
while True: | |||
try: | |||
options["show"] = "true" | |||
res_image = client.vehicleDetect(image, options)
error_code = res_image.get("error_code") | |||
if error_code: | |||
enum = BAIDUERRORDATA.get(error_code) | |||
# 如果异常编码未知, 返回空值 | |||
if enum is None: | |||
logger.error("百度云车辆检测异常!error_code:{}", error_code) | |||
return None | |||
# 重试指定次数后,还是异常,输出统一内部异常 | |||
if enum.value[3] == 0: | |||
if reply_value is None: | |||
reply_value = 10 | |||
logger.error("百度云车辆检测异常!error_code:{}, error_msg:{}, reply_num:{}", enum.value[0], enum.value[2], reply_num) | |||
raise Exception() | |||
# 重试指定次数后,还是异常,输出对应的异常 | |||
if enum.value[3] == 1: | |||
if reply_value is None: | |||
reply_value = 10 | |||
raise ServiceException(str(enum.value[0]), enum.value[2]) | |||
# 重试指定次数后,还是异常,输出空 | |||
if enum.value[3] == 2: | |||
if reply_value is None: | |||
reply_value = 10 | |||
if reply_num >= reply_value: | |||
return None | |||
return res_image | |||
except ServiceException as s: | |||
time.sleep(0.2) | |||
reply_num += 1 | |||
if reply_num > reply_value: | |||
logger.exception("车辆检测识别失败: {}", s.msg) | |||
raise ServiceException(s.code, s.msg)
except Exception as e: | |||
logger.exception("车辆检测失败: {}, 当前重试次数:{}", e, reply_num) | |||
time.sleep(0.2) | |||
reply_num += 1 | |||
if reply_value is None: reply_value = 10
if reply_num > reply_value:
logger.exception("车辆检测识别失败: {}", e) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
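# How the branches above treat enum.value[3]:
#   0 -> raises a bare Exception; the generic handler retries and finally raises SERVICE_INNER_EXCEPTION
#   1 -> raises the mapped ServiceException; retried and re-raised once the retry budget is exhausted
#   2 -> returns None once reply_num reaches the budget, otherwise the response is returned as-is
# The retry budget (reply_value) defaults to 10 attempts with a 0.2 s pause between attempts.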
def mark(content, info, img, color): | |||
score = info.get("probability") | |||
if score is None: | |||
score = info.get("location").get("score") | |||
text = "%s: %.2f]" % (content, score) | |||
text_xy = (info.get("location").get("left"), info.get("location").get("top") - 25) | |||
img_lu = (info.get("location").get("left"), info.get("location").get("top")) | |||
img_rd = (info.get("location").get("left") + info.get("location").get("width"), | |||
info.get("location").get("top") + info.get("location").get("height")) | |||
cv2.putText(img, text, text_xy, cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2, cv2.LINE_AA) | |||
count = 1 | |||
if img.shape[1] > 1600: | |||
count = 2 | |||
cv2.rectangle(img, img_lu, img_rd, color, count) | |||
return img | |||
def pull_stream(url, queue, nb_frames): | |||
command = ['ffmpeg -re -y -i ' + url +' -f rawvideo -pix_fmt bgr24 -an -'] | |||
pull_p = sp.Popen(command, stdout=sp.PIPE, shell=True) | |||
aa = 0 | |||
try: | |||
while True: | |||
if queue.qsize() == 200: | |||
time.sleep(1) | |||
continue | |||
in_bytes = pull_p.stdout.read(width*height*3) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
img = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3]) | |||
queue.put({"status": "1", "img": img}) | |||
aa += 1
else: | |||
if aa - 10 > int(nb_frames):
queue.put({"status": "2"}) | |||
pull_p.terminate() | |||
pull_p.wait() | |||
break
except Exception as e: | |||
logger.exception("拉流异常: {}", e) | |||
finally: | |||
pull_p.terminate() | |||
pull_p.wait() | |||
def getQueue(queue): | |||
eBody = None | |||
try: | |||
eBody = queue.get(block=False) | |||
return eBody | |||
except Exception as e: | |||
pass | |||
return eBody | |||
def buildFrame(queue, senlin_mod, client, width, height, nb_frames, fps): | |||
frames = [] | |||
status = None | |||
for i in range(queue.qsize()): | |||
frame_result = getQueue(queue) | |||
if frame_result is None: | |||
time.sleep(0.01) | |||
continue | |||
if frame_result.get("status") == '1': | |||
frames.append((frame_result.get("img"), senlin_mod, client, width, height, nb_frames, fps)) | |||
else: | |||
status = frame_result.get("status") | |||
return frames, status | |||
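# Each work item appended above is the tuple (img, senlin_mod, client, width, height, nb_frames, fps);
# process() below unpacks it positionally: frame[0] image, frame[1] model, frame[2] Baidu client, frame[3] width.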
def process(frame): | |||
try: | |||
p_result, timeOut = frame[1].process(copy.deepcopy(frame[0]), frame[3]) | |||
or_result, or_image = cv2.imencode(".jpg", frame[0]) | |||
result = vehicleDetect(frame[2], or_image) | |||
if result is not None: | |||
vehicleInfo = result.get("vehicle_info") | |||
if vehicleInfo is not None and len(vehicleInfo) > 0: | |||
for i, info in enumerate(vehicleInfo): | |||
value = VehicleEnumVALUE.get(info.get("type")) | |||
if value is None: | |||
logger.error("车辆识别出现未支持的目标类型!type:{}", info.get("type")) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
p_result[1] = mark(value.value[1], info, p_result[1], (255, 0, 255)) | |||
frame_merge = np.hstack((frame[0], p_result[1])) | |||
return frame_merge | |||
except Exception as e: | |||
logger.exception("模型分析异常: {}", e) | |||
return None | |||
queue = Queue(200) | |||
url ='/home/th/tuo_heng/dev/11.mp4' | |||
width, height, nb_frames, fps = get_recording_video_info(url) | |||
my_process = Process(target = pull_stream, args=(url, queue, nb_frames)) | |||
#启动子进程 | |||
my_process.start() | |||
current_path = os.path.abspath(os.path.dirname(__file__)) | |||
import GPUtil | |||
senlin_mod = Model(str(GPUtil.getAvailable()[0]), [2,3,4], logger, "11111", ModelType.FOREST_FARM_MODEL) | |||
or_video_file = cv2.VideoWriter("aaa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps, | |||
(int(width) * 2, int(height))) | |||
with ThreadPoolExecutor(max_workers=3) as t: | |||
task_frame = None | |||
while True: | |||
frames = [] | |||
status = None | |||
if task_frame is not None: | |||
frames, status = task_frame.result() | |||
task_frame = t.submit(buildFrame, queue, senlin_mod, client, width, height, nb_frames, fps) | |||
if len(frames) == 0 and status is None: | |||
time.sleep(0.02) | |||
continue | |||
if frames is not None and len(frames) > 0: | |||
for result in t.map(process, frames): | |||
if result is not None: | |||
or_video_file.write(result) | |||
if status is None: | |||
continue | |||
if status.get("status") == "2": | |||
t.shutdown(wait=False) | |||
or_video_file.release() | |||
t.shutdown(wait=False) | |||
or_video_file.release() | |||
@@ -0,0 +1,189 @@ | |||
import asyncio | |||
import copy | |||
import json | |||
import os | |||
import time | |||
from concurrent.futures import ThreadPoolExecutor | |||
from multiprocessing import Queue, Process | |||
from loguru import logger | |||
import subprocess as sp | |||
import cv2 | |||
import numpy as np | |||
from aip import AipImageClassify | |||
import sys | |||
from enums.BaiduSdkEnum import BAIDUERRORDATA, VehicleEnumVALUE | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.ModelTypeEnum import ModelType | |||
from exception.CustomerException import ServiceException | |||
from util.ModelUtils import Model | |||
def get_recording_video_info(url): | |||
try: | |||
video_info = 'ffprobe -show_format -show_streams -of json %s' % url | |||
p = sp.Popen(video_info, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) | |||
out, err = p.communicate(timeout=17) | |||
if p.returncode != 0: | |||
raise Exception("未获取视频信息!!!!!") | |||
probe = json.loads(out.decode('utf-8')) | |||
if probe is None or probe.get("streams") is None: | |||
raise Exception("未获取视频信息!!!!!:") | |||
video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None) | |||
if video_stream is None: | |||
raise Exception("未获取视频信息!!!!!") | |||
width = video_stream.get('width') | |||
height = video_stream.get('height') | |||
nb_frames = video_stream.get('nb_frames') | |||
fps = video_stream.get('r_frame_rate') | |||
up, down = str(fps).split('/') | |||
fps = int(int(up) / int(down))
return (width, height, nb_frames, fps) | |||
except Exception as e: | |||
raise e | |||
client = AipImageClassify(str(31096670), 'Dam3O4tgPRN3qh4OYE82dbg7', '1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa') | |||
def vehicleDetect(client, image, options={}):
reply_num = 0 | |||
reply_value = None | |||
while True: | |||
try: | |||
options["show"] = "true" | |||
res_image = client.vehicleDetect(image, options)
error_code = res_image.get("error_code") | |||
if error_code: | |||
enum = BAIDUERRORDATA.get(error_code) | |||
# 如果异常编码未知, 返回空值 | |||
if enum is None: | |||
logger.error("百度云车辆检测异常!error_code:{}", error_code) | |||
return None | |||
# 重试指定次数后,还是异常,输出统一内部异常 | |||
if enum.value[3] == 0: | |||
if reply_value is None: | |||
reply_value = 10 | |||
logger.error("百度云车辆检测异常!error_code:{}, error_msg:{}, reply_num:{}", enum.value[0], enum.value[2], reply_num) | |||
raise Exception() | |||
# 重试指定次数后,还是异常,输出对应的异常 | |||
if enum.value[3] == 1: | |||
if reply_value is None: | |||
reply_value = 10 | |||
raise ServiceException(str(enum.value[0]), enum.value[2]) | |||
# 重试指定次数后,还是异常,输出空 | |||
if enum.value[3] == 2: | |||
if reply_value is None: | |||
reply_value = 10 | |||
if reply_num >= reply_value: | |||
return None | |||
return res_image | |||
except ServiceException as s: | |||
time.sleep(0.2) | |||
reply_num += 1 | |||
if reply_num > reply_value: | |||
logger.exception("车辆检测识别失败: {}", s.msg) | |||
raise ServiceException(s.code, s.msg)
except Exception as e: | |||
logger.exception("车辆检测失败: {}, 当前重试次数:{}", e, reply_num) | |||
time.sleep(0.2) | |||
reply_num += 1 | |||
if reply_value is None: reply_value = 10
if reply_num > reply_value:
logger.exception("车辆检测识别失败: {}", e) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
def mark(content, info, img, color): | |||
score = info.get("probability") | |||
if score is None: | |||
score = info.get("location").get("score") | |||
text = "%s: %.2f" % (content, score) | |||
text_xy = (info.get("location").get("left"), info.get("location").get("top") - 25) | |||
img_lu = (info.get("location").get("left"), info.get("location").get("top")) | |||
img_rd = (info.get("location").get("left") + info.get("location").get("width"), | |||
info.get("location").get("top") + info.get("location").get("height")) | |||
cv2.putText(img, text, text_xy, cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2, cv2.LINE_AA) | |||
count = 1 | |||
if img.shape[1] > 1600: | |||
count = 2 | |||
cv2.rectangle(img, img_lu, img_rd, color, count) | |||
return img | |||
async def mode_handler(img, width): | |||
return senlin_mod.process(copy.deepcopy(img), width) | |||
async def modprocess(img, width): | |||
p_result, timeOut = await mode_handler(img, width) | |||
return p_result, timeOut | |||
async def car_handler(img, width): | |||
return car_mod.process(copy.deepcopy(img), width) | |||
async def carprocess(img, width): | |||
p_result, timeOut = await car_handler(img, width) | |||
return p_result, timeOut | |||
async def baidu_handler(img, client): | |||
or_result, or_image = cv2.imencode(".jpg", img) | |||
return vehicleDetect(client, or_image) | |||
async def baiduprocess(img, client): | |||
result = await baidu_handler(img, client) | |||
return result | |||
url ='/home/th/tuo_heng/dev/11.mp4' | |||
width, height, nb_frames, fps = get_recording_video_info(url) | |||
current_path = os.path.abspath(os.path.dirname(__file__)) | |||
import GPUtil | |||
senlin_mod = Model(str(GPUtil.getAvailable()[0]), [2,3,4], logger, "11112", ModelType.FOREST_FARM_MODEL) | |||
car_mod = Model(str(GPUtil.getAvailable()[0]), [0], logger, "11112", ModelType.VEHICLE_MODEL) | |||
or_video_file = cv2.VideoWriter("aaa2.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps, | |||
(int(width) * 2, int(height))) | |||
command = 'ffmpeg -re -y -i ' + url + ' -f rawvideo -pix_fmt bgr24 -an -'
pull_p = sp.Popen(command, stdout=sp.PIPE, shell=True) | |||
num = 0 | |||
loop = asyncio.new_event_loop() | |||
asyncio.set_event_loop(loop) | |||
try: | |||
while True: | |||
print(num, nb_frames) | |||
in_bytes = pull_p.stdout.read(width*height*3) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
img = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3]) | |||
# r = loop.run_until_complete(asyncio.gather(modprocess(img, width), carprocess(img, width))) | |||
p_result, timeOut = senlin_mod.process(copy.deepcopy(img), width) | |||
p_result1, timeOut1 = car_mod.process(copy.deepcopy(p_result[1]), width) | |||
# r = loop.run_until_complete(asyncio.gather(modprocess(img, width), baiduprocess(img, client))) | |||
# p_result, timeOut = r[0] | |||
# result = r[1] | |||
# p_result, timeOut = senlin_mod.process(copy.deepcopy(img), width) | |||
# if result is not None: | |||
# vehicleInfo = result.get("vehicle_info") | |||
# if vehicleInfo is not None and len(vehicleInfo) > 0: | |||
# for i, info in enumerate(vehicleInfo): | |||
# value = VehicleEnumVALUE.get(info.get("type")) | |||
# if value is None: | |||
# logger.error("车辆识别出现未支持的目标类型!type:{}", info.get("type")) | |||
# raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
# ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
# p_result[1] = mark(value.value[1], info, p_result[1], (255, 0, 255)) | |||
frame_merge = np.hstack((img, p_result1[1])) | |||
or_video_file.write(frame_merge) | |||
num+=1 | |||
else: | |||
            if num - 10 > int(nb_frames):
                break
finally: | |||
or_video_file.release() | |||
pull_p.terminate() | |||
pull_p.wait() | |||
@@ -1,9 +1,6 @@ | |||
import copy | |||
import subprocess as sp | |||
from enum import Enum, unique | |||
from PIL import Image | |||
import time | |||
import cv2 | |||
import sys | |||
sys.path.extend(['..','../AIlib' ]) | |||
@@ -15,8 +12,6 @@ from models.experimental import attempt_load | |||
from utils.torch_utils import select_device | |||
from utilsK.queRiver import get_labelnames,get_label_arrays | |||
import numpy as np | |||
import torch | |||
from utilsK.masterUtils import get_needed_objectsIndex | |||
@@ -0,0 +1,139 @@ | |||
import re | |||
from cerberus import Validator | |||
# pattern = re.compile('^[a-zA-Z0-9]{1,36}$') # 用于匹配至少一个数字 | |||
# m = pattern.match('111aaa3213213123123213123a222') | |||
# print(m) | |||
# | |||
# schema = { | |||
# 'name': {'type': 'string', 'required': True}, | |||
# 'age': {'type': 'integer', 'required': True, 'min': 18}, | |||
# 'email': {'type': 'string', 'required': True, 'regex': r'\w+@\w+\.\w+'} | |||
# } | |||
# v = Validator(schema) | |||
# print(v.validate({ 'name': '11', 'age': 20, 'email': '764784960@qq.com'})) | |||
# aa = str({ 'name': '11', 'age': 20, 'email': '764784960@qq.com'}) | |||
# print(isinstance(aa, dict)) | |||
# schema = {'name': {'type': 'string'}} | |||
# v = Validator(schema) | |||
# document = {'name1': 'john doe'} | |||
# print(v.validate(document)) | |||
# print(v.validate(document, schema)) | |||
schema = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ['start', 'stop'] | |||
}, | |||
'pull_url': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'push_url': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'original_url': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'original_type': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
'image_urls': { | |||
'type': 'list', | |||
'required': False, | |||
'schema': { | |||
'type': 'string', | |||
'empty': False, | |||
'maxlength': 5000 | |||
} | |||
}, | |||
'results_base_dir': { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'regex': r'^[a-zA-Z0-9]{0,36}$' | |||
}, | |||
'models': { | |||
'type': 'list', | |||
'required': False, | |||
'schema': { | |||
'type': 'dict', | |||
'required': False, | |||
'schema': { | |||
'code': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': 'categories', | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
'categories': { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': 'code', | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
'id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{0,255}$'}, | |||
'config': { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': 'id', | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
v = Validator(schema, allow_unknown=True) | |||
aa={ | |||
'request_id': "111", | |||
'command': 'start', | |||
'pull_url': None, | |||
'push_url': None, | |||
'original_url': '', | |||
'original_type': '', | |||
'image_urls': ['1','1'], | |||
'results_base_dir': '111', | |||
'models': [ | |||
# { | |||
# 'code': '1', | |||
# 'categories': [ | |||
# # { | |||
# # 'id': '1', | |||
# # 'config': {} | |||
# # } | |||
# ] | |||
# } | |||
] | |||
} | |||
print(v.validate(aa)) | |||
print(v.errors) |
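# 补充示例(示意, 非原文件内容): 演示校验失败时 v.errors 的返回形式
bad = {
    'request_id': 'has space!',   # 不符合 ^[a-zA-Z0-9]{1,36}$
    'command': 'pause'            # 不在 allowed: ['start', 'stop'] 中
}
print(v.validate(bad))  # False
print(v.errors)         # 形如 {'command': [...], 'request_id': [...]} 的错误字典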
@@ -0,0 +1,34 @@ | |||
import asyncio | |||
import time | |||
async def sleep(delay): | |||
time.sleep(delay) | |||
print("1111") | |||
async def say_after(delay, what): | |||
await sleep(delay) | |||
print(what) | |||
async def main(): | |||
task1 = asyncio.create_task(say_after(1, 'hello')) | |||
task2 = asyncio.create_task(say_after(2, 'world')) | |||
# await task1 | |||
# await task2 | |||
await asyncio.gather(task1, task2) | |||
# await say_after(1, 'hello') | |||
# await say_after(2, 'world') | |||
start = time.time() | |||
loop = asyncio.new_event_loop() | |||
print(loop) | |||
asyncio.set_event_loop(loop) | |||
task1 = loop.create_task(say_after(1, 'hello')) | |||
task2 = loop.create_task(say_after(2, 'world')) | |||
loop.run_until_complete(asyncio.wait([task1, task2])) | |||
loop.close() | |||
# asyncio.run(main()) | |||
print(time.time() - start) | |||
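# 说明(补充, 非原文件内容): 上面的 sleep() 内部用的是 time.sleep, 会阻塞事件循环,
# 因此 task1/task2 实际是串行执行的, 总耗时约 3 秒。
# 若希望两个任务真正并发, 可改为非阻塞的 asyncio.sleep(示意):
#
# async def sleep(delay):
#     await asyncio.sleep(delay)   # 让出事件循环, 两个任务并发, 总耗时约 2 秒
#     print("1111")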
@@ -0,0 +1,44 @@ | |||
# -*- coding: utf-8 -*- | |||
import threading | |||
import time | |||
from concurrent.futures import ThreadPoolExecutor | |||
class Test(object): | |||
def __init__(self): | |||
# threading.Thread.__init__(self) | |||
self._sName = "machao" | |||
def process(self): | |||
#args是关键字参数,需要加上名字,写成args=(self,) | |||
th1 = threading.Thread(target=self.buildList, args=()) | |||
th1.start() | |||
th1.join() | |||
def buildList(self): | |||
while True: | |||
print("start") | |||
print(self._sName) | |||
self._sName = "1111111" | |||
time.sleep(3) | |||
def bb(): | |||
print("!1111111111") | |||
def aa(t): | |||
while True: | |||
t.submit(bb) | |||
# test = Test() | |||
# test.process() | |||
# print(3//2) | |||
# with ThreadPoolExecutor(max_workers=10) as t: | |||
# t.submit(aa, t) | |||
# time.sleep(1000) | |||
# codeArray=[''] | |||
# codeStr = ','.join(codeArray) | |||
# print(codeStr) | |||
aa={'aaaa': []} | |||
aa["aaaa"].append("1111111") | |||
aa["aaaa"].append("1111111") | |||
aa["aaaa"].append("1111111") | |||
print(aa) |
@@ -0,0 +1,5 @@ | |||
import os | |||
print(os.getcwd()) | |||
print(os.path.relpath(__file__)) |
@@ -0,0 +1,7 @@ | |||
list1 = [1, 2, 3, 4] | |||
list2 = [1,2,4] | |||
if set(list2) == set(list1): | |||
print("1111111") | |||
else: | |||
print("222222") | |||
@@ -22,7 +22,8 @@ class AliyunOssSdk(): | |||
if self.bucket is None: | |||
self.logger.info("初始化oss桶, requestId:{}", self.requestId) | |||
auth = oss2.Auth(self.content["aliyun"]["access_key"], self.content["aliyun"]["access_secret"]) | |||
self.bucket = oss2.Bucket(auth, self.content["aliyun"]["oss"]["endpoint"], | |||
self.bucket = oss2.Bucket(auth, | |||
self.content["aliyun"]["oss"]["endpoint"], | |||
self.content["aliyun"]["oss"]["bucket"], | |||
connect_timeout=self.content["aliyun"]["oss"]["connect_timeout"]) | |||
@@ -45,28 +46,6 @@ class AliyunOssSdk(): | |||
raise e | |||
async def put_object(self, updatePath, fileByte): | |||
self.bucket.put_object(updatePath, fileByte) | |||
async def upload_file(self, updatePath, fileByte): | |||
self.logger.info("开始上传文件到oss, requestId:{}", self.requestId) | |||
self.get_oss_bucket() | |||
MAX_RETRIES = 3 | |||
retry_count = 0 | |||
while True: | |||
try: | |||
await self.put_object(updatePath, fileByte) | |||
self.logger.info("上传文件到oss成功! requestId:{}", self.requestId) | |||
break | |||
except Exception as e: | |||
retry_count += 1 | |||
time.sleep(1) | |||
self.logger.info("上传文件到oss失败, 重试次数:{}, requestId:{}", retry_count, self.requestId) | |||
if retry_count > MAX_RETRIES: | |||
self.logger.exception("上传文件到oss重试失败:{}, requestId:{}", e, self.requestId) | |||
raise e | |||
class ThAliyunVodSdk(): | |||
def __init__(self, content, logger, requestId): | |||
@@ -107,7 +86,8 @@ class ThAliyunVodSdk(): | |||
ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1]) | |||
diff_time = current_time - start | |||
if diff_time > 60 * 60 * 2: | |||
self.logger.exception("获取视频地址失败超时异常: {},超时时间:{}, requestId: {}", e, diff_time, self.requestId) | |||
self.logger.exception("获取视频地址失败超时异常: {},超时时间:{}, requestId: {}", e, diff_time, | |||
self.requestId) | |||
raise ServiceException(ExceptionType.GET_VIDEO_URL_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.GET_VIDEO_URL_TIMEOUT_EXCEPTION.value[1]) | |||
@@ -7,13 +7,16 @@ import subprocess as sp | |||
import numpy as np | |||
from loguru import logger | |||
from common import Constant | |||
from exception.CustomerException import ServiceException | |||
from enums.ExceptionEnum import ExceptionType | |||
class Cv2Util(): | |||
def __init__(self, pullUrl=None, pushUrl=None, orFilePath=None, aiFilePath=None, requestId=None, content=None, gpu_ids=None): | |||
def __init__(self, pullUrl=None, pushUrl=None, orFilePath=None, aiFilePath=None, requestId=None, content=None, | |||
gpu_ids=None): | |||
self.pullUrl = pullUrl | |||
self.pushUrl = pushUrl | |||
self.orFilePath = orFilePath | |||
@@ -25,18 +28,14 @@ class Cv2Util(): | |||
self.fps = None | |||
self.width = None | |||
self.height = None | |||
self.wah = None | |||
self.wh = None | |||
self.h = None | |||
self.hn = None | |||
self.w = None | |||
self.all_frames = None | |||
self.bit_rate = None | |||
self.pull_p = None | |||
self.requestId = requestId | |||
self.p_push_retry_num = 0 | |||
self.resize_status = False | |||
self.current_frame = 0 | |||
self.isGpu = False | |||
self.read_w_h = None | |||
self.content = content | |||
@@ -44,54 +43,16 @@ class Cv2Util(): | |||
self.isGpu = True | |||
def getFrameConfig(self, fps, width, height): | |||
if self.fps is None: | |||
if self.fps is None or self.width != width or self.height != height: | |||
self.fps = fps | |||
self.width = width | |||
self.height = height | |||
if width > 1600: | |||
self.wh = int(width * height * 3 // 4) | |||
self.wah = '%sx%s' % (int(self.width / 2), int(self.height / 2)) | |||
self.h = int(self.height // 2) | |||
self.w = int(self.width // 2) | |||
self.hn = int(self.height // 2) | |||
self.wn = int(self.width // 2) | |||
w_f = self.wh != width * height * 3 / 4 | |||
h_f = self.h != self.height / 2 | |||
wd_f = self.w != self.width / 2 | |||
if w_f or h_f or wd_f: | |||
self.resize_status = True | |||
self.wh = int(width * height * 3) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height) | |||
self.w = int(self.width) | |||
# self.wh = int(width * height * 3 // 8) | |||
# self.wah = '%sx%s' % (int(self.width / 2), int(self.height / 2)) | |||
# self.h = int(self.height * 3 // 4) | |||
# self.w = int(self.width // 2) | |||
# self.hn = int(self.height // 2) | |||
# self.wn = int(self.width // 2) | |||
# w_f = self.wh != width * height * 3 / 8 | |||
# h_f = self.h != self.height * 3 / 4 | |||
# wd_f = self.w != self.width / 2 | |||
# if w_f or h_f or wd_f: | |||
# self.resize_status = True | |||
# self.wh = int(width * height * 3 // 2) | |||
# self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
# self.h = int(self.height * 3 // 2) | |||
# self.w = int(self.width) | |||
if width > Constant.width: | |||
self.h = int(self.height//2) | |||
self.w = int(self.width//2) | |||
else: | |||
self.wh = int(width * height * 3) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height) | |||
self.w = int(self.width) | |||
self.hn = int(self.height) | |||
self.wn = int(self.width) | |||
# self.wh = int(width * height * 3 // 2) | |||
# self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
# self.h = int(self.height * 3 // 2) | |||
# self.w = int(self.width) | |||
# self.hn = int(self.height) | |||
# self.wn = int(self.width) | |||
def clear_video_info(self): | |||
self.fps = None | |||
@@ -104,7 +65,7 @@ class Cv2Util(): | |||
def get_video_info(self): | |||
try: | |||
if self.pullUrl is None: | |||
if self.pullUrl is None or len(self.pullUrl) == 0: | |||
logger.error("拉流地址不能为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0], | |||
ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1]) | |||
@@ -128,61 +89,25 @@ class Cv2Util(): | |||
fps = video_stream.get('r_frame_rate') | |||
# duration = video_stream.get('duration') | |||
# bit_rate = video_stream.get('bit_rate') | |||
self.width = int(width) | |||
self.height = int(height) | |||
if width > 1600: | |||
self.wh = int(width * height * 3) | |||
self.wah = '%sx%s' % (int(self.width / 2), int(self.height / 2)) | |||
self.h = int(self.height // 2) | |||
self.w = int(self.width // 2) | |||
self.hn = int(self.height // 2) | |||
self.wn = int(self.width // 2) | |||
w_f = self.wh != width * height * 3 / 4 | |||
h_f = self.h != self.height / 2 | |||
wd_f = self.w != self.width / 2 | |||
if w_f or h_f or wd_f: | |||
self.resize_status = True | |||
self.wh = int(width * height * 3) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
if width is not None and height is not None: | |||
self.width = int(width) | |||
self.height = int(height) | |||
self.wh = self.width * self.height * 3 | |||
if width > Constant.width: | |||
self.h = int(self.height//2) | |||
self.w = int(self.width//2) | |||
else: | |||
self.h = int(self.height) | |||
self.w = int(self.width) | |||
# self.wh = int(width * height * 3 // 8) | |||
# self.wah = '%sx%s' % (int(self.width / 2), int(self.height / 2)) | |||
# self.h = int(self.height * 3 // 4) | |||
# self.w = int(self.width / 2) | |||
# self.hn = int(self.height / 2) | |||
# self.wn = int(self.width // 2) | |||
# w_f = self.wh != width * height * 3 / 8 | |||
# h_f = self.h != self.height * 3 / 4 | |||
# wd_f = self.w != self.width / 2 | |||
# if w_f or h_f or wd_f: | |||
# self.resize_status = True | |||
# self.wh = int(width * height * 3 // 2) | |||
# self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
# self.h = int(self.height * 3 // 2) | |||
# self.w = int(self.width) | |||
else: | |||
self.wh = int(width * height * 3) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height) | |||
self.w = int(self.width) | |||
self.hn = int(self.height) | |||
self.wn = int(self.width) | |||
# self.wh = int(width * height * 3 // 2) | |||
# self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
# self.h = int(self.height * 3 // 2) | |||
# self.w = int(self.width) | |||
# self.hn = int(self.height) | |||
# self.wn = int(self.width) | |||
if nb_frames: | |||
self.all_frames = int(nb_frames) | |||
up, down = str(fps).split('/') | |||
self.fps = int(eval(up) / eval(down)) | |||
# if duration: | |||
# self.duration = float(video_stream['duration']) | |||
# self.bit_rate = int(bit_rate) / 1000 | |||
logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}|bit_rate:{}, requestId:{}", self.width, | |||
self.height, self.fps, self.all_frames, self.bit_rate, self.requestId) | |||
if nb_frames: | |||
self.all_frames = int(nb_frames) | |||
up, down = str(fps).split('/') | |||
self.fps = int(eval(up) / eval(down)) | |||
# if duration: | |||
# self.duration = float(video_stream['duration']) | |||
# self.bit_rate = int(bit_rate) / 1000 | |||
logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}|bit_rate:{}, requestId:{}", self.width, | |||
self.height, self.fps, self.all_frames, self.bit_rate, self.requestId) | |||
except ServiceException as s: | |||
logger.error("获取视频信息异常: {}, requestId:{}", s.msg, self.requestId) | |||
self.clear_video_info() | |||
@@ -194,7 +119,6 @@ class Cv2Util(): | |||
''' | |||
录屏任务获取视频信息 | |||
''' | |||
def get_recording_video_info(self): | |||
try: | |||
video_info = 'ffprobe -show_format -show_streams -of json %s' % self.pullUrl | |||
@@ -205,9 +129,6 @@ class Cv2Util(): | |||
probe = json.loads(out.decode('utf-8')) | |||
if probe is None or probe.get("streams") is None: | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
# 视频大小 | |||
# format = probe['format'] | |||
# size = int(format['size'])/1024/1024 | |||
video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None) | |||
if video_stream is None: | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
@@ -215,28 +136,18 @@ class Cv2Util(): | |||
height = video_stream.get('height') | |||
nb_frames = video_stream.get('nb_frames') | |||
fps = video_stream.get('r_frame_rate') | |||
# duration = video_stream.get('duration') | |||
# bit_rate = video_stream.get('bit_rate') | |||
if width and int(width) > 0: | |||
self.width = int(width) | |||
if height and int(height) > 0: | |||
self.height = int(height) | |||
if self.width and self.height: | |||
if self.isGpu: | |||
self.wh = int(width * height * 3) | |||
self.read_w_h = ([self.height, self.width, 3]) | |||
# self.read_w_h = (self.height, self.width) | |||
else: | |||
self.wh = int(width * height * 3) | |||
self.read_w_h = ([self.height, self.width, 3]) | |||
self.wh = int(width * height * 3) | |||
self.read_w_h = ([self.height, self.width, 3]) | |||
if nb_frames and int(nb_frames) > 0: | |||
self.all_frames = int(nb_frames) | |||
if fps: | |||
up, down = str(fps).split('/') | |||
self.fps = int(eval(up) / eval(down)) | |||
# if duration: | |||
# self.duration = float(video_stream['duration']) | |||
# self.bit_rate = int(bit_rate) / 1000 | |||
logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}, requestId:{}", self.width, | |||
self.height, self.fps, self.all_frames, self.requestId) | |||
except ServiceException as s: | |||
@@ -255,7 +166,6 @@ class Cv2Util(): | |||
''' | |||
录屏拉取视频 | |||
''' | |||
def recording_pull_p(self): | |||
try: | |||
# 如果视频信息不存在, 不初始化拉流 | |||
@@ -264,31 +174,18 @@ class Cv2Util(): | |||
# 如果已经初始化, 不再初始化 | |||
if self.pull_p: | |||
return | |||
# 有GPU资源使用GPU, 没有GPU资源使用CPU | |||
if not self.isGpu: | |||
command = ['ffmpeg -re', '-y' | |||
# '-hide_banner', | |||
] | |||
if self.pullUrl.startswith('rtsp://'): | |||
command.extend(['-rtsp_transport', 'tcp']) | |||
command.extend(['-i', self.pullUrl, | |||
# '-vf', f'scale=iw*{scale_percent//100}:ih*{scale_percent//100}', | |||
'-f', 'rawvideo', | |||
'-f' | |||
'-an', | |||
'-']) | |||
else: | |||
command = ['ffmpeg'] | |||
if self.pullUrl.startswith('rtsp://'): | |||
command.extend(['-rtsp_transport', 'tcp']) | |||
command.extend(['-re', | |||
'-y', | |||
'-hwaccel', 'cuda', | |||
'-i', self.pullUrl, | |||
'-f', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-an', | |||
'-']) | |||
            command = ['ffmpeg', '-re', '-y'
                       # '-hide_banner',
                       ]
if self.pullUrl.startswith('rtsp://'): | |||
command.extend(['-rtsp_transport', 'tcp']) | |||
if self.isGpu: | |||
command.extend(['-hwaccel', 'cuda']) | |||
command.extend(['-i', self.pullUrl, | |||
'-f', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-an', | |||
'-']) | |||
self.pull_p = sp.Popen(command, stdout=sp.PIPE) | |||
except ServiceException as s: | |||
logger.exception("构建拉流管道异常: {}, requestId:{}", s.msg, self.requestId) | |||
@@ -313,24 +210,31 @@ class Cv2Util(): | |||
def recording_read(self): | |||
result = None | |||
try: | |||
self.recording_pull_p() | |||
in_bytes = self.pull_p.stdout.read(self.wh) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
try: | |||
img = np.frombuffer(in_bytes, np.uint8).reshape(self.read_w_h) | |||
result = np.frombuffer(in_bytes, np.uint8).reshape(self.read_w_h) | |||
except Exception as ei: | |||
logger.exception("视频格式异常:{}, requestId:{}", ei, self.requestId) | |||
raise ServiceException(ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[0], | |||
ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[1]) | |||
result = img | |||
# if self.isGpu: | |||
# result = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12) | |||
# else: | |||
# result = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) | |||
self.current_frame += 1 | |||
except ServiceException as s: | |||
if self.pull_p: | |||
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
raise s | |||
except Exception as e: | |||
logger.exception("读流异常:{}, requestId:{}", e, self.requestId) | |||
if self.pull_p: | |||
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
return result | |||
''' | |||
@@ -339,57 +243,27 @@ class Cv2Util(): | |||
def build_pull_p(self): | |||
try: | |||
if self.wah is None: | |||
if self.pull_p is not None: | |||
return | |||
if self.pull_p: | |||
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
# command = ['ffmpeg', | |||
# # '-b:v', '3000k', | |||
# '-i', self.pullUrl, | |||
# '-f', 'rawvideo', | |||
# '-vcodec', 'rawvideo', | |||
# '-pix_fmt', 'bgr24', | |||
# # '-s', "{}x{}".format(int(width), int(height)), | |||
# '-an', | |||
# '-'] | |||
# input_config = {'c:v': 'h264_cuvid', 'resize': self.wah} | |||
# process = ( | |||
# ffmpeg | |||
# .input(self.pullUrl, **input_config) | |||
# .output('pipe:', format='rawvideo', r=str(self.fps)) # pix_fmt='bgr24' | |||
# .overwrite_output() | |||
# .global_args('-an') | |||
# .run_async(pipe_stdout=True) | |||
# ) | |||
command = ['ffmpeg'] | |||
if self.pullUrl.startswith("rtsp://"): | |||
command.extend(['-rtsp_transport', 'tcp']) | |||
command.extend(['-re', | |||
'-y', | |||
'-hwaccel', 'cuda', | |||
'-resize', self.wah, | |||
# '-resize', self.wah, | |||
'-i', self.pullUrl, | |||
'-f', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-an', | |||
'-']) | |||
self.pull_p = sp.Popen(command, stdout=sp.PIPE) | |||
# self.pull_p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE) | |||
# self.pull_p = process | |||
except ServiceException as s: | |||
logger.exception("构建拉流管道异常: {}, requestId:{}", s.msg, self.requestId) | |||
if self.pull_p: | |||
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
raise s | |||
except Exception as e: | |||
logger.exception("构建拉流管道异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
if self.pull_p: | |||
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
@@ -405,11 +279,9 @@ class Cv2Util(): | |||
def read(self): | |||
result = None | |||
try: | |||
# if self.pull_p is None: | |||
# logger.error("拉流管道为空, requestId:{}", self.requestId) | |||
# raise ServiceException(ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[0], | |||
# ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[1]) | |||
# in_bytes = self.pull_p.stdout.read(self.wh) | |||
self.build_pull_p() | |||
if self.pull_p is None: | |||
return result | |||
in_bytes = self.pull_p.stdout.read(self.wh) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
try: | |||
@@ -421,14 +293,18 @@ class Cv2Util(): | |||
ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[1]) | |||
# result = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12) | |||
# result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR) | |||
if self.resize_status: | |||
if self.width > 1600: | |||
result = cv2.resize(result, (int(self.width / 2), int(self.height / 2)), | |||
interpolation=cv2.INTER_LINEAR) | |||
self.current_frame += 1 | |||
if self.width > Constant.width: | |||
                        result = cv2.resize(result, (self.w, self.h), interpolation=cv2.INTER_LINEAR)
except ServiceException as s: | |||
raise s | |||
except Exception as e: | |||
self.clear_video_info() | |||
if self.pull_p: | |||
logger.info("关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
logger.exception("读流异常:{}, requestId:{}", e, self.requestId) | |||
return result | |||
@@ -512,21 +388,10 @@ class Cv2Util(): | |||
# 构建 cv2 | |||
def build_p(self): | |||
try: | |||
if self.p: | |||
logger.info("重试, 关闭管道, requestId:{}", self.requestId) | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
self.p.terminate() | |||
self.p.wait() | |||
# self.p.communicate() | |||
# self.p.kill() | |||
if self.pushUrl is None: | |||
if self.pushUrl is None or len(self.pushUrl) == 0: | |||
logger.error("推流地址不能为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[0], | |||
ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[1]) | |||
width = int(self.width) | |||
if width <= 1600: | |||
width = 2 * int(self.width) | |||
command = ['ffmpeg', | |||
# '-loglevel', 'debug', | |||
'-y', | |||
@@ -534,8 +399,7 @@ class Cv2Util(): | |||
'-vcodec', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-thread_queue_size', '1024', | |||
# '-s', "{}x{}".format(self.width * 2, self.height), | |||
'-s', "{}x{}".format(width, int(self.hn)), | |||
'-s', "{}x{}".format(self.w * 2, self.h), | |||
'-r', str(self.fps), | |||
'-i', '-', # 指定输入文件 | |||
'-g', str(self.fps), | |||
@@ -556,62 +420,34 @@ class Cv2Util(): | |||
"-an", | |||
# '-flvflags', 'no_duration_filesize', | |||
# '-preset', 'fast', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
'-preset', 'p6', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
'-tune', 'll', | |||
'-preset', 'llhq', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
'-f', 'flv', | |||
self.pushUrl] | |||
# command = 'ffmpeg -loglevel debug -y -f rawvideo -vcodec rawvideo -pix_fmt bgr24' +\ | |||
# ' -s ' + "{}x{}".format(int(self.width), int(self.height/2))\ | |||
# + ' -i - ' + '-g ' + str(self.fps)+\ | |||
# ' -b:v 6000k -tune zerolatency -c:v libx264 -pix_fmt yuv420p -preset ultrafast'+\ | |||
# ' -f flv ' + self.pushUrl | |||
# kwargs = {'format': 'rawvideo', | |||
# # 'vcodec': 'rawvideo', | |||
# 'pix_fmt': 'bgr24', | |||
# 's': '{}x{}'.format(int(self.width), int(self.height/2))} | |||
# out = { | |||
# 'r': str(self.fps), | |||
# 'g': str(self.fps), | |||
# 'b:v': '5500k', # 恒定码率 | |||
# # 'maxrate': '15000k', | |||
# # 'crf': '18', | |||
# 'bufsize': '5500k', | |||
# 'tune': 'zerolatency', # 加速编码速度 | |||
# 'c:v': 'libx264', # 指定视频编码器 | |||
# 'sc_threshold': '0', | |||
# 'pix_fmt': 'yuv420p', | |||
# # 'flvflags': 'no_duration_filesize', | |||
# 'preset': 'medium', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
# # superfast, veryfast, faster, fast, medium, slow, slower, veryslow。 | |||
# 'format': 'flv'} | |||
# 管道配置 | |||
# process2 = ( | |||
# ffmpeg | |||
# .input('pipe:', **kwargs) | |||
# .output(self.pushUrl, **out) | |||
# .global_args('-y', '-an') | |||
# .overwrite_output() | |||
# .run_async(pipe_stdin=True) | |||
# ) | |||
logger.info("fps:{}|height:{}|width:{}|requestId:{}", self.fps, self.height, self.width, self.requestId) | |||
self.p = sp.Popen(command, stdin=sp.PIPE, shell=False) | |||
# self.p = process2 | |||
except ServiceException as s: | |||
if self.p: | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
self.p.terminate() | |||
self.p.wait() | |||
logger.exception("构建p管道异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
if self.p: | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
self.p.terminate() | |||
self.p.wait() | |||
logger.exception("初始化p管道异常:{}, requestId:{}", e, self.requestId) | |||
async def push_stream_write(self, frame): | |||
self.p.stdin.write(frame.tostring()) | |||
async def push_stream(self, frame): | |||
if self.p is None: | |||
self.build_p() | |||
def push_stream(self, frame): | |||
try: | |||
await self.push_stream_write(frame) | |||
return True | |||
if self.p is None: | |||
self.build_p() | |||
            self.p.stdin.write(frame.tobytes())
except ServiceException as s: | |||
raise s | |||
except Exception as ex: | |||
logger.exception("推流进管道异常:{}, requestId: {}", ex, self.requestId) | |||
current_retry_num = 0 | |||
@@ -621,116 +457,104 @@ class Cv2Util(): | |||
self.p_push_retry_num += 1 | |||
current_retry_num += 1 | |||
if current_retry_num > 3 or self.p_push_retry_num > 600: | |||
return False | |||
raise ServiceException(ExceptionType.PUSH_STREAMING_CHANNEL_IS_OCCUPIED.value[0], | |||
ExceptionType.PUSH_STREAMING_CHANNEL_IS_OCCUPIED.value[1]) | |||
self.build_p() | |||
await self.push_stream_write(frame) | |||
                    self.p.stdin.write(frame.tobytes())
logger.info("构建p管道重试成功, 当前重试次数: {}, requestId: {}", current_retry_num, | |||
self.requestId) | |||
return True | |||
except ServiceException as ss: | |||
raise ss | |||
except Exception as e: | |||
logger.exception("构建p管道异常:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
current_retry_num, self.requestId) | |||
return False | |||
async def video_frame_write(self, or_frame, ai_frame): | |||
if or_frame is not None: | |||
self.or_video_file.write(or_frame) | |||
if ai_frame is not None: | |||
self.ai_video_file.write(ai_frame) | |||
def build_or_write(self): | |||
try: | |||
if self.orFilePath is not None and self.or_video_file is None: | |||
self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(self.w, self.h)) | |||
if self.or_video_file is None: | |||
logger.error("or_video_file为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
if self.or_video_file is not None: | |||
self.or_video_file.release() | |||
logger.exception("构建OR文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
if self.or_video_file is not None: | |||
self.or_video_file.release() | |||
logger.exception("构建OR文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
async def video_write(self, or_frame, ai_frame): | |||
def build_ai_write(self): | |||
try: | |||
self.build_write() | |||
if or_frame is not None and len(or_frame) > 0: | |||
await self.video_frame_write(or_frame, None) | |||
if ai_frame is not None and len(ai_frame) > 0: | |||
await self.video_frame_write(None, ai_frame) | |||
return True | |||
if self.aiFilePath is not None and self.ai_video_file is None: | |||
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(self.w * 2, self.h)) | |||
if self.ai_video_file is None: | |||
logger.error("ai_video_file为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
if self.ai_video_file is not None: | |||
self.ai_video_file.release() | |||
logger.exception("构建AI文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
if self.ai_video_file is not None: | |||
self.ai_video_file.release() | |||
logger.exception("构建AI文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
def video_or_write(self, frame): | |||
try: | |||
if self.or_video_file is None: | |||
self.build_or_write() | |||
self.or_video_file.write(frame) | |||
except ServiceException as s: | |||
raise s | |||
except Exception as ex: | |||
ai_retry_num = 0 | |||
while True: | |||
try: | |||
ai_retry_num += 1 | |||
if ai_retry_num > 3: | |||
logger.exception("重新写入离线分析后视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
return False | |||
if or_frame is not None and len(or_frame) > 0: | |||
await self.or_video_file.write(or_frame) | |||
if ai_frame is not None and len(ai_frame) > 0: | |||
await self.ai_video_file.write(ai_frame) | |||
logger.info("重新写入离线分析后视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
logger.exception("重新写入原视频视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.or_video_file.write(frame) | |||
logger.info("重新写入原视频视到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.requestId) | |||
return True | |||
except Exception as e: | |||
logger.exception("重新写入离线分析后视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
logger.exception("重新写入原视频视到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
ai_retry_num, self.requestId) | |||
def revording_video_write(self, or_frame): | |||
def video_ai_write(self, frame): | |||
try: | |||
self.recording_build_write() | |||
if or_frame is not None and len(or_frame) > 0: | |||
self.or_video_file.write(or_frame) | |||
if self.ai_video_file is None: | |||
self.build_ai_write() | |||
self.ai_video_file.write(frame) | |||
except ServiceException as s: | |||
raise s | |||
except Exception as ex: | |||
ai_retry_num = 0 | |||
while True: | |||
try: | |||
ai_retry_num += 1 | |||
if ai_retry_num > 3: | |||
logger.exception("重新写入离线分析后视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
raise ServiceException(ExceptionType.WRITE_STREAM_EXCEPTION.value[0], | |||
ExceptionType.WRITE_STREAM_EXCEPTION.value[1]) | |||
if or_frame is not None and len(or_frame) > 0: | |||
self.or_video_file.write(or_frame) | |||
logger.info("重新写入离线分析后视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
logger.exception("重新写入分析后的视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.ai_video_file.write(frame) | |||
logger.info("重新写入分析后的视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.requestId) | |||
break | |||
except Exception as e: | |||
logger.exception("重新写入离线分析后视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
logger.exception("重新写入分析后的视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
ai_retry_num, self.requestId) | |||
def recording_build_write(self): | |||
try: | |||
if self.fps is None or self.width is None or self.height is None: | |||
logger.error("fps、 width、 height为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if self.orFilePath is not None and self.or_video_file is None: | |||
self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(int(self.width), int(self.height))) | |||
except ServiceException as s: | |||
logger.exception("构建文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
logger.exception("构建文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
def build_write(self): | |||
try: | |||
if self.fps is None or self.width is None or self.height is None: | |||
logger.error("fps、 width、 height为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if self.orFilePath is not None and self.or_video_file is None: | |||
self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(int(self.wn), int(self.hn))) | |||
if self.or_video_file is None: | |||
logger.error("or_video_file为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if self.aiFilePath is not None and self.ai_video_file is None: | |||
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(int(self.wn * 2), int(self.hn))) | |||
if self.ai_video_file is None: | |||
logger.error("ai_video_file为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
logger.exception("构建文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
logger.exception("构建文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
def video_merge(self, frame1, frame2): | |||
# frameLeft = cv2.resize(frame1, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR) | |||
# frameRight = cv2.resize(frame2, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR) |
@@ -8,7 +8,6 @@ from loguru import logger | |||
def create_dir_not_exist(path): | |||
logger.info("检查文件夹是否存在: {}", path) | |||
if not os.path.exists(path): | |||
logger.info("开始创建文件夹: {}", path) | |||
os.makedirs(path) |
@@ -1,5 +1,8 @@ | |||
import GPUtil | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
# order- 确定返回可用 GPU 设备 ID 的顺序。order应指定为以下字符串之一: | |||
# 'first'- 按升序排列可用的 GPU 设备 ID(默认) | |||
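# 用法示意(补充说明, 参数取值以 GPUtil 文档为准):
# GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, maxMemory=0.5)
# 表示按显存占用从低到高排序, 返回负载和显存占用均低于 50% 的一个可用 GPU 的 ID 列表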
@@ -28,3 +31,11 @@ def get_gpu_ids(content): | |||
def get_all_gpu_ids(): | |||
return GPUtil.getGPUs() | |||
def check_gpu_resource(content): | |||
gpu_ids = get_gpu_ids(content) | |||
if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids): | |||
raise ServiceException(ExceptionType.NO_GPU_RESOURCES.value[0], | |||
ExceptionType.NO_GPU_RESOURCES.value[1]) | |||
return gpu_ids |
@@ -5,6 +5,10 @@ import cv2 | |||
import requests | |||
from PIL import Image, ImageDraw, ImageFont | |||
import numpy as np | |||
from loguru import logger | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
''' | |||
文字水印 | |||
@@ -134,33 +138,38 @@ class PictureWaterMark: | |||
return cv2.cvtColor(np.asarray(b), cv2.COLOR_BGR2RGB) | |||
def common_water_1(self, image, logo, alpha=1): | |||
h, w = image.shape[0], image.shape[1] | |||
# if w >= h: | |||
rate = int(w * 0.1) / logo.shape[1] | |||
# else: | |||
# rate = int(h * 0.1) / logo.shape[0] | |||
mask = cv2.resize(logo, None, fx=rate, fy=rate, interpolation=cv2.INTER_NEAREST) | |||
mask_h, mask_w = mask.shape[0], mask.shape[1] | |||
mask_channels = cv2.split(mask) | |||
dst_channels = cv2.split(image) | |||
# b, g, r, a = cv2.split(mask) | |||
# 计算mask在图片的坐标 | |||
# if w >= h: | |||
ul_points = (int(h * 0.95) - mask_h, int(w - h * 0.05 - mask_w)) | |||
dr_points = (int(h * 0.95), int(w - h * 0.05)) | |||
# else: | |||
# ul_points = (int(h * 0.95) - mask_h, int(w - h * 0.05 - mask_w)) | |||
# dr_points = (int(h * 0.95), int(w - h * 0.05)) | |||
for i in range(3): | |||
dst_channels[i][ul_points[0]: dr_points[0], ul_points[1]: dr_points[1]] = dst_channels[i][ | |||
ul_points[0]: dr_points[0], | |||
ul_points[1]: dr_points[1]] * ( | |||
255.0 - mask_channels[ | |||
3] * alpha) / 255 | |||
dst_channels[i][ul_points[0]: dr_points[0], ul_points[1]: dr_points[1]] += np.array( | |||
mask_channels[i] * (mask_channels[3] * alpha / 255), dtype=np.uint8) | |||
dst_img = cv2.merge(dst_channels) | |||
return dst_img | |||
try: | |||
h, w = image.shape[0], image.shape[1] | |||
# if w >= h: | |||
rate = int(w * 0.1) / logo.shape[1] | |||
# else: | |||
# rate = int(h * 0.1) / logo.shape[0] | |||
mask = cv2.resize(logo, None, fx=rate, fy=rate, interpolation=cv2.INTER_NEAREST) | |||
mask_h, mask_w = mask.shape[0], mask.shape[1] | |||
mask_channels = cv2.split(mask) | |||
dst_channels = cv2.split(image) | |||
# b, g, r, a = cv2.split(mask) | |||
# 计算mask在图片的坐标 | |||
# if w >= h: | |||
ul_points = (int(h * 0.95) - mask_h, int(w - h * 0.05 - mask_w)) | |||
dr_points = (int(h * 0.95), int(w - h * 0.05)) | |||
# else: | |||
# ul_points = (int(h * 0.95) - mask_h, int(w - h * 0.05 - mask_w)) | |||
# dr_points = (int(h * 0.95), int(w - h * 0.05)) | |||
for i in range(3): | |||
dst_channels[i][ul_points[0]: dr_points[0], ul_points[1]: dr_points[1]] = dst_channels[i][ | |||
ul_points[0]: dr_points[0], | |||
ul_points[1]: dr_points[ | |||
1]] * ( | |||
255.0 - mask_channels[ | |||
3] * alpha) / 255 | |||
dst_channels[i][ul_points[0]: dr_points[0], ul_points[1]: dr_points[1]] += np.array( | |||
mask_channels[i] * (mask_channels[3] * alpha / 255), dtype=np.uint8) | |||
dst_img = cv2.merge(dst_channels) | |||
return dst_img | |||
except Exception as e: | |||
logger.exception("加水印异常", e) | |||
return image | |||
# 差值感知算法 | |||
@@ -191,16 +200,23 @@ def Hamming_distance(hash1, hash2): | |||
def url2Array(url): | |||
response = requests.get(url) | |||
image = Image.open(BytesIO(response.content)) | |||
image1 = np.array(image) | |||
img_bgr = cv2.cvtColor(image1, cv2.COLOR_RGB2BGR) | |||
return img_bgr | |||
try: | |||
response = requests.get(url) | |||
image = Image.open(BytesIO(response.content)) | |||
image1 = np.array(image) | |||
img_bgr = cv2.cvtColor(image1, cv2.COLOR_RGB2BGR) | |||
return img_bgr | |||
except Exception as e: | |||
logger.exception("url地址请求异常: {}", e) | |||
raise ServiceException(ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[0], | |||
ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[1]) | |||
def url2Content(url): | |||
response = requests.get(url) | |||
return response.content | |||
def url2Image(url): | |||
response = requests.get(url) | |||
image = Image.open(BytesIO(response.content)) | |||
@@ -214,9 +230,10 @@ def url2Byte(url): | |||
response = requests.get(url) | |||
return BytesIO(response.content) | |||
def markRectangle(url, text, textCoordinate, imageLeftUpCoordinate, imageRightDownCoordinate, color): | |||
img = url2Array(url) | |||
#( 蓝, 绿, 红) | |||
# ( 蓝, 绿, 红) | |||
# 红色 (0, 0, 255) | |||
# 洋红色 (255, 0, 255) | |||
# 青色 (255, 255, 0) | |||
@@ -229,6 +246,72 @@ def markRectangle(url, text, textCoordinate, imageLeftUpCoordinate, imageRightDo | |||
cv2.rectangle(img, imageLeftUpCoordinate, imageRightDownCoordinate, color, 2) | |||
return img | |||
# def draw_painting_joint(img, xywh, score=0.5, color=None, | |||
# font={'line_thickness': None, 'boxLine_thickness': None, 'fontSize': None}): | |||
# imh, imw, imc = img.shape | |||
# tl = font['line_thickness'] or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 | |||
# box_tl = font['boxLine_thickness'] or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # 根据图像的尺寸计算出适合用于绘制图像边框的线宽。 | |||
# c1, c2 = (int(xywh[0]), int(xywh[1])), (int(xywh[2]), int(xywh[3])) | |||
# cv2.rectangle(img, c1, c2, color, thickness=box_tl, lineType=cv2.LINE_AA) | |||
# | |||
# label = ' %.2f' % (score) | |||
# tf = max(tl, 1) # font thickness | |||
# fontScale = font['fontSize'] or tl * 0.33 | |||
# t_size = cv2.getTextSize(label, 0, fontScale=fontScale, thickness=tf)[0] | |||
# cv2.rectangle(img, (int(box[0]) + lw, int(box[1])), c2, color, -1, cv2.LINE_AA) # filled | |||
# cv2.putText(img, label, (c1[0] + lw, c1[1] - (lh - t_size[1]) // 2), 0, fontScale, [225, 255, 255], thickness=tf, | |||
# lineType=cv2.LINE_AA) | |||
# # print('#####line224 fontScale:',fontScale,' thickness:',tf,' line_thickness:',font['line_thickness'],' boxLine thickness:',box_tl) | |||
# return img | |||
def img_pad(img, size, pad_value=[114, 114, 114]): | |||
###填充成固定尺寸 | |||
H, W, _ = img.shape | |||
r = max(H / size[0], W / size[1]) | |||
img_r = cv2.resize(img, (int(W / r), int(H / r))) | |||
tb = size[0] - img_r.shape[0] | |||
lr = size[1] - img_r.shape[1] | |||
top = int(tb / 2) | |||
bottom = tb - top | |||
left = int(lr / 2) | |||
right = lr - left | |||
pad_image = cv2.copyMakeBorder(img_r, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_value) | |||
return pad_image, (top, left, r) | |||
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): | |||
# 调整图像大小和填充图像,同时满足步幅多重约束 | |||
shape = img.shape[:2] # current shape [height, width] 当前形状 [高度、宽度] | |||
if isinstance(new_shape, int): | |||
new_shape = (new_shape, new_shape) | |||
# Scale ratio (new / old) 缩放比例(新/旧) | |||
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) | |||
if not scaleup: # 仅缩减,不纵向扩展(为了更好的测试 mAP) | |||
r = min(r, 1.0) | |||
ratio = r, r # width, height ratios 宽度、高度比 | |||
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) | |||
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding | |||
if auto: # 最小矩形 | |||
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding | |||
elif scaleFill: # stretch | |||
dw, dh = 0.0, 0.0 | |||
new_unpad = (new_shape[1], new_shape[0]) | |||
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios | |||
dw /= 2 # divide padding into 2 sides | |||
dh /= 2 | |||
if shape[::-1] != new_unpad: # resize | |||
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) | |||
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) | |||
left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) | |||
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border | |||
return img, ratio, (dw, dh) | |||
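# letterbox 用法示意(补充, 假设 img 为 HxWx3 的 BGR ndarray):
# img_lb, ratio, (dw, dh) = letterbox(img, new_shape=(640, 640), auto=False)
# auto=False 时输出固定为 640x640, 不足部分用 color 填充; ratio 为缩放比, (dw, dh) 为单侧填充量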
# if __name__ == '__main__': | |||
# # img = cv2.imread("../test/a.jpg", -1) | |||
# # fontcolor = 'yellow' |
@@ -61,20 +61,15 @@ class AipImageClassifyClient: | |||
reply_value = enum.value[4] | |||
if reply_num >= reply_value: | |||
return None | |||
raise Exception() | |||
return res_image | |||
except ServiceException as s: | |||
time.sleep(1) | |||
reply_num += 1 | |||
self.init_client() | |||
if reply_num > reply_value: | |||
logger.exception("车辆检测识别失败: {}, request_id: {}", s.msg, request_id) | |||
raise ServiceException(e.code, e.msg) | |||
except Exception as e: | |||
logger.exception("车辆检测失败: {}, 当前重试次数:{}, request_id: {}", e, reply_num, request_id) | |||
time.sleep(1) | |||
reply_num += 1 | |||
self.init_client() | |||
if reply_num > reply_value: | |||
if isinstance(e, ServiceException): | |||
raise ServiceException(e.code, e.msg) | |||
logger.exception("车辆检测识别失败: {}, request_id: {}", e, request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
@@ -85,6 +80,58 @@ class AipImageClassifyClient: | |||
# finally: | |||
# self.lock.release() | |||
''' | |||
车辆检测 | |||
''' | |||
    def vehicleDetect(self, image, request_id, options=None):
        if options is None:
            options = {}
        self.init_client()
# try: | |||
# self.lock.acquire() | |||
reply_num = 0 | |||
reply_value = None | |||
while True: | |||
try: | |||
options["show"] = "true" | |||
                res_image = self.__client.vehicleDetect(image, options)
error_code = res_image.get("error_code") | |||
if error_code: | |||
enum = BAIDUERRORDATA.get(error_code) | |||
# 如果异常编码未知, 返回空值 | |||
if enum is None: | |||
logger.error("百度云车辆检测异常!error_code:{}, request_id: {}", error_code, request_id) | |||
return None | |||
# 重试指定次数后,还是异常,输出统一内部异常 | |||
if enum.value[3] == 0: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
logger.error("百度云车辆检测异常!error_code:{}, error_msg:{}, reply_num:{}, request_id: {}", | |||
enum.value[0], enum.value[2], reply_num, request_id) | |||
raise Exception() | |||
# 重试指定次数后,还是异常,输出对应的异常 | |||
if enum.value[3] == 1: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
raise ServiceException(str(enum.value[0]), enum.value[2]) | |||
# 重试指定次数后,还是异常,输出空 | |||
if enum.value[3] == 2: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
if reply_num >= reply_value: | |||
return None | |||
raise Exception() | |||
return res_image | |||
except Exception as e: | |||
time.sleep(1) | |||
reply_num += 1 | |||
self.init_client() | |||
if reply_num > reply_value: | |||
if isinstance(e, ServiceException): | |||
raise ServiceException(e.code, e.msg) | |||
logger.exception("车辆检测识别失败: {}, request_id: {}", e, request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
class AipBodyAnalysisClient: | |||
@@ -140,6 +187,7 @@ class AipBodyAnalysisClient: | |||
reply_value = enum.value[4] | |||
if reply_num >= reply_value: | |||
return None | |||
raise Exception() | |||
return res_image | |||
except Exception as e: | |||
time.sleep(0.5) | |||
@@ -198,9 +246,8 @@ class AipBodyAnalysisClient: | |||
reply_value = enum.value[4] | |||
if reply_num >= reply_value: | |||
return None | |||
raise Exception() | |||
return res_image | |||
except ServiceException as e: | |||
raise ServiceException(e.code, e.msg) | |||
except Exception as e: | |||
time.sleep(0.5) | |||
reply_num += 1 | |||
@@ -209,8 +256,8 @@ class AipBodyAnalysisClient: | |||
if isinstance(e, ServiceException): | |||
raise ServiceException(e.code, e.msg) | |||
logger.exception("人流量统计失败: {}, request_id: {}", e, request_id) | |||
raise ServiceException(ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[0], | |||
ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[1]) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
# except Exception as ee: | |||
# logger.exception("车辆检测加锁异常: {}, request_id: {}", ee, request_id) | |||
# raise ServiceException(ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[0], |
@@ -57,9 +57,9 @@ class CustomerKafkaProducer(): | |||
raise e | |||
break | |||
except Exception as e: | |||
logger.exception("kafka发送消息异常: {}, requestId:{}", e, message.get("request_id")) | |||
self.customerProducer = None | |||
retry_send_num += 1 | |||
logger.error("kafka发送消息异常, 开始重试, 当前重试次数:{} requestId:{}", retry_send_num, message.get("request_id")) | |||
self.customerProducer = None | |||
if retry_send_num > 3: | |||
logger.exception("kafka发送消息重试失败: {}, requestId:{}", e, message.get("request_id")) | |||
raise e |
@@ -6,6 +6,7 @@ from loguru import logger | |||
# 初始化日志配置 | |||
def init_log(content): | |||
# 判断日志文件是否存在,不存在创建 | |||
if not os.path.exists(content["log"]["base_path"]): | |||
os.makedirs(content["log"]["base_path"]) | |||
# 移除日志设置 |
@@ -9,12 +9,13 @@ from util.GPUtils import get_all_gpu_ids | |||
from util.ImgBaiduSdk import AipBodyAnalysisClient, AipImageClassifyClient | |||
sys.path.extend(['..', '../AIlib2']) | |||
from AI import AI_process, AI_process_forest, get_postProcess_para | |||
from AI import AI_process, AI_process_forest, get_postProcess_para, AI_Seg_process, ocr_process | |||
import cv2, os, time | |||
from segutils.segmodel import SegModel | |||
from models.experimental import attempt_load | |||
from utils.torch_utils import select_device | |||
from utilsK.queRiver import get_labelnames, get_label_arrays, save_problem_images | |||
from ocrUtils.ocrUtils import CTCLabelConverter, AlignCollate | |||
import numpy as np | |||
import torch, glob | |||
import tensorrt as trt | |||
@@ -28,6 +29,8 @@ class Model: | |||
try: | |||
logger.info("########################加载{}########################, requestId:{}", modeType.value[2], | |||
requestId) | |||
self.logger = logger | |||
self.requestId = requestId | |||
self.modeType = modeType | |||
self.allowedList = allowedList | |||
            self.trtFlag_det = True  # 检测模型是否采用TRT
@@ -52,7 +55,8 @@ class Model: | |||
detweights = "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3] | |||
segweights = '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3] | |||
par = { | |||
'device': device, # 显卡号,如果用TRT模型,只支持0(单显卡) | |||
# 'device': 'cuda:%s' % device, | |||
'device': str(device), # 显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % modeType.value[3], # 检测类别对照表 | |||
'trtFlag_det': self.trtFlag_det, # 检测模型是否采用TRT | |||
'trtFlag_seg': self.trtFlag_seg, # 分割模型是否采用TRT | |||
@@ -68,17 +72,46 @@ class Model: | |||
'Segweights': segweights, # 分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/%s/para.json' % modeType.value[3] # 后处理参数文件 | |||
} | |||
# 如果是河道模型 | |||
if ModelType.WATER_SURFACE_MODEL == modeType: | |||
par['segRegionCnt'] = 1 | |||
par['slopeIndex'] = [5, 6, 7] | |||
# 如果是交通模型 | |||
if ModelType.TRAFFIC_FARM_MODEL == modeType: | |||
par['segRegionCnt'] = 2 | |||
par['slopeIndex'] = [5, 6, 7] | |||
par['seg_nclass'] = 3 | |||
par['slopeIndex'] = [] | |||
par['segPar'] = { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True | |||
} | |||
par['postPar'] = { | |||
'label_csv': '../AIlib2/weights/conf/%s/class_dict.csv' % modeType.value[3], | |||
'speedRoadArea': 16000, | |||
'vehicleArea': 200, | |||
'speedRoadVehicleAngleMin': 15, | |||
'speedRoadVehicleAngleMax': 75, | |||
'vehicleLengthWidthThreshold': 4, | |||
'vehicleSafeDistance': 7, | |||
'vehicleLengthWidthRatio': 0.7, | |||
'cls': 9 | |||
} | |||
par['mode'] = 'highWay3.0' | |||
else: | |||
self.trtFlag_seg = False | |||
par['trtFlag_seg'] = False | |||
par['segRegionCnt'] = 0 | |||
par['slopeIndex'] = [] | |||
par['segPar'] = None | |||
par['Segweights'] = None | |||
self.mode = par['mode'] if 'mode' in par.keys() else 'others' | |||
self.postPar = par['postPar'] if 'postPar' in par.keys() else None | |||
self.device = select_device(par.get('device')) | |||
self.names = get_labelnames(par.get('labelnames')) | |||
self.half = self.device.type != 'cpu' | |||
@@ -96,8 +129,7 @@ class Model: | |||
Segweights = par.get('Segweights') | |||
if Segweights is not None: | |||
if self.trtFlag_seg: | |||
log = trt.Logger(trt.Logger.ERROR) | |||
with open(Segweights, "rb") as f, trt.Runtime(log) as runtime: | |||
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
self.segmodel = runtime.deserialize_cuda_engine(f.read()) # 输入trt本地文件,返回ICudaEngine对象 | |||
print('############locad seg model trt success#######') | |||
else: | |||
@@ -120,34 +152,183 @@ class Model: | |||
self.label_arraylist = None | |||
self.digitFont = None | |||
except Exception as ee: | |||
logger.exception("异常:{}, requestId:{}", ee, requestId) | |||
raise ee | |||
logger.exception("模型加载异常:{}, requestId:{}", ee, requestId) | |||
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], | |||
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) | |||
'''
# Input parameters
#   im0s---list of original images
#   model---detection model; segmodel---segmentation model (None if not used)
# Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
#   Within [im0s[0], im0, det_xywh, iframe]:
#     im0s[0]--original image; im0--image after AI processing; iframe--frame number (not needed for now)
#     det_xywh--detection results, a list
#       each element describes one target as [float(cls_c), xc, yc, w, h, float(conf_c)]
#       cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates and width/height; conf_c--confidence score in [0,1]
#   strout---timing statistics for each AI processing stage
'''
def process(self, frame, width=1920): | |||
if self.label_arraylist is None: | |||
fontsize = int(width / 1920 * 40) | |||
line_thickness = 1 | |||
boxLine_thickness = 1 | |||
waterLineWidth = 1 | |||
if width >= 960: | |||
line_thickness = int(round(width / 1920 * 3) - 1) | |||
boxLine_thickness = int(round(width / 1920 * 3)) | |||
waterLineWidth = int(round(width / 1920 * 3)) | |||
numFontSize = float(format(width / 1920 * 1.1, '.1f')) # 文字大小 | |||
self.digitFont = {'line_thickness': line_thickness, | |||
'boxLine_thickness': boxLine_thickness, | |||
'fontSize': numFontSize, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': waterLineWidth} | |||
self.label_arraylist = get_label_arrays(self.names, self.rainbows, outfontsize=fontsize, | |||
fontpath="../AIlib2/conf/platech.ttf") | |||
if ModelType.WATER_SURFACE_MODEL == self.modeType or ModelType.TRAFFIC_FARM_MODEL == self.modeType: | |||
return AI_process([frame], self.model, self.segmodel, self.names, self.label_arraylist, | |||
self.rainbows, objectPar=self.objectPar, font=self.digitFont, segPar=self.segPar) | |||
else: | |||
return AI_process_forest([frame], self.model, self.segmodel, self.names, self.label_arraylist, | |||
self.rainbows, self.half, self.device, self.conf_thres, self.iou_thres, | |||
self.allowedList, font=self.digitFont, trtFlag_det=self.trtFlag_det) | |||
try: | |||
if self.label_arraylist is None: | |||
fontsize = int(width / 1920 * 40) | |||
line_thickness = 1 | |||
boxLine_thickness = 1 | |||
waterLineWidth = 1 | |||
if width >= 960: | |||
line_thickness = int(round(width / 1920 * 3) - 1) | |||
boxLine_thickness = int(round(width / 1920 * 3)) | |||
waterLineWidth = int(round(width / 1920 * 3)) | |||
numFontSize = float(format(width / 1920 * 1.1, '.1f')) # 文字大小 | |||
self.digitFont = {'line_thickness': line_thickness, | |||
'boxLine_thickness': boxLine_thickness, | |||
'fontSize': numFontSize, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': waterLineWidth} | |||
self.label_arraylist = get_label_arrays(self.names, self.rainbows, outfontsize=fontsize, | |||
fontpath="../AIlib2/conf/platech.ttf") | |||
if ModelType.WATER_SURFACE_MODEL == self.modeType or ModelType.TRAFFIC_FARM_MODEL == self.modeType: | |||
return AI_process([frame], self.model, self.segmodel, self.names, self.label_arraylist, | |||
self.rainbows, objectPar=self.objectPar, font=self.digitFont, segPar=self.segPar, | |||
mode=self.mode, postPar=self.postPar) | |||
else: | |||
return AI_process_forest([frame], self.model, self.segmodel, self.names, self.label_arraylist, | |||
self.rainbows, self.half, self.device, self.conf_thres, self.iou_thres, | |||
self.allowedList, font=self.digitFont, trtFlag_det=self.trtFlag_det) | |||
except Exception as ee: | |||
self.logger.exception("算法模型分析异常:{}, requestId:{}", ee, self.requestId) | |||
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], | |||
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) | |||
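# Usage sketch (not part of this diff): how a caller might drive Model.process, mirroring the
# MODEL_CONFIG factories below. The constructor arguments and the unpacking of the returned
# tuple follow the I/O notes above; the exact return layout is defined by AI_process /
# AI_process_forest, so treat the unpacking as an assumption.
#
#   model = Model(device, allowedList, logger, requestId, ModelType.WATER_SURFACE_MODEL, content)
#   for frame in frames:                                   # frames: list of BGR ndarrays
#       result, strout = model.process(frame, width=frame.shape[1])
#       im_raw, im_ai, det_xywh, iframe = result           # per the docstring above
#       for cls_c, xc, yc, w, h, conf_c in det_xywh:
#           logger.info("cls:{}, conf:{}, requestId:{}", int(cls_c), conf_c, requestId)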
# ''' | |||
# Traffic accident model
# ''' | |||
# | |||
# | |||
# class TrafficaccidentModel: | |||
# def __init__(self, device, allowedList=None, logger=None, requestId=None, modeType=None, content=None): | |||
# try: | |||
# logger.info("########################加载{}########################, requestId:{}", modeType.value[2], | |||
# requestId) | |||
# self.logger = logger | |||
# self.requestId = requestId | |||
# self.modeType = modeType | |||
# self.allowedList = allowedList | |||
# self.trtFlag_seg = True # 分割模型是否采用TRT | |||
# if self.trtFlag_seg: | |||
# gpu = get_all_gpu_ids()[int(device)] | |||
# if '3090' in gpu.name: | |||
# segweights = '../AIlib2/weights/%s/stdc_1440X810_fp16_3090.engine' % modeType.value[3] | |||
# elif '2080' in gpu.name: | |||
# segweights = '../AIlib2/weights/%s/stdc_1440X810_fp16_2080Ti.engine' % modeType.value[3] | |||
# elif '4090' in gpu.name: | |||
# segweights = '../AIlib2/weights/%s/stdc_1440X810_fp16_4090.engine' % modeType.value[3] | |||
# elif 'A10' in gpu.name: | |||
# segweights = '../AIlib2/weights/%s/stdc_1440X810_fp16_A10.engine' % modeType.value[3] | |||
# else: | |||
# raise Exception("未匹配到该GPU名称的模型, GPU: " + gpu.name) | |||
# else: | |||
# segweights = '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3] | |||
# par = { | |||
# # 'device': 'cuda:%s' % device, | |||
# 'device': device, # 显卡号,如果用TRT模型,只支持0(单显卡) | |||
# 'labelnames': "../AIlib2/weights/conf/%s/labelnames.json" % modeType.value[3], # 检测类别对照表 | |||
# 'trtFlag_seg': self.trtFlag_seg, # 分割模型是否采用TRT | |||
# # 'seg_nclass': 2, # 分割模型类别数目,默认2类 | |||
# 'segPar': { | |||
# 'modelSize': (1440, 810), | |||
# 'mean': (0.485, 0.456, 0.406), | |||
# 'std': (0.229, 0.224, 0.225), | |||
# 'nclass': 3, | |||
# 'predResize': False, | |||
# 'numpy': False, | |||
# 'RGB_convert_first': True | |||
# }, # 分割模型预处理参数 | |||
# 'postPar': { | |||
# 'label_csv': '../AIlib2/weights/conf/%s/class_dict.csv' % modeType.value[3], | |||
# 'speedRoadArea': 5100, | |||
# 'vehicleArea': 100, | |||
# 'speedRoadVehicleAngleMin': 15, | |||
# 'speedRoadVehicleAngleMax': 75, | |||
# 'vehicleLengthWidthThreshold': 12, | |||
# 'vehicleSafeDistance': 7 | |||
# }, | |||
# 'digitWordFont': {'line_thickness': 2, 'boxLine_thickness': 1, 'wordSize': 40, 'fontSize': 1.0}, | |||
# 'Segweights': segweights, # 分割模型权重位置 | |||
# 'postFile': '../AIlib2/weights/conf/%s/para.json' % modeType.value[3] # 后处理参数文件 | |||
# } | |||
# self.device = select_device(par.get('device')) | |||
# self.names = get_labelnames(par.get('labelnames')) | |||
# | |||
# # 加载分割模型 | |||
# seg_nclass = par['segPar']['nclass'] | |||
# self.segPar = par['segPar'] | |||
# self.postPar = par['postPar'] | |||
# Segweights = par.get('Segweights') | |||
# if self.trtFlag_seg: | |||
# log = trt.Logger(trt.Logger.ERROR) | |||
# with open(Segweights, "rb") as f, trt.Runtime(log) as runtime: | |||
# self.segmodel = runtime.deserialize_cuda_engine(f.read()) # 输入trt本地文件,返回ICudaEngine对象 | |||
#                 print('############load seg model trt success#######')
# else: | |||
# self.segmodel = SegModel(nclass=seg_nclass, weights=Segweights, device=self.device) | |||
#                 print('############load seg model pth success#######')
# | |||
# self.digitWordFont = par["digitWordFont"] | |||
# self.conf_thres, self.iou_thres, self.classes, self.rainbows = get_postProcess_para(par["postFile"]) | |||
# self.label_arraylist = None | |||
# except Exception as ee: | |||
# logger.exception("模型加载异常:{}, requestId:{}", ee, requestId) | |||
# raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], | |||
# ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) | |||
# ''' | |||
# 输入参数 | |||
# im0s---原始图像列表 | |||
# segmodel---分割模型,segmodel---分割模型(如若没有用到,则为None) | |||
# digitWordFont--显示字体,数字等参数 | |||
# trtFlag_seg--模型是否是TRT格式 | |||
# segPar--分割模型的参数 | |||
# postPar--后处理参数 | |||
# 输出 | |||
# seg_pred--返回语义分割的结果图(0,1,2...表示) | |||
# img_draw--原图上带有矩形框的图 | |||
# segstr-----文本数据包括时间信息 | |||
# list1-----返回目标的坐标结果,每一个目标用[ cls, x0,y0,x1,y1,conf ] | |||
# ''' | |||
# | |||
# def process(self, frame, width=1920): | |||
# try: | |||
# if self.label_arraylist is None: | |||
# line_thickness = 1 | |||
# boxLine_thickness = 1 | |||
# wordSize = 20 | |||
# if width >= 960: | |||
# line_thickness = int(round(width / 1920 * 3) - 1) | |||
# boxLine_thickness = int(round(width / 1920 * 3)) | |||
# wordSize = int(round(width / 1920 * 40)) | |||
# numFontSize = float(format(width / 1920 * 1.1, '.1f')) # 文字大小 | |||
# self.label_arraylist = get_label_arrays(self.names, self.rainbows, outfontsize=wordSize, | |||
# fontpath="../AIlib2/conf/platech.ttf") | |||
# self.digitWordFont['names'] = self.names | |||
# self.digitWordFont['rainbows'] = self.rainbows | |||
# self.digitWordFont['label_arraylist'] = self.label_arraylist | |||
# # 'digitWordFont': { 'line_thickness':2,'boxLine_thickness':1,'wordSize':40, 'fontSize':1.0}, | |||
# self.digitWordFont['line_thickness'] = line_thickness | |||
# self.digitWordFont['boxLine_thickness'] = boxLine_thickness | |||
# self.digitWordFont['wordSize'] = wordSize | |||
# self.digitWordFont['fontSize'] = numFontSize | |||
# seg_pred, img_draw, segstr, list1 = AI_Seg_process([frame], self.segmodel, self.digitWordFont, | |||
# self.trtFlag_seg, | |||
# self.segPar, self.postPar) | |||
# result = [seg_pred, img_draw, list1] | |||
# return result, segstr | |||
# except Exception as ee: | |||
# self.logger.exception("算法模型分析异常:{}, requestId:{}", ee, self.requestId) | |||
# raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], | |||
# ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) | |||
# # 河道模型 | |||
@@ -691,95 +872,188 @@ class Model: | |||
class IMModel: | |||
def __init__(self, device, allowedList=None, logger=None, requestId=None, modeType=None, content=None): | |||
logger.info("########################加载{}########################, requestId:{}", modeType.value[2], requestId) | |||
self.allowedList = allowedList | |||
if ModelType.EPIDEMIC_PREVENTION_MODEL == modeType: | |||
self.img_type = 'code' ## code,plate | |||
if ModelType.PLATE_MODEL == modeType: | |||
self.img_type = 'plate' ## code,plate | |||
self.par = { | |||
'code': {'weights': '../AIlib2/weights/conf/jkm/health_yolov5s_v3.jit', 'img_type': 'code', 'nc': 10}, | |||
'plate': {'weights': '../AIlib2/weights/conf/jkm/plate_yolov5s_v3.jit', 'img_type': 'plate', 'nc': 1}, | |||
'conf_thres': 0.4, | |||
'iou_thres': 0.45, | |||
'device': 'cuda:%s' % device, | |||
'plate_dilate': (0.5, 0.3) | |||
try: | |||
logger.info("########################加载{}########################, requestId:{}", modeType.value[2], | |||
requestId) | |||
self.logger = logger | |||
self.requestId = requestId | |||
self.allowedList = allowedList | |||
if ModelType.EPIDEMIC_PREVENTION_MODEL == modeType: | |||
self.img_type = 'code' ## code,plate | |||
if ModelType.PLATE_MODEL == modeType: | |||
self.img_type = 'plate' ## code,plate | |||
self.par = { | |||
'code': {'weights': '../AIlib2/weights/conf/jkm/health_yolov5s_v3.jit', 'img_type': 'code', 'nc': 10}, | |||
'plate': {'weights': '../AIlib2/weights/conf/jkm/plate_yolov5s_v3.jit', 'img_type': 'plate', 'nc': 1}, | |||
'conf_thres': 0.4, | |||
'iou_thres': 0.45, | |||
'device': 'cuda:%s' % device, | |||
'plate_dilate': (0.5, 0.3) | |||
} | |||
self.device = torch.device(self.par['device']) | |||
self.model = torch.jit.load(self.par[self.img_type]['weights']) | |||
self.device = torch.device(self.par['device']) | |||
self.model = torch.jit.load(self.par[self.img_type]['weights']) | |||
except Exception as ee: | |||
logger.exception("模型加载异常:{}, requestId:{}", ee, requestId) | |||
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], | |||
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) | |||
def process(self, frame): | |||
img, padInfos = pre_process(frame, self.device) ##预处理 | |||
pred = self.model(img) ##模型推理 | |||
boxes = post_process(pred, padInfos, self.device, conf_thres=self.par['conf_thres'], | |||
iou_thres=self.par['iou_thres'], nc=self.par[self.img_type]['nc']) # 后处理 | |||
dataBack = get_return_data(frame, boxes, modelType=self.img_type, plate_dilate=self.par['plate_dilate']) | |||
return dataBack | |||
try: | |||
img, padInfos = pre_process(frame, self.device) ##预处理 | |||
pred = self.model(img) ##模型推理 | |||
boxes = post_process(pred, padInfos, self.device, conf_thres=self.par['conf_thres'], | |||
iou_thres=self.par['iou_thres'], nc=self.par[self.img_type]['nc']) # 后处理 | |||
dataBack = get_return_data(frame, boxes, modelType=self.img_type, plate_dilate=self.par['plate_dilate']) | |||
return dataBack | |||
except Exception as ee: | |||
self.logger.exception("算法模型分析异常:{}, requestId:{}", ee, self.requestId) | |||
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], | |||
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) | |||
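# Usage sketch (assumption: dataBack is whatever get_return_data builds for the selected
# img_type; its fields are not visible in this diff):
#
#   im_model = IMModel(device, None, logger, requestId, ModelType.PLATE_MODEL, content)
#   dataBack = im_model.process(frame)                     # frame: BGR ndarray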
class OCR_Model: | |||
def __init__(self, device=None, logger=None, requestId=None): | |||
try: | |||
logger.info("######################## 加载OCR模型 ########################, requestId:{}", requestId) | |||
self.requestId = requestId | |||
self.__logger = logger | |||
self.trtFlag_det = True | |||
if self.trtFlag_det: | |||
gpu = get_all_gpu_ids()[int(device)] | |||
if '3090' in gpu.name: | |||
TRTfile = "../AIlib2/weights/ocr_en/english_g2_3090_fp16_448X32.engine" | |||
elif '2080' in gpu.name: | |||
TRTfile = "../AIlib2/weights/ocr_en/english_2080Ti_g2_h64_fp16.engine" | |||
elif '4090' in gpu.name: | |||
TRTfile = "../AIlib2/weights/ocr_en/english_g2_4090_fp16_448X32.engine" | |||
elif 'A10' in gpu.name: | |||
TRTfile = "../AIlib2/weights/ocr_en/english_g2_A10_fp16_448X32.engine" | |||
else: | |||
raise Exception("未匹配到该GPU名称的模型, GPU: " + gpu.name) | |||
else: | |||
TRTfile = "../AIlib2/weights/conf/ocr_en/english_g2.pth" | |||
par = { | |||
'TRTfile': TRTfile, | |||
'device': 'cuda:%s' % device,
'dict_list': {'en': '../AIlib2/weights/conf/ocr_en/en.txt'}, | |||
'char_file': '../AIlib2/weights/conf/ocr_en/en_character.csv', | |||
'imgH': 100, | |||
'imgW': 400 | |||
} | |||
TRTfile = par['TRTfile'] | |||
self.device = par['device'] | |||
dict_list = par['dict_list'] | |||
char_file = par['char_file'] | |||
imgH = par['imgH'] | |||
imgW = par['imgW'] | |||
trt_log = trt.Logger(trt.Logger.ERROR)  # avoid shadowing the loguru logger passed to __init__
with open(TRTfile, "rb") as f, trt.Runtime(trt_log) as runtime:
self.engine = runtime.deserialize_cuda_engine(f.read()) # 输入trt本地文件,返回ICudaEngine对象 | |||
print('#####load TRT file:', TRTfile, 'success #####') | |||
self.context = self.engine.create_execution_context() | |||
with open(char_file, 'r') as fp: | |||
characters = fp.readlines()[0].strip() | |||
self.converter = CTCLabelConverter(characters, {}, dict_list) | |||
self.AlignCollate_normal = AlignCollate(imgH=imgH, imgW=imgW, keep_ratio_with_pad=True) | |||
except Exception as ee: | |||
self.__logger.exception("模型加载异常:{}, requestId:{}", ee, requestId) | |||
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], | |||
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) | |||
def process(self, frame): | |||
try: | |||
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) | |||
par = [gray_frame, self.engine, self.context, self.converter, self.AlignCollate_normal, self.device] | |||
return ocr_process(par) | |||
except Exception as ee: | |||
self.__logger.exception("ocr坐标识别异常:{}, requestId:{}", ee, self.requestId) | |||
raise ServiceException(ExceptionType.COORDINATE_ACQUISITION_FAILED.value[0], | |||
ExceptionType.COORDINATE_ACQUISITION_FAILED.value[1]) | |||
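# The GPU-name -> TensorRT engine lookup above is repeated for every TRT model in this module;
# a table keyed on a GPU-name substring expresses the same rule (sketch only, engine file names
# copied from the branch above):
#
#   _OCR_EN_ENGINES = {
#       '3090': "../AIlib2/weights/ocr_en/english_g2_3090_fp16_448X32.engine",
#       '2080': "../AIlib2/weights/ocr_en/english_2080Ti_g2_h64_fp16.engine",
#       '4090': "../AIlib2/weights/ocr_en/english_g2_4090_fp16_448X32.engine",
#       'A10': "../AIlib2/weights/ocr_en/english_g2_A10_fp16_448X32.engine",
#   }
#   gpu = get_all_gpu_ids()[int(device)]
#   matches = [path for key, path in _OCR_EN_ENGINES.items() if key in gpu.name]
#   if not matches:
#       raise Exception("未匹配到该GPU名称的模型, GPU: " + gpu.name)
#   TRTfile = matches[0]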
# Baidu AI image recognition model
class BaiduAiImageModel: | |||
def __init__(self, device=None, allowedList=None, logger=None, requestId=None, modeType=None, content=None): | |||
logger.info("########################加载{}########################, requestId:{}", modeType.value[2], requestId) | |||
self.allowedList = allowedList | |||
self.__requestId = requestId | |||
self.__logger = logger | |||
self.__aipBodyAnalysisClient = AipBodyAnalysisClient(content) | |||
self.__aipImageClassifyClient = AipImageClassifyClient(content) | |||
try: | |||
logger.info("########################加载{}########################, requestId:{}", modeType.value[2], | |||
requestId) | |||
self.logger = logger | |||
self.requestId = requestId | |||
self.allowedList = allowedList | |||
self.__requestId = requestId | |||
self.__logger = logger | |||
self.__aipBodyAnalysisClient = AipBodyAnalysisClient(content) | |||
self.__aipImageClassifyClient = AipImageClassifyClient(content) | |||
except Exception as ee: | |||
logger.exception("模型加载异常:{}, requestId:{}", ee, requestId) | |||
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], | |||
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) | |||
def process(self, target, url): | |||
baiduEnum = BAIDU_MODEL_TARGET_CONFIG.get(target) | |||
if baiduEnum is None: | |||
raise ServiceException(ExceptionType.DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED.value[0], | |||
ExceptionType.DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED.value[1] | |||
+ " target: " + target) | |||
return baiduEnum.value[2](self.__aipImageClassifyClient, self.__aipBodyAnalysisClient, url, self.__requestId) | |||
try: | |||
baiduEnum = BAIDU_MODEL_TARGET_CONFIG.get(target) | |||
if baiduEnum is None: | |||
raise ServiceException(ExceptionType.DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED.value[0], | |||
ExceptionType.DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED.value[1] | |||
+ " target: " + target) | |||
return baiduEnum.value[2](self.__aipImageClassifyClient, self.__aipBodyAnalysisClient, url, | |||
self.__requestId) | |||
except Exception as ee: | |||
self.logger.exception("算法模型分析异常:{}, requestId:{}", ee, self.requestId) | |||
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], | |||
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) | |||
MODEL_CONFIG = { | |||
# Load the water surface (river) model
ModelType.WATER_SURFACE_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.WATER_SURFACE_MODEL, t), | |||
ModelType.WATER_SURFACE_MODEL.value[1]), | |||
# Load the forest farm model
ModelType.WATER_SURFACE_MODEL), | |||
# Load the forest model
ModelType.FOREST_FARM_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.FOREST_FARM_MODEL, t), | |||
ModelType.FOREST_FARM_MODEL.value[1]), | |||
ModelType.FOREST_FARM_MODEL), | |||
# Load the traffic model
ModelType.TRAFFIC_FARM_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.TRAFFIC_FARM_MODEL, t), | |||
ModelType.TRAFFIC_FARM_MODEL.value[1]), | |||
ModelType.TRAFFIC_FARM_MODEL), | |||
# Load the epidemic prevention model
ModelType.EPIDEMIC_PREVENTION_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: IMModel(x, y, z, r, ModelType.EPIDEMIC_PREVENTION_MODEL, t), | |||
ModelType.EPIDEMIC_PREVENTION_MODEL.value[1]), | |||
ModelType.EPIDEMIC_PREVENTION_MODEL), | |||
# Load the license plate model
ModelType.PLATE_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: IMModel(x, y, z, r, ModelType.PLATE_MODEL, t), ModelType.PLATE_MODEL.value[1]), | |||
lambda x, y, z, r, t: IMModel(x, y, z, r, ModelType.PLATE_MODEL, t), ModelType.PLATE_MODEL), | |||
# Load the vehicle model
ModelType.VEHICLE_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.VEHICLE_MODEL, t), ModelType.VEHICLE_MODEL.value[1]), | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.VEHICLE_MODEL, t), ModelType.VEHICLE_MODEL), | |||
# Load the pedestrian model
ModelType.PEDESTRIAN_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.PEDESTRIAN_MODEL, t), | |||
ModelType.PEDESTRIAN_MODEL.value[1]), | |||
ModelType.PEDESTRIAN_MODEL), | |||
# Load the smoke and fire model
ModelType.SMOGFIRE_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.SMOGFIRE_MODEL, t), ModelType.SMOGFIRE_MODEL.value[1]), | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.SMOGFIRE_MODEL, t), ModelType.SMOGFIRE_MODEL), | |||
# Load the angler/swimmer model
ModelType.ANGLERSWIMMER_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.ANGLERSWIMMER_MODEL, t), | |||
ModelType.ANGLERSWIMMER_MODEL.value[1]), | |||
ModelType.ANGLERSWIMMER_MODEL), | |||
# Load the country road model
ModelType.COUNTRYROAD_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.COUNTRYROAD_MODEL, t), | |||
ModelType.COUNTRYROAD_MODEL.value[1]), | |||
ModelType.COUNTRYROAD_MODEL), | |||
# Load the ship model
ModelType.SHIP_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.SHIP_MODEL, t), ModelType.SHIP_MODEL.value[1]), | |||
lambda x, y, z, r, t: Model(x, y, z, r, ModelType.SHIP_MODEL, t), ModelType.SHIP_MODEL), | |||
# Baidu AI image recognition model
ModelType.BAIDU_MODEL.value[1]: ( | |||
lambda x, y, z, r, t: BaiduAiImageModel(x, y, z, r, ModelType.BAIDU_MODEL, t), | |||
ModelType.BAIDU_MODEL.value[1]) | |||
} | |||
ModelType.BAIDU_MODEL) | |||
# # Traffic accident model
# ModelType.TRAFFICACCIDENT_MODEL.value[1]: ( | |||
# lambda x, y, z, r, t: TrafficaccidentModel(x, y, z, r, ModelType.TRAFFICACCIDENT_MODEL, t), | |||
# ModelType.TRAFFICACCIDENT_MODEL) | |||
} |
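# How MODEL_CONFIG is presumably consumed (the caller side is not part of this diff; the sketch
# only mirrors the lambda signature used above):
#
#   entry = MODEL_CONFIG.get(model_code)                   # model_code == ModelType.X.value[1]
#   if entry is None:
#       ...                                                # unsupported model code, handled by the caller
#   build_model, mode_type = entry
#   model = build_model(device, allowedList, logger, requestId, content)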
@@ -1,10 +1,10 @@ | |||
import time | |||
from threading import Lock | |||
import cv2 | |||
from aip import AipOcr | |||
from loguru import logger | |||
from enums.BaiduSdkEnum import BAIDUERRORDATA | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
@@ -13,11 +13,12 @@ class OcrBaiduSdk: | |||
def __init__(self, content): | |||
self.content = content | |||
self.client = None | |||
self.init_client() | |||
# self.lock = Lock() | |||
def init_client(self): | |||
self.client = AipOcr(str(self.content["baidu"]["orc"]["APP_ID"]), self.content["baidu"]["orc"]["API_KEY"], | |||
if self.client is None: | |||
self.client = AipOcr(str(self.content["baidu"]["orc"]["APP_ID"]), self.content["baidu"]["orc"]["API_KEY"], | |||
self.content["baidu"]["orc"]["SECRET_KEY"]) | |||
''' | |||
@@ -33,41 +34,55 @@ class OcrBaiduSdk: | |||
''' | |||
def universal_text_recognition(self, image, request_id): | |||
# try: | |||
# self.lock.acquire() | |||
reply_num = 1 | |||
reply_num = 0 | |||
reply_value = None | |||
while True: | |||
try: | |||
or_result, or_image = cv2.imencode(".jpg", image) | |||
res_image = self.client.basicGeneral(or_image.tobytes()) | |||
if res_image.get("error_code") == 216630 or res_image.get("error_msg") == 'recognize error': | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
return None | |||
if res_image.get("error_code") == 282403 or res_image.get("error_msg") == 'target recognize error': | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
return None | |||
if res_image.get("error_code") == 216202 or res_image.get("error_msg") == 'image size error': | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
return None | |||
if res_image.get("error_code") is not None: | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
raise Exception("百度云调接口失败") | |||
options = { | |||
"language_type": "CHN_ENG", | |||
"detect_direction": "true", | |||
"detect_language": "true", | |||
"probability": "true" | |||
} | |||
res_image = self.client.basicGeneral(or_image.tobytes(), options) | |||
error_code = res_image.get("error_code") | |||
if error_code: | |||
enum = BAIDUERRORDATA.get(error_code) | |||
# Unknown error code: return None
if enum is None: | |||
logger.error("百度云人流量统计异常!error_code:{}, request_id: {}", error_code, request_id) | |||
return None | |||
# Still failing after the allowed retries: raise the unified internal exception
if enum.value[3] == 0: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
logger.error("百度云人流量统计异常!error_code:{}, error_msg:{}, reply_num:{}, request_id: {}", | |||
enum.value[0], enum.value[2], reply_num, request_id) | |||
raise Exception() | |||
# Still failing after the allowed retries: raise the corresponding exception
if enum.value[3] == 1: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
raise ServiceException(str(enum.value[0]), enum.value[2]) | |||
# Still failing after the allowed retries: return None
if enum.value[3] == 2: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
if reply_num >= reply_value: | |||
return None | |||
raise Exception() | |||
return res_image | |||
except Exception as e: | |||
logger.exception("通用文字识别失败: {}, 当前重试次数:{}, request_id: {}", e, reply_num, request_id) | |||
time.sleep(1) | |||
reply_num += 1 | |||
reply_num += 0.5 | |||
self.init_client() | |||
if reply_num > 5: | |||
if reply_num > reply_value: | |||
if isinstance(e, ServiceException): | |||
raise ServiceException(e.code, e.msg) | |||
logger.exception("通用文字识别失败: {}, request_id: {}", e, request_id) | |||
raise ServiceException(ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[0], | |||
ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[1]) | |||
# except Exception as ee: | |||
# logger.exception("通用文字识别加锁异常: {}, request_id: {}", ee, request_id) | |||
# raise ServiceException(ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[0], | |||
# ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[1]) | |||
# finally: | |||
# self.lock.release() | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
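# Note on BAIDUERRORDATA (inferred from how the loop above reads it; the enum lives in
# enums.BaiduSdkEnum and is not shown in this diff): each entry's value is indexed as
# value[0]=error code, value[2]=error message, value[3]=retry strategy (0: keep retrying, then
# raise SERVICE_INNER_EXCEPTION; 1: raise the mapped ServiceException immediately; 2: keep
# retrying, then return None), value[4]=allowed retries before giving up.
#
#   # Hypothetical entry shape under those assumptions (all values are placeholders):
#   #   QPS_LIMIT = (18, None, 'Open api qps request limit reached', 0, 5)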
''' | |||
{ | |||
@@ -78,40 +93,48 @@ class OcrBaiduSdk: | |||
} | |||
} | |||
''' | |||
def license_plate_recognition(self, image, request_id): | |||
# try: | |||
# self.lock.acquire() | |||
reply_num = 1 | |||
reply_num = 0 | |||
reply_value = None | |||
while True: | |||
try: | |||
or_result, or_image = cv2.imencode(".jpg", image) | |||
res_image = self.client.licensePlate(or_image.tobytes()) | |||
if res_image.get("error_code") == 282403 or res_image.get("error_msg") == 'target recognize error': | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
return None | |||
if res_image.get("error_code") == 216630 or res_image.get("error_msg") == 'recognize error': | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
return None | |||
if res_image.get("error_code") == 216202 or res_image.get("error_msg") == 'image size error': | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
return None | |||
if res_image.get("error_code") is not None: | |||
logger.error("百度云调接口失败: {}, 当前重试次数:{}, request_id: {}", res_image, reply_num, request_id) | |||
raise Exception("百度云调接口失败") | |||
res_image = self.client.licensePlate(or_image.tobytes(), {"multi_detect": "true"}) | |||
error_code = res_image.get("error_code") | |||
if error_code: | |||
enum = BAIDUERRORDATA.get(error_code) | |||
# Unknown error code: return None
if enum is None: | |||
logger.error("百度云人流量统计异常!error_code:{}, request_id: {}", error_code, request_id) | |||
return None | |||
# Still failing after the allowed retries: raise the unified internal exception
if enum.value[3] == 0: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
logger.error("百度云人流量统计异常!error_code:{}, error_msg:{}, reply_num:{}, request_id: {}", | |||
enum.value[0], enum.value[2], reply_num, request_id) | |||
raise Exception() | |||
# Still failing after the allowed retries: raise the corresponding exception
if enum.value[3] == 1: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
raise ServiceException(str(enum.value[0]), enum.value[2]) | |||
# Still failing after the allowed retries: return None
if enum.value[3] == 2: | |||
if reply_value is None: | |||
reply_value = enum.value[4] | |||
if reply_num >= reply_value: | |||
return None | |||
raise Exception() | |||
return res_image | |||
except Exception as e: | |||
logger.exception("车牌识别失败: {}, 当前重试次数:{}, request_id: {}", e, reply_num, request_id) | |||
time.sleep(1) | |||
reply_num += 1 | |||
self.init_client() | |||
if reply_num > 5: | |||
if reply_num > reply_value: | |||
if isinstance(e, ServiceException): | |||
raise ServiceException(e.code, e.msg) | |||
logger.exception("车牌识别失败: {}, request_id: {}", e, request_id) | |||
raise ServiceException(ExceptionType.ABNORMAL_LICENSE_PLATE_RECOGNITION.value[0], | |||
ExceptionType.ABNORMAL_LICENSE_PLATE_RECOGNITION.value[1]) | |||
# except Exception as ee: | |||
# logger.exception("车牌识别加锁异常: {}, request_id: {}", ee, request_id) | |||
# raise ServiceException(ExceptionType.ABNORMAL_LICENSE_PLATE_RECOGNITION.value[0], | |||
# ExceptionType.ABNORMAL_LICENSE_PLATE_RECOGNITION.value[1]) | |||
# finally: | |||
# self.lock.release() | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
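# Usage sketch (hedged; the exact response fields come from the Baidu AIP SDK, not from this
# module, and None is returned for the "nothing usable recognized" cases handled above):
#
#   sdk = OcrBaiduSdk(content)                              # content: config dict from getConfigs()
#   plate_res = sdk.license_plate_recognition(frame, request_id)
#   text_res = sdk.universal_text_recognition(frame, request_id)
#   if plate_res is None and text_res is None:
#       ...                                                 # treated as no recognition result upstream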
@@ -4,15 +4,13 @@ from common import Constant | |||
# Read all configuration from the configuration file
def getConfigs(): | |||
print("开始读取配置文件,获取配置消息:", Constant.APPLICATION_CONFIG) | |||
applicationConfigPath = os.path.abspath(Constant.APPLICATION_CONFIG) | |||
def getConfigs(base_dir): | |||
applicationConfigPath = os.path.join(base_dir, Constant.APPLICATION_CONFIG) | |||
if not os.path.exists(applicationConfigPath): | |||
raise Exception("未找到配置文件:{}".format(Constant.APPLICATION_CONFIG)) | |||
raise Exception("未找到配置文件:%s" % applicationConfigPath) | |||
with open(applicationConfigPath, Constant.R, encoding=Constant.UTF_8) as f: | |||
file_content = f.read() | |||
content = yaml.load(file_content, yaml.FullLoader) | |||
if not content: | |||
raise Exception("配置项不能为空:{}".format(Constant.APPLICATION_CONFIG)) | |||
print("读取配置文件完成!") | |||
raise Exception("配置项不能为空:%s" % applicationConfigPath) | |||
return content |
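# Usage sketch (assumption: base_dir is the directory containing Constant.APPLICATION_CONFIG,
# typically the service root passed in by the entry point, which is not part of this diff):
#
#   content = getConfigs(os.path.dirname(os.path.abspath(__file__)))
#   app_id = content["baidu"]["orc"]["APP_ID"]              # same key path read by OcrBaiduSdk above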