@@ -0,0 +1,40 @@ | |||
HELP.md | |||
target/ | |||
!.mvn/wrapper/maven-wrapper.jar | |||
!**/src/main/**/target/ | |||
!**/src/test/**/target/ | |||
### STS ### | |||
.apt_generated | |||
.classpath | |||
.factorypath | |||
.project | |||
.settings | |||
.springBeans | |||
.sts4-cache | |||
### IntelliJ IDEA ### | |||
.idea | |||
*.iws | |||
*.iml | |||
*.ipr | |||
### NetBeans ### | |||
/nbproject/private/ | |||
/nbbuild/ | |||
/dist/ | |||
/nbdist/ | |||
/.nb-gradle/ | |||
build/ | |||
!**/src/main/**/build/ | |||
!**/src/test/**/build/ | |||
### VS Code ### | |||
.vscode/ | |||
/.idea | |||
/.vscode | |||
/.svn | |||
tuoheng-ui | |||
target/ | |||
HELP.md |
@@ -1,6 +1,6 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="PublishConfigData" autoUpload="Always" serverName="192.168.11.8" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<component name="PublishConfigData" serverName="192.168.11.8" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<serverData> | |||
<paths name="10.21"> | |||
<serverdata> | |||
@@ -16,6 +16,13 @@ | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="192.168.11.7"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/th/tuo_heng/test/tuoheng_alg" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="192.168.11.8"> | |||
<serverdata> | |||
<mappings> | |||
@@ -39,49 +46,6 @@ | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="dell@192.168.10.12:22"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/chenyukun/algSch" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="root@212.129.223.66:20653"> | |||
<serverdata> | |||
<mappings> | |||
<mapping local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw2@192.168.10.66:22"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/chenyukun/dev/algSch" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw2@212.129.223.66:6500"> | |||
<serverdata> | |||
<mappings> | |||
<mapping local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw@192.168.10.11:22"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/thsw/chenyukun/algSch/" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="thsw@212.129.223.66:6000"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/thsw/chenyukun/algSch" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
</serverData> | |||
<option name="myAutoUpload" value="ALWAYS" /> | |||
</component> | |||
</project> |
@@ -32,6 +32,42 @@ | |||
<option name="IGNORE_POINT_TO_ITSELF" value="false" /> | |||
<option name="myAdditionalJavadocTags" value="date" /> | |||
</inspection_tool> | |||
<inspection_tool class="JavadocDeclaration" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="ADDITIONAL_TAGS" value="date" /> | |||
</inspection_tool> | |||
<inspection_tool class="MissingJavadoc" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="PACKAGE_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="MODULE_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="TOP_LEVEL_CLASS_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="INNER_CLASS_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="METHOD_SETTINGS"> | |||
<Options> | |||
<option name="REQUIRED_TAGS" value="@return@param@throws or @exception" /> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="FIELD_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
</inspection_tool> | |||
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true"> | |||
<option name="ignoredErrors"> | |||
<list> | |||
@@ -41,5 +77,12 @@ | |||
</list> | |||
</option> | |||
</inspection_tool> | |||
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="ignoredIdentifiers"> | |||
<list> | |||
<option value="str.*" /> | |||
</list> | |||
</option> | |||
</inspection_tool> | |||
</profile> | |||
</component> |
@@ -1,9 +1,4 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Remote Python 3.8.8 (sftp://root@212.129.223.66:20653/opt/conda/bin/python3.8)" project-jdk-type="Python SDK" /> | |||
<component name="SwUserDefinedSpecifications"> | |||
<option name="specTypeByUrl"> | |||
<map /> | |||
</option> | |||
</component> | |||
<component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Remote Python 3.8.15 (sftp://th@192.168.11.8:32178/home/th/anaconda3/envs/chenyukun/bin/python3.8)" project-jdk-type="Python SDK" /> | |||
</project> |
@@ -1,10 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="RunConfigurationProducerService"> | |||
<option name="ignoredProducers"> | |||
<set> | |||
<option value="com.android.tools.idea.compose.preview.runconfiguration.ComposePreviewRunConfigurationProducer" /> | |||
</set> | |||
</option> | |||
</component> | |||
</project> |
@@ -6,7 +6,7 @@ | |||
<sshConfig authType="PASSWORD" host="192.168.10.21" id="adf5e1da-4910-4668-bfbb-432f4e2ae77c" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" host="192.168.10.22" id="ac18a75e-ff42-4875-a5da-ad98d2d695ea" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" connectionConfig="{"serverAliveInterval":300}" host="192.168.10.66" id="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" port="32782" nameFormat="DESCRIPTIVE" username="root" /> | |||
<sshConfig authType="PASSWORD" host="192.168.11.8" id="34e9c3c2-edbc-42f0-8c89-cb75bfdf55e1" port="32178" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" host="192.168.11.7" id="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
</configs> | |||
</component> | |||
</project> |
@@ -31,7 +31,14 @@ | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="b761b5c5-5f66-4c6a-ad49-4783ff5df619" name="192.168.11.8"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.8" port="32178" sshConfigId="34e9c3c2-edbc-42f0-8c89-cb75bfdf55e1" sshConfig="th@192.168.11.8:32178 password"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.8" port="32178" sshConfigId="080a8ea2-04ef-404c-8202-a30cad7668a2" sshConfig="th@192.168.11.8:32178 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="d52d4eb1-ad07-4dd6-adac-d5e84d4a0f0c" name="192.168.11.7"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.7" port="22" sshConfigId="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" sshConfig="th@192.168.11.7:22 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> |
@@ -5,12 +5,9 @@ | |||
</component> | |||
<component name="ChangeListManager"> | |||
<list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes"> | |||
<change beforePath="$PROJECT_DIR$/.idea/deployment.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/deployment.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/sshConfigs.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/sshConfigs.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/webServers.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/webServers.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ModelTypeEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/common/Constant.py" beforeDir="false" afterPath="$PROJECT_DIR$/common/Constant.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" /> | |||
</list> | |||
<option name="SHOW_DIALOG" value="false" /> | |||
<option name="HIGHLIGHT_CONFLICTS" value="true" /> | |||
@@ -105,6 +102,9 @@ | |||
</RecentBranches> | |||
</option> | |||
</component> | |||
<component name="MarkdownSettingsMigration"> | |||
<option name="stateVersion" value="1" /> | |||
</component> | |||
<component name="MavenImportPreferences"> | |||
<option name="generalSettings"> | |||
<MavenGeneralSettings> | |||
@@ -126,56 +126,58 @@ | |||
<option name="hideEmptyMiddlePackages" value="true" /> | |||
<option name="showLibraryContents" value="true" /> | |||
</component> | |||
<component name="PropertiesComponent"> | |||
<property name="RunOnceActivity.OpenProjectViewOnStart" value="true" /> | |||
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" /> | |||
<property name="SHARE_PROJECT_CONFIGURATION_FILES" value="true" /> | |||
<property name="WebServerToolWindowFactoryState" value="true" /> | |||
<property name="WebServerToolWindowPanel.toolwindow.highlight.mappings" value="true" /> | |||
<property name="WebServerToolWindowPanel.toolwindow.highlight.symlinks" value="true" /> | |||
<property name="WebServerToolWindowPanel.toolwindow.show.date" value="false" /> | |||
<property name="WebServerToolWindowPanel.toolwindow.show.permissions" value="false" /> | |||
<property name="WebServerToolWindowPanel.toolwindow.show.size" value="false" /> | |||
<property name="last_opened_file_path" value="$PROJECT_DIR$" /> | |||
<property name="node.js.detected.package.eslint" value="true" /> | |||
<property name="node.js.detected.package.tslint" value="true" /> | |||
<property name="node.js.selected.package.eslint" value="(autodetect)" /> | |||
<property name="node.js.selected.package.tslint" value="(autodetect)" /> | |||
<property name="project.structure.last.edited" value="Project" /> | |||
<property name="project.structure.proportion" value="0.15429688" /> | |||
<property name="project.structure.side.proportion" value="0.2" /> | |||
<property name="run.code.analysis.last.selected.profile" value="pProject Default" /> | |||
<property name="settings.editor.selected.configurable" value="preferences.pluginManager" /> | |||
<property name="vue.rearranger.settings.migration" value="true" /> | |||
</component> | |||
<component name="PropertiesComponent"><![CDATA[{ | |||
"keyToString": { | |||
"RunOnceActivity.OpenProjectViewOnStart": "true", | |||
"RunOnceActivity.ShowReadmeOnStart": "true", | |||
"WebServerToolWindowFactoryState": "true", | |||
"WebServerToolWindowPanel.toolwindow.highlight.mappings": "true", | |||
"WebServerToolWindowPanel.toolwindow.highlight.symlinks": "true", | |||
"WebServerToolWindowPanel.toolwindow.show.date": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.permissions": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.size": "false", | |||
"last_opened_file_path": "D:/tuoheng/fanbojiaoyu", | |||
"node.js.detected.package.eslint": "true", | |||
"node.js.detected.package.tslint": "true", | |||
"node.js.selected.package.eslint": "(autodetect)", | |||
"node.js.selected.package.tslint": "(autodetect)", | |||
"project.structure.last.edited": "SDK", | |||
"project.structure.proportion": "0.15", | |||
"project.structure.side.proportion": "0.2816092", | |||
"settings.editor.selected.configurable": "preferences.pluginManager", | |||
"vue.rearranger.settings.migration": "true" | |||
} | |||
}]]></component> | |||
<component name="RecentsManager"> | |||
<key name="CopyFile.RECENT_KEYS"> | |||
<recent name="D:\work\alg_new\tuoheng_alg\test\ffmpeg11" /> | |||
<recent name="D:\work\alg_new\tuoheng_alg\util" /> | |||
<recent name="D:\work\alg\tuoheng_alg\test\ffmpeg11" /> | |||
<recent name="D:\work\alg\tuoheng_alg\data" /> | |||
<recent name="D:\work\alg\tuoheng_alg\test" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\color" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\cuda" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\util" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\aliyun" /> | |||
</key> | |||
<key name="MoveFile.RECENT_KEYS"> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\font" /> | |||
<recent name="D:\work\alg_new\tuoheng_alg\test\image" /> | |||
<recent name="D:\work\alg\tuoheng_alg\test\水印" /> | |||
<recent name="D:\work\alg\tuoheng_alg\image" /> | |||
</key> | |||
</component> | |||
<component name="RunManager" selected="Python.ffmpeg13"> | |||
<configuration name="aa" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<component name="RunManager" selected="Python.color_test"> | |||
<configuration name="IntelligentRecognitionProcess" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="sftp://dell@192.168.10.12:22/home/dell/anaconda3/envs/prod/bin/python3.8" /> | |||
<option name="WORKING_DIRECTORY" value="/home/chenyukun/algSch/test/ffmpeg11" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/concurrency" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="/home/chenyukun/algSch/test/ffmpeg11/aa.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -184,20 +186,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="ffmpeg12" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="color_test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="D:\software\anaconda\envs\chenyukun\python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/color" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/ffmpeg11/ffmpeg12.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/color/color_test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -206,20 +208,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="ffmpeg13" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="D:\software\anaconda\envs\chenyukun\python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/ffmpeg11/ffmpeg13.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -250,20 +252,20 @@ | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="producer_start1" type="PythonConfigurationType" factoryName="Python" temporary="true"> | |||
<configuration name="test (2)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/kafka" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/进程" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/kafka/producer_start.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/进程/test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -279,13 +281,35 @@ | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="D:\software\anaconda\envs\chenyukun\python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/str" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/集合" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/集合/test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="test1" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/cuda" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/str/test.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/cuda/test1.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
@@ -295,20 +319,21 @@ | |||
<method v="2" /> | |||
</configuration> | |||
<list> | |||
<item itemvalue="Python.editImage" /> | |||
<item itemvalue="Python.mysqltest" /> | |||
<item itemvalue="Python.aa" /> | |||
<item itemvalue="Python.producer_start1" /> | |||
<item itemvalue="Python.color_test" /> | |||
<item itemvalue="Python.test (2)" /> | |||
<item itemvalue="Python.IntelligentRecognitionProcess" /> | |||
<item itemvalue="Python.test" /> | |||
<item itemvalue="Python.ffmpeg12" /> | |||
<item itemvalue="Python.ffmpeg13" /> | |||
<item itemvalue="Python.test1" /> | |||
</list> | |||
<recent_temporary> | |||
<list> | |||
<item itemvalue="Python.ffmpeg13" /> | |||
<item itemvalue="Python.ffmpeg12" /> | |||
<item itemvalue="Python.color_test" /> | |||
<item itemvalue="Python.test (2)" /> | |||
<item itemvalue="Python.IntelligentRecognitionProcess" /> | |||
<item itemvalue="Python.test" /> | |||
<item itemvalue="Python.producer_start1" /> | |||
<item itemvalue="Python.aa" /> | |||
<item itemvalue="Python.test1" /> | |||
</list> | |||
</recent_temporary> | |||
</component> | |||
@@ -400,7 +425,71 @@ | |||
<workItem from="1675298111671" duration="1710000" /> | |||
<workItem from="1675388395566" duration="5304000" /> | |||
<workItem from="1675643763842" duration="771000" /> | |||
<workItem from="1676269822235" duration="1871000" /> | |||
<workItem from="1676269822235" duration="1954000" /> | |||
<workItem from="1676362382024" duration="821000" /> | |||
<workItem from="1676424351744" duration="4050000" /> | |||
<workItem from="1676506502236" duration="585000" /> | |||
<workItem from="1676871078953" duration="337000" /> | |||
<workItem from="1676895744433" duration="4418000" /> | |||
<workItem from="1676944131792" duration="515000" /> | |||
<workItem from="1677036599171" duration="4605000" /> | |||
<workItem from="1677112353743" duration="588000" /> | |||
<workItem from="1677574708616" duration="34000" /> | |||
<workItem from="1677632498068" duration="4279000" /> | |||
<workItem from="1677654510460" duration="2082000" /> | |||
<workItem from="1677727307545" duration="438000" /> | |||
<workItem from="1678153491396" duration="9573000" /> | |||
<workItem from="1678253386456" duration="45394000" /> | |||
<workItem from="1678668097364" duration="2754000" /> | |||
<workItem from="1678760898640" duration="1320000" /> | |||
<workItem from="1678791733686" duration="531000" /> | |||
<workItem from="1678839507873" duration="595000" /> | |||
<workItem from="1678885439785" duration="444000" /> | |||
<workItem from="1678925915104" duration="595000" /> | |||
<workItem from="1678927031601" duration="987000" /> | |||
<workItem from="1678928413253" duration="6728000" /> | |||
<workItem from="1679013228398" duration="17427000" /> | |||
<workItem from="1679039229464" duration="9832000" /> | |||
<workItem from="1679118299629" duration="17688000" /> | |||
<workItem from="1679289612196" duration="5820000" /> | |||
<workItem from="1679297557058" duration="1333000" /> | |||
<workItem from="1679359163976" duration="1997000" /> | |||
<workItem from="1679444345433" duration="1190000" /> | |||
<workItem from="1679633582926" duration="1979000" /> | |||
<workItem from="1679876991879" duration="1396000" /> | |||
<workItem from="1680136325711" duration="24199000" /> | |||
<workItem from="1680250415691" duration="1353000" /> | |||
<workItem from="1680486532876" duration="8132000" /> | |||
<workItem from="1680502907387" duration="10960000" /> | |||
<workItem from="1680527121128" duration="3411000" /> | |||
<workItem from="1680577929248" duration="5512000" /> | |||
<workItem from="1680741123267" duration="14728000" /> | |||
<workItem from="1680826640176" duration="21580000" /> | |||
<workItem from="1680914030055" duration="14971000" /> | |||
<workItem from="1680952718810" duration="967000" /> | |||
<workItem from="1681086404430" duration="27714000" /> | |||
<workItem from="1681170492379" duration="39568000" /> | |||
<workItem from="1681220684404" duration="2140000" /> | |||
<workItem from="1681258113350" duration="32577000" /> | |||
<workItem from="1681301257655" duration="429000" /> | |||
<workItem from="1681344786746" duration="5993000" /> | |||
<workItem from="1681363389283" duration="5626000" /> | |||
<workItem from="1681431288218" duration="25974000" /> | |||
<workItem from="1681690599771" duration="2894000" /> | |||
<workItem from="1681696465772" duration="30396000" /> | |||
<workItem from="1681826261843" duration="1474000" /> | |||
<workItem from="1681863254347" duration="13207000" /> | |||
<workItem from="1681950317514" duration="23460000" /> | |||
<workItem from="1682036333722" duration="651000" /> | |||
<workItem from="1682405963588" duration="37651000" /> | |||
<workItem from="1682554149580" duration="33878000" /> | |||
<workItem from="1682640444831" duration="10674000" /> | |||
<workItem from="1683244481879" duration="9171000" /> | |||
<workItem from="1683332505792" duration="23325000" /> | |||
<workItem from="1683506530261" duration="919000" /> | |||
<workItem from="1683507482567" duration="15434000" /> | |||
<workItem from="1683591783960" duration="1186000" /> | |||
<workItem from="1683677260592" duration="8827000" /> | |||
</task> | |||
<servers /> | |||
</component> | |||
@@ -417,7 +506,6 @@ | |||
</entry> | |||
</map> | |||
</option> | |||
<option name="oldMeFiltersMigrated" value="true" /> | |||
</component> | |||
<component name="XDebuggerManager"> | |||
<breakpoint-manager> | |||
@@ -429,9 +517,19 @@ | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/util/Cv2Utils.py</url> | |||
<line>1</line> | |||
<line>2</line> | |||
<option name="timeStamp" value="2" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/test/aliyun/ossdemo.py</url> | |||
<line>4</line> | |||
<option name="timeStamp" value="4" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/util/Cv2Utils.py</url> | |||
<line>1</line> | |||
<option name="timeStamp" value="5" /> | |||
</line-breakpoint> | |||
</breakpoints> | |||
</breakpoint-manager> | |||
</component> | |||
@@ -440,40 +538,54 @@ | |||
<select /> | |||
</component> | |||
<component name="com.intellij.coverage.CoverageDataManagerImpl"> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1__1_.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665820653649" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1675048794635" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$color_test.coverage" NAME="color_test 覆盖结果" MODIFIED="1683683775604" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/color" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo1.coverage" NAME="demo1 覆盖结果" MODIFIED="1680162882599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/demo" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg33.coverage" NAME="ffmpeg33 覆盖结果" MODIFIED="1670489109246" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="协程1 覆盖结果" MODIFIED="1667866542122" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="字典 覆盖结果" MODIFIED="1668089121018" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/字典" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start__1_.coverage" NAME="producer_start (1) 覆盖结果" MODIFIED="1665832569996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$5.coverage" NAME="视频添加图片水印5 Coverage Results" MODIFIED="1661905982885" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test2.coverage" NAME="test2 覆盖结果" MODIFIED="1669178077956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$minio.coverage" NAME="minio 覆盖结果" MODIFIED="1667465702864" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/minio1" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg12.coverage" NAME="ffmpeg12 覆盖结果" MODIFIED="1675391366890" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__2_.coverage" NAME="Test (2) 覆盖结果" MODIFIED="1681796501563" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/路径" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test1.coverage" NAME="test1 覆盖结果" MODIFIED="1681988279624" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/cuda" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ossdemo.coverage" NAME="ossdemo 覆盖结果" MODIFIED="1681715255761" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test__1_.coverage" NAME="test (1) 覆盖结果" MODIFIED="1681969578447" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/cuda" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$editImage.coverage" NAME="editImage 覆盖结果" MODIFIED="1678348350574" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/editimage" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$2.coverage" NAME="协程2 覆盖结果" MODIFIED="1668066168428" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng/algSch/test/协程/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImgBaiduSdk.coverage" NAME="ImgBaiduSdk 覆盖结果" MODIFIED="1678355024003" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master 覆盖结果" MODIFIED="1680503755624" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1682582986112" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/集合" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$IntelligentRecognitionProcess.coverage" NAME="IntelligentRecognitionProcess 覆盖结果" MODIFIED="1682651444560" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/concurrency" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test.coverage" NAME="Test 覆盖结果" MODIFIED="1681810213173" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/序列化" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test2.coverage" NAME="test2 覆盖结果" MODIFIED="1669178077956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa.coverage" NAME="aa 覆盖结果" MODIFIED="1670490313339" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/chenyukun/algSch/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1668437822632" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils__1_.coverage" NAME="KafkaUtils (1) Coverage Results" MODIFIED="1663464961001" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$minio.coverage" NAME="minio 覆盖结果" MODIFIED="1667465702864" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/minio1" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1670999187123" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$voddemo.coverage" NAME="voddemo 覆盖结果" MODIFIED="1681722102430" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1668522825199" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master Coverage Results" MODIFIED="1663403978477" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1668437822632" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start__1_.coverage" NAME="producer_start (1) 覆盖结果" MODIFIED="1665832569996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$ffmpeg11.coverage" NAME="ffmpeg11 覆盖结果" MODIFIED="1668410004435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1671428635702" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1670999187123" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="协程笔记 覆盖结果" MODIFIED="1680926972744" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$gputest.coverage" NAME="gputest 覆盖结果" MODIFIED="1681950938970" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/gpu" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="协程1 覆盖结果" MODIFIED="1667866542122" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$3.coverage" NAME="协程3 覆盖结果" MODIFIED="1668147029048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$5.coverage" NAME="视频添加图片水印5 Coverage Results" MODIFIED="1661905982885" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$read.coverage" NAME="read Coverage Results" MODIFIED="1663640070956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg13.coverage" NAME="ffmpeg13 覆盖结果" MODIFIED="1675394160900" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1__1_.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665820653649" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$TimeUtils.coverage" NAME="TimeUtils Coverage Results" MODIFIED="1661222768678" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1671428635702" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_stop.coverage" NAME="producer_stop 覆盖结果" MODIFIED="1668522920533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__1_.coverage" NAME="Test (1) 覆盖结果" MODIFIED="1681199611277" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg13.coverage" NAME="ffmpeg13 覆盖结果" MODIFIED="1675394160900" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test__2_.coverage" NAME="test (2) 覆盖结果" MODIFIED="1683355406740" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/进程" /> | |||
</component> | |||
</project> |
@@ -1,11 +1,35 @@ | |||
# Configuration file name
APPLICATION_CONFIG = "dsp_application.yml"
# Text encoding
UTF_8 = "utf-8"
# File read mode
R = 'r'
# Progress value reported for 100% completion.
# NOTE(review): name is misspelled ("progess"); kept as-is for backward compatibility.
success_progess = "1.0000"
# Frame-width limit when pulling a stream: frames wider than 1400 px are
# halved, narrower frames are left unchanged.
width = 1400
# Color palette used when drawing results (likely BGR triplets for OpenCV
# drawing — confirm with callers).
COLOR = [
    [0, 0, 255],
    [255, 0, 0],
    [211, 0, 148],
    [0, 127, 0],
    [0, 69, 255],
    [0, 255, 0],
    [255, 0, 255],
    [0, 0, 127],
    [127, 0, 255],
    [255, 129, 0],
    [139, 139, 0],
    [255, 255, 0],
    [127, 255, 0],
    [0, 127, 255],
    [0, 255, 127],
    [255, 127, 255],
    [8, 101, 139],
    [171, 130, 255],
    [139, 112, 74],
    [205, 205, 180]]
@@ -0,0 +1,256 @@ | |||
# -*- coding: utf-8 -*- | |||
# Rule for one category entry inside a model: an alphanumeric id plus an
# optional free-form config dict that may only appear together with the id.
_CATEGORY_ENTRY_RULE = {
    'type': 'dict',
    'required': True,
    'schema': {
        'id': {
            'type': 'string',
            'required': True,
            'empty': False,
            'nullable': False,
            'regex': r'^[a-zA-Z0-9]{1,255}$',
        },
        'config': {
            'type': 'dict',
            'required': False,
            'dependencies': 'id',
        },
    },
}

# Shared rule for the "models" field: a list of 1–3 model dicts, each holding
# an alphanumeric code and a non-empty list of categories ('code' and
# 'categories' must appear together).
model = {
    'type': 'list',
    'required': True,
    'nullable': False,
    'minlength': 1,
    'maxlength': 3,
    'schema': {
        'type': 'dict',
        'required': True,
        'schema': {
            'code': {
                'type': 'string',
                'required': True,
                'empty': False,
                'nullable': False,
                'dependencies': 'categories',
                'regex': r'^[a-zA-Z0-9]{1,255}$',
            },
            'categories': {
                'type': 'list',
                'required': True,
                'dependencies': 'code',
                'minlength': 1,
                'schema': _CATEGORY_ENTRY_RULE,
            },
        },
    },
}
# Validation schema for the "start" command of online analysis tasks.
ONLINE_START_SCHEMA = {
    'request_id': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    'command': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'allowed': ['start', 'stop']
    },
    'pull_url': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'maxlength': 255
    },
    'push_url': {
        'type': 'string',
        'required': True,
        'nullable': False,
        'empty': False,
        'maxlength': 255
    },
    'results_base_dir': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        # {1,36}: a zero-length value was already rejected by 'empty': False,
        # and the sibling OFFLINE/IMAGE start schemas use {1,36} (was {0,36}).
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    'models': model
}
# Validation schema for the "stop" command of online analysis tasks.
ONLINE_STOP_SCHEMA = {
    'request_id': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        regex=r'^[a-zA-Z0-9]{1,36}$',
    ),
    'command': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        allowed=['start', 'stop'],
    ),
}
# Validation schema for the "start" command of offline analysis tasks.
# The three URL-ish fields share the exact same rule; keep one template and
# hand each field its own copy.
_OFFLINE_URL_RULE = {
    'type': 'string',
    'required': True,
    'nullable': False,
    'empty': False,
    'maxlength': 255
}
OFFLINE_START_SCHEMA = {
    'request_id': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    'command': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'allowed': ['start', 'stop']
    },
    'original_url': dict(_OFFLINE_URL_RULE),
    'push_url': dict(_OFFLINE_URL_RULE),
    'original_type': dict(_OFFLINE_URL_RULE),
    'results_base_dir': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    'models': model
}
# Validation schema for the "stop" command of offline analysis tasks.
OFFLINE_STOP_SCHEMA = {
    'request_id': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        regex=r'^[a-zA-Z0-9]{1,36}$',
    ),
    'command': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        allowed=['start', 'stop'],
    ),
}
# Validation schema for the "start" command of image analysis tasks.
IMAGE_START_SCHEMA = {
    'request_id': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        regex=r'^[a-zA-Z0-9]{1,36}$',
    ),
    'command': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        allowed=['start', 'stop'],
    ),
    # At least one image URL; each entry is a non-empty string of at most
    # 5000 characters.
    'image_urls': dict(
        type='list',
        required=True,
        minlength=1,
        schema=dict(
            type='string',
            required=True,
            nullable=False,
            empty=False,
            maxlength=5000,
        ),
    ),
    'results_base_dir': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        regex=r'^[a-zA-Z0-9]{1,36}$',
    ),
    'models': model,
}
# Validation schema for the "start" command of screen-recording tasks.
# pull_url and push_url share the exact same rule; build each from one
# template copy.
_RECORDING_URL_RULE = {
    'type': 'string',
    'required': True,
    'nullable': False,
    'empty': False,
    'maxlength': 255
}
RECORDING_START_SCHEMA = {
    'request_id': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    'command': {
        'type': 'string',
        'required': True,
        'empty': False,
        'nullable': False,
        'allowed': ['start', 'stop']
    },
    'pull_url': dict(_RECORDING_URL_RULE),
    'push_url': dict(_RECORDING_URL_RULE),
}
# Validation schema for the "stop" command of screen-recording tasks.
RECORDING_STOP_SCHEMA = {
    'request_id': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        regex=r'^[a-zA-Z0-9]{1,36}$',
    ),
    'command': dict(
        type='string',
        required=True,
        empty=False,
        nullable=False,
        allowed=['start', 'stop'],
    ),
}
@@ -0,0 +1,294 @@ | |||
# -*- coding: utf-8 -*- | |||
# --- Context / path keys ---
BASE_DIR = "base_dir"

# --- GPU identification ---
GPU_CODES = ["3090", "2080", "4090", "A10"]
GPU_NAME = "gpu_name"
GPU_2080 = "2080"
GPU_2080_Ti = "2080Ti"

# --- Kafka producer/consumer option names ---
KAFKA_ACKS = "acks"
KAFKA_RETRIES = "retries"
KAFKA_LINGER_MS = "linger_ms"
KAFKA_RETRY_BACKOFF_MS = "retry_backoff_ms"
KAFKA_MAX_IN_FLIGHT_REQUESTS = "max_in_flight_requests_per_connection"
KAFKA_CLIENT_ID = "client_id"
KAFKA_GROUP_ID = "group_id"
KAFKA_AUTO_OFFSET_RESET = "auto_offset_reset"
KAFKA_ENABLE_AUTO_COMMIT = "enable_auto_commit"
KAFKA_MAX_POLL_RECORDS = "max_poll_records"

# --- Message / task field names ---
REQUEST_ID = "request_id"
FEEDBACK = "feedback"
RECORDING = "recording"
FBQUEUE = "fbQueue"
CONTEXT = "context"
MSG = "msg"
GPU_IDS = "gpu_ids"
ANALYSE_TYPE = "analyse_type"
COMMAND = "command"
START = "start"
STOP = "stop"
# Validation schema rule definitions (appears to be a Cerberus-style schema —
# confirm against the validator in use). Unlike the per-command schemas, all
# payload fields here are optional so one schema covers every message type.
SCHEMA = {
    # Alphanumeric request identifier, 1-36 characters.
    REQUEST_ID: {
        'type': 'string',
        'required': True,
        'empty': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    # Only the two task-control commands are accepted.
    COMMAND: {
        'type': 'string',
        'required': True,
        'allowed': [START, STOP]
    },
    # Stream/source URLs: optional, may be null, bounded length.
    'pull_url': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'push_url': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'original_url': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'original_type': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    # Optional list of image URLs; each entry is a non-empty string.
    'image_urls': {
        'type': 'list',
        'required': False,
        'schema': {
            'type': 'string',
            'empty': False,
            'maxlength': 5000
        }
    },
    # Optional alphanumeric result directory name (up to 36 characters).
    'results_base_dir': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'regex': r'^[a-zA-Z0-9]{0,36}$'
    },
    # Optional list of model dicts; when present, each must carry a code and
    # its categories ('code' and 'categories' must appear together).
    'models': {
        'type': 'list',
        'required': False,
        'nullable': True,
        'schema': {
            'type': 'dict',
            'required': True,
            'schema': {
                'code': {
                    'type': 'string',
                    'required': True,
                    'empty': False,
                    'dependencies': 'categories',
                    'regex': r'^[a-zA-Z0-9]{1,255}$'
                },
                'categories': {
                    'type': 'list',
                    'required': True,
                    'dependencies': 'code',
                    'schema': {
                        'type': 'dict',
                        'required': True,
                        'schema': {
                            # Category id; 'config' may only appear with 'id'.
                            'id': {
                                'type': 'string',
                                'required': True,
                                'empty': False,
                                'regex': r'^[a-zA-Z0-9]{0,255}$'},
                            'config': {
                                'type': 'dict',
                                'required': False,
                                'dependencies': 'id',
                            }
                        }
                    }
                }
            }
        }
    }
}
def get_file_path(context):
    """Configured video file path."""
    return context["video"]["file_path"]


def get_online_tasks_topic(context):
    """Kafka topic carrying online analysis tasks."""
    return context["kafka"]["topic"]["dsp-alg-online-tasks-topic"]


def get_offline_tasks_topic(context):
    """Kafka topic carrying offline analysis tasks."""
    return context["kafka"]["topic"]["dsp-alg-offline-tasks-topic"]


def get_image_tasks_topic(context):
    """Kafka topic carrying image analysis tasks."""
    return context["kafka"]["topic"]["dsp-alg-image-tasks-topic"]


def get_recording_tasks_topic(context):
    """Kafka topic carrying recording tasks."""
    return context["kafka"]["topic"]["dsp-recording-task-topic"]


def get_kafka_producer_config(context):
    """Kafka producer settings for the active dsp environment."""
    active_env = context["dsp"]["active"]
    return context["kafka"][active_env]["producer"]


def get_kafka_consumer_config(context):
    """Kafka consumer settings for the active dsp environment."""
    active_env = context["dsp"]["active"]
    return context["kafka"][active_env]["consumer"]


def get_kafka_bootstrap_servers(context):
    """Kafka bootstrap servers for the active dsp environment."""
    active_env = context["dsp"]["active"]
    return context["kafka"][active_env]["bootstrap_servers"]


def get_kafka_results_topic(context):
    """Kafka topic for analysis results."""
    return context["kafka"]["topic"]["dsp-alg-results-topic"]


def get_kafka_recording_result_topic(context):
    """Kafka topic for recording results."""
    return context["kafka"]["topic"]["dsp-recording-result-topic"]


def get_aliyun_access_key(context):
    """Aliyun access key id."""
    return context["aliyun"]["access_key"]


def get_aliyun_access_secret(context):
    """Aliyun access key secret."""
    return context["aliyun"]["access_secret"]


def get_aliyun_oss_endpoint(context):
    """Aliyun OSS endpoint."""
    return context["aliyun"]["oss"]["endpoint"]


def get_aliyun_oss_bucket(context):
    """Aliyun OSS bucket name."""
    return context["aliyun"]["oss"]["bucket"]


def get_aliyun_oss_connect_timeout(context):
    """Aliyun OSS connection timeout."""
    return context["aliyun"]["oss"]["connect_timeout"]


def get_aliyun_vod_ecsRegionId(context):
    """Aliyun VOD ECS region id."""
    return context["aliyun"]["vod"]["ecsRegionId"]


def get_aliyun_vod_cateId(context):
    """Aliyun VOD category id for the active dsp environment."""
    active_env = context["dsp"]["active"]
    return context["aliyun"]["vod"][active_env]["CateId"]


def get_gpu_order(context):
    """GPU selection order."""
    return context["gpu"]["order"]


def get_gpu_limit(context):
    """Maximum number of GPUs to select."""
    return context["gpu"]["limit"]


def get_gpu_maxLoad(context):
    """Maximum acceptable GPU load."""
    return context["gpu"]["maxLoad"]


def get_gpu_maxMemory(context):
    """Maximum acceptable GPU memory usage."""
    return context["gpu"]["maxMemory"]


def get_gpu_includeNan(context):
    """Whether GPUs reporting NaN metrics are considered."""
    return context["gpu"]["includeNan"]


def get_gpu_excludeID(context):
    """GPU ids excluded from selection."""
    return context["gpu"]["excludeID"]


def get_gpu_excludeUUID(context):
    """GPU UUIDs excluded from selection."""
    return context["gpu"]["excludeUUID"]


def get_baidu_vehicle_APP_ID(context):
    """Baidu vehicle-recognition app id."""
    return context["baidu"]["vehicle"]["APP_ID"]


def get_baidu_vehicle_API_KEY(context):
    """Baidu vehicle-recognition API key."""
    return context["baidu"]["vehicle"]["API_KEY"]


def get_baidu_vehicle_SECRET_KEY(context):
    """Baidu vehicle-recognition secret key."""
    return context["baidu"]["vehicle"]["SECRET_KEY"]


def get_baidu_person_APP_ID(context):
    """Baidu person-recognition app id."""
    return context["baidu"]["person"]["APP_ID"]


def get_baidu_person_API_KEY(context):
    """Baidu person-recognition API key."""
    return context["baidu"]["person"]["API_KEY"]


def get_baidu_person_SECRET_KEY(context):
    """Baidu person-recognition secret key."""
    return context["baidu"]["person"]["SECRET_KEY"]


def get_baidu_ocr_APP_ID(context):
    """Baidu OCR app id (config key is spelled "orc" in the YAML)."""
    return context["baidu"]["orc"]["APP_ID"]


def get_baidu_ocr_API_KEY(context):
    """Baidu OCR API key (config key is spelled "orc" in the YAML)."""
    return context["baidu"]["orc"]["API_KEY"]


def get_baidu_ocr_SECRET_KEY(context):
    """Baidu OCR secret key (config key is spelled "orc" in the YAML)."""
    return context["baidu"]["orc"]["SECRET_KEY"]


def get_log_base_path(context):
    """Base directory for log files."""
    return context["log"]["base_path"]


def get_log_enable_file(context):
    """Whether file logging is enabled."""
    return context["log"]["enable_file_log"]


def get_log_log_name(context):
    """Log file name."""
    return context["log"]["log_name"]


def get_log_rotation(context):
    """Log rotation policy."""
    return context["log"]["rotation"]


def get_log_retention(context):
    """Log retention policy."""
    return context["log"]["retention"]


def get_log_log_fmt(context):
    """Log record format string."""
    return context["log"]["log_fmt"]


def get_log_level(context):
    """Log level."""
    return context["log"]["level"]


def get_log_enqueue(context):
    """Whether log records are enqueued (loguru enqueue flag)."""
    return context["log"]["enqueue"]


def get_log_encoding(context):
    """Log file encoding."""
    return context["log"]["encoding"]


def get_log_enable_stderr(context):
    """Whether logging to stderr is enabled."""
    return context["log"]["enable_stderr"]
@@ -2,6 +2,8 @@ | |||
import time | |||
from threading import Thread | |||
from loguru import logger | |||
from common import YmlConstant | |||
from util import KafkaUtils | |||
''' | |||
@@ -11,29 +13,36 @@ from util import KafkaUtils | |||
class FeedbackThread(Thread):
    """Forwards feedback and recording messages from an in-process queue to Kafka.

    The block previously contained an unresolved merge of the old (content-based)
    and new (context/YmlConstant-based) implementations — two conflicting
    __init__ definitions and duplicated sender calls made it unparseable.
    Resolved here to the updated implementation.
    """

    def __init__(self, fbQueue, context):
        super().__init__()
        self.__fbQueue = fbQueue
        self.__context = context
        # Resolve target topics once; they do not change over the thread's lifetime.
        self.__dsp_alg_results_topic = YmlConstant.get_kafka_results_topic(context)
        self.__dsp_recording_result_topic = YmlConstant.get_kafka_recording_result_topic(context)

    def getFeedback(self):
        """Blocking read of the next feedback message from the queue."""
        return self.__fbQueue.get()

    def run(self):
        logger.info("启动问题反馈线程")
        kafkaProducer = KafkaUtils.CustomerKafkaProducer(self.__context)
        while True:
            logger.info("问题反馈发送消息循环")
            feedback = {}
            try:
                fb = self.getFeedback()
                if fb is not None and len(fb) > 0:
                    # NOTE(review): fb.get(...) may return None; the except block
                    # calls feedback.get(...) and would raise on None — confirm
                    # producers always include the "feedback" key.
                    feedback = fb.get(YmlConstant.FEEDBACK)
                    recording = fb.get(YmlConstant.RECORDING)
                    if feedback is not None and len(feedback) > 0:
                        kafkaProducer.sender(self.__dsp_alg_results_topic,
                                             feedback[YmlConstant.REQUEST_ID], feedback, 1)
                    if recording is not None and len(recording) > 0:
                        kafkaProducer.sender(self.__dsp_recording_result_topic,
                                             recording[YmlConstant.REQUEST_ID], recording, 1)
                else:
                    time.sleep(1)
            except Exception as e:
                logger.exception("问题反馈异常:{}, requestId:{}", e, feedback.get(YmlConstant.REQUEST_ID))
        logger.info("问题反馈线程执行完成")
@@ -1,108 +0,0 @@ | |||
import asyncio | |||
import time | |||
from threading import Thread | |||
from loguru import logger | |||
import cv2 | |||
from util.AliyunSdk import AliyunOssSdk | |||
from util import TimeUtils, ImageUtils | |||
from entity import FeedBack | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
import numpy as np | |||
from PIL import Image | |||
class FileUpdate(Thread):
    """Base worker thread that shuttles image events in and results out.

    Subclasses read events via getImageQueue() and publish outcomes via
    sendResult().
    """

    def __init__(self, fbQueue, content, msg, imageQueue, mode_service):
        super().__init__()
        self.fbQueue = fbQueue
        self.content = content
        self.imageQueue = imageQueue
        self.mode_service = mode_service
        self.msg = msg

    def getImageQueue(self):
        """Fetch the next image event (blocking); None if the fetch fails."""
        try:
            return self.imageQueue.get()
        except Exception:
            return None

    def sendResult(self, result):
        """Publish an execution result onto the feedback queue."""
        self.fbQueue.put(result)
def build_image_name(base_dir, time_now, current_frame, last_frame, random_num, mode_type, requestId, image_type):
    """Compose the OSS object name for an uploaded frame image.

    NOTE(review): base_dir intentionally appears twice in the resulting name
    (prefix and infix) — confirm the downstream consumer expects this.
    """
    return (f"{base_dir}/{time_now}_frame-{current_frame}-{last_frame}"
            f"_type_{random_num}-{mode_type}-{base_dir}-{requestId}_{image_type}.jpg")
class ImageFileUpdate(FileUpdate):
    """Image-upload worker thread.

    Consumes image events from the image queue, JPEG-encodes the original and
    AI-annotated frames, uploads both to Aliyun OSS concurrently on a private
    asyncio event loop, then reports RUNNING progress through the feedback
    queue. Terminates when a {"command": "stop"} event arrives.
    """
    def run(self):
        logger.info("开始启动图片上传线程, requestId:{}", self.msg.get("request_id"))
        aliyunOssSdk = AliyunOssSdk(self.content, logger, self.msg.get("request_id"))
        aliyunOssSdk.get_oss_bucket()
        # This thread needs its own event loop; it is used to overlap the two uploads.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        while True:
            try:
                image_msg = self.getImageQueue()
                if image_msg is not None and len(image_msg) > 0:
                    image_dict = image_msg.get("image")
                    command = image_msg.get("command")
                    if command == 'stop':
                        logger.info("触发文件上传停止指令!")
                        break
                    if image_dict is not None and len(image_dict) > 0:
                        # JPEG-encode the raw frame and the AI-annotated frame.
                        or_result, or_image = cv2.imencode(".jpg", image_dict.get("or_frame"))
                        ai_result, ai_image = cv2.imencode(".jpg", image_dict.get("ai_frame"))
                        # Name components for the uploaded objects.
                        random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
                        time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
                        # Naming scheme is provisional and may be revised later.
                        or_image_name = build_image_name(self.msg.get('results_base_dir'), time_now,
                                                         str(image_dict.get("current_frame")),
                                                         str(image_dict.get("last_frame")),
                                                         random_num,
                                                         image_dict.get("mode_service"),
                                                         self.msg.get('request_id'), "OR")
                        ai_image_name = build_image_name(self.msg.get('results_base_dir'), time_now,
                                                         str(image_dict.get("current_frame")),
                                                         str(image_dict.get("last_frame")),
                                                         random_num,
                                                         image_dict.get("mode_service"),
                                                         self.msg.get('request_id'), "AI")
                        # Upload both images concurrently and wait for completion.
                        task = loop.create_task(aliyunOssSdk.upload_file(or_image_name, or_image.tobytes()))
                        task1 = loop.create_task(aliyunOssSdk.upload_file(ai_image_name, ai_image.tobytes()))
                        loop.run_until_complete(asyncio.wait([task, task1]))
                        # Previous synchronous upload path, kept for reference:
                        # aliyunOssSdk.upload_file(or_image_name, Image.fromarray(np.uint8(or_image)).tobytes())
                        # aliyunOssSdk.upload_file(ai_image_name, Image.fromarray(np.uint8(ai_image)).tobytes())
                        # Report progress; the feedback queue is forwarded to Kafka elsewhere.
                        self.sendResult({"feedback": FeedBack.message_feedback(self.msg.get('request_id'),
                                                                               AnalysisStatus.RUNNING.value,
                                                                               self.mode_service, "", "",
                                                                               image_dict.get("progress"),
                                                                               or_image_name,
                                                                               ai_image_name,
                                                                               image_dict.get("model_type_code"),
                                                                               image_dict.get("model_detection_code"),
                                                                               TimeUtils.now_date_to_str())})
            except Exception as e:
                logger.exception("图片上传异常:{}, requestId:{}", e, self.msg.get("request_id"))
        loop.close()
        logger.info("结束图片上传线程, requestId:{}", self.msg.get("request_id"))
@@ -0,0 +1,173 @@ | |||
import copy | |||
from concurrent.futures import ThreadPoolExecutor, as_completed | |||
from threading import Thread | |||
from loguru import logger | |||
import cv2 | |||
from util.AliyunSdk import AliyunOssSdk | |||
from util import TimeUtils, ImageUtils | |||
from entity import FeedBack | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from util.PlotsUtils import draw_painting_joint | |||
class FileUpload(Thread):
    """Base thread for uploading analysis images.

    Caches the filter settings (similarity threshold, perceptual-similarity
    switch, minimum frame gap) used by subclasses to throttle near-duplicate
    frames.
    """

    def __init__(self, fbQueue, content, msg, imageQueue, analyse_type):
        super().__init__()
        self.fbQueue = fbQueue
        self.content = content
        self.imageQueue = imageQueue
        self.analyse_type = analyse_type
        self.msg = msg
        filter_cfg = self.content["service"]["filter"]
        self.similarity = filter_cfg["similarity"]
        self.picture_similarity = filter_cfg["picture_similarity"]
        self.frame_step = int(filter_cfg["frame_step"])

    def sendResult(self, result):
        """Push one execution result onto the feedback queue."""
        self.fbQueue.put(result)

    def build_image_name(self, current_frame, last_frame, mode_type, image_type, target):
        """Compose the OSS object name for a frame image.

        target marks what the image shows; the original (un-annotated) frame
        uses "O".
        """
        random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
        time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
        base_dir = self.msg.get('results_base_dir')
        request_id = self.msg.get('request_id')
        return (f"{base_dir}/{time_now}_frame-{current_frame}-{last_frame}"
                f"_type_{random_num}-{mode_type}-{target}-{request_id}_{image_type}.jpg")
'''
Image upload thread
'''
class ImageFileUpload(FileUpload):
    """Image-upload thread.

    Filters near-duplicate frames, draws detections onto per-model copies,
    uploads the original plus each annotated image to OSS through a thread
    pool, and reports RUNNING progress on the feedback queue.
    """
    def handle_image(self, high_score_image, frame_all):
        """Decide whether this frame should be uploaded.

        Returns a dict with the frame, per-model annotated frames and frame
        counters, or None when the frame is filtered out (too similar to the
        last uploaded frame, too close in frame count, or no detections).
        high_score_image is mutated in place to remember the last kept frame.
        """
        flag = True
        # Perceptual-hash (dHash + Hamming distance) similarity filter against
        # the last uploaded frame.
        if self.picture_similarity and len(high_score_image) > 0:
            hash1 = ImageUtils.dHash(high_score_image.get("or_frame"))
            hash2 = ImageUtils.dHash(frame_all.get("frame"))
            dist = ImageUtils.Hamming_distance(hash1, hash2)
            similarity = 1 - dist * 1.0 / 64
            if similarity >= self.similarity:
                flag = False
        # Frame-gap filter: skip frames arriving within frame_step of the last kept one.
        if len(high_score_image) > 0:
            diff_frame_num = frame_all.get("cct_frame") - high_score_image.get("current_frame")
            if diff_frame_num < self.frame_step:
                flag = False
        det_result = frame_all.get("det_xywh")
        model_info = []
        if flag and det_result is not None and len(det_result) > 0:
            '''
            det_xywh:{
                'code':{
                    1: [[detect_targets_code, box, score, label_array, color]]
                }
            }
            model code: modeCode
            detection target code: detectTargetCode
            '''
            # Walk the detections grouped by model code.
            for modelCode in list(det_result.keys()):
                # Detection targets reported under this model.
                det_info = det_result.get(modelCode)
                if det_info is not None and len(det_info) > 0:
                    for detectTargetCode in list(det_info.keys()):
                        target_list = det_info.get(detectTargetCode)
                        if target_list is not None and len(target_list) > 0:
                            # Draw on a copy so each target class gets its own annotated frame.
                            orFrame = copy.deepcopy(frame_all.get("frame"))
                            for target in target_list:
                                draw_painting_joint(target[1], orFrame, target[3], target[2], target[4], "leftTop")
                            model_info.append({
                                "modelCode": str(modelCode),
                                "detectTargetCode": str(detectTargetCode),
                                "frame": orFrame
                            })
        if len(model_info) > 0:
            # Remember this frame as the new reference for the filters above.
            high_score_image["or_frame"] = frame_all.get("frame")
            high_score_image["current_frame"] = frame_all.get("cct_frame")
            image_result = {
                "or_frame": frame_all.get("frame"),
                "model_info": model_info,
                "current_frame": frame_all.get("cct_frame"),
                "last_frame": frame_all.get("cct_frame") + self.frame_step,
                "progress": "",
                "mode_service": self.analyse_type,
            }
            return image_result
        return None

    def run(self):
        logger.info("启动图片上传线程, requestId:{}", self.msg.get("request_id"))
        # Initialize the OSS client once for the thread's lifetime.
        aliyunOssSdk = AliyunOssSdk(self.content, logger, self.msg.get("request_id"))
        aliyunOssSdk.get_oss_bucket()
        high_score_image = {}
        with ThreadPoolExecutor(max_workers=5) as t:
            try:
                while True:
                    try:
                        # Wait for the next message from the image queue.
                        image_msg = self.imageQueue.get()
                        if image_msg is not None and len(image_msg) > 0:
                            image_dict = image_msg.get("image")
                            command = image_msg.get("command")
                            if command == 'stop':
                                break
                            if image_dict is not None and len(image_dict) > 0:
                                image_result = self.handle_image(high_score_image, image_dict)
                                if image_result is not None:
                                    # JPEG-encode and upload the original frame.
                                    task = []
                                    or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame"))
                                    or_image_name = self.build_image_name(str(image_result.get("current_frame")),
                                                                          str(image_result.get("last_frame")),
                                                                          image_result.get("mode_service"),
                                                                          "OR", "O")
                                    or_future = t.submit(aliyunOssSdk.sync_upload_file, or_image_name,
                                                         or_image.tobytes())
                                    task.append(or_future)
                                    model_info_list = image_result.get("model_info")
                                    msg_list = []
                                    # One annotated upload + one feedback message per model/target pair.
                                    for model_info in model_info_list:
                                        ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame"))
                                        ai_image_name = self.build_image_name(str(image_result.get("current_frame")),
                                                                              str(image_result.get("last_frame")),
                                                                              image_result.get("mode_service"),
                                                                              "AI", model_info.get("detectTargetCode"))
                                        ai_future = t.submit(aliyunOssSdk.sync_upload_file, ai_image_name,
                                                             ai_image.tobytes())
                                        task.append(ai_future)
                                        msg_list.append(
                                            {"feedback": FeedBack.message_feedback(self.msg.get('request_id'),
                                                                                   AnalysisStatus.RUNNING.value,
                                                                                   self.analyse_type, "", "",
                                                                                   image_result.get("progress"),
                                                                                   or_image_name,
                                                                                   ai_image_name,
                                                                                   model_info.get('modelCode'),
                                                                                   model_info.get('detectTargetCode'),
                                                                                   TimeUtils.now_date_to_str())})
                                    # Block until every upload succeeded before reporting.
                                    for thread_result in as_completed(task):
                                        thread_result.result()
                                    for msg in msg_list:
                                        self.sendResult(msg)
                    except Exception as e:
                        logger.exception("图片上传异常:{}, requestId:{}", e, self.msg.get("request_id"))
            finally:
                high_score_image.clear()
        logger.info("停止图片上传线程, requestId:{}", self.msg.get("request_id"))
@@ -9,12 +9,12 @@ from entity.FeedBack import message_feedback | |||
class Heartbeat(Thread): | |||
def __init__(self, fbQueue, hbQueue, request_id, mode_service): | |||
def __init__(self, fbQueue, hbQueue, request_id, analyse_type): | |||
super().__init__() | |||
self.fbQueue = fbQueue | |||
self.hbQueue = hbQueue | |||
self.request_id = request_id | |||
self.mode_service = mode_service | |||
self.analyse_type = analyse_type | |||
self.progress = "0.0000" | |||
def getHbQueue(self): | |||
@@ -35,7 +35,7 @@ class Heartbeat(Thread): | |||
def sendhbMessage(self, analysisStatus): | |||
self.sendResult({"feedback": message_feedback(self.request_id, | |||
analysisStatus, | |||
self.mode_service, | |||
self.analyse_type, | |||
progress=self.progress, | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
@@ -0,0 +1,122 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
from queue import Queue | |||
from threading import Thread | |||
from loguru import logger | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util import GPUtils | |||
from util.Cv2Utils import Cv2Util | |||
class PullStreamThread(Thread):
    """Base thread for pulling a video stream.

    Owns a private command queue so the controller can signal it without
    touching the frame (pull) or feedback queues.
    """

    def __init__(self, msg, content, pullQueue, fbQueue):
        super().__init__()
        self.command = Queue()
        self.msg = msg
        self.content = content
        self.pullQueue = pullQueue
        self.fbQueue = fbQueue
        self.recording_pull_stream_timeout = int(self.content["service"]["recording_pull_stream_timeout"])

    def getCommand(self):
        """Non-blocking poll of the command queue; None when nothing is waiting."""
        try:
            return self.command.get(block=False)
        except Exception:
            return None

    def sendCommand(self, result):
        """Queue a control command for this thread."""
        self.command.put(result)

    def sendPullQueue(self, result):
        """Hand a pulled frame/status payload to the consumer."""
        self.pullQueue.put(result)
class RecordingPullStreamThread(PullStreamThread):
    """Pull-stream worker for recording tasks.

    Reads frames via Cv2Util and forwards them on the pull queue as
    status "4" payloads. Terminal statuses sent on the pull queue:
    "1" error, "2" normal end / stop, "3" abnormal (short) end.
    """
    def run(self):
        cv2tool = None
        try:
            logger.info("录屏任务, 开启拉流, requestId:{}", self.msg.get("request_id"))
            gpu_ids = GPUtils.get_gpu_ids(self.content)
            cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"), content=self.content,
                              gpu_ids=gpu_ids, log=logger)
            cv2_init_num = 1      # video-info retry counter
            init_pull_num = 1     # frame-read retry counter
            start_time = time.time()      # since last successful video-info fetch
            start_time_2 = time.time()    # since last successfully read frame
            concurrent_frame = 1
            cv2tool.get_recording_video_info()
            while True:
                body = self.getCommand()
                if body is not None and len(body) > 0:
                    if 'stop' == body.get("command"):
                        logger.info("录屏任务, 拉流线程停止中, reuqestId:{}", self.msg.get("request_id"))
                        self.sendPullQueue({"status": "2"})
                        break
                # Back off while the consumer is behind.
                if self.pullQueue.full():
                    time.sleep(0.1)
                    continue
                # Check whether video info / the pull handle still needs (re)initialization.
                if cv2tool.checkconfig():
                    logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
                    pull_stream_init_timeout = time.time() - start_time
                    if pull_stream_init_timeout > self.recording_pull_stream_timeout:
                        logger.info("录屏拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout,
                                    self.msg.get("request_id"))
                        raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
                    cv2_init_num += 1
                    time.sleep(0.5)
                    cv2tool.get_recording_video_info()
                    continue
                start_time = time.time()
                cv2_init_num = 1
                frame = cv2tool.recording_read()
                if frame is None:
                    logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id"))
                    pull_stream_read_timeout = time.time() - start_time_2
                    if pull_stream_read_timeout > self.recording_pull_stream_timeout:
                        logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
                                    self.msg.get("request_id"))
                        raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
                                               ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
                    if cv2tool.all_frames is not None and len(cv2tool.all_frames) > 0:
                        # Stream ended noticeably short of the expected frame count -> abnormal end.
                        if concurrent_frame < cv2tool.all_frames - 100:
                            logger.info("流异常结束:requestId: {}", self.msg.get("request_id"))
                            self.sendPullQueue({"status": "3"})
                            break
                        logger.info("拉流线程结束, requestId: {}", self.msg.get("request_id"))
                        self.sendPullQueue({"status": "2"})
                        break
                    init_pull_num += 1
                    time.sleep(0.5)
                    cv2tool.recording_pull_p()
                    continue
                init_pull_num = 1
                start_time_2 = time.time()
                self.sendPullQueue({"status": "4",
                                    "frame": frame,
                                    "cct_frame": cv2tool.current_frame,
                                    "width": cv2tool.width,
                                    "height": cv2tool.height,
                                    "fps": cv2tool.fps,
                                    "all_frame": cv2tool.all_frames})
                concurrent_frame += 1
        except ServiceException as s:
            self.sendPullQueue({"status": "1", "error": {"code": s.code, "msg": s.msg}})
        except Exception as e:
            logger.exception("实时拉流异常: {}, requestId:{}", e, self.msg.get("request_id"))
            self.sendPullQueue({"status": "1", "error": {"code": ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                         "msg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1]}})
        finally:
            if cv2tool:
                cv2tool.close()
            logger.info("录屏拉流线程结束, requestId: {}", self.msg.get("request_id"))
@@ -4,16 +4,16 @@ from multiprocessing import Process, Queue | |||
from loguru import logger | |||
from concurrency.FileUpdateThread import ImageFileUpdate | |||
from concurrency.FileUploadThread import ImageFileUpload | |||
from concurrency.HeartbeatThread import Heartbeat | |||
from enums.AnalysisTypeEnum import AnalysisType | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util import LogUtils | |||
from util.Cv2Utils import Cv2Util | |||
class PullVideoStreamProcess(Process): | |||
def __init__(self, msg, content, pullQueue, fbQueue, hbQueue, imageQueue): | |||
def __init__(self, msg, content, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type): | |||
super().__init__() | |||
self.command = Queue() | |||
self.msg = msg | |||
@@ -22,12 +22,11 @@ class PullVideoStreamProcess(Process): | |||
self.fbQueue = fbQueue | |||
self.hbQueue = hbQueue | |||
self.imageQueue = imageQueue | |||
self.step = int(self.content["service"]["frame_step"]) | |||
self.analyse_type = analyse_type | |||
self.pull_stream_timeout = int(self.content["service"]["cv2_pull_stream_timeout"]) | |||
self.read_stream_timeout = int(self.content["service"]["cv2_read_stream_timeout"]) | |||
self.service_timeout = int(self.content["service"]["timeout"]) | |||
def getCommand(self): | |||
eBody = None | |||
try: | |||
@@ -45,104 +44,114 @@ class PullVideoStreamProcess(Process): | |||
    def sendImageResult(self, result):
        """Forward an image event to the image-upload thread's queue."""
        self.imageQueue.put(result)
    def start_File_upload(self):
        """Create, daemonize and start the image-upload thread; returns it."""
        imageFileUpload = ImageFileUpload(self.fbQueue, self.content, self.msg, self.imageQueue, self.analyse_type)
        imageFileUpload.setDaemon(True)
        imageFileUpload.start()
        return imageFileUpload
    def start_heartbeat(self):
        """Create, daemonize and start the heartbeat thread; returns it."""
        hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), self.analyse_type)
        hb.setDaemon(True)
        hb.start()
        return hb
    def check(self, start_time, imageFileUpload, hb):
        """Fail fast when the task exceeds its time budget or a helper thread died.

        Raises ServiceException on analysis timeout, plain Exception when the
        image-upload or heartbeat thread is no longer alive.
        """
        create_task_time = time.time() - start_time
        if create_task_time > self.service_timeout:
            logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id"))
            raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
                                   ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
        # The image-upload thread must still be running.
        if not imageFileUpload.is_alive():
            logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id"))
            raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")
        # The heartbeat thread must still be running.
        if not hb.is_alive():
            logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id"))
            raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!")
class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
def run(self): | |||
cv2tool = None | |||
imageFileUpdate = None | |||
imageFileUpload = None | |||
hb = None | |||
try: | |||
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, AnalysisType.ONLINE.value) | |||
imageFileUpdate.setDaemon(True) | |||
imageFileUpdate.start() | |||
hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), AnalysisType.ONLINE.value) | |||
hb.setDaemon(True) | |||
hb.start() | |||
logger.info("开启视频拉流线程, requestId:{}", self.msg.get("request_id")) | |||
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id")) | |||
# 加载日志框架 | |||
LogUtils.init_log(self.content) | |||
logger.info("开启视频拉流进程, requestId:{}", self.msg.get("request_id")) | |||
# 开启图片上传线程 | |||
imageFileUpload = self.start_File_upload() | |||
# 开启心跳线程 | |||
hb = self.start_heartbeat() | |||
# 初始化拉流工具类 | |||
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"), log=logger) | |||
cv2_init_num = 1 | |||
init_pull_num = 1 | |||
start_time = time.time() | |||
start_time_1 = time.time() | |||
start_time_2 = time.time() | |||
pull_stream_start_time = time.time() | |||
pull_stream_read_start_time = time.time() | |||
concurrent_frame = 1 | |||
stop_imageFile = False | |||
stop_pull_stream_step = False | |||
while True: | |||
end_time = time.time() | |||
create_task_time = int(end_time - start_time) | |||
if create_task_time > self.service_timeout: | |||
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1]) | |||
if not imageFileUpdate.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
if not hb.is_alive(): | |||
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!") | |||
body = self.getCommand() | |||
if body is not None and len(body) > 0: | |||
if 'stop_pull_stream' == body.get("command"): | |||
# 检测任务执行是否超时、心跳线程是否正常、图片上传线程是否正常 | |||
self.check(start_time, imageFileUpload, hb) | |||
# 获取指令信息 | |||
command = self.getCommand() | |||
if command is not None and len(command) > 0: | |||
# 停止拉流 | |||
if 'stop_pull_stream' == command.get("command"): | |||
self.sendPullQueue({"status": "9"}) # 9 停止拉流 | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
if 'stop_image' == body.get("command"): | |||
time.sleep(5) | |||
# 停止图片上传线程 | |||
if 'stop_image_hb' == command.get("command"): | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
hb.join(60 * 3) | |||
logger.error("图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if 'stop_ex' == body.get("command"): | |||
time.sleep(5) | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
# self.pullQueue.cancel_join_thread() | |||
logger.error("拉流、图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if stop_imageFile: | |||
if stop_pull_stream_step: | |||
time.sleep(1) | |||
continue | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
if cv2tool.checkconfig() or cv2tool.pull_p is None: | |||
# 检测视频信息是否存在或拉流对象是否存在 | |||
if cv2tool.checkconfig(): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
pull_stream_init_timeout = time.time() - start_time_1 | |||
pull_stream_init_timeout = time.time() - pull_stream_start_time | |||
if pull_stream_init_timeout > self.pull_stream_timeout: | |||
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, | |||
self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
time.sleep(2) | |||
time.sleep(0.5) | |||
cv2tool.get_video_info() | |||
cv2tool.build_pull_p() | |||
continue | |||
start_time_1 = time.time() | |||
pull_stream_start_time = time.time() | |||
cv2_init_num = 1 | |||
frame = cv2tool.read() | |||
if frame is None: | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id")) | |||
pull_stream_read_timeout = time.time() - start_time_2 | |||
pull_stream_read_timeout = time.time() - pull_stream_read_start_time | |||
if pull_stream_read_timeout > self.read_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "3"}) # 3 超时 | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
init_pull_num += 1 | |||
time.sleep(1) | |||
cv2tool.build_pull_p() | |||
time.sleep(0.1) | |||
continue | |||
init_pull_num = 1 | |||
start_time_2 = time.time() | |||
pull_stream_read_start_time = time.time() | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
self.sendPullQueue({"status": "4", | |||
"frame": frame, | |||
"cct_frame": concurrent_frame, | |||
@@ -160,12 +169,12 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
finally: | |||
if cv2tool: | |||
cv2tool.close() | |||
if imageFileUpdate: | |||
if imageFileUpload: | |||
self.sendImageResult({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
if hb: | |||
self.hbQueue.put({"command": "stop"}) | |||
hb.join(60*3) | |||
hb.join(60 * 3) | |||
logger.info("实时拉流线程结束, requestId: {}", self.msg.get("request_id")) | |||
@@ -173,72 +182,53 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
def run(self): | |||
cv2tool = None | |||
imageFileUpdate = None | |||
imageFileUpload = None | |||
hb = None | |||
try: | |||
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, AnalysisType.OFFLINE.value) | |||
imageFileUpdate.setDaemon(True) | |||
imageFileUpdate.start() | |||
hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), AnalysisType.OFFLINE.value) | |||
hb.setDaemon(True) | |||
hb.start() | |||
cv2tool = Cv2Util(pullUrl=self.msg.get('original_url'), requestId=self.msg.get("request_id")) | |||
# 初始化日志 | |||
LogUtils.init_log(self.content) | |||
# 开启图片上传线程 | |||
imageFileUpload = self.start_File_upload() | |||
# 开启心跳线程 | |||
hb = self.start_heartbeat() | |||
cv2tool = Cv2Util(pullUrl=self.msg.get('original_url'), requestId=self.msg.get("request_id"), log=logger) | |||
cv2_init_num = 1 | |||
start_time = time.time() | |||
cv2tool.get_video_info() | |||
concurrent_frame = 1 | |||
stop_imageFile = False | |||
stop_pull_stream_step = False | |||
while True: | |||
end_time = time.time() | |||
create_task_time = int(end_time - start_time) | |||
if create_task_time > self.service_timeout: | |||
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1]) | |||
if not imageFileUpdate.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
if not hb.is_alive(): | |||
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id")) | |||
raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!") | |||
self.check(start_time, imageFileUpload, hb) | |||
body = self.getCommand() | |||
if body is not None and len(body) > 0: | |||
if 'stop_pull_stream' == body.get("command"): | |||
self.sendPullQueue({"status": "9"}) # 9 停止拉流 | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
if 'stop_image' == body.get("command"): | |||
if 'stop_image_hb' == body.get("command"): | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
hb.join(60 * 3) | |||
logger.info("图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if 'stop_ex' == body.get("command"): | |||
self.sendImageResult({"command": "stop"}) | |||
self.hbQueue.put({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
hb.join(60*3) | |||
# self.pullQueue.cancel_join_thread() | |||
logger.info("图片线程停止完成, reuqestId:{}", self.msg.get("request_id")) | |||
break | |||
if stop_imageFile: | |||
if stop_pull_stream_step: | |||
time.sleep(1) | |||
continue | |||
if self.pullQueue.full(): | |||
time.sleep(0.1) | |||
continue | |||
if cv2tool.checkconfig() or cv2tool.pull_p is None: | |||
if cv2tool.checkconfig(): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
if cv2_init_num > 3: | |||
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id")) | |||
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, | |||
self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
time.sleep(2) | |||
time.sleep(1) | |||
cv2tool.get_video_info() | |||
cv2tool.build_pull_p() | |||
continue | |||
frame = cv2tool.read() | |||
if frame is None: | |||
@@ -248,13 +238,13 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
if concurrent_frame < cv2tool.all_frames - 100: | |||
logger.info("离线拉流异常结束:requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "3"}) | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
cv2tool.close() | |||
continue | |||
logger.info("离线拉流线程结束, requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "2"}) | |||
cv2tool.close() | |||
stop_imageFile = True | |||
stop_pull_stream_step = True | |||
continue | |||
self.sendPullQueue({"status": "4", | |||
"frame": frame, | |||
@@ -273,10 +263,10 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
finally: | |||
if cv2tool is not None: | |||
cv2tool.close() | |||
if imageFileUpdate: | |||
if imageFileUpload: | |||
self.sendImageResult({"command": "stop"}) | |||
imageFileUpdate.join(60*3) | |||
imageFileUpload.join(60 * 3) | |||
if hb: | |||
self.hbQueue.put({"command": "stop"}) | |||
hb.join(60*3) | |||
hb.join(60 * 3) | |||
logger.info("离线拉流线程结束, requestId: {}", self.msg.get("request_id")) |
@@ -0,0 +1,51 @@ | |||
# -*- coding: utf-8 -*-
import time
from queue import Empty
from threading import Thread

from loguru import logger

from entity.FeedBack import recording_feedback
from enums.RecordingStatusEnum import RecordingStatus
class RecordingHeartbeat(Thread):
    """Heartbeat worker for a screen-recording task.

    Every ~30 seconds it pushes a "recording in progress" feedback message
    onto ``fbQueue``; a ``{"command": "stop"}`` entry on ``hbQueue``
    terminates the loop.

    :param fbQueue: outbound feedback queue (consumed by the dispatcher).
    :param hbQueue: inbound control queue for this thread.
    :param request_id: id of the recording request this heartbeat belongs to.
    """

    def __init__(self, fbQueue, hbQueue, request_id):
        super().__init__()
        self.fbQueue = fbQueue
        self.hbQueue = hbQueue
        self.request_id = request_id

    def getHbQueue(self):
        """Non-blocking read of the control queue; returns None when empty.

        Only the expected "queue is empty" condition is swallowed; the
        original bare ``except Exception: pass`` also hid real errors.
        """
        try:
            return self.hbQueue.get(block=False)
        except Empty:
            return None

    # Push an execution result onto the feedback queue.
    def sendResult(self, result):
        self.fbQueue.put(result)

    # Push a control message onto this thread's own queue.
    def sendHbQueue(self, result):
        self.hbQueue.put(result)

    def sendhbMessage(self, statusl):
        # NOTE: parameter name "statusl" kept as-is for caller compatibility;
        # it carries a RecordingStatus code string.
        self.sendResult({"recording": recording_feedback(self.request_id, statusl)})

    def run(self):
        logger.info("开始启动录屏心跳线程!requestId:{}", self.request_id)
        # Seconds elapsed since thread start, advanced in 3s steps; the first
        # heartbeat fires immediately (0 % 30 == 0), then roughly every 30s.
        elapsed_seconds = 0
        while True:
            try:
                time.sleep(3)
                hb_msg = self.getHbQueue()
                if hb_msg is not None and len(hb_msg) > 0:
                    if 'stop' == hb_msg.get("command"):
                        logger.info("开始终止心跳线程, requestId:{}", self.request_id)
                        break
                if elapsed_seconds % 30 == 0:
                    self.sendhbMessage(RecordingStatus.RECORDING_RUNNING.value[0])
                elapsed_seconds += 3
            except Exception as e:
                # Keep the heartbeat alive on transient errors; log and retry.
                logger.exception("心跳线程异常:{}, requestId:{}", e, self.request_id)
        logger.info("心跳线程停止完成!requestId:{}", self.request_id)
@@ -1,43 +0,0 @@ | |||
# -*- coding: utf-8 -*- | |||
import sys | |||
sys.path.extend(['..', '../AIlib']) | |||
from segutils.segmodel import SegModel, get_largest_contours | |||
from utils.torch_utils import select_device | |||
from models.experimental import attempt_load | |||
from utilsK.queRiver import get_labelnames, get_label_arrays, post_process_ | |||
from AI import AI_process, AI_process_forest, get_postProcess_para | |||
class ModelConfig:
    """Base model configuration.

    Loads the shared post-processing parameters from the AIlib config file
    (path relative to the working directory) and exposes them to subclasses.
    """

    def __init__(self):
        post_file = '../AIlib/conf/para.json'
        (self.conf_thres,
         self.iou_thres,
         self.classes,
         self.rainbows) = get_postProcess_para(post_file)
class SZModelConfig(ModelConfig):
    """Configuration for the yolov5 class8 model: class names plus
    pre-rendered label images for drawing."""

    def __init__(self):
        super(SZModelConfig, self).__init__()
        # Class-name table for this model (原注释: 对应类别表).
        self.names = get_labelnames("../AIlib/weights/yolov5/class8/labelnames.json")
        self.label_arraylist = get_label_arrays(self.names,
                                                self.rainbows,
                                                outfontsize=40,
                                                fontpath="../AIlib/conf/platech.ttf")
class LCModelConfig(ModelConfig):
    """Configuration for the forest model: class names plus pre-rendered
    label images for drawing."""

    def __init__(self):
        super(LCModelConfig, self).__init__()
        label_file = "../AIlib/weights/forest/labelnames.json"
        self.names = get_labelnames(label_file)
        self.label_arraylist = get_label_arrays(self.names,
                                                self.rainbows,
                                                outfontsize=40,
                                                fontpath="../AIlib/conf/platech.ttf")
class RFModelConfig(ModelConfig):
    """Configuration for the road model: class names plus pre-rendered
    label images for drawing."""

    def __init__(self):
        super(RFModelConfig, self).__init__()
        self.names = get_labelnames("../AIlib/weights/road/labelnames.json")
        # Label font size is scaled for a 1536px-wide image, using 40px at
        # 1920px width as the reference.
        image_width = 1536
        scaled_font = int(image_width / 1920 * 40)
        self.label_arraylist = get_label_arrays(self.names,
                                                self.rainbows,
                                                outfontsize=scaled_font,
                                                fontpath="../AIlib/conf/platech.ttf")
@@ -6,6 +6,8 @@ kafka: | |||
dsp-alg-offline-tasks-topic: dsp-alg-offline-tasks | |||
dsp-alg-image-tasks-topic: dsp-alg-image-tasks | |||
dsp-alg-results-topic: dsp-alg-task-results | |||
dsp-recording-task-topic: dsp-recording-task | |||
dsp-recording-result-topic: dsp-recording-result | |||
local: | |||
bootstrap_servers: ['192.168.10.11:9092'] | |||
# dsp-alg-online-tasks: | |||
@@ -96,9 +98,9 @@ gpu: | |||
# 获取可用gpu数量 | |||
limit: 10 | |||
# 最大负载 | |||
maxLoad: 0.8 | |||
maxLoad: 0.85 | |||
# 最大内存 | |||
maxMemory: 0.8 | |||
maxMemory: 0.85 | |||
includeNan: False | |||
excludeID: [] | |||
excludeUUID: [] | |||
@@ -117,21 +119,43 @@ aliyun: | |||
vod: | |||
host_address: https://vod.play.t-aaron.com/ | |||
ecsRegionId: "cn-shanghai" | |||
dev: | |||
CateId: 1000468341 | |||
test: | |||
CateId: 1000468338 | |||
prod: | |||
CateId: 1000468340 | |||
service: | |||
frame_step: 300 # 多少帧数步长之间获取一次分析图片 | |||
frame_score: 0.4 # 获取最低得分以上的图片 | |||
filter: | |||
# 识别相似度是否开启 | |||
picture_similarity: True | |||
# 相似度阀值 | |||
similarity: 0.65 | |||
frame_step: 160 | |||
timeout: 21600 # 一次识别任务超时时间,单位秒,默认6个小时 | |||
cv2_pull_stream_timeout: 3600 # 直播开始视频未推流超时时间 | |||
cv2_read_stream_timeout: 1800 # 直播读流中超时时间 | |||
baiduocr: | |||
APP_ID: 28173504 | |||
API_KEY: kqrFE7VuygIaFer7z6cRxzoi | |||
SECRET_KEY: yp7xBokyl4TItyGhay7skAN1cMwfvEXf | |||
cv2_pull_stream_timeout: 1000 # 直播开始视频未推流超时时间 | |||
cv2_read_stream_timeout: 1000 # 直播读流中超时时间 | |||
recording_pull_stream_timeout: 600 # 录屏拉流超时时间 | |||
baidu: | |||
orc: | |||
APP_ID: 28173504 | |||
API_KEY: kqrFE7VuygIaFer7z6cRxzoi | |||
SECRET_KEY: yp7xBokyl4TItyGhay7skAN1cMwfvEXf | |||
# 车辆检测 | |||
vehicle: | |||
APP_ID: 31096670 | |||
API_KEY: Dam3O4tgPRN3qh4OYE82dbg7 | |||
SECRET_KEY: 1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa | |||
# 人 | |||
person: | |||
APP_ID: 31096755 | |||
API_KEY: CiWrt4iyxOly36n3kR7utiAG | |||
SECRET_KEY: K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v | |||
# 模型相关配置 | |||
model: | |||
limit: 3 # 模型组合个数限制 | |||
# 日志设置 | |||
log: | |||
# 是否开启文件输出 True:开启 False:关闭 |
@@ -1,11 +1,16 @@ | |||
# -*- coding: utf-8 -*- | |||
import os | |||
import sys | |||
from service import Dispatcher | |||
# import torch | |||
import torch | |||
''' | |||
dsp主程序入口 | |||
''' | |||
if __name__ == '__main__': | |||
print("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙") | |||
# torch.multiprocessing.set_start_method('spawn') | |||
Dispatcher.DispatcherService().start_service() | |||
# 获取主程序执行根路径 | |||
base_dir = os.path.dirname(os.path.realpath(sys.argv[0])) | |||
torch.multiprocessing.set_start_method('spawn') | |||
Dispatcher.DispatcherService(base_dir).start_service() |
@@ -1,6 +1,5 @@ | |||
def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="", | |||
model_type_code="", model_detection_code='', analyse_time="", analyse_results=""): | |||
def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="", | |||
modelCode="", detectTargetCode="", analyse_time="", analyse_results=""): | |||
taskfb = {} | |||
results = [] | |||
result_msg = {} | |||
@@ -13,9 +12,19 @@ def message_feedback(requestId, status, type, error_code="", error_msg="", prog | |||
result_msg["original_url"] = original_url | |||
result_msg["sign_url"] = sign_url | |||
result_msg["analyse_results"] = analyse_results | |||
result_msg["model_type_code"] = model_type_code | |||
result_msg["model_detection_code"] = model_detection_code | |||
result_msg["model_code"] = modelCode | |||
result_msg["detect_targets_code"] = detectTargetCode | |||
result_msg["analyse_time"] = analyse_time | |||
results.append(result_msg) | |||
taskfb["results"] = results | |||
return taskfb | |||
return taskfb | |||
def recording_feedback(requestId, status, error_code="", error_msg="", recording_video_url=""):
    """Build the feedback payload for a screen-recording task.

    :param requestId: id of the recording request.
    :param status: recording status code (see RecordingStatus).
    :param error_code: optional error code, empty on success.
    :param error_msg: optional error description, empty on success.
    :param recording_video_url: URL of the recorded video, when available.
    :return: dict ready to be placed on the feedback queue.
    """
    return {
        "request_id": requestId,
        "status": status,
        "error_code": error_code,
        "error_msg": error_msg,
        "recording_video_url": recording_video_url,
    }
@@ -13,6 +13,9 @@ class AnalysisType(Enum): | |||
# 图片 | |||
IMAGE = "3" | |||
# 录屏 | |||
RECORDING = "9999" | |||
@@ -0,0 +1,188 @@ | |||
from enum import Enum, unique | |||
''' | |||
ocr官方文档: https://ai.baidu.com/ai-doc/OCR/zkibizyhz | |||
官方文档: https://ai.baidu.com/ai-doc/VEHICLE/rk3inf9tj | |||
参数1: 异常编号 | |||
参数2: 异常英文描述 | |||
参数3: 异常中文描述 | |||
参数4: 0-异常信息统一输出为内部异常 | |||
1-异常信息可以输出 | |||
2-输出空的异常信息 | |||
参数5: 指定异常重试的次数 | |||
''' | |||
# 异常枚举 | |||
@unique
class BaiduSdkErrorEnum(Enum):
    """Baidu SDK error codes.

    Tuple layout (per the module docstring above this class):
    (code, english message, chinese message, output policy, retry count)
    where output policy is 0 = report as internal error, 1 = message may be
    shown to the caller, 2 = emit an empty message.
    """
    UNKNOWN_ERROR = (1, "Unknown error", "未知错误", 0, 0)
    SERVICE_TEMPORARILY_UNAVAILABLE = (2, "Service temporarily unavailable", "服务暂不可用,请再次请求", 0, 3)
    UNSUPPORTED_OPENAPI_METHOD = (3, "Unsupported openapi method", "调用的API不存在", 0, 0)
    API_REQUEST_LIMIT_REACHED = (4, "Open api request limit reached", "请求量限制, 请稍后再试!", 1, 5)
    NO_PERMISSION_TO_ACCESS_DATA = (6, "No permission to access data", "无权限访问该用户数据", 1, 0)
    GET_SERVICE_TOKEN_FAILED = (13, "Get service token failed", "获取token失败", 0, 2)
    IAM_CERTIFICATION_FAILED = (14, "IAM Certification failed", "IAM 鉴权失败", 0, 1)
    APP_NOT_EXSITS_OR_CREATE_FAILED = (15, "app not exsits or create failed", "应用不存在或者创建失败", 0, 0)
    API_DAILY_REQUEST_LIMIT_REACHED = (17, "Open api daily request limit reached", "每天请求量超限额!", 1, 2)
    API_QPS_REQUEST_LIMIT_REACHED = (18, "Open api qps request limit reached", "QPS超限额!", 1, 10)
    API_TOTAL_REQUEST_LIMIT_REACHED = (19, "Open api total request limit reached", "请求总量超限额!", 1, 2)
    INVALID_TOKEN = (100, "Invalid parameter", "无效的access_token参数,token拉取失败", 0, 1)
    ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID = (110, "Access token invalid or no longer valid", "access_token无效,token有效期为30天", 0, 1)
    ACCESS_TOKEN_EXPIRED = (111, "Access token expired", "access token过期,token有效期为30天", 0, 1)
    INTERNAL_ERROR = (282000, "internal error", "服务器内部错误", 0, 1)
    INVALID_PARAM = (216100, "invalid param", "请求中包含非法参数!", 0, 1)
    NOT_ENOUGH_PARAM = (216101, "not enough param", "缺少必须的参数!", 0, 0)
    SERVICE_NOT_SUPPORT = (216102, "service not support", "请求了不支持的服务,请检查调用的url", 0, 0)
    PARAM_TOO_LONG = (216103, "param too long", "请求中某些参数过长!", 1, 0)
    APPID_NOT_EXIST = (216110, "appid not exist", "appid不存在", 0, 0)
    EMPTY_IMAGE = (216200, "empty image", "图片为空!", 1, 0)
    IMAGE_FORMAT_ERROR = (216201, "image format error", "上传的图片格式错误,现阶段我们支持的图片格式为:PNG、JPG、JPEG、BMP", 1, 0)
    IMAGE_SIZE_ERROR = (216202, "image size error", "上传的图片大小错误,分辨率不高于4096*4096", 1, 0)
    IMAGE_SIZE_BASE_ERROR = (216203, "image size error", "上传的图片编码有误", 1, 0)
    RECOGNIZE_ERROR = (216630, "recognize error", "识别错误", 2, 2)
    DETECT_ERROR = (216634, "detect error", "检测错误", 2, 2)
    MISSING_PARAMETERS = (282003, "missing parameters: {参数名}", "请求参数缺失", 0, 0)
    BATCH_ROCESSING_ERROR = (282005, "batch processing error", "处理批量任务时发生部分或全部错误", 0, 5)
    BATCH_TASK_LIMIT_REACHED = (282006, "batch task limit reached", "批量任务处理数量超出限制,请将任务数量减少到10或10以下", 1, 5)
    IMAGE_TRANSCODE_ERROR = (282100, "image transcode error", "图片压缩转码错误", 0, 1)
    IMAGE_SPLIT_LIMIT_REACHED = (282101, "image split limit reached", "长图片切分数量超限!", 1, 1)
    TARGET_DETECT_ERROR = (282102, "target detect error", "未检测到图片中识别目标!", 2, 1)
    TARGET_RECOGNIZE_ERROR = (282103, "target recognize error", "图片目标识别错误!", 2, 1)
    URLS_NOT_EXIT = (282110, "urls not exit", "URL参数不存在,请核对URL后再次提交!", 1, 0)
    URL_FORMAT_ILLEGAL = (282111, "url format illegal", "URL格式非法!", 1, 0)
    # Fixed copy-paste bug: the Chinese message previously duplicated
    # URL_FORMAT_ILLEGAL's text ("URL格式非法!") for a download timeout.
    URL_DOWNLOAD_TIMEOUT = (282112, "url download timeout", "URL下载超时!", 1, 0)
    URL_RESPONSE_INVALID = (282113, "url response invalid", "URL返回无效参数!", 1, 0)
    URL_SIZE_ERROR = (282114, "url size error", "URL长度超过1024字节或为0!", 1, 0)
    REQUEST_ID_NOT_EXIST = (282808, "request id: xxxxx not exist", "request id xxxxx 不存在", 0, 0)
    RESULT_TYPE_ERROR = (282809, "result type error", "返回结果请求错误(不属于excel或json)", 0, 0)
    IMAGE_RECOGNIZE_ERROR = (282810, "image recognize error", "图像识别错误", 2, 1)
    INVALID_ARGUMENT = (283300, "Invalid argument", "入参格式有误,可检查下图片编码、代码格式是否有误", 1, 0)
    INTERNAL_ERROR_2 = (336000, "Internal error", "服务器内部错误", 0, 0)
    INVALID_ARGUMENT_2 = (336001, "Invalid Argument", "入参格式有误,比如缺少必要参数、图片编码错误等等,可检查下图片编码、代码格式是否有误", 0, 0)
    SDK_IMAGE_SIZE_ERROR = ('SDK100', "image size error", "图片大小超限,最短边至少50px,最长边最大4096px ,建议长宽比3:1以内,图片请求格式支持:PNG、JPG、BMP", 1, 0)
    SDK_IMAGE_LENGTH_ERROR = ('SDK101', "image length error", "图片边长不符合要求,最短边至少50px,最长边最大4096px ,建议长宽比3:1以内", 1, 0)
    SDK_READ_IMAGE_FILE_ERROR = ('SDK102', "read image file error", "读取图片文件错误", 0, 1)
    SDK_CONNECTION_OR_READ_DATA_TIME_OUT = ('SDK108', "connection or read data time out", "连接超时或读取数据超时,请检查本地网络设置、文件读取设置", 0, 3)
    SDK_UNSUPPORTED_IMAGE_FORMAT = ('SDK109', "unsupported image format", "不支持的图片格式,当前支持以下几类图片:PNG、JPG、BMP", 1, 0)


# Lookup table: error code (int or 'SDKxxx' string) -> enum member.
# Built by comprehension so every member is covered automatically; the
# previous hand-written 49-entry dict could silently drift out of sync
# with the enum when members were added or removed.
BAIDUERRORDATA = {member.value[0]: member for member in BaiduSdkErrorEnum}
@unique
class VehicleEnum(Enum):
    """Vehicle categories used by the vehicle-detection integration.

    Tuple layout: (api label, Chinese display name, numeric id).
    """
    CAR = ("car", "小汽车", 0)
    TRICYCLE = ("tricycle", "三轮车", 1)
    MOTORBIKE = ("motorbike", "摩托车", 2)
    CARPLATE = ("carplate", "车牌", 3)
    TRUCK = ("truck", "卡车", 4)
    BUS = ("bus", "巴士", 5)


# Lookup table: api label -> enum member; built by comprehension so the
# mapping always stays in sync with the enum members above.
VehicleEnumVALUE = {vehicle.value[0]: vehicle for vehicle in VehicleEnum}
@@ -5,62 +5,64 @@ from enum import Enum, unique | |||
@unique | |||
class ExceptionType(Enum): | |||
VIDEO_UPDATE_EXCEPTION = ("SP000", "Video upload exception!") | |||
OR_VIDEO_ADDRESS_EXCEPTION = ("SP000", "未拉取到视频流, 请检查拉流地址是否有视频流!") | |||
OR_VIDEO_ADDRESS_EXCEPTION = ("SP001", "Original Video Address Error!") | |||
ANALYSE_TIMEOUT_EXCEPTION = ("SP001", "AI分析超时!") | |||
ANALYSE_TIMEOUT_EXCEPTION = ("SP002", "Analysis Timeout Exception!") | |||
PULLSTREAM_TIMEOUT_EXCEPTION = ("SP002", "原视频拉流超时!") | |||
PULLSTREAM_TIMEOUT_EXCEPTION = ("SP003", "Pull Stream Timeout Exception!") | |||
READSTREAM_TIMEOUT_EXCEPTION = ("SP003", "原视频读取视频流超时!") | |||
READSTREAM_TIMEOUT_EXCEPTION = ("SP004", "READ Stream Timeout Exception!") | |||
GET_VIDEO_URL_EXCEPTION = ("SP004", "获取视频播放地址失败!") | |||
GET_VIDEO_URL_EXCEPTION = ("SP005", "Get Video Url Exception!") | |||
GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("SP005", "获取原视频播放地址超时!") | |||
GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("SP006", "Get Video Url Timeout Exception!") | |||
PULL_STREAM_URL_EXCEPTION = ("SP006", "拉流地址不能为空!") | |||
PULL_STREAM_URL_EXCEPTION = ("SP007", "Pull Stream Address Is Empty!") | |||
PUSH_STREAM_URL_EXCEPTION = ("SP007", "推流地址不能为空!") | |||
PULL_PIPELINE_INIT_EXCEPTION = ("SP008", "Pull Stream PIPELINE Exception!") | |||
PUSH_STREAM_TIME_EXCEPTION = ("SP008", "推流时间或原视频时间太短, 未生成分析结果, 建议延长推流时间或原视频时间!") | |||
PUSH_STREAM_URL_EXCEPTION = ("SP009", "Push Stream Address Is Empty!") | |||
AI_MODEL_MATCH_EXCEPTION = ("SP009", "未匹配到对应的AI模型!") | |||
CV2_IS_NULL_EXCEPTION = ("SP010", "The CV2 Is Empty!") | |||
ILLEGAL_PARAMETER_FORMAT = ("SP010", "非法参数格式!") | |||
OR_WRITE_OBJECT_EXCEPTION = ("SP011", "The Original Video Writing Object Is Empty!") | |||
PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP011", "推流通道可能被占用, 请稍后再试!") | |||
AI_WRITE_OBJECT_EXCEPTION = ("SP012", "The Ai Video Writing Object Is Empty!") | |||
VIDEO_RESOLUTION_EXCEPTION = ("SP012", "不支持该分辨率类型的视频,请切换分辨率再试!") | |||
VIDEO_ADDRESS_EXCEPTION = ("SP013", "The Video Address Does Not Exist!") | |||
READ_IAMGE_URL_EXCEPTION = ("SP013", "未能解析图片地址!") | |||
AI_VIDEO_ADDRESS_EXCEPTION = ("SP014", "AI Video Address Error!") | |||
DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED = ("SP014", "不支持该类型的检测目标!") | |||
OFFLINE_RETRY_TIMEOUT_EXCEPTION = ("SP015", "Offline Retry Timeout Exception!") | |||
WRITE_STREAM_EXCEPTION = ("SP015", "写流异常!") | |||
AI_MODEL_CONFIG_EXCEPTION = ("SP016", "AI Model Config Exception!") | |||
OR_VIDEO_DO_NOT_EXEIST_EXCEPTION = ("SP016", "原视频不存在!") | |||
AI_MODEL_MATCH_EXCEPTION = ("SP017", "The AI Model Is Not Matched!") | |||
MODEL_LOADING_EXCEPTION = ("SP017", "模型加载异常!") | |||
VIDEO_MERGE_EXCEPTION = ("SP018", "The Video Merge Exception!") | |||
MODEL_ANALYSE_EXCEPTION = ("SP018", "算法模型分析异常!") | |||
VIDEO_ANALYSIS_EXCEPTION = ("SP019", "Video Analysis Failed!") | |||
AI_MODEL_CONFIG_EXCEPTION = ("SP019", "模型配置不能为空!") | |||
PUSH_STREAM_URL_E_EXCEPTION = ("SP020", "Push Stream URL Exception!") | |||
AI_MODEL_GET_CONFIG_EXCEPTION = ("SP020", "获取模型配置异常, 请检查模型配置是否正确!") | |||
VIDEO_CONFIG_EXCEPTION = ("SP021", "Video Config Exception!") | |||
MODEL_GROUP_LIMIT_EXCEPTION = ("SP021", "模型组合个数超过限制!") | |||
ILLEGAL_PARAMETER_FORMAT = ("SP022", "Illegal Parameter Format!") | |||
MODEL_NOT_SUPPORT_VIDEO_EXCEPTION = ("SP022", "%s不支持视频识别!") | |||
REQUEST_TYPE_NOT_MATCHED = ("SP023", "Request Type Not Matched!") | |||
MODEL_NOT_SUPPORT_IMAGE_EXCEPTION = ("SP023", "%s不支持图片识别!") | |||
MODEL_ANALYSIS_EXCEPTION = ("SP024", "Model Analysis Exception!") | |||
THE_DETECTION_TARGET_CANNOT_BE_EMPTY = ("SP024", "检测目标不能为空!") | |||
PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP025", "推流通道被占用, 请稍后再试!") | |||
URL_ADDRESS_ACCESS_FAILED = ("SP025", "URL地址访问失败, 请检测URL地址是否正确!") | |||
WRITE_STREAM_EXCEPTION = ("SP026", "视频写流异常, 请联系工程师定位处理!") | |||
UNIVERSAL_TEXT_RECOGNITION_FAILED = ("SP026", "识别失败!") | |||
VIDEO_RESOLUTION_EXCEPTION = ("SP027", "不支持该分辨率类型的视频,请切换分辨率再试!") | |||
COORDINATE_ACQUISITION_FAILED = ("SP027", "飞行坐标识别异常!") | |||
SERVICE_COMMON_EXCEPTION = ("SP997", "公共服务异常!") | |||
NO_GPU_RESOURCES = ("SP998", "暂无GPU资源可以使用,请稍后再试!") | |||
SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常, 请联系工程师定位处理!") | |||
SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常!") |
@@ -1,34 +1,73 @@ | |||
from enum import Enum, unique | |||
''' | |||
参数说明 | |||
1. 编号 | |||
2. 模型编号 | |||
3. 模型名称 | |||
4. 选用的模型名称 | |||
5. 是否可以参与多个模型组合调用 | |||
0: 视频、图片模型组合都支持 | |||
1: 只支持视频模型之间的组合 | |||
2: 只支持图片模型之间的组合 | |||
''' | |||
# 异常枚举 | |||
@unique | |||
class ModelType(Enum): | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', 0) | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', 0) | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river') | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', 0) | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2') | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, 2) | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'road') | |||
PLATE_MODEL = ("5", "005", "车牌模型", None, 2) | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None) | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', 0) | |||
PLATE_MODEL = ("5", "005", "车牌模型", None) | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', 0) | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle') | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', 0) | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian') | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', 0) | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire') | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', 0) | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer') | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship2', 0) | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad') | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, 2) | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship') | |||
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', 0) | |||
def checkCode(code): | |||
for model in ModelType: | |||
if model.value[1] == code: | |||
return True | |||
return False | |||
return False | |||
''' | |||
参数1: 检测目标名称 | |||
参数2: 检测目标 | |||
参数3: 初始化百度检测客户端 | |||
''' | |||
@unique
class BaiduModelTarget(Enum):
    """Detection targets for the Baidu image-analysis model.

    Tuple layout: (display name, numeric target id, dispatcher).
    The dispatcher receives both pre-built Baidu clients plus the image URL
    and request id, and routes the call to the appropriate client method:
    client0 handles vehicle detection, client1 handles body analysis.
    """
    VEHICLE_DETECTION = (
        "车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id))
    HUMAN_DETECTION = (
        "人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id))
    PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id))


# Lookup table: numeric target id -> enum member; built by comprehension so
# new targets are picked up automatically instead of being added by hand.
BAIDU_MODEL_TARGET_CONFIG = {target.value[1]: target for target in BaiduModelTarget}
@@ -0,0 +1,16 @@ | |||
from enum import Enum, unique | |||
# Screen-recording task status enumeration.
@unique
class RecordingStatus(Enum):
    """Lifecycle states of a screen-recording task: (status code, Chinese label)."""

    RECORDING_WAITING = ("5", "待录制")    # queued, recording not started yet
    RECORDING_RUNNING = ("10", "录制中")   # recording in progress
    RECORDING_SUCCESS = ("15", "录制完成")  # finished successfully
    RECORDING_TIMEOUT = ("20", "录制超时")  # aborted: time limit exceeded
    RECORDING_FAILED = ("25", "录制失败")   # aborted: recording error
@@ -8,9 +8,12 @@ from loguru import logger | |||
class ServiceException(Exception): # 继承异常类 | |||
def __init__(self, code, msg): | |||
def __init__(self, code, msg, desc=None): | |||
self.code = code | |||
self.msg = msg | |||
if desc is None: | |||
self.msg = msg | |||
else: | |||
self.msg = msg % desc | |||
def __str__(self): | |||
logger.error("异常编码:{}, 异常描述:{}", self.code, self.msg) |
@@ -1,19 +1,24 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
import GPUtil | |||
import torch | |||
from cerberus import Validator | |||
from common import YmlConstant | |||
from concurrency.FeedbackThread import FeedbackThread | |||
from entity.FeedBack import message_feedback | |||
from entity.FeedBack import message_feedback, recording_feedback | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from enums.AnalysisTypeEnum import AnalysisType | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.RecordingStatusEnum import RecordingStatus | |||
from exception.CustomerException import ServiceException | |||
from util import YmlUtils, FileUtils, LogUtils, KafkaUtils, TimeUtils | |||
from loguru import logger | |||
from multiprocessing import Queue | |||
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, \ | |||
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess | |||
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess, ScreenRecordingProcess | |||
from util import GPUtils | |||
from util.GPUtils import get_first_gpu_name | |||
''' | |||
分发服务 | |||
@@ -21,318 +26,317 @@ from util import GPUtils | |||
class DispatcherService:
    """Kafka-driven dispatcher: pulls analysis-task messages and spawns worker processes.

    NOTE(review): this region is a mangled diff merge — two revisions of
    __init__ appear back to back, and the analysisType dict interleaves entries
    from both revisions (the missing comma after the IMAGE entry makes it
    invalid Python as-is). Code kept byte-for-byte; comments only.
    """
    def __init__(self, base_dir):
        # Fail fast: every analysis process requires a working CUDA device.
        if not torch.cuda.is_available():
            raise Exception("cuda不在活动状态, 请检测显卡驱动是否正常!!!!")
        # Initialise alg-related configuration ######################################
        self.base_dir = base_dir  # project root directory
        self.context = YmlUtils.getConfigs(base_dir)  # configuration consumed by alg
        self.context[YmlConstant.BASE_DIR] = base_dir  # expose the root dir through the context
        self.feedbackThread = None  # feedback thread handle, created lazily
        # Initialise the logging framework ##########################################
        LogUtils.init_log(self.context)
    # (older revision) constructor
    def __init__(self):
        # Load the configuration required by the DSP environment.
        self.content = YmlUtils.getConfigs()
        # Initialise logging.
        LogUtils.init_log(self.content)
        # Ensure the video save directory exists (create when missing).
        FileUtils.create_dir_not_exist(self.content["video"]["file_path"])
        # Currently running live-stream analysis tasks.
        self.onlineProcesses = {}
        # Currently running offline-video analysis tasks.
        self.offlineProcesses = {}
        # Currently running image analysis tasks.
        self.photoProcesses = {}
        # Initialise the video save directory #######################################
        FileUtils.create_dir_not_exist(YmlConstant.get_file_path(self.context))  # create the folder
        # Task-tracking dictionaries ################################################
        self.onlineProcesses = {}     # running live-stream analysis tasks
        self.offlineProcesses = {}    # running offline-video analysis tasks
        self.photoProcesses = {}      # running image analysis tasks
        self.recordingProcesses = {}  # running screen-recording tasks
        self.listeningProcesses = [self.onlineProcesses, self.offlineProcesses,
                                   self.photoProcesses, self.recordingProcesses]
        # Feedback queue ############################################################
        self.fbQueue = Queue()
        self.online_topic = self.content["kafka"]["topic"]["dsp-alg-online-tasks-topic"]
        self.offline_topic = self.content["kafka"]["topic"]["dsp-alg-offline-tasks-topic"]
        self.image_topic = self.content["kafka"]["topic"]["dsp-alg-image-tasks-topic"]
        self.topics = [self.online_topic, self.offline_topic, self.image_topic]
        # Topics to listen on #######################################################
        self.online_topic = YmlConstant.get_online_tasks_topic(self.context)
        self.offline_topic = YmlConstant.get_offline_tasks_topic(self.context)
        self.image_topic = YmlConstant.get_image_tasks_topic(self.context)
        self.recording_task_topic = YmlConstant.get_recording_tasks_topic(self.context)
        self.topics = [self.online_topic, self.offline_topic, self.image_topic, self.recording_task_topic]
        # NOTE(review): old 2-tuple entries and new 3-tuple entries are interleaved
        # below; the missing comma after the first IMAGE entry is a merge artifact.
        self.analysisType = {
            self.online_topic: (AnalysisType.ONLINE.value, lambda x: self.online(x)),
            self.offline_topic: (AnalysisType.OFFLINE.value, lambda x: self.offline(x)),
            self.image_topic: (AnalysisType.IMAGE.value, lambda x: self.image(x))
            self.online_topic: (AnalysisType.ONLINE.value, lambda x, y: self.online(x, y),
                                lambda x, y, z: self.identify_method(x, y, z)),
            self.offline_topic: (AnalysisType.OFFLINE.value, lambda x, y: self.offline(x, y),
                                 lambda x, y, z: self.identify_method(x, y, z)),
            self.image_topic: (AnalysisType.IMAGE.value, lambda x, y: self.image(x, y),
                               lambda x, y, z: self.identify_method(x, y, z)),
            self.recording_task_topic: (AnalysisType.RECORDING.value, lambda x, y: self.recording(x, y),
                                        lambda x, y, z: self.recording_method(x, y, z))
        }
        # Resolve the name of the first GPU #########################################
        gpu_name = get_first_gpu_name()
        gpu_codes = YmlConstant.GPU_CODES
        gpu_array = [g for g in gpu_codes if g in gpu_name]
        if len(gpu_array) > 0:
            self.context[YmlConstant.GPU_NAME] = gpu_array[0]
            # 2080 cards are treated as 2080 Ti for model selection.
            if gpu_array[0] == YmlConstant.GPU_2080:
                self.context[YmlConstant.GPU_NAME] = YmlConstant.GPU_2080_Ti
        else:
            raise Exception("GPU资源不在提供的模型所支持的范围内!请先提供对应的GPU模型!")
# Service entry point: consume Kafka messages and dispatch them.
# NOTE(review): garbled diff merge — both the old and the new consumer setup
# and polling loops appear below (the inner `try` has no matching `except`).
# Kept byte-for-byte; comments only.
def start_service(self):
    # Start the feedback thread.
    feedbackThread = self.start_feedback_thread()
    # Create the Kafka consumer.
    customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.content, topics=self.topics)
    print("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙")
    customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.context, topics=self.topics)
    logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙")
    # Message-processing loop.
    while True:
        # Reap finished task processes.
        self.check_process_task()
        # Verify the feedback thread is still alive.
        if not feedbackThread.is_alive():
            logger.error("======================问题反馈线程异常停止======================")
            break
        # Poll for new task messages.
        msg = customerKafkaConsumer.poll()
        if msg is not None and len(msg) > 0:
            for k, v in msg.items():
                for m in v:
                    message = m.value
                    analysisType = self.analysisType.get(m.topic)[0]
                    try:
                        # (newer revision) loop body starts here:
                        try:
                            # Reap finished task processes.
                            self.check_process_task()
                            self.start_feedback_thread()
                            msg = customerKafkaConsumer.poll()
                            if msg is not None and len(msg) > 0:
                                for k, v in msg.items():
                                    for m in v:
                                        message = m.value
                                        # Commit the offset before handling.
                                        customerKafkaConsumer.commit_offset(m)
                                        logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}",
                                                    m.topic, m.offset, m.partition, message, message.get("request_id"))
                                        self.analysisType.get(m.topic)[1](message)
                    except ServiceException as s:
                        logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id"))
                        if analysisType is not None:
                            feedback = {
                                "feedback": message_feedback(message.get("request_id"),
                                                             AnalysisStatus.FAILED.value,
                                                             analysisType,
                                                             s.code,
                                                             s.msg,
                                                             analyse_time=TimeUtils.now_date_to_str())}
                            self.fbQueue.put(feedback)
                    except Exception as e:
                        logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id"))
                        if analysisType is not None:
                            feedback = {
                                "feedback": message_feedback(message.get("request_id"),
                                                             AnalysisStatus.FAILED.value,
                                                             analysisType,
                                                             ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                             ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
                                                             analyse_time=TimeUtils.now_date_to_str())}
                            self.fbQueue.put(feedback)
        else:
            time.sleep(1)
def checkGPU(self, msgId):
    """Block until at least one GPU is free; return the list of free GPU ids.

    Polls GPUtils.get_gpu_ids every 5 seconds. msgId is used only for log
    correlation.

    Bug fix: the original logged len(gpu_ids) unconditionally, which raises
    TypeError when get_gpu_ids returns None.
    """
    while True:
        GPUtil.showUtilization()
        gpu_ids = GPUtils.get_gpu_ids(self.content)
        if gpu_ids:
            return gpu_ids
        logger.warning("暂无可用GPU资源,5秒后重试, 可用gpu数: {}, msgId: {}",
                       0 if gpu_ids is None else len(gpu_ids), msgId)
        time.sleep(5)
                        # NOTE(review): orphaned diff fragment — these lines are the tail of the
                        # newer start_service() loop (continuation of a logger.info call) and are
                        # not attached to any definition in this garbled merge. Kept verbatim.
                        m.topic, m.offset, m.partition, message,
                        self.getRequestId(message.get(YmlConstant.REQUEST_ID)))
                    topic_method = self.analysisType.get(m.topic)
                    # Slot [2] is the validate+dispatch wrapper, slot [0] the analysis type.
                    topic_method[2](m, message, topic_method[0])
                else:
                    time.sleep(1)
            except Exception as e:
                logger.exception("主线程异常:", e)
''' | |||
考虑到requestId为空的场景 | |||
''' | |||
@staticmethod | |||
def getRequestId(request_id): | |||
if request_id is None or len(request_id) == 0: | |||
return '' | |||
return request_id | |||
'''
Shared handler for live / offline / image recognition messages.
1. m            raw Kafka message record
2. message      decoded request body (dict)
3. analysisType analysis type: online, offline or image
'''
def identify_method(self, m, message, analysisType):
    """Validate the message, dispatch it to the per-topic handler, and push a
    FAILED feedback onto fbQueue when anything goes wrong."""
    try:
        # Validate the incoming message.
        check_result = self.check_msg(message)
        if not check_result:
            return
        # Slot [1] of the topic tuple is the start/stop handler.
        self.analysisType.get(m.topic)[1](message, analysisType)
    except ServiceException as s:
        logger.exception("消息监听异常:{}, requestId: {}", s.msg,
                         self.getRequestId(message.get(YmlConstant.REQUEST_ID)))
        # Only report back when the message actually carried a request id.
        if message.get(YmlConstant.REQUEST_ID):
            self.fbQueue.put({
                YmlConstant.FEEDBACK: message_feedback(message.get(YmlConstant.REQUEST_ID),
                                                       AnalysisStatus.FAILED.value,
                                                       analysisType,
                                                       s.code,
                                                       s.msg,
                                                       analyse_time=TimeUtils.now_date_to_str())})
    except Exception as e:
        # Unknown failure: report the generic internal-error code.
        logger.exception("消息监听异常:{}, requestId: {}", e,
                         self.getRequestId(message.get(YmlConstant.REQUEST_ID)))
        if message.get(YmlConstant.REQUEST_ID):
            self.fbQueue.put({
                YmlConstant.FEEDBACK: message_feedback(message.get(YmlConstant.REQUEST_ID),
                                                       AnalysisStatus.FAILED.value,
                                                       analysisType,
                                                       ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                       ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
                                                       analyse_time=TimeUtils.now_date_to_str())})
def recording_method(self, m, message, analysisType):
    """Validate and dispatch a screen-recording message; on failure push a
    RECORDING_FAILED feedback onto fbQueue."""
    try:
        # Validate the incoming message.
        check_result = self.check_msg(message)
        if not check_result:
            return
        # Slot [1] of the topic tuple is the start/stop handler.
        self.analysisType.get(m.topic)[1](message, analysisType)
    except ServiceException as s:
        logger.exception("消息监听异常:{}, requestId: {}", s.msg,
                         self.getRequestId(message.get(YmlConstant.REQUEST_ID)))
        # Only report back when the message actually carried a request id.
        if message.get(YmlConstant.REQUEST_ID):
            self.fbQueue.put({
                YmlConstant.RECORDING: recording_feedback(message.get(YmlConstant.REQUEST_ID),
                                                          RecordingStatus.RECORDING_FAILED.value[0],
                                                          error_code=s.code,
                                                          error_msg=s.msg)})
    except Exception as e:
        # Unknown failure: report the generic internal-error code.
        logger.exception("消息监听异常:{}, requestId: {}", e,
                         self.getRequestId(message.get(YmlConstant.REQUEST_ID)))
        if message.get(YmlConstant.REQUEST_ID):
            self.fbQueue.put({
                YmlConstant.RECORDING: recording_feedback(message.get(YmlConstant.REQUEST_ID),
                                                          RecordingStatus.RECORDING_FAILED.value[0],
                                                          ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                          ExceptionType.SERVICE_INNER_EXCEPTION.value[1])})
# Start a live-stream analysis process.
# NOTE(review): garbled diff merge — the old 2-arg and new 3-arg revisions of
# this method are interleaved below; kept byte-for-byte, comments only.
def startOnlineProcess(self, msg, gpu_ids):
    # Skip requestIds that are already running.
    if self.onlineProcesses.get(msg.get("request_id")):
        logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
def startOnlineProcess(self, msg, gpu_ids, analysisType):
    if self.onlineProcesses.get(msg.get(YmlConstant.REQUEST_ID)):
        logger.warning("重复任务,请稍后再试!requestId:{}", msg.get(YmlConstant.REQUEST_ID))
        return
    # (older revision) plain-string config keys:
    cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids}
    cfg = {YmlConstant.FBQUEUE: self.fbQueue, YmlConstant.CONTEXT: self.context, YmlConstant.MSG: msg,
           YmlConstant.GPU_IDS: gpu_ids, YmlConstant.ANALYSE_TYPE: analysisType}
    # Create and start the online recognition process.
    oirp = OnlineIntelligentRecognitionProcess(cfg)
    oirp.start()
    # Map requestId -> process so it can be stopped / reaped later.
    self.onlineProcesses[msg.get("request_id")] = oirp
    self.onlineProcesses[msg.get(YmlConstant.REQUEST_ID)] = oirp
# Stop a live-stream analysis process.
def stopOnlineProcess(self, msg):
    # NOTE(review): the duplicated lines below are diff-merge residue (old
    # "request_id" string key vs new YmlConstant.REQUEST_ID); kept verbatim.
    ps = self.onlineProcesses.get(msg.get("request_id"))
    ps = self.onlineProcesses.get(msg.get(YmlConstant.REQUEST_ID))
    if ps is None:
        logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id"))
        logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get(YmlConstant.REQUEST_ID))
        return
    # Ask the worker process to shut down gracefully.
    ps.sendEvent({'command': 'stop'})
# 检查实时、离线进程任务运行情况,去除不活动的任务 | |||
''' | |||
检查实时、离线进程任务运行情况,去除不活动的任务 | |||
''' | |||
def check_process_task(self): | |||
for requestId in list(self.onlineProcesses.keys()): | |||
if not self.onlineProcesses[requestId].is_alive(): | |||
del self.onlineProcesses[requestId] | |||
for requestId in list(self.offlineProcesses.keys()): | |||
if not self.offlineProcesses[requestId].is_alive(): | |||
del self.offlineProcesses[requestId] | |||
for requestId in list(self.photoProcesses.keys()): | |||
if not self.photoProcesses[requestId].is_alive(): | |||
del self.photoProcesses[requestId] | |||
for process in self.listeningProcesses: | |||
for requestId in list(process.keys()): | |||
if not process[requestId].is_alive(): | |||
del process[requestId] | |||
# Start an offline-video analysis process.
# NOTE(review): garbled diff merge — old 2-arg and new 3-arg revisions
# interleaved below; kept byte-for-byte, comments only.
def startOfflineProcess(self, msg, gpu_ids):
    # Skip requestIds that are already running.
    if self.offlineProcesses.get(msg.get("request_id")):
        logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
def startOfflineProcess(self, msg, gpu_ids, analysisType):
    if self.offlineProcesses.get(msg.get(YmlConstant.REQUEST_ID)):
        logger.warning("重复任务,请稍后再试!requestId:{}", msg.get(YmlConstant.REQUEST_ID))
        return
    # (older revision) plain-string config keys:
    cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids}
    # Create and start the offline recognition process.
    cfg = {YmlConstant.FBQUEUE: self.fbQueue, YmlConstant.CONTEXT: self.context, YmlConstant.MSG: msg,
           YmlConstant.GPU_IDS: gpu_ids, YmlConstant.ANALYSE_TYPE: analysisType}
    ofirp = OfflineIntelligentRecognitionProcess(cfg)
    ofirp.start()
    # Map requestId -> process so it can be stopped / reaped later.
    self.offlineProcesses[msg.get("request_id")] = ofirp
    self.offlineProcesses[msg.get(YmlConstant.REQUEST_ID)] = ofirp
# Stop an offline-video analysis process.
def stopOfflineProcess(self, msg):
    # NOTE(review): duplicated lines below are diff-merge residue; kept verbatim.
    ps = self.offlineProcesses.get(msg.get("request_id"))
    ps = self.offlineProcesses.get(msg.get(YmlConstant.REQUEST_ID))
    if ps is None:
        logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id"))
        logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get(YmlConstant.REQUEST_ID))
        return
    # Ask the worker process to shut down gracefully.
    ps.sendEvent({'command': 'stop'})
# Start an image-analysis process.
# NOTE(review): garbled diff merge — old 2-arg and new 3-arg revisions
# interleaved below; kept byte-for-byte, comments only.
def startImageProcess(self, msg, gpu_ids):
    # Skip requestIds that are already running.
    if self.photoProcesses.get(msg.get("request_id")):
        logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
def startImageProcess(self, msg, gpu_ids, analysisType):
    pp = self.photoProcesses.get(msg.get(YmlConstant.REQUEST_ID))
    if pp is not None:
        logger.warning("重复任务,请稍后再试!requestId:{}", msg.get(YmlConstant.REQUEST_ID))
        return
    # (older revision) plain-string config keys:
    cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids}
    cfg = {YmlConstant.FBQUEUE: self.fbQueue, YmlConstant.CONTEXT: self.context, YmlConstant.MSG: msg,
           YmlConstant.GPU_IDS: gpu_ids, YmlConstant.ANALYSE_TYPE: analysisType}
    # Create and start the image recognition process.
    imagep = PhotosIntelligentRecognitionProcess(cfg)
    imagep.start()
    # Map requestId -> process so it can be reaped later.
    self.photoProcesses[msg.get("request_id")] = imagep
# Validate a live-stream (online) Kafka task message.
def check_online_msg(self, msg):
    """Return True when msg carries every field a live-stream task needs.

    Any message needs command and request_id; a 'start' command additionally
    needs models, pull_url, push_url and results_base_dir; every declared
    model must provide code and categories.
    """
    if msg.get("command") is None or msg.get("request_id") is None:
        return False
    models = msg.get("models")
    if models is not None:
        if any(m.get("code") is None or m.get("categories") is None for m in models):
            return False
    if msg.get("command") == "start":
        required = (models, msg.get("pull_url"), msg.get("push_url"),
                    msg.get("results_base_dir"))
        if any(field is None for field in required):
            return False
    return True
# Validate an offline-video Kafka task message.
def check_offline_msg(self, msg):
    """Return True when msg carries every field an offline-video task needs.

    Any message needs command and request_id; a 'start' command additionally
    needs models, original_url, original_type, push_url and results_base_dir;
    every declared model must provide code and categories.
    """
    if msg.get("command") is None or msg.get("request_id") is None:
        return False
    models = msg.get("models")
    if models is not None:
        if any(m.get("code") is None or m.get("categories") is None for m in models):
            return False
    if msg.get("command") == 'start':
        required = (models, msg.get("original_url"), msg.get("push_url"),
                    msg.get("original_type"), msg.get("results_base_dir"))
        if any(field is None for field in required):
            return False
    return True
# Validate an image-analysis Kafka task message.
def check_image_msg(self, msg):
    """Return True when msg carries every field an image task needs.

    Any message needs command and request_id; a 'start' command additionally
    needs models, image_urls and results_base_dir; every declared model must
    provide code and categories.
    """
    if msg.get("command") is None or msg.get("request_id") is None:
        return False
    models = msg.get("models")
    if models is not None:
        if any(m.get("code") is None or m.get("categories") is None for m in models):
            return False
    if msg.get("command") == 'start':
        required = (models, msg.get("image_urls"), msg.get("results_base_dir"))
        if any(field is None for field in required):
            return False
    return True
    # NOTE(review): orphaned line from the newer startImageProcess() — diff-merge artifact.
    self.photoProcesses[msg.get(YmlConstant.REQUEST_ID)] = imagep
'''
Start the feedback thread / validate Kafka messages (merged header comment).
'''
def start_feedback_thread(self):
    """(older revision) Create, daemonise and start a FeedbackThread; return it.

    NOTE(review): shadowed later in this class by a newer start_feedback_thread
    that caches the thread on self instead of returning a fresh one.
    """
    feedbackThread = FeedbackThread(self.fbQueue, self.content)
    feedbackThread.setDaemon(True)
    feedbackThread.start()
    return feedbackThread
# NOTE(review): garbled diff merge — the older online() body is interrupted by
# the newer check_msg staticmethod below; kept byte-for-byte, comments only.
def online(self, message):
    check_result = self.check_online_msg(message)
    if not check_result:
        @staticmethod
        def check_msg(msg):
            """Validate msg against YmlConstant.SCHEMA; raise ILLEGAL_PARAMETER_FORMAT
            (carrying the validator errors) when a request_id is present but the
            message is invalid; return the validation result otherwise."""
            try:
                v = Validator(YmlConstant.SCHEMA, allow_unknown=True)
                result = v.validate(msg)
                if not result:
                    logger.error("参数校验异常: {}", v.errors)
                    if msg.get(YmlConstant.REQUEST_ID) is not None and len(msg.get(YmlConstant.REQUEST_ID)) > 0:
                        raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], v.errors)
                return result
            except ServiceException as s:
                raise s
            except Exception as e:
                logger.exception("参数校验异常: {}", e)
                raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
                                       ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
    if 'start' == message.get("command"):
        logger.info("开始实时分析")
        gpu_ids = GPUtils.get_gpu_ids(self.content)
        # Online analysis requires GPU 0 specifically.
        if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids):
            feedback = {
                "feedback": message_feedback(message.get("request_id"),
                                             AnalysisStatus.FAILED.value,
                                             AnalysisType.ONLINE.value,
                                             ExceptionType.NO_GPU_RESOURCES.value[0],
                                             ExceptionType.NO_GPU_RESOURCES.value[1],
                                             analyse_time=TimeUtils.now_date_to_str())}
            self.fbQueue.put(feedback)
            return
        self.startOnlineProcess(message, gpu_ids)
    elif 'stop' == message.get("command"):
        # NOTE(review): the elif body was lost to the newer revision in this merge.
'''
Start the feedback thread used to publish results.
'''
def start_feedback_thread(self):
    """Create and start a FeedbackThread when none exists or the previous one died.

    The thread handle is cached on self.feedbackThread so repeated calls are
    cheap no-ops while the thread is healthy.
    """
    if self.feedbackThread is None or not self.feedbackThread.is_alive():
        self.feedbackThread = FeedbackThread(self.fbQueue, self.context)
        # daemon=True so this thread never blocks interpreter shutdown.
        # Fix: Thread.setDaemon() is deprecated since Python 3.10.
        self.feedbackThread.daemon = True
        self.feedbackThread.start()
'''
Online (live-stream) analysis dispatch.
'''
def online(self, message, analysisType):
    """Route an online-task message: spawn a worker on 'start', stop it on 'stop'."""
    command = message.get(YmlConstant.COMMAND)
    if command == YmlConstant.START:
        # Wait for free GPU resources, then spawn the worker process.
        free_gpus = GPUtils.check_gpu_resource(self.context)
        self.startOnlineProcess(message, free_gpus, analysisType)
    elif command == YmlConstant.STOP:
        self.stopOnlineProcess(message)
    # any other command is ignored
def offline(self, message):
    # (older revision) offline handler — kept verbatim; the 'stop' branch body
    # was lost to the newer revision in this garbled merge.
    check_result = self.check_offline_msg(message)
    if not check_result:
        raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
                               ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
    if 'start' == message.get("command"):
        logger.info("开始离线分析")
        gpu_ids = GPUtils.get_gpu_ids(self.content)
        # Offline analysis requires GPU 0 specifically.
        if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids):
            feedback = {
                "feedback": message_feedback(message.get("request_id"),
                                             AnalysisStatus.FAILED.value,
                                             AnalysisType.OFFLINE.value,
                                             ExceptionType.NO_GPU_RESOURCES.value[0],
                                             ExceptionType.NO_GPU_RESOURCES.value[1],
                                             analyse_time=TimeUtils.now_date_to_str())}
            self.fbQueue.put(feedback)
            return
        self.startOfflineProcess(message, gpu_ids)
        # Give the new process a head start before handling the next message.
        time.sleep(3)
    elif 'stop' == message.get("command"):
def offline(self, message, analysisType):
    """Route an offline-task message: spawn a worker on 'start', stop it on 'stop'."""
    command = message.get(YmlConstant.COMMAND)
    if command == YmlConstant.START:
        # Wait for free GPU resources, then spawn the worker process.
        free_gpus = GPUtils.check_gpu_resource(self.context)
        self.startOfflineProcess(message, free_gpus, analysisType)
    elif command == YmlConstant.STOP:
        self.stopOfflineProcess(message)
    # any other command is ignored
def image(self, message):
    # (older revision) image handler — kept verbatim (diff-merge artifact).
    check_result = self.check_image_msg(message)
    if not check_result:
        raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
                               ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
    if 'start' == message.get("command"):
        logger.info("开始图片分析")
        gpu_ids = GPUtils.get_gpu_ids(self.content)
        # No GPUs at all is a hard failure for image analysis.
        if gpu_ids is None or len(gpu_ids) == 0:
            raise ServiceException(ExceptionType.NO_GPU_RESOURCES.value[0],
                                   ExceptionType.NO_GPU_RESOURCES.value[1])
        self.startImageProcess(message, gpu_ids)
    # elif 'stop' == message.get("command"):
    #     self.stopImageProcess(message)
def image(self, message, analysisType):
    """Start an image-analysis worker on 'start'; every other command is ignored."""
    if message.get(YmlConstant.COMMAND) == YmlConstant.START:
        free_gpus = GPUtils.check_gpu_resource(self.context)
        self.startImageProcess(message, free_gpus, analysisType)
def recording(self, message, analysisType):
    """Route a screen-recording message: start a worker on 'start', stop it on 'stop'."""
    command = message.get(YmlConstant.COMMAND)
    if command == YmlConstant.START:
        logger.info("开始录屏")
        self.startRecordingProcess(message, analysisType)
    elif command == YmlConstant.STOP:
        self.stopRecordingProcess(message)
    # any other command is ignored
# Spawn a screen-recording worker process.
def startRecordingProcess(self, msg, analysisType):
    """Start a ScreenRecordingProcess for msg unless its requestId is already running."""
    request_id = msg.get(YmlConstant.REQUEST_ID)
    if self.recordingProcesses.get(request_id):
        logger.warning("重复任务,请稍后再试!requestId:{}", request_id)
        return
    cfg = {YmlConstant.FBQUEUE: self.fbQueue, YmlConstant.CONTEXT: self.context, YmlConstant.MSG: msg,
           YmlConstant.ANALYSE_TYPE: analysisType}
    worker = ScreenRecordingProcess(cfg)
    worker.start()
    # Track the worker so it can be stopped / reaped later.
    self.recordingProcesses[request_id] = worker
# Stop a screen-recording worker process.
def stopRecordingProcess(self, msg):
    """Send a graceful stop event to the recording process for msg's requestId."""
    request_id = msg.get(YmlConstant.REQUEST_ID)
    worker = self.recordingProcesses.get(request_id)
    if worker is None:
        logger.warning("未查询到该任务,无法停止任务!requestId:{}", request_id)
        return
    worker.sendEvent({'command': 'stop'})
@@ -0,0 +1,119 @@ | |||
# -*- coding: utf-8 -*- | |||
import datetime | |||
import cv2 | |||
import oss2 | |||
import time | |||
from loguru import logger | |||
''' | |||
图片上传使用OSS | |||
1. 阿里云对象存储OSS官网地址:https://help.aliyun.com/product/31815.html?spm=a2c4g.32006.0.0.8c546cf0BpkAQ2 | |||
2. 阿里云对象存储OSS SDK示例地址:https://help.aliyun.com/document_detail/32006.html?spm=a2c4g.32006.0.0.66874b78q1pwLa | |||
3. python安装SDK地址: https://help.aliyun.com/document_detail/85288.html?spm=a2c4g.32026.0.0.3f24417coCphWj | |||
4. 安装SDK: pip install oss2 | |||
5. 安装python-devel | |||
安装python-devel | |||
由于SDK需要crcmod库计算CRC校验码,而crcmod依赖Python.h文件,如果系统缺少这个头文件,安装SDK不会失败,但crcmod的C扩展模式安装会失败,因此导致上传、下载等操作效率非常低下。 | |||
如果python-devel包不存在,则首先要安装这个包。 | |||
对于Windows系统和Mac OS X系统,由于安装Python的时候会将Python依赖的头文件一并安装,因此您无需安装python-devel。 | |||
对于CentOS、RHEL、Fedora系统,请执行以下命令安装python-devel。 | |||
sudo yum install python-devel | |||
对于Debian,Ubuntu系统,请执行以下命令安装python-devel。 | |||
sudo apt-get install python-dev | |||
6、图片域名地址:https://image.t-aaron.com/ | |||
''' | |||
class AliyunOssSdk:
    """Thin wrapper around Aliyun OSS uploads with lazy client creation and retries."""

    def __init__(self):
        self.__client = None  # oss2.Bucket, created lazily by get_oss_bucket()
        # SECURITY(review): hard-coded credentials checked into source — move to
        # configuration / environment variables before shipping.
        self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9'
        self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk'
        self.__endpoint = 'http://oss-cn-shanghai.aliyuncs.com'
        self.__bucket = 'ta-tech-image'

    def get_oss_bucket(self):
        """Create the oss2 Bucket client on first use; reuse it afterwards."""
        if not self.__client:
            auth = oss2.Auth(self.__access_key, self.__access_secret)
            self.__client = oss2.Bucket(auth, self.__endpoint, self.__bucket, connect_timeout=30)

    def upload_file(self, updatePath, fileByte):
        """Upload fileByte to OSS at key updatePath; retry up to 3 times.

        Returns the oss2 put_object result; re-raises the last error once the
        retry budget is exhausted.
        """
        logger.info("开始上传文件到oss!")
        MAX_RETRIES = 3
        retry_count = 0
        while True:
            try:
                self.get_oss_bucket()
                result = self.__client.put_object(updatePath, fileByte)
                # Bug fix: the success log (and a stray break) previously sat
                # AFTER the return statement and were unreachable.
                logger.info("上传文件到oss成功!")
                return result
            except Exception as e:
                # Drop the client so the next attempt rebuilds the connection.
                self.__client = None
                retry_count += 1
                time.sleep(1)
                logger.info("上传文件到oss失败, 重试次数:{}", retry_count)
                if retry_count > MAX_RETRIES:
                    logger.exception("上传文件到oss重试失败:{}", e)
                    raise e
# Common strftime patterns used when naming uploaded artifacts.
YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
YMDHMSF = "%Y%m%d%H%M%S%f"


def generate_timestamp():
    """Current Unix timestamp, truncated to an integer."""
    return int(time.time())


def now_date_to_str(fmt=None):
    """Format the current local time; defaults to the YY_MM_DD_HH_MM_SS pattern."""
    pattern = YY_MM_DD_HH_MM_SS if fmt is None else fmt
    return datetime.datetime.now().strftime(pattern)
if __name__ == "__main__":
    # Demo: upload one local image to OSS. Requires 'aaa.jpeg' next to the script.
    ossClient = AliyunOssSdk()
    # Read a local image and JPEG-encode it in memory.
    image_frame = cv2.imread('aaa.jpeg')
    or_result, or_image = cv2.imencode(".jpg", image_frame)
    # Image naming convention:
    # 1. base_dir       base folder name, supplied by the Tuoheng side
    # 2. time_now       current time
    # 3. current_frame  current frame index in the video
    # 4. last_frame     frame-skip step when frames are skipped; otherwise equals current_frame
    # 5. random_num     random time string
    # 6. mode_type      'online' for live-stream analysis, 'offline' for video-file analysis
    # 7. requestId      request id, supplied by the Tuoheng side
    # 8. image_type     'OR' for the original frame, 'AI' for the recognised frame
    random_num = now_date_to_str(YMDHMSF)
    time_now = now_date_to_str("%Y-%m-%d-%H-%M-%S")
    image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \
                   "-{requestId}_{image_type}.jpg"
    image_name = image_format.format(
        base_dir='PWL202304141639429276',
        time_now=time_now,
        current_frame='0',
        last_frame='0',
        random_num=random_num,
        mode_type='offline',
        requestId='111111111111111111',
        image_type='OR')
    result = ossClient.upload_file(image_name, or_image.tobytes())
    # print('http status: {0}'.format(result.status))
    # # Request ID — unique per request; log it in production.
    # print('request_id: {0}'.format(result.request_id))
    # # ETag identifies the object's content (put_object return value attribute).
    # print('ETag: {0}'.format(result.etag))
    # # HTTP response headers.
    # print('date: {0}'.format(result.headers['date']))
    # print(result.__reduce__())
    # After a successful upload, only image_name needs to be handed downstream.
    # To check the upload manually, prepend the image domain:
    image_url = 'https://image.t-aaron.com/' + image_name
    print(image_url)
    # Downstream consumers only need image_name.
# 拓恒公司只需要image_name | |||
@@ -0,0 +1,130 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
import json | |||
from aliyunsdkcore.client import AcsClient | |||
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest | |||
from voduploadsdk.AliyunVodUtils import * | |||
from voduploadsdk.AliyunVodUploader import AliyunVodUploader | |||
from voduploadsdk.UploadVideoRequest import UploadVideoRequest | |||
''' | |||
视频上传使用vod | |||
1. 阿里云VOD文档地址:https://help.aliyun.com/product/29932.html?spm=5176.8413026.J_3895079540.5.1b4a1029mXvncc | |||
2. 阿里云对象存储OSS SDK示例地址:https://help.aliyun.com/document_detail/64148.html?spm=a2c4g.64148.0.0.5ae54150jUecEU | |||
4. 安装SDK: | |||
python -m pip install aliyun-python-sdk-core -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install aliyun-python-sdk-live -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install aliyun-python-sdk-core-v3 -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install aliyun-python-sdk-vod -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install alibabacloud_vod20170321 -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install oss2 -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
python -m pip install voduploadsdk -i https://pypi.tuna.tsinghua.edu.cn/simple | |||
5. 视频域名地址:https://vod.play.t-aaron.com/ | |||
''' | |||
class AliyunVodSdk:
    """Wrapper around Aliyun VOD: upload local videos and resolve playback URLs.

    NOTE(review): `logger` (and `datetime` used elsewhere in this module) are
    assumed to come from the wildcard `voduploadsdk.AliyunVodUtils` import —
    confirm before relying on it.
    """

    def __init__(self):
        self.__client = None
        # SECURITY(review): hard-coded credentials checked into source — move to
        # configuration / environment variables before shipping.
        self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9'
        self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk'
        self.__regionId = "cn-shanghai"
        self.__cateId = '1000468340'

    def init_vod_client(self):
        """Build a fresh AcsClient (auto-retry x3, 5s timeout)."""
        return AcsClient(self.__access_key, self.__access_secret, self.__regionId, auto_retry=True, max_retry_time=3,
                         timeout=5)

    '''
    Resolve the playback URL for a videoId.
    '''
    def get_play_info(self, videoId):
        """Poll VOD until the play URL for videoId is available and return it.

        Retries every 5 seconds. Gives up immediately on non-403 errors and on
        explicit upload/transcode failures, and after 2 hours in total.
        """
        logger.info("开始获取视频地址,videoId:{}", videoId)
        start = time.time()
        while True:
            try:
                clt = self.init_vod_client()
                request = GetPlayInfoRequest.GetPlayInfoRequest()
                request.set_accept_format('JSON')
                request.set_VideoId(videoId)
                request.set_AuthTimeout(3600 * 5)
                response = json.loads(clt.do_action_with_exception(request))
                play_url = response["PlayInfoList"]["PlayInfo"][0]["PlayURL"]
                logger.info("获取视频地址成功,视频地址: {}", play_url)
                return play_url
            except Exception as e:
                # Bug fix: the retry log had a {} placeholder but no argument;
                # videoId is the only correlating id in scope here.
                logger.error("获取视频地址失败,5秒后重试, requestId: {}", videoId)
                time.sleep(5)
                current_time = time.time()
                if "HTTP Status: 403" not in str(e):
                    logger.exception("获取视频地址失败: {}", e)
                    raise e
                if "HTTP Status: 403" in str(e) and ("UploadFail" in str(e) or "TranscodeFail" in str(e)):
                    # Bug fix: was self.logger.exception — this class has no
                    # `logger` attribute, so that line itself raised AttributeError.
                    logger.exception("获取视频地址失败: {}", e)
                    raise e
                diff_time = current_time - start
                if diff_time > 60 * 60 * 2:
                    logger.exception("获取视频地址失败超时异常: {},超时时间:{}", e, diff_time)
                    raise e

    def upload_local_video(self, filePath, file_title, storageLocation=None):
        """Upload a local video file; return the new VideoId.

        Retries up to 3 times on AliyunVodException, then re-raises.
        """
        logger.info("开始执行vod视频上传, filePath: {}", filePath)
        uploader = AliyunVodUploader(self.__access_key, self.__access_secret)
        uploadVideoRequest = UploadVideoRequest(filePath, file_title)
        uploadVideoRequest.setCateId(self.__cateId)
        if storageLocation:
            uploadVideoRequest.setStorageLocation(storageLocation)
        MAX_RETRIES = 3
        retry_count = 0
        while True:
            try:
                result = uploader.uploadLocalVideo(uploadVideoRequest)
                logger.info("vod视频上传成功, videoId:{}", result.get("VideoId"))
                return result.get("VideoId")
            except AliyunVodException as e:
                retry_count += 1
                time.sleep(3)
                logger.error("vod视频上传失败,重试次数:{}", retry_count)
                if retry_count >= MAX_RETRIES:
                    # Bug fix: was self.logger.exception (AttributeError).
                    logger.exception("vod视频上传重试失败: {}", e)
                    raise e
# Common strftime patterns used when naming uploaded videos.
YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
YMDHMSF = "%Y%m%d%H%M%S%f"


def generate_timestamp():
    """Current Unix timestamp, truncated to an integer."""
    return int(time.time())


def now_date_to_str(fmt=None):
    """Format the current local time; defaults to the YY_MM_DD_HH_MM_SS pattern.

    NOTE(review): `datetime` is not imported explicitly in this module — it is
    presumably provided by the wildcard AliyunVodUtils import; confirm.
    """
    pattern = YY_MM_DD_HH_MM_SS if fmt is None else fmt
    return datetime.datetime.now().strftime(pattern)
if __name__ == "__main__":
    # Demo: upload one local video to VOD and resolve its playback URL.
    # Timestamp used in local video file names.
    random_time = now_date_to_str(YMDHMSF)
    # # For offline video replace _on_or_ with _off_or_
    # orFilePath = "%s%s%s%s%s" % ('本地路径', random_time, "_on_or_", 'requestId', ".mp4")
    # # Local AI-recognised video naming:
    # # for offline video replace _on_ai_ with _off_ai_
    # aiFilePath = "%s%s%s%s%s" % ('本地路径', random_time, "_on_ai_", 'requestId', ".mp4")
    # filePath = "%s%s%s%s%s" % ('D:\\shipin\\', random_time, "_on_ai_", '11111111', ".mp4")
    filePath = 'D:\\shipin\\777.mp4'
    codClinet = AliyunVodSdk()
    result = codClinet.upload_local_video(filePath, 'aiOnLineVideo1')
    print(result)
    url = codClinet.get_play_info(result)
    print(url)
@@ -0,0 +1,73 @@ | |||
import cv2 | |||
import numpy as np | |||
from PIL import Image, ImageDraw, ImageFont | |||
# Palette of 20 colour triples used to render detection labels.
# NOTE(review): channel order is presumably OpenCV BGR (blue-first) given the
# cv2 usage below — confirm against callers before relying on it.
rainbows = [
    [0, 0, 255],
    [255, 0, 0],
    [211, 0, 148],
    [0, 127, 0],
    [0, 69, 255],
    [0, 255, 0],
    [255, 0, 255],
    [0, 0, 127],
    [127, 0, 255],
    [255, 129, 0],
    [139, 139, 0],
    [255, 255, 0],
    [127, 255, 0],
    [0, 127, 255],
    [0, 255, 127],
    [255, 127, 255],
    [8, 101, 139],
    [171, 130, 255],
    [139, 112, 74],
    [205, 205, 180]]
# rainbows = [[0, 0, 255], | |||
# [211, 0, 148], | |||
# [0, 69, 255], | |||
# [133, 21, 199], | |||
# [0, 100, 0], | |||
# [34, 139, 34], | |||
# [8, 101, 139], | |||
# [11, 134, 184], | |||
# [92, 92, 205], | |||
# [147, 20, 255], | |||
# [255, 0, 255], | |||
# [96, 48, 176], | |||
# [205, 205, 105], | |||
# [139, 139, 102], | |||
# [255, 245, 0], | |||
# [170, 205, 102], | |||
# [155, 205, 155], | |||
# [0, 205, 0], | |||
# [79, 79, 47], | |||
# [105, 105, 105], | |||
# [112, 25, 25], | |||
# [205, 0, 0], | |||
# ] | |||
def get_label_array(color=None, label=None, font=None, fontSize=40):
    """Render *label* on a solid *color* background and return it as an
    ndarray resized so its height equals *fontSize* pixels.

    color: (B, G, R) triple; font: a PIL ImageFont instance.
    """
    # getbbox() yields (left, top, right, bottom); right/bottom are used
    # directly as the canvas size, same as the original implementation.
    _, _, box_w, box_h = font.getbbox(label)
    canvas = Image.fromarray(np.zeros((box_h, box_w, 3), dtype=np.uint8))
    painter = ImageDraw.Draw(canvas)
    painter.rectangle((0, 0, box_w, box_h), fill=tuple(color))
    # Slight upward shift so ascenders fill the box.
    painter.text((0, -3), label, fill=(255, 255, 255), font=font)
    pixels = np.asarray(canvas)
    factor = fontSize / box_h
    return cv2.resize(pixels, (0, 0), fx=factor, fy=factor)
if __name__ == '__main__':
    # Render the label "植被" once per palette colour and tile the strips
    # horizontally for a quick visual check of the colour set.
    font = ImageFont.truetype('platech.ttf', 40, encoding='utf-8')
    im_arrays = []
    for color in rainbows:
        im_array = get_label_array(color=color, label="植被", font=font, fontSize=40)
        im_arrays.append(im_array)
    frame_merge = np.hstack(tuple(im_arrays))
    cv2.imshow('frame1', frame_merge)
    cv2.waitKey(10000000)
@@ -0,0 +1,88 @@ | |||
import time | |||
from pathlib import Path | |||
import GPUtil | |||
import cv2 | |||
import numpy as np | |||
import torch | |||
from PIL import ImageFont, Image, ImageDraw | |||
# print(Path(__file__)) # 表示当前脚本文件的路径 | |||
# print(Path(__file__).parent) # 表示当前路径的父级目录 | |||
# import time | |||
# from contextlib import contextmanager | |||
# | |||
# @contextmanager | |||
# def timer(): | |||
# start_time = time.time() | |||
# yield | |||
# end_time = time.time() | |||
# print('Time elapsed:', end_time - start_time) | |||
# # 使用上下文管理器 | |||
# with timer(): | |||
# time.sleep(1) | |||
# print(torch.cuda.is_available()) | |||
# print(GPUtil.getGPUs()[0].name) | |||
# def get_first_gpu_name(): | |||
# gps = GPUtil.getGPUs() | |||
# if gps is None or len(gps) == 0: | |||
# raise Exception("未获取到gpu资源, 先检测服务器是否已经配置GPU资源!") | |||
# return gps[0].name | |||
# gpu_name = get_first_gpu_name() | |||
# aa = [g for g in ['3090', '2080', '4090', 'A10'] if g in gpu_name] | |||
# print(aa) | |||
# | |||
# import tensorrt as trt | |||
# # 定义反序列化引擎文件的函数 | |||
# def deserialize_engine_from_file(engine_file_path): | |||
# runtime = trt.Runtime(trt.Logger()) | |||
# engine = None | |||
# with open(engine_file_path, "rb") as f: | |||
# while True: | |||
# data = f.read(1024 * 1024) | |||
# if not data: | |||
# break | |||
# tmp_engine = runtime.deserialize_cuda_engine(data) | |||
# if engine is None: | |||
# engine = tmp_engine | |||
# else: | |||
# for i in range(tmp_engine.num_bindings): | |||
# engine.set_binding_shape(i, tmp_engine.get_binding_shape(i)) | |||
# return engine | |||
# engine_file_path = "/path/to/engine_file.engine" | |||
# s = time.time() | |||
# engine = deserialize_engine_from_file(engine_file_path) | |||
# print("1 加载trt文件时间", time.time() - s) | |||
# s1 = time.time() | |||
# with open(engine_file_path, "rb") as f1, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
# model = runtime.deserialize_cuda_engine(f1.read()) | |||
# print("2 加载trt文件时间", time.time() - s1) | |||
def get_label_array(color=None, label=None, outfontsize=None, fontpath="conf/platech.ttf"):
    """Render *label* text on a solid *color* background using PIL.

    Args:
        color: background colour triple (OpenCV B, G, R order).
        label: text to draw; may contain CJK characters.
        outfontsize: point size for the TrueType font.
        fontpath: path to a .ttf file containing the label's glyphs.

    Returns:
        numpy uint8 array of shape (H, W, 3) holding the rendered label.
    """
    font = ImageFont.truetype(fontpath, outfontsize, encoding='utf-8')
    # getbbox() returns (left, top, right, bottom) of the rendered text;
    # right/bottom are used as the canvas size.
    x, y, txt_width, txt_height = font.getbbox(label)
    # BUG fix: removed leftover debug print() that ran on every call.
    im = np.zeros((txt_height, txt_width, 3), dtype=np.uint8)
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    draw.rectangle([0, 0, txt_width, txt_height], fill=tuple(color))
    # Slight upward shift so ascenders fill the box.
    draw.text((0, -3), label, fill=(255, 255, 255), font=font)
    return np.asarray(im)
# Quick manual check: render one label, print the elapsed time, show it.
aaa = time.time()
im_array = get_label_array(color=(0, 255, 0), label="排口", outfontsize=40, fontpath="platech.ttf")
print(time.time() - aaa)
cv2.imshow("frame", im_array)
cv2.waitKey(0)
@@ -0,0 +1,27 @@ | |||
from sklearn import linear_model | |||
# x = [[20, 3], | |||
# [23, 7], | |||
# [31, 10], | |||
# [42, 13], | |||
# [50, 7], | |||
# [60, 5]] | |||
# y = [0, 1, 1, 1, 0, 0] | |||
# lr = linear_model.LogisticRegression() | |||
# lr.fit(x, y) | |||
# testX = [[28, 8]] | |||
# label = lr.predict(testX) | |||
# print("predicted label = ", label) | |||
# | |||
# prob = lr.predict_proba(testX) | |||
# print("probability = ", prob) | |||
import tensorflow as tf
# TF2 runs eagerly by default; disable that to use the legacy Session API.
tf.compat.v1.disable_eager_execution()
hello = tf.constant("hello, world!")
sess = tf.compat.v1.Session()
result = sess.run(hello)
sess.close()
print(result)
@@ -0,0 +1,223 @@ | |||
import copy | |||
import json | |||
import os | |||
import time | |||
from concurrent.futures import ThreadPoolExecutor | |||
from multiprocessing import Queue, Process | |||
from loguru import logger | |||
import subprocess as sp | |||
import cv2 | |||
import numpy as np | |||
from aip import AipImageClassify | |||
import sys | |||
from enums.BaiduSdkEnum import BAIDUERRORDATA, VehicleEnumVALUE | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.ModelTypeEnum import ModelType | |||
from exception.CustomerException import ServiceException | |||
from util.ModelUtils import Model | |||
def get_recording_video_info(url):
    """Probe *url* with ffprobe and return (width, height, nb_frames, fps).

    Args:
        url: path or URL of the media to probe.

    Returns:
        Tuple (width, height, nb_frames, fps).  NOTE: nb_frames comes back
        from ffprobe's JSON as a string (or None for live streams).

    Raises:
        Exception: when ffprobe fails or reports no video stream.
    """
    # Pass the command as an argument list (shell=False) so a crafted URL
    # cannot be interpreted by a shell.
    cmd = ['ffprobe', '-show_format', '-show_streams', '-of', 'json', url]
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = p.communicate(timeout=17)
    if p.returncode != 0:
        raise Exception("未获取视频信息!!!!!")
    probe = json.loads(out.decode('utf-8'))
    if probe is None or probe.get("streams") is None:
        raise Exception("未获取视频信息!!!!!:")
    video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None)
    if video_stream is None:
        raise Exception("未获取视频信息!!!!!")
    width = video_stream.get('width')
    height = video_stream.get('height')
    nb_frames = video_stream.get('nb_frames')
    # r_frame_rate looks like "30000/1001"; parse with int() instead of
    # eval() — never eval() data produced by an external tool.
    up, down = str(video_stream.get('r_frame_rate')).split('/')
    fps = int(int(up) / int(down))
    return (width, height, nb_frames, fps)
# NOTE(review): Baidu AIP credentials are hard-coded; move them to
# configuration/secret storage before sharing this script.
client = AipImageClassify(str(31096670), 'Dam3O4tgPRN3qh4OYE82dbg7', '1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa')
def vehicleDetect(client, iamge, options=None):
    """Call Baidu AIP vehicle detection with bounded retries.

    Args:
        client: AipImageClassify instance (or compatible object).
        iamge: encoded image bytes to analyse.
        options: optional request options; a copy is taken, so the
            caller's dict is never mutated (the original used a mutable
            default ``options={}`` and wrote into it).

    Returns:
        The response dict on success, or None for unknown error codes
        (and for exhausted type-2 retries).

    Raises:
        ServiceException: after exhausting retries on a retryable error.
    """
    reply_num = 0
    # BUG fix: reply_value started as None, so the first `reply_num >
    # reply_value` comparison in an except-branch raised TypeError.
    reply_value = 10
    options = {} if options is None else dict(options)
    options["show"] = "true"
    while True:
        try:
            res_image = client.vehicleDetect(iamge, options)
            error_code = res_image.get("error_code")
            if error_code:
                enum = BAIDUERRORDATA.get(error_code)
                # Unknown error code: give up and return nothing.
                if enum is None:
                    logger.error("百度云车辆检测异常!error_code:{}", error_code)
                    return None
                # Type 0: retry, finally reported as a generic internal error.
                if enum.value[3] == 0:
                    logger.error("百度云车辆检测异常!error_code:{}, error_msg:{}, reply_num:{}", enum.value[0], enum.value[2], reply_num)
                    raise Exception()
                # Type 1: retry, finally reported with its own code/message.
                if enum.value[3] == 1:
                    raise ServiceException(str(enum.value[0]), enum.value[2])
                # Type 2: return None once retries are exhausted.
                # NOTE(review): nothing in this branch raises, so reply_num
                # never advances and the error response falls through to the
                # return below — confirm whether type-2 codes should retry.
                if enum.value[3] == 2:
                    if reply_num >= reply_value:
                        return None
            return res_image
        except ServiceException as s:
            time.sleep(0.2)
            reply_num += 1
            if reply_num > reply_value:
                logger.exception("车辆检测识别失败: {}", s.msg)
                # BUG fix: originally re-raised with the undefined name `e`.
                raise ServiceException(s.code, s.msg)
        except Exception as e:
            logger.exception("车辆检测失败: {}, 当前重试次数:{}", e, reply_num)
            time.sleep(0.2)
            reply_num += 1
            if reply_num > reply_value:
                logger.exception("车辆检测识别失败: {}", e)
                raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                       ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
def mark(content, info, img, color):
    """Draw a labelled bounding box for one detection onto *img*.

    Args:
        content: class label printed above the box.
        info: detection dict with "location" {left, top, width, height,
            score} and optionally a top-level "probability".
        img: BGR image; modified in place and also returned.
        color: BGR triple used for both text and rectangle.
    """
    score = info.get("probability")
    if score is None:
        score = info.get("location").get("score")
    # BUG fix: format string had a stray ']' ("%s: %.2f]"); the sibling
    # implementation of mark() in this file uses "%s: %.2f".
    text = "%s: %.2f" % (content, score)
    text_xy = (info.get("location").get("left"), info.get("location").get("top") - 25)
    img_lu = (info.get("location").get("left"), info.get("location").get("top"))
    img_rd = (info.get("location").get("left") + info.get("location").get("width"),
              info.get("location").get("top") + info.get("location").get("height"))
    cv2.putText(img, text, text_xy, cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2, cv2.LINE_AA)
    # Thicker border on wide (>1600 px) frames so it stays visible.
    count = 1
    if img.shape[1] > 1600:
        count = 2
    cv2.rectangle(img, img_lu, img_rd, color, count)
    return img
def pull_stream(url, queue, nb_frames):
    """Decode *url* with ffmpeg and push raw BGR frames onto *queue*.

    Each queued item is {"status": "1", "img": ndarray}; a final
    {"status": "2"} marks end-of-stream.

    NOTE(review): reads the module-level globals ``width`` and ``height``
    instead of taking them as parameters — confirm they are set before
    this runs in a child process.
    NOTE(review): ffprobe returns nb_frames as a string, so
    ``aa - 10 > nb_frames`` would raise TypeError on Python 3 — verify
    the caller converts it to int.
    """
    command = ['ffmpeg -re -y -i ' + url +' -f rawvideo -pix_fmt bgr24 -an -']
    pull_p = sp.Popen(command, stdout=sp.PIPE, shell=True)
    aa = 0  # frames read so far
    try:
        while True:
            # Simple back-pressure: stall while the queue is full (maxsize 200).
            if queue.qsize() == 200:
                time.sleep(1)
                continue
            in_bytes = pull_p.stdout.read(width*height*3)
            if in_bytes is not None and len(in_bytes) > 0:
                img = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
                queue.put({"status": "1", "img": img})
                aa+=1
            else:
                # Tolerate a small frame-count mismatch before declaring EOF.
                if aa -10 > nb_frames:
                    queue.put({"status": "2"})
                    pull_p.terminate()
                    pull_p.wait()
                    break;
    except Exception as e:
        logger.exception("拉流异常: {}", e)
    finally:
        pull_p.terminate()
        pull_p.wait()
def getQueue(queue):
    """Non-blocking queue read: return the next item, or None when empty."""
    try:
        return queue.get(block=False)
    except Exception:
        # Empty queue (or any transient error) is treated as "nothing yet".
        return None
def buildFrame(queue, senlin_mod, client, width, height, nb_frames, fps):
    """Drain the frames currently queued and package them for process().

    Returns:
        (frames, status): a list of argument tuples for process(), and the
        last non-"1" status value seen (e.g. "2" for end-of-stream), or
        None when no terminal status was read.
    """
    batch = []
    end_status = None
    pending = queue.qsize()
    for _ in range(pending):
        item = getQueue(queue)
        if item is None:
            # Queue raced empty underneath us; back off briefly.
            time.sleep(0.01)
            continue
        if item.get("status") == '1':
            batch.append((item.get("img"), senlin_mod, client, width, height, nb_frames, fps))
        else:
            end_status = item.get("status")
    return batch, end_status
def process(frame):
    """Analyse one frame tuple produced by buildFrame().

    frame = (img, senlin_mod, client, width, height, nb_frames, fps).
    Runs the local model, then Baidu vehicle detection, overlays the
    vehicle boxes, and returns the original and annotated frames stacked
    side by side — or None when analysis fails.
    """
    try:
        p_result, timeOut = frame[1].process(copy.deepcopy(frame[0]), frame[3])
        # Baidu API needs an encoded JPEG, not the raw ndarray.
        or_result, or_image = cv2.imencode(".jpg", frame[0])
        result = vehicleDetect(frame[2], or_image)
        if result is not None:
            vehicleInfo = result.get("vehicle_info")
            if vehicleInfo is not None and len(vehicleInfo) > 0:
                for i, info in enumerate(vehicleInfo):
                    value = VehicleEnumVALUE.get(info.get("type"))
                    if value is None:
                        logger.error("车辆识别出现未支持的目标类型!type:{}", info.get("type"))
                        raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                               ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
                    p_result[1] = mark(value.value[1], info, p_result[1], (255, 0, 255))
        frame_merge = np.hstack((frame[0], p_result[1]))
        return frame_merge
    except Exception as e:
        logger.exception("模型分析异常: {}", e)
        return None
# Main pipeline: a child process pulls raw frames into a bounded queue,
# a thread pool batches + analyses them, and results are written to disk.
queue = Queue(200)
url ='/home/th/tuo_heng/dev/11.mp4'
width, height, nb_frames, fps = get_recording_video_info(url)
my_process = Process(target = pull_stream, args=(url, queue, nb_frames))
# Start the child process that feeds raw frames into the queue.
my_process.start()
current_path = os.path.abspath(os.path.dirname(__file__))
import GPUtil
senlin_mod = Model(str(GPUtil.getAvailable()[0]), [2,3,4], logger, "11111", ModelType.FOREST_FARM_MODEL)
# Output video is double-width: original | annotated, side by side.
or_video_file = cv2.VideoWriter("aaa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps,
                                (int(width) * 2, int(height)))
with ThreadPoolExecutor(max_workers=3) as t:
    task_frame = None
    while True:
        frames = []
        status = None
        if task_frame is not None:
            frames, status = task_frame.result()
        # Overlap building the next batch with processing the current one.
        task_frame = t.submit(buildFrame, queue, senlin_mod, client, width, height, nb_frames, fps)
        if len(frames) == 0 and status is None:
            time.sleep(0.02)
            continue
        if frames is not None and len(frames) > 0:
            for result in t.map(process, frames):
                if result is not None:
                    or_video_file.write(result)
        if status is None:
            continue
        # NOTE(review): buildFrame returns status as a plain string, so
        # status.get("status") looks like it would raise AttributeError —
        # confirm the intended status type.
        if status.get("status") == "2":
            t.shutdown(wait=False)
            or_video_file.release()
        t.shutdown(wait=False)
        or_video_file.release()
@@ -0,0 +1,189 @@ | |||
import asyncio | |||
import copy | |||
import json | |||
import os | |||
import time | |||
from concurrent.futures import ThreadPoolExecutor | |||
from multiprocessing import Queue, Process | |||
from loguru import logger | |||
import subprocess as sp | |||
import cv2 | |||
import numpy as np | |||
from aip import AipImageClassify | |||
import sys | |||
from enums.BaiduSdkEnum import BAIDUERRORDATA, VehicleEnumVALUE | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.ModelTypeEnum import ModelType | |||
from exception.CustomerException import ServiceException | |||
from util.ModelUtils import Model | |||
def get_recording_video_info(url):
    """Probe *url* with ffprobe and return (width, height, nb_frames, fps).

    Args:
        url: path or URL of the media to probe.

    Returns:
        Tuple (width, height, nb_frames, fps).  NOTE: nb_frames comes back
        from ffprobe's JSON as a string (or None for live streams).

    Raises:
        Exception: when ffprobe fails or reports no video stream.
    """
    # Pass the command as an argument list (shell=False) so a crafted URL
    # cannot be interpreted by a shell.
    cmd = ['ffprobe', '-show_format', '-show_streams', '-of', 'json', url]
    p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = p.communicate(timeout=17)
    if p.returncode != 0:
        raise Exception("未获取视频信息!!!!!")
    probe = json.loads(out.decode('utf-8'))
    if probe is None or probe.get("streams") is None:
        raise Exception("未获取视频信息!!!!!:")
    video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None)
    if video_stream is None:
        raise Exception("未获取视频信息!!!!!")
    width = video_stream.get('width')
    height = video_stream.get('height')
    nb_frames = video_stream.get('nb_frames')
    # r_frame_rate looks like "30000/1001"; parse with int() instead of
    # eval() — never eval() data produced by an external tool.
    up, down = str(video_stream.get('r_frame_rate')).split('/')
    fps = int(int(up) / int(down))
    return (width, height, nb_frames, fps)
# NOTE(review): Baidu AIP credentials are hard-coded; move them to
# configuration/secret storage before sharing this script.
client = AipImageClassify(str(31096670), 'Dam3O4tgPRN3qh4OYE82dbg7', '1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa')
def vehicleDetect(client, iamge, options=None):
    """Call Baidu AIP vehicle detection with bounded retries.

    Args:
        client: AipImageClassify instance (or compatible object).
        iamge: encoded image bytes to analyse.
        options: optional request options; a copy is taken, so the
            caller's dict is never mutated (the original used a mutable
            default ``options={}`` and wrote into it).

    Returns:
        The response dict on success, or None for unknown error codes
        (and for exhausted type-2 retries).

    Raises:
        ServiceException: after exhausting retries on a retryable error.
    """
    reply_num = 0
    # BUG fix: reply_value started as None, so the first `reply_num >
    # reply_value` comparison in an except-branch raised TypeError.
    reply_value = 10
    options = {} if options is None else dict(options)
    options["show"] = "true"
    while True:
        try:
            res_image = client.vehicleDetect(iamge, options)
            error_code = res_image.get("error_code")
            if error_code:
                enum = BAIDUERRORDATA.get(error_code)
                # Unknown error code: give up and return nothing.
                if enum is None:
                    logger.error("百度云车辆检测异常!error_code:{}", error_code)
                    return None
                # Type 0: retry, finally reported as a generic internal error.
                if enum.value[3] == 0:
                    logger.error("百度云车辆检测异常!error_code:{}, error_msg:{}, reply_num:{}", enum.value[0], enum.value[2], reply_num)
                    raise Exception()
                # Type 1: retry, finally reported with its own code/message.
                if enum.value[3] == 1:
                    raise ServiceException(str(enum.value[0]), enum.value[2])
                # Type 2: return None once retries are exhausted.
                # NOTE(review): nothing in this branch raises, so reply_num
                # never advances and the error response falls through to the
                # return below — confirm whether type-2 codes should retry.
                if enum.value[3] == 2:
                    if reply_num >= reply_value:
                        return None
            return res_image
        except ServiceException as s:
            time.sleep(0.2)
            reply_num += 1
            if reply_num > reply_value:
                logger.exception("车辆检测识别失败: {}", s.msg)
                # BUG fix: originally re-raised with the undefined name `e`.
                raise ServiceException(s.code, s.msg)
        except Exception as e:
            logger.exception("车辆检测失败: {}, 当前重试次数:{}", e, reply_num)
            time.sleep(0.2)
            reply_num += 1
            if reply_num > reply_value:
                logger.exception("车辆检测识别失败: {}", e)
                raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                       ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
def mark(content, info, img, color):
    """Draw a labelled bounding box for one detection onto *img*.

    info carries a "location" dict (left/top/width/height/score) and may
    carry a top-level "probability"; img is modified in place and returned.
    """
    loc = info.get("location")
    score = info.get("probability")
    if score is None:
        score = loc.get("score")
    caption = "%s: %.2f" % (content, score)
    left = loc.get("left")
    top = loc.get("top")
    right = left + loc.get("width")
    bottom = top + loc.get("height")
    # Label sits 25 px above the box's top-left corner.
    cv2.putText(img, caption, (left, top - 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2, cv2.LINE_AA)
    # Thicker border on wide (>1600 px) frames so it stays visible.
    thickness = 2 if img.shape[1] > 1600 else 1
    cv2.rectangle(img, (left, top), (right, bottom), color, thickness)
    return img
async def mode_handler(img, width):
    # Runs the (blocking) forest-farm model call on the event-loop thread;
    # senlin_mod is a module-level global defined below.
    return senlin_mod.process(copy.deepcopy(img), width)
async def modprocess(img, width):
    # Thin awaitable wrapper returning (p_result, timeOut).
    p_result, timeOut = await mode_handler(img, width)
    return p_result, timeOut
async def car_handler(img, width):
    # Runs the (blocking) vehicle model call; car_mod is a module-level global.
    return car_mod.process(copy.deepcopy(img), width)
async def carprocess(img, width):
    # Thin awaitable wrapper returning (p_result, timeOut).
    p_result, timeOut = await car_handler(img, width)
    return p_result, timeOut
async def baidu_handler(img, client):
    # JPEG-encode the frame and send it to Baidu vehicle detection.
    or_result, or_image = cv2.imencode(".jpg", img)
    return vehicleDetect(client, or_image)
async def baiduprocess(img, client):
    # Thin awaitable wrapper around baidu_handler.
    result = await baidu_handler(img, client)
    return result
# Sequential variant: decode with ffmpeg, run both models per frame, and
# write original | annotated side by side to aaa2.mp4.
url ='/home/th/tuo_heng/dev/11.mp4'
width, height, nb_frames, fps = get_recording_video_info(url)
current_path = os.path.abspath(os.path.dirname(__file__))
import GPUtil
senlin_mod = Model(str(GPUtil.getAvailable()[0]), [2,3,4], logger, "11112", ModelType.FOREST_FARM_MODEL)
car_mod = Model(str(GPUtil.getAvailable()[0]), [0], logger, "11112", ModelType.VEHICLE_MODEL)
or_video_file = cv2.VideoWriter("aaa2.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps,
                                (int(width) * 2, int(height)))
command = ['ffmpeg -re -y -i ' + url +' -f rawvideo -pix_fmt bgr24 -an -']
pull_p = sp.Popen(command, stdout=sp.PIPE, shell=True)
num = 0  # frames processed so far
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
    while True:
        print(num, nb_frames)
        in_bytes = pull_p.stdout.read(width*height*3)
        if in_bytes is not None and len(in_bytes) > 0:
            img = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
            # r = loop.run_until_complete(asyncio.gather(modprocess(img, width), carprocess(img, width)))
            # Run the two models back to back on the same frame.
            p_result, timeOut = senlin_mod.process(copy.deepcopy(img), width)
            p_result1, timeOut1 = car_mod.process(copy.deepcopy(p_result[1]), width)
            # r = loop.run_until_complete(asyncio.gather(modprocess(img, width), baiduprocess(img, client)))
            # p_result, timeOut = r[0]
            # result = r[1]
            # p_result, timeOut = senlin_mod.process(copy.deepcopy(img), width)
            # if result is not None:
            #     vehicleInfo = result.get("vehicle_info")
            #     if vehicleInfo is not None and len(vehicleInfo) > 0:
            #         for i, info in enumerate(vehicleInfo):
            #             value = VehicleEnumVALUE.get(info.get("type"))
            #             if value is None:
            #                 logger.error("车辆识别出现未支持的目标类型!type:{}", info.get("type"))
            #                 raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
            #                                        ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
            #             p_result[1] = mark(value.value[1], info, p_result[1], (255, 0, 255))
            frame_merge = np.hstack((img, p_result1[1]))
            or_video_file.write(frame_merge)
            num+=1
        else:
            # Tolerate a small frame-count mismatch before declaring EOF.
            if num -10 > nb_frames:
                break;
finally:
    or_video_file.release()
    pull_p.terminate()
    pull_p.wait()
@@ -0,0 +1,7 @@ | |||
2023-04-18 13:41:42.066 [ERROR][MainProcess-25160-MainThread-30168][16] Test-<module> - 异常信息:division by zero | |||
Traceback (most recent call last): | |||
> File "D:\tuoheng\codenew\tuoheng_alg\test\路径\Test.py", line 14, in <module> | |||
2/0 | |||
ZeroDivisionError: division by zero |
@@ -0,0 +1,45 @@ | |||
from io import BytesIO | |||
import cv2 | |||
import matplotlib.pyplot as plt | |||
import matplotlib.patches as pat | |||
import numpy as np | |||
import requests | |||
from PIL import ImageDraw, Image | |||
from util.ImageUtils import url2Array | |||
url = "https://www.2008php.com/2015_Website_appreciate/2015-12-06/20151206234254.jpg"
color= (255, 255, 0)
# OpenCV colours are (Blue, Green, Red):
# red     (0, 0, 255)
# magenta (255, 0, 255)
# cyan    (255, 255, 0)
# black   (0, 0, 0)
# blue    (255, 0, 0)
# green   (0, 255, 0)
# yellow  (0, 255, 255)  # not considered
img = url2Array(url)
cv2.putText(img,"Hello World", (100,100), cv2.FONT_HERSHEY_SIMPLEX, 1.0,color, 1, cv2.LINE_AA)
# rectangle takes top-left (x1, y1), bottom-right (x2, y2), colour, thickness
cv2.rectangle(img, (100, 110), (400, 310), color, 2)
cv2.imshow('img', img)
cv2.waitKey()
# fig, ax = plt.subplots(1)
# ax.imshow(img)
# # Rectangle takes top-left (x, y), width, height.
# rec = pat.Rectangle((386, 144), 1049, 760, linewidth=2, edgecolor='r', facecolor='None')
# ax.add_patch(rec)
# plt.imshow(img)
# plt.show()
# response = requests.get(url)
# image = Image.open(BytesIO(response.content))
# a = ImageDraw.ImageDraw(image)
# # rectangle takes top-left (x1, y1), bottom-right (x2, y2).
# a.rectangle(((386, 144), (1435, 904)), fill=None, outline='red', width=2)
# image.show()
@@ -1,9 +1,6 @@ | |||
import copy | |||
import subprocess as sp | |||
from enum import Enum, unique | |||
from PIL import Image | |||
import time | |||
import cv2 | |||
import sys | |||
sys.path.extend(['..','../AIlib' ]) | |||
@@ -15,8 +12,6 @@ from models.experimental import attempt_load | |||
from utils.torch_utils import select_device | |||
from utilsK.queRiver import get_labelnames,get_label_arrays | |||
import numpy as np | |||
import torch | |||
from utilsK.masterUtils import get_needed_objectsIndex | |||
@@ -1,34 +1,7 @@ | |||
import os | |||
import pynvml | |||
pynvml.nvmlInit() | |||
# 安装 pip install nvidia-ml-py3 | |||
def usegpu(need_gpu_count=1):
    """Select GPUs under 80% memory usage and expose them via
    CUDA_VISIBLE_DEVICES.

    Requires pynvml (``pip install nvidia-ml-py3``) and a prior
    ``pynvml.nvmlInit()`` call.

    Args:
        need_gpu_count: how many GPUs the caller would like.

    Returns:
        List of selected GPU indices, possibly shorter than requested;
        empty list when none qualify.  A falsy result means "no GPU".
        (BUG fix: the original returned a list, an int count, or 0
        depending on the branch — mixed return types; truthiness for
        existing ``if gpus:`` callers is preserved.)
    """
    free_ids = []
    for index in range(pynvml.nvmlDeviceGetCount()):
        handle = pynvml.nvmlDeviceGetHandleByIndex(index)
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        # A card counts as "free" while under 80% memory utilisation.
        used = meminfo.used / meminfo.total
        if used < 0.8:
            free_ids.append(index)
    # Take at most the requested number (slice is safe when fewer are free).
    chosen = free_ids[:need_gpu_count]
    if chosen:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, chosen))
    return chosen
import tensorflow as tf | |||
import torch | |||
if __name__ == '__main__':
    # Try to claim two free GPUs, then report the first device's name.
    gpus=usegpu(need_gpu_count=2)
    print(gpus)
    if gpus:
        print("use gpu ok")
    else:
        print("no gpu is valid")
    gpu_name = torch.cuda.get_device_name(0)
    print(gpu_name)
@@ -1,29 +1,9 @@ | |||
# a=None | |||
# for i in range(12): | |||
# if a is None: | |||
# a = str(i) | |||
# else: | |||
# a = '%s,%s' % (a, str(i)) | |||
# print(a) | |||
# a=[1,2,3,4,5,8, 9] | |||
# b=[1,3,2] | |||
# if set(a) >= set(b): | |||
# print("a") | |||
# else: | |||
# print("b") | |||
from enums.ModelTypeEnum import ModelType
# Distinct enum members are unequal — presumably prints "bbbbbbbbbb"
# (assuming ModelType is a standard Enum; confirm).
if ModelType.WATER_SURFACE_MODEL == ModelType.FOREST_FARM_MODEL:
    print("aaaaa")
else:
    print("bbbbbbbbbb")
# A member is an instance of its Enum class — prints "aaaaa".
if isinstance(ModelType.WATER_SURFACE_MODEL, ModelType):
    print("aaaaa")
else:
    print("bbbbbbbbbb")
# type(member) is the Enum class, never another member — prints "bbbbbbbbbb".
if type(ModelType.WATER_SURFACE_MODEL)==ModelType.FOREST_FARM_MODEL:
    print("aaaaa")
else:
    print("bbbbbbbbbb")
# Mutating a nested dict through the iteration variable changes the
# original mapping, since v is a reference, not a copy.
aaa={"1":"1","2":"2","3":"3","4": {"1": "4"}}
for i,v in aaa.items():
    if i =="4":
        v["1"] = "5"
print(aaa)
@@ -0,0 +1,139 @@ | |||
import re | |||
from cerberus import Validator | |||
# pattern = re.compile('^[a-zA-Z0-9]{1,36}$') # 用于匹配至少一个数字 | |||
# m = pattern.match('111aaa3213213123123213123a222') | |||
# print(m) | |||
# | |||
# schema = { | |||
# 'name': {'type': 'string', 'required': True}, | |||
# 'age': {'type': 'integer', 'required': True, 'min': 18}, | |||
# 'email': {'type': 'string', 'required': True, 'regex': r'\w+@\w+\.\w+'} | |||
# } | |||
# v = Validator(schema) | |||
# print(v.validate({ 'name': '11', 'age': 20, 'email': '764784960@qq.com'})) | |||
# aa = str({ 'name': '11', 'age': 20, 'email': '764784960@qq.com'}) | |||
# print(isinstance(aa, dict)) | |||
# schema = {'name': {'type': 'string'}} | |||
# v = Validator(schema) | |||
# document = {'name1': 'john doe'} | |||
# print(v.validate(document)) | |||
# print(v.validate(document, schema)) | |||
# Cerberus schema for an AI-analysis request message: request_id and
# command are mandatory; the nested "models" list validates per-model
# (code, categories) pairs, where code and categories require each other.
schema = {
    'request_id': {
        'type': 'string',
        'required': True,
        'empty': False,
        'regex': r'^[a-zA-Z0-9]{1,36}$'
    },
    'command': {
        'type': 'string',
        'required': True,
        'allowed': ['start', 'stop']
    },
    'pull_url': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'push_url': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'original_url': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'original_type': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'maxlength': 255
    },
    'image_urls': {
        'type': 'list',
        'required': False,
        'schema': {
            'type': 'string',
            'empty': False,
            'maxlength': 5000
        }
    },
    'results_base_dir': {
        'type': 'string',
        'required': False,
        'nullable': True,
        'regex': r'^[a-zA-Z0-9]{0,36}$'
    },
    'models': {
        'type': 'list',
        'required': False,
        'schema': {
            'type': 'dict',
            'required': False,
            'schema': {
                'code': {
                    'type': 'string',
                    'required': True,
                    'empty': False,
                    'dependencies': 'categories',
                    'regex': r'^[a-zA-Z0-9]{1,255}$'
                },
                'categories': {
                    'type': 'list',
                    'required': True,
                    'dependencies': 'code',
                    'schema': {
                        'type': 'dict',
                        'required': True,
                        'schema': {
                            'id': {
                                'type': 'string',
                                'required': True,
                                'empty': False,
                                'regex': r'^[a-zA-Z0-9]{0,255}$'},
                            'config': {
                                'type': 'dict',
                                'required': False,
                                'dependencies': 'id',
                            }
                        }
                    }
                }
            }
        }
    }
}
# allow_unknown lets extra top-level keys pass validation unchecked.
v = Validator(schema, allow_unknown=True)
# Sample document exercising the schema (models list left empty).
aa={
    'request_id': "111",
    'command': 'start',
    'pull_url': None,
    'push_url': None,
    'original_url': '',
    'original_type': '',
    'image_urls': ['1','1'],
    'results_base_dir': '111',
    'models': [
        # {
        #     'code': '1',
        #     'categories': [
        #         # {
        #         #     'id': '1',
        #         #     'config': {}
        #         # }
        #     ]
        # }
    ]
}
print(v.validate(aa))
print(v.errors)
@@ -0,0 +1,34 @@ | |||
import asyncio | |||
import time | |||
async def sleep(delay):
    """Asynchronously wait *delay* seconds, then print a marker.

    BUG fix: the original called time.sleep(), which blocks the whole
    event loop and serialises the asyncio.gather() below; asyncio.sleep()
    yields control so concurrent tasks actually overlap.
    """
    await asyncio.sleep(delay)
    print("1111")
async def say_after(delay, what):
    # Wait via the sibling sleep() coroutine, then print the message.
    await sleep(delay)
    print(what)
async def main():
    """Run two say_after coroutines concurrently via gather()."""
    task1 = asyncio.create_task(say_after(1, 'hello'))
    task2 = asyncio.create_task(say_after(2, 'world'))
    # await task1
    # await task2
    await asyncio.gather(task1, task2)
    # await say_after(1, 'hello')
    # await say_after(2, 'world')
# Manual event-loop variant of the demo; prints total elapsed time.
start = time.time()
loop = asyncio.new_event_loop()
print(loop)
asyncio.set_event_loop(loop)
task1 = loop.create_task(say_after(1, 'hello'))
task2 = loop.create_task(say_after(2, 'world'))
loop.run_until_complete(asyncio.wait([task1, task2]))
loop.close()
# asyncio.run(main())
print(time.time() - start)
@@ -0,0 +1,21 @@ | |||
from loguru import logger | |||
import pickle | |||
# Simple value class used by the (de)serialization demo below.
class Person:
    def __init__(self, name, age):
        self.name = name
        self.age = age
# Create a Person instance
person = Person("Alice", 25)
# NOTE(review): loguru's logger exposes no public serialize() method —
# serialization in loguru is enabled via logger.add(..., serialize=True).
# This line is expected to fail; verify the intended API.
serialized_person = logger.serialize(person)
print(serialized_person)
# Deserialize the byte string back into a Python object with pickle
deserialized_person = pickle.loads(serialized_person)
# Print the attributes of the restored object
print(deserialized_person.name)  # Alice
print(deserialized_person.age)  # 25
@@ -0,0 +1,44 @@ | |||
# -*- coding: utf-8 -*- | |||
import threading | |||
import time | |||
from concurrent.futures import ThreadPoolExecutor | |||
class Test(object):
    """Toy example: a worker thread repeatedly printing an instance field."""

    def __init__(self):
        # threading.Thread.__init__(self)
        self._sName = "machao"

    def process(self):
        """Start buildList on a worker thread and wait for it.

        NOTE: buildList loops forever, so the join() never returns.
        """
        # target is a bound method, so no explicit args are needed.
        worker = threading.Thread(target=self.buildList, args=())
        worker.start()
        worker.join()

    def buildList(self):
        """Print and overwrite self._sName every 3 seconds, forever."""
        while True:
            print("start")
            print(self._sName)
            self._sName = "1111111"
            time.sleep(3)
def bb():
    # Trivial worker that just prints a marker.
    print("!1111111111")
def aa(t):
    # Floods executor *t* with bb tasks forever (submission stress test).
    while True:
        t.submit(bb)
# test = Test()
# test.process()
# print(3//2)
# with ThreadPoolExecutor(max_workers=10) as t:
#     t.submit(aa, t)
#     time.sleep(1000)
# codeArray=['']
# codeStr = ','.join(codeArray)
# print(codeStr)
# All three appends target the same list object stored under "aaaa".
# (Note: this dict shadows the function aa() defined above.)
aa={'aaaa': []}
aa["aaaa"].append("1111111")
aa["aaaa"].append("1111111")
aa["aaaa"].append("1111111")
print(aa)
@@ -0,0 +1,16 @@ | |||
import os
import sys
from util import YmlUtils, LogUtils
from loguru import logger
print(os.getcwd())
print(os.path.relpath(__file__))
# Resolve the project root relative to the launched script, load the YAML
# config, and initialise logging from it.
base_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
content = YmlUtils.getConfigs(base_dir + "/../../")
LogUtils.init_log(content)
try:
    # Deliberate error to exercise exception logging with a traceback.
    2/0
except Exception as e:
    logger.exception("异常信息:{}", e)
@@ -0,0 +1,34 @@ | |||
import time | |||
from concurrent.futures import ProcessPoolExecutor | |||
# Demo of globals across a ProcessPoolExecutor: each worker process gets
# its own copy of `a`, so increments in one worker are invisible to others.
a = 0
def aa():
    global a
    # print("aaaaaaaa", a)
    time.sleep(3)
    a += 1
def bb():
    global a
    # Prints the value of `a` in *this* worker process only.
    print(a)
if __name__ == "__main__":
    # Interleave increments and reads across 3 worker processes; the
    # printed values demonstrate that `a` is not shared between them.
    with ProcessPoolExecutor(max_workers=3) as t:
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
        t.submit(bb)
        t.submit(aa)
@@ -0,0 +1,23 @@ | |||
# list1 = [1, 2, 3, 4] | |||
# list2 = [1,2,4] | |||
# if set(list2) == set(list1): | |||
# print("1111111") | |||
# else: | |||
# print("222222") | |||
import numpy as np | |||
# list1 = [1, 2, 3, 4] | |||
# tl = np.asarray([1, 2], np.float32) | |||
# box = np.asarray([tl], np.int32) | |||
# print(tl)c | |||
# print(box[0][1]) | |||
import cv2 | |||
# Probe VideoWriter property setters and dump cv2 help/docs to stdout.
ai_video_file = cv2.VideoWriter(r"C:\Users\chenyukun\Desktop\fsdownload\aa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 25,
                                (1920,1080))
# ai_video_file.set(cv2.VIDEOWRITER_PROP_BITRATE, 4000)
ai_video_file.set(cv2.CAP_PROP_BITRATE, 4000)
ai_video_file.set(cv2.VIDEOWRITER_PROP_QUALITY, 80)
print(help(cv2.VideoWriter.set))
print(dir(cv2))
print(help(cv2))
@@ -3,7 +3,7 @@ | |||
<component name="NewModuleRootManager" inherit-compiler-output="true"> | |||
<exclude-output /> | |||
<content url="file://$MODULE_DIR$" /> | |||
<orderEntry type="jdk" jdkName="Remote Python 3.8.8 (sftp://root@212.129.223.66:20653/opt/conda/bin/python3.8)" jdkType="Python SDK" /> | |||
<orderEntry type="jdk" jdkName="Python 3.8 (test)" jdkType="Python SDK" /> | |||
<orderEntry type="sourceFolder" forTests="false" /> | |||
</component> | |||
</module> |
@@ -1,5 +1,10 @@ | |||
# -*- coding: utf-8 -*- | |||
import oss2 | |||
import time | |||
from aliyunsdkvod.request.v20170321.GetPlayInfoRequest import GetPlayInfoRequest | |||
from common import YmlConstant | |||
from exception.CustomerException import ServiceException | |||
from enums.ExceptionEnum import ExceptionType | |||
import json | |||
@@ -10,119 +15,117 @@ from voduploadsdk.AliyunVodUploader import AliyunVodUploader | |||
from voduploadsdk.UploadVideoRequest import UploadVideoRequest | |||
class AliyunOssSdk(): | |||
class AliyunOssSdk: | |||
def __init__(self, content, logger, requestId): | |||
self.content = content | |||
def __init__(self, context, log, requestId): | |||
self.__context = context | |||
self.bucket = None | |||
self.logger = logger | |||
self.requestId = requestId | |||
self.__logger = log | |||
self.__requestId = requestId | |||
def get_oss_bucket(self): | |||
if self.bucket is None: | |||
self.logger.info("初始化oss桶, requestId:{}", self.requestId) | |||
auth = oss2.Auth(self.content["aliyun"]["access_key"], self.content["aliyun"]["access_secret"]) | |||
self.bucket = oss2.Bucket(auth, self.content["aliyun"]["oss"]["endpoint"], | |||
self.content["aliyun"]["oss"]["bucket"], | |||
connect_timeout=self.content["aliyun"]["oss"]["connect_timeout"]) | |||
self.__logger.info("初始化oss桶, requestId:{}", self.__requestId) | |||
auth = oss2.Auth(YmlConstant.get_aliyun_access_key(self.__context), | |||
YmlConstant.get_aliyun_access_secret(self.__context)) | |||
self.bucket = oss2.Bucket(auth, YmlConstant.get_aliyun_oss_endpoint(self.__context), | |||
YmlConstant.get_aliyun_oss_bucket(self.__context), | |||
connect_timeout=YmlConstant.get_aliyun_oss_connect_timeout(self.__context)) | |||
async def put_object(self, updatePath, fileByte): | |||
self.bucket.put_object(updatePath, fileByte) | |||
async def upload_file(self, updatePath, fileByte): | |||
self.logger.info("开始上传文件到oss, requestId:{}", self.requestId) | |||
def sync_upload_file(self, updatePath, fileByte): | |||
self.__logger.info("开始上传文件到oss, requestId:{}", self.__requestId) | |||
self.get_oss_bucket() | |||
MAX_RETRIES = 3 | |||
retry_count = 0 | |||
while True: | |||
try: | |||
await self.put_object(updatePath, fileByte) | |||
self.logger.info("上传文件到oss成功! requestId:{}", self.requestId) | |||
self.bucket.put_object(updatePath, fileByte) | |||
self.__logger.info("上传文件到oss成功! requestId:{}", self.__requestId) | |||
break | |||
except Exception as e: | |||
retry_count += 1 | |||
time.sleep(1) | |||
self.logger.info("上传文件到oss失败, 重试次数:{}, requestId:{}", retry_count, self.requestId) | |||
self.__logger.info("上传文件到oss失败, 重试次数:{}, requestId:{}", retry_count, self.__requestId) | |||
if retry_count > MAX_RETRIES: | |||
self.logger.exception("上传文件到oss重试失败:{}, requestId:{}", e, self.requestId) | |||
self.__logger.exception("上传文件到oss重试失败:{}, requestId:{}", e, self.__requestId) | |||
raise e | |||
class ThAliyunVodSdk(): | |||
class ThAliyunVodSdk: | |||
def __init__(self, content, logger, requestId): | |||
self.content = content | |||
self.logger = logger | |||
self.requestId = requestId | |||
def __init__(self, context, log, requestId): | |||
self.__context = context | |||
self.__logger = log | |||
self.__requestId = requestId | |||
def init_vod_client(self, accessKeyId, accessKeySecret): | |||
regionId = self.content["aliyun"]["vod"]["ecsRegionId"] | |||
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=5) | |||
regionId = YmlConstant.get_aliyun_vod_ecsRegionId(self.__context) | |||
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=30) | |||
def get_play_info(self, videoId): | |||
self.logger.info("开始获取视频地址,videoId:{}, requestId:{}", videoId, self.requestId) | |||
self.__logger.info("开始获取视频地址,videoId:{}, requestId:{}", videoId, self.__requestId) | |||
start = time.time() | |||
while True: | |||
try: | |||
clt = self.init_vod_client(self.content["aliyun"]["access_key"], | |||
self.content["aliyun"]["access_secret"]) | |||
request = GetPlayInfoRequest.GetPlayInfoRequest() | |||
clt = self.init_vod_client(YmlConstant.get_aliyun_access_key(self.__context), | |||
YmlConstant.get_aliyun_access_secret(self.__context)) | |||
request: GetPlayInfoRequest = GetPlayInfoRequest.GetPlayInfoRequest() | |||
request.set_accept_format('JSON') | |||
request.set_VideoId(videoId) | |||
request.set_AuthTimeout(3600 * 5) | |||
response = json.loads(clt.do_action_with_exception(request)) | |||
play_url = response["PlayInfoList"]["PlayInfo"][0]["PlayURL"] | |||
self.logger.info("获取视频地址成功,视频地址: {}, requestId: {}", play_url, self.requestId) | |||
self.__logger.info("获取视频地址成功,视频地址: {}, requestId: {}", play_url, self.__requestId) | |||
return play_url | |||
except Exception as e: | |||
self.logger.error("获取视频地址失败,5秒后重试, requestId: {}", self.requestId) | |||
self.__logger.error("获取视频地址失败,5秒后重试, requestId: {}", self.__requestId) | |||
time.sleep(5) | |||
current_time = time.time() | |||
if "HTTP Status: 403" not in str(e): | |||
self.logger.exception("获取视频地址失败: {}, requestId: {}", e, self.requestId) | |||
self.__logger.exception("获取视频地址失败: {}, requestId: {}", e, self.__requestId) | |||
raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0], | |||
ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1]) | |||
if "HTTP Status: 403" in str(e) and ("UploadFail" in str(e) or "TranscodeFail" in str(e)): | |||
self.logger.exception("获取视频地址失败: {}, requestId: {}", e, self.requestId) | |||
self.__logger.exception("获取视频地址失败: {}, requestId: {}", e, self.__requestId) | |||
raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0], | |||
ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1]) | |||
diff_time = current_time - start | |||
if diff_time > 60 * 60 * 2: | |||
self.logger.exception("获取视频地址失败超时异常: {},超时时间:{}, requestId: {}", e, diff_time, self.requestId) | |||
self.__logger.exception("获取视频地址失败超时异常: {},超时时间:{}, requestId: {}", e, diff_time, | |||
self.__requestId) | |||
raise ServiceException(ExceptionType.GET_VIDEO_URL_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.GET_VIDEO_URL_TIMEOUT_EXCEPTION.value[1]) | |||
def upload_local_video(self, filePath, file_title, storageLocation=None): | |||
self.logger.info("开始执行vod视频上传, filePath: {}, requestId: {}", filePath, self.requestId) | |||
uploader = AliyunVodUploader(self.content["aliyun"]["access_key"], self.content["aliyun"]["access_secret"]) | |||
uploadVideoRequest = UploadVideoRequest(filePath, file_title) | |||
def upload_local_video(self, filePath, file_title): | |||
self.__logger.info("开始执行vod视频上传, filePath: {}, requestId: {}", filePath, self.__requestId) | |||
uploader = AliyunVodUploader(YmlConstant.get_aliyun_access_key(self.__context), | |||
YmlConstant.get_aliyun_access_secret(self.__context)) | |||
uploadVideoRequest: UploadVideoRequest = UploadVideoRequest(filePath, file_title) | |||
self.__logger.info("视频分类:{}", YmlConstant.get_aliyun_vod_cateId(self.__context)) | |||
uploadVideoRequest.setCateId(YmlConstant.get_aliyun_vod_cateId(self.__context)) | |||
# 可以设置视频封面,如果是本地或网络图片可使用UploadImageRequest上传图片到视频点播,获取到ImageURL | |||
# ImageURL示例:https://example.com/sample-****.jpg | |||
# uploadVideoRequest.setCoverURL('<your Image URL>') | |||
# 标签 | |||
# uploadVideoRequest.setTags('tag1,tag2') | |||
if storageLocation: | |||
uploadVideoRequest.setStorageLocation(storageLocation) | |||
MAX_RETRIES = 3 | |||
retry_count = 0 | |||
while True: | |||
try: | |||
result = uploader.uploadLocalVideo(uploadVideoRequest) | |||
self.logger.info("vod视频上传成功, videoId:{}, requestId:{}", result.get("VideoId"), self.requestId) | |||
self.__logger.info("vod视频上传成功, videoId:{}, requestId:{}", result.get("VideoId"), self.__requestId) | |||
return result.get("VideoId") | |||
except AliyunVodException as e: | |||
retry_count += 1 | |||
time.sleep(3) | |||
self.logger.error("vod视频上传失败,重试次数:{}, requestId:{}", retry_count, self.requestId) | |||
self.__logger.error("vod视频上传失败,重试次数:{}, requestId:{}", retry_count, self.__requestId) | |||
if retry_count >= MAX_RETRIES: | |||
self.logger.exception("vod视频上传重试失败: {}, requestId:{}", e, self.requestId) | |||
raise ServiceException(ExceptionType.VIDEO_UPDATE_EXCEPTION.value[0], | |||
ExceptionType.VIDEO_UPDATE_EXCEPTION.value[1]) | |||
self.__logger.exception("vod视频上传重试失败: {}, requestId:{}", e.message, self.__requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
def get_play_url(args): | |||
thAliyunVodSdk = ThAliyunVodSdk(args[2], args[3], args[4]) | |||
videoId = thAliyunVodSdk.upload_local_video(args[0], args[1]) | |||
if videoId is None or len(videoId) == 0: | |||
return None | |||
return thAliyunVodSdk.get_play_info(videoId) | |||
def get_play_url(self, filePath, file_title): | |||
videoId = self.upload_local_video(filePath, file_title) | |||
if videoId is None or len(videoId) == 0: | |||
return None | |||
return self.get_play_info(videoId) |
@@ -1,23 +1,26 @@ | |||
# -*- coding: utf-8 -*- | |||
import json | |||
import time | |||
import cv2 | |||
import subprocess as sp | |||
import ffmpeg | |||
import numpy as np | |||
from loguru import logger | |||
from common import Constant | |||
from exception.CustomerException import ServiceException | |||
from enums.ExceptionEnum import ExceptionType | |||
class Cv2Util(): | |||
def __init__(self, pullUrl, pushUrl=None, orFilePath=None, aiFilePath=None, requestId=None): | |||
def __init__(self, pullUrl=None, pushUrl=None, orFilePath=None, aiFilePath=None, requestId=None, context=None, | |||
gpu_ids=None, log=logger): | |||
self.pullUrl = pullUrl | |||
self.pushUrl = pushUrl | |||
self.orFilePath = orFilePath | |||
self.aiFilePath = aiFilePath | |||
self.__logger = log | |||
self.cap = None | |||
self.p = None | |||
self.or_video_file = None | |||
@@ -25,47 +28,36 @@ class Cv2Util(): | |||
self.fps = None | |||
self.width = None | |||
self.height = None | |||
self.wah = None | |||
self.wh = None | |||
self.h = None | |||
self.hn = None | |||
self.w = None | |||
self.all_frames = None | |||
self.bit_rate = None | |||
self.pull_p = None | |||
self.requestId = requestId | |||
self.p_push_retry_num = 0 | |||
self.resize_status = False | |||
self.current_frame = 0 | |||
self.isGpu = False | |||
self.read_w_h = None | |||
self.context = context | |||
if gpu_ids is not None and len(gpu_ids) > 0: | |||
self.isGpu = True | |||
def getFrameConfig(self, fps, width, height): | |||
if self.fps is None: | |||
if self.fps is None or self.width != width or self.height != height: | |||
self.fps = fps | |||
self.width = width | |||
self.height = height | |||
if width > 1600: | |||
self.wh = int(width * height * 3 // 8) | |||
self.wah = '%sx%s' % (int(self.width / 2), int(self.height / 2)) | |||
self.h = int(self.height * 3 // 4) | |||
self.w = int(self.width // 2) | |||
self.hn = int(self.height // 2) | |||
self.wn = int(self.width // 2) | |||
w_f = self.wh != width * height * 3 / 8 | |||
h_f = self.h != self.height * 3 / 4 | |||
wd_f = self.w != self.width / 2 | |||
if w_f or h_f or wd_f: | |||
self.resize_status = True | |||
self.wh = int(width * height * 3 // 2) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height * 3 // 2) | |||
self.w = int(self.width) | |||
if width > Constant.width: | |||
self.h = int(self.height//2) | |||
self.w = int(self.width//2) | |||
else: | |||
self.wh = int(width * height * 3 // 2) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height * 3 // 2) | |||
self.h = int(self.height) | |||
self.w = int(self.width) | |||
self.hn = int(self.height) | |||
self.wn = int(self.width) | |||
def clear_video_info(self): | |||
self.fps = None | |||
self.width = None | |||
self.height = None | |||
''' | |||
获取视频信息 | |||
@@ -73,119 +65,209 @@ class Cv2Util(): | |||
def get_video_info(self): | |||
try: | |||
if self.pullUrl is None: | |||
logger.error("拉流地址不能为空, requestId:{}", self.requestId) | |||
if self.pullUrl is None or len(self.pullUrl) == 0: | |||
self.__logger.error("拉流地址不能为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0], | |||
ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1]) | |||
probe = ffmpeg.probe(self.pullUrl) | |||
args = ['ffprobe', '-show_format', '-show_streams', '-of', 'json', self.pullUrl] | |||
p = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE) | |||
out, err = p.communicate(timeout=20) | |||
if p.returncode != 0: | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
probe = json.loads(out.decode('utf-8')) | |||
if probe is None or probe.get("streams") is None: | |||
return | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
# 视频大小 | |||
# format = probe['format'] | |||
# size = int(format['size'])/1024/1024 | |||
video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None) | |||
if video_stream is None: | |||
logger.error("根据拉流地址未获取到视频流, requestId:{}", self.requestId) | |||
return | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
width = video_stream.get('width') | |||
height = video_stream.get('height') | |||
nb_frames = video_stream.get('nb_frames') | |||
fps = video_stream.get('r_frame_rate') | |||
# duration = video_stream.get('duration') | |||
bit_rate = video_stream.get('bit_rate') | |||
self.width = int(width) | |||
self.height = int(height) | |||
# bit_rate = video_stream.get('bit_rate') | |||
if width is not None and height is not None: | |||
if width > 1600: | |||
self.wh = int(width * height * 3 // 8) | |||
self.wah = '%sx%s' % (int(self.width / 2), int(self.height / 2)) | |||
self.h = int(self.height * 3 // 4) | |||
self.w = int(self.width / 2) | |||
self.hn = int(self.height / 2) | |||
self.wn = int(self.width // 2) | |||
w_f = self.wh != width * height * 3 / 8 | |||
h_f = self.h != self.height * 3 / 4 | |||
wd_f = self.w != self.width / 2 | |||
if w_f or h_f or wd_f: | |||
self.resize_status = True | |||
self.wh = int(width * height * 3 // 2) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height * 3 // 2) | |||
self.w = int(self.width) | |||
self.width = int(width) | |||
self.height = int(height) | |||
self.wh = self.width * self.height * 3 | |||
if width > Constant.width: | |||
self.h = int(self.height//2) | |||
self.w = int(self.width//2) | |||
else: | |||
self.wh = int(width * height * 3 // 2) | |||
self.wah = '%sx%s' % (int(self.width), int(self.height)) | |||
self.h = int(self.height * 3 // 2) | |||
self.h = int(self.height) | |||
self.w = int(self.width) | |||
self.hn = int(self.height) | |||
self.wn = int(self.width) | |||
if nb_frames: | |||
if nb_frames: | |||
self.all_frames = int(nb_frames) | |||
up, down = str(fps).split('/') | |||
self.fps = int(eval(up) / eval(down)) | |||
# if duration: | |||
# self.duration = float(video_stream['duration']) | |||
# self.bit_rate = int(bit_rate) / 1000 | |||
self.__logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}|bit_rate:{}, requestId:{}", self.width, | |||
self.height, self.fps, self.all_frames, self.bit_rate, self.requestId) | |||
except ServiceException as s: | |||
self.__logger.error("获取视频信息异常: {}, requestId:{}", s.msg, self.requestId) | |||
self.clear_video_info() | |||
raise s | |||
except Exception as e: | |||
self.__logger.error("获取视频信息异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
''' | |||
录屏任务获取视频信息 | |||
''' | |||
def get_recording_video_info(self): | |||
try: | |||
video_info = 'ffprobe -show_format -show_streams -of json %s' % self.pullUrl | |||
p = sp.Popen(video_info, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) | |||
out, err = p.communicate(timeout=17) | |||
if p.returncode != 0: | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
probe = json.loads(out.decode('utf-8')) | |||
if probe is None or probe.get("streams") is None: | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None) | |||
if video_stream is None: | |||
raise Exception("未获取视频信息!!!!!requestId:" + self.requestId) | |||
width = video_stream.get('width') | |||
height = video_stream.get('height') | |||
nb_frames = video_stream.get('nb_frames') | |||
fps = video_stream.get('r_frame_rate') | |||
if width and int(width) > 0: | |||
self.width = int(width) | |||
if height and int(height) > 0: | |||
self.height = int(height) | |||
if self.width and self.height: | |||
self.wh = int(width * height * 3) | |||
self.read_w_h = ([self.height, self.width, 3]) | |||
if nb_frames and int(nb_frames) > 0: | |||
self.all_frames = int(nb_frames) | |||
if fps: | |||
up, down = str(fps).split('/') | |||
self.fps = int(eval(up) / eval(down)) | |||
# if duration: | |||
# self.duration = float(video_stream['duration']) | |||
if bit_rate: | |||
self.bit_rate = int(bit_rate) / 1000 | |||
logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}|bit_rate:{}, requestId:{}", self.width, | |||
self.height, self.fps, self.all_frames, self.bit_rate, self.requestId) | |||
self.__logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}, requestId:{}", self.width, | |||
self.height, self.fps, self.all_frames, self.requestId) | |||
except ServiceException as s: | |||
logger.error("获取视频信息异常: {}, requestId:{}", s.msg, self.requestId) | |||
self.__logger.error("获取视频信息异常: {}, requestId:{}", s.msg, self.requestId) | |||
self.clear_video_info() | |||
raise s | |||
except ffmpeg._run.Error as er: | |||
logger.error("获取视频信息异常: {}, requestId:{}", er.stderr.decode(encoding='utf-8'), self.requestId) | |||
except Exception as e: | |||
logger.exception("获取视频信息异常:{}, requestId:{}", e, self.requestId) | |||
self.__logger.exception("获取视频信息异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
def getRecordingFrameConfig(self, fps, width, height): | |||
self.fps = fps | |||
self.width = width | |||
self.height = height | |||
''' | |||
拉取视频 | |||
录屏拉取视频 | |||
''' | |||
def build_pull_p(self): | |||
def recording_pull_p(self): | |||
try: | |||
if self.wah is None: | |||
# 如果视频信息不存在, 不初始化拉流 | |||
if self.checkconfig(): | |||
return | |||
# 如果已经初始化, 不再初始化 | |||
if self.pull_p: | |||
return | |||
command = ['ffmpeg -re', '-y' | |||
# '-hide_banner', | |||
] | |||
if self.pullUrl.startswith('rtsp://'): | |||
command.extend(['-rtsp_transport', 'tcp']) | |||
if self.isGpu: | |||
command.extend(['-hwaccel', 'cuda']) | |||
command.extend(['-i', self.pullUrl, | |||
'-f', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-an', | |||
'-']) | |||
self.pull_p = sp.Popen(command, stdout=sp.PIPE) | |||
except ServiceException as s: | |||
self.__logger.exception("构建拉流管道异常: {}, requestId:{}", s.msg, self.requestId) | |||
self.clear_video_info() | |||
if self.pull_p: | |||
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.__logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
# command = ['ffmpeg', | |||
# # '-b:v', '3000k', | |||
# '-i', self.pullUrl, | |||
# '-f', 'rawvideo', | |||
# '-vcodec', 'rawvideo', | |||
# '-pix_fmt', 'bgr24', | |||
# # '-s', "{}x{}".format(int(width), int(height)), | |||
# '-an', | |||
# '-'] | |||
# input_config = {'c:v': 'h264_cuvid', 'resize': self.wah} | |||
# process = ( | |||
# ffmpeg | |||
# .input(self.pullUrl, **input_config) | |||
# .output('pipe:', format='rawvideo', r=str(self.fps)) # pix_fmt='bgr24' | |||
# .overwrite_output() | |||
# .global_args('-an') | |||
# .run_async(pipe_stdout=True) | |||
# ) | |||
command = ['ffmpeg', | |||
'-re', | |||
'-y', | |||
'-c:v', 'h264_cuvid', | |||
'-resize', self.wah, | |||
'-i', self.pullUrl, | |||
'-f', 'rawvideo', | |||
'-an', | |||
'-'] | |||
self.pull_p = None | |||
raise s | |||
except Exception as e: | |||
self.__logger.exception("构建拉流管道异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
if self.pull_p: | |||
self.__logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
def recording_read(self): | |||
result = None | |||
try: | |||
self.recording_pull_p() | |||
in_bytes = self.pull_p.stdout.read(self.wh) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
try: | |||
result = np.frombuffer(in_bytes, np.uint8).reshape(self.read_w_h) | |||
except Exception as ei: | |||
self.__logger.exception("视频格式异常:{}, requestId:{}", ei, self.requestId) | |||
raise ServiceException(ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[0], | |||
ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
if self.pull_p: | |||
self.__logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
raise s | |||
except Exception as e: | |||
self.__logger.exception("读流异常:{}, requestId:{}", e, self.requestId) | |||
if self.pull_p: | |||
self.__logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
return result | |||
''' | |||
拉取视频 | |||
''' | |||
def build_pull_p(self): | |||
try: | |||
command = ['ffmpeg'] | |||
if self.pullUrl.startswith("rtsp://"): | |||
command.extend(['-rtsp_transport', 'tcp']) | |||
command.extend(['-re', | |||
'-y', | |||
'-hwaccel', 'cuda', | |||
# '-resize', self.wah, | |||
'-i', self.pullUrl, | |||
'-f', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-an', | |||
'-']) | |||
self.pull_p = sp.Popen(command, stdout=sp.PIPE) | |||
# self.pull_p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE) | |||
# self.pull_p = process | |||
except ServiceException as s: | |||
logger.exception("构建拉流管道异常: {}, requestId:{}", s, self.requestId) | |||
self.__logger.exception("构建拉流管道异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
logger.exception("构建拉流管道异常:{}, requestId:{}", e, self.requestId) | |||
self.__logger.exception("构建拉流管道异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
if self.pull_p: | |||
self.__logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
def checkconfig(self): | |||
if self.fps is None or self.width is None or self.height is None: | |||
@@ -195,29 +277,34 @@ class Cv2Util(): | |||
def read(self): | |||
result = None | |||
try: | |||
# if self.pull_p is None: | |||
# logger.error("拉流管道为空, requestId:{}", self.requestId) | |||
# raise ServiceException(ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[0], | |||
# ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[1]) | |||
if self.pull_p is None: | |||
self.build_pull_p() | |||
in_bytes = self.pull_p.stdout.read(self.wh) | |||
self.current_frame += 1 | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
# result = (np.frombuffer(in_bytes, np.uint8).reshape([int(self.height), int(self.width), 3])) | |||
try: | |||
img = (np.frombuffer(in_bytes, np.uint8)).reshape((self.h, self.w)) | |||
result = (np.frombuffer(in_bytes, np.uint8).reshape([self.height, self.width, 3])) | |||
# img = (np.frombuffer(in_bytes, np.uint8)).reshape((self.h, self.w)) | |||
except Exception as ei: | |||
logger.exception("视频格式异常:{}, requestId:{}", ei, self.requestId) | |||
self.__logger.exception("视频格式异常:{}, requestId:{}", ei, self.requestId) | |||
raise ServiceException(ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[0], | |||
ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[1]) | |||
result = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12) | |||
if self.resize_status: | |||
if self.width > 1600: | |||
result = cv2.resize(result, (int(self.width / 2), int(self.height / 2)), | |||
interpolation=cv2.INTER_LINEAR) | |||
# result = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12) | |||
# result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR) | |||
if self.width > Constant.width: | |||
result = cv2.resize(result, (self.w, self.h), interpolation=cv2.INTER_LINEAR) | |||
except ServiceException as s: | |||
raise s | |||
except Exception as e: | |||
logger.exception("读流异常:{}, requestId:{}", e, self.requestId) | |||
self.clear_video_info() | |||
if self.pull_p: | |||
self.__logger.info("关闭拉流管道, requestId:{}", self.requestId) | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
self.pull_p = None | |||
self.__logger.exception("读流异常:{}, requestId:{}", e, self.requestId) | |||
if result is None: | |||
self.__logger.error("读取的帧:{}, requestId:{}", len(in_bytes), self.requestId) | |||
return result | |||
def close(self): | |||
@@ -226,7 +313,7 @@ class Cv2Util(): | |||
self.pull_p.stdout.close() | |||
self.pull_p.terminate() | |||
self.pull_p.wait() | |||
logger.info("关闭拉流管道完成, requestId:{}", self.requestId) | |||
self.__logger.info("关闭拉流管道完成, requestId:{}", self.requestId) | |||
if self.p: | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
@@ -234,13 +321,13 @@ class Cv2Util(): | |||
self.p.wait() | |||
# self.p.communicate() | |||
# self.p.kill() | |||
logger.info("关闭管道完成, requestId:{}", self.requestId) | |||
self.__logger.info("关闭管道完成, requestId:{}", self.requestId) | |||
if self.or_video_file: | |||
self.or_video_file.release() | |||
logger.info("关闭原视频写入流完成, requestId:{}", self.requestId) | |||
self.__logger.info("关闭原视频写入流完成, requestId:{}", self.requestId) | |||
if self.ai_video_file: | |||
self.ai_video_file.release() | |||
logger.info("关闭AI视频写入流完成, requestId:{}", self.requestId) | |||
self.__logger.info("关闭AI视频写入流完成, requestId:{}", self.requestId) | |||
# 构建 cv2 | |||
# def build_cv2(self): | |||
@@ -300,21 +387,10 @@ class Cv2Util(): | |||
# 构建 cv2 | |||
def build_p(self): | |||
try: | |||
if self.p: | |||
logger.info("重试, 关闭管道, requestId:{}", self.requestId) | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
self.p.terminate() | |||
self.p.wait() | |||
# self.p.communicate() | |||
# self.p.kill() | |||
if self.pushUrl is None: | |||
logger.error("推流地址不能为空, requestId:{}", self.requestId) | |||
if self.pushUrl is None or len(self.pushUrl) == 0: | |||
self.__logger.error("推流地址不能为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[0], | |||
ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[1]) | |||
width = int(self.width) | |||
if width <= 1600: | |||
width = 2 * int(self.width) | |||
command = ['ffmpeg', | |||
# '-loglevel', 'debug', | |||
'-y', | |||
@@ -322,8 +398,7 @@ class Cv2Util(): | |||
'-vcodec', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-thread_queue_size', '1024', | |||
# '-s', "{}x{}".format(self.width * 2, self.height), | |||
'-s', "{}x{}".format(width, int(self.hn)), | |||
'-s', "{}x{}".format(self.w * 2, self.h), | |||
'-r', str(self.fps), | |||
'-i', '-', # 指定输入文件 | |||
'-g', str(self.fps), | |||
@@ -348,60 +423,33 @@ class Cv2Util(): | |||
'-tune', 'll', | |||
'-f', 'flv', | |||
self.pushUrl] | |||
# command = 'ffmpeg -loglevel debug -y -f rawvideo -vcodec rawvideo -pix_fmt bgr24' +\ | |||
# ' -s ' + "{}x{}".format(int(self.width), int(self.height/2))\ | |||
# + ' -i - ' + '-g ' + str(self.fps)+\ | |||
# ' -b:v 6000k -tune zerolatency -c:v libx264 -pix_fmt yuv420p -preset ultrafast'+\ | |||
# ' -f flv ' + self.pushUrl | |||
# kwargs = {'format': 'rawvideo', | |||
# # 'vcodec': 'rawvideo', | |||
# 'pix_fmt': 'bgr24', | |||
# 's': '{}x{}'.format(int(self.width), int(self.height/2))} | |||
# out = { | |||
# 'r': str(self.fps), | |||
# 'g': str(self.fps), | |||
# 'b:v': '5500k', # 恒定码率 | |||
# # 'maxrate': '15000k', | |||
# # 'crf': '18', | |||
# 'bufsize': '5500k', | |||
# 'tune': 'zerolatency', # 加速编码速度 | |||
# 'c:v': 'libx264', # 指定视频编码器 | |||
# 'sc_threshold': '0', | |||
# 'pix_fmt': 'yuv420p', | |||
# # 'flvflags': 'no_duration_filesize', | |||
# 'preset': 'medium', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast, | |||
# # superfast, veryfast, faster, fast, medium, slow, slower, veryslow。 | |||
# 'format': 'flv'} | |||
# 管道配置 | |||
# process2 = ( | |||
# ffmpeg | |||
# .input('pipe:', **kwargs) | |||
# .output(self.pushUrl, **out) | |||
# .global_args('-y', '-an') | |||
# .overwrite_output() | |||
# .run_async(pipe_stdin=True) | |||
# ) | |||
logger.info("fps:{}|height:{}|width:{}|requestId:{}", self.fps, self.height, self.width, self.requestId) | |||
self.__logger.info("fps:{}|height:{}|width:{}|requestId:{}", self.fps, self.height, self.width, self.requestId) | |||
self.p = sp.Popen(command, stdin=sp.PIPE, shell=False) | |||
# self.p = process2 | |||
except ServiceException as s: | |||
logger.exception("构建p管道异常: {}, requestId:{}", s, self.requestId) | |||
if self.p: | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
self.p.terminate() | |||
self.p.wait() | |||
self.__logger.exception("构建p管道异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
logger.exception("初始化p管道异常:{}, requestId:{}", e, self.requestId) | |||
async def push_stream_write(self, frame): | |||
self.p.stdin.write(frame.tostring()) | |||
if self.p: | |||
if self.p.stdin: | |||
self.p.stdin.close() | |||
self.p.terminate() | |||
self.p.wait() | |||
self.__logger.exception("初始化p管道异常:{}, requestId:{}", e, self.requestId) | |||
async def push_stream(self, frame): | |||
if self.p is None: | |||
self.build_p() | |||
def push_stream(self, frame): | |||
try: | |||
await self.push_stream_write(frame) | |||
return True | |||
if self.p is None: | |||
self.build_p() | |||
self.p.stdin.write(frame.tostring()) | |||
except ServiceException as s: | |||
raise s | |||
except Exception as ex: | |||
logger.exception("推流进管道异常:{}, requestId: {}", ex, self.requestId) | |||
self.__logger.exception("推流进管道异常:{}, requestId: {}", ex, self.requestId) | |||
current_retry_num = 0 | |||
while True: | |||
try: | |||
@@ -409,73 +457,97 @@ class Cv2Util(): | |||
self.p_push_retry_num += 1 | |||
current_retry_num += 1 | |||
if current_retry_num > 3 or self.p_push_retry_num > 600: | |||
return False | |||
raise ServiceException(ExceptionType.PUSH_STREAMING_CHANNEL_IS_OCCUPIED.value[0], | |||
ExceptionType.PUSH_STREAMING_CHANNEL_IS_OCCUPIED.value[1]) | |||
self.build_p() | |||
await self.push_stream_write(frame) | |||
logger.info("构建p管道重试成功, 当前重试次数: {}, requestId: {}", current_retry_num, | |||
self.p.stdin.write(frame.tostring()) | |||
self.__logger.info("构建p管道重试成功, 当前重试次数: {}, requestId: {}", current_retry_num, | |||
self.requestId) | |||
return True | |||
except ServiceException as ss: | |||
raise ss | |||
except Exception as e: | |||
logger.exception("构建p管道异常:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
self.__logger.exception("构建p管道异常:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
current_retry_num, self.requestId) | |||
return False | |||
async def video_frame_write(self, or_frame, ai_frame): | |||
if or_frame is not None: | |||
self.or_video_file.write(or_frame) | |||
if ai_frame is not None: | |||
self.ai_video_file.write(ai_frame) | |||
def build_or_write(self): | |||
try: | |||
if self.orFilePath is not None and self.or_video_file is None: | |||
self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(self.w, self.h)) | |||
if self.or_video_file is None: | |||
self.__logger.error("or_video_file为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
self.__logger.exception("构建OR文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
self.__logger.exception("构建OR文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
def build_ai_write(self): | |||
try: | |||
if self.aiFilePath is not None and self.ai_video_file is None: | |||
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(self.w * 2, self.h)) | |||
if self.ai_video_file is None: | |||
self.__logger.error("ai_video_file为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
except ServiceException as s: | |||
self.__logger.exception("构建AI文件写对象异常: {}, requestId:{}", s.msg, self.requestId) | |||
raise s | |||
except Exception as e: | |||
self.__logger.exception("构建AI文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
async def video_write(self, or_frame, ai_frame): | |||
def video_or_write(self, frame): | |||
try: | |||
self.build_write() | |||
if or_frame is not None and len(or_frame) > 0: | |||
await self.video_frame_write(or_frame, None) | |||
if ai_frame is not None and len(ai_frame) > 0: | |||
await self.video_frame_write(None, ai_frame) | |||
return True | |||
if self.or_video_file is None: | |||
self.build_or_write() | |||
self.or_video_file.write(frame) | |||
except ServiceException as s: | |||
raise s | |||
except Exception as ex: | |||
ai_retry_num = 0 | |||
while True: | |||
try: | |||
ai_retry_num += 1 | |||
if ai_retry_num > 3: | |||
logger.exception("重新写入离线分析后视频到本地,重试失败:{}, requestId: {}", e, self.requestId) | |||
return False | |||
if or_frame is not None and len(or_frame) > 0: | |||
await self.or_video_file.write(or_frame) | |||
if ai_frame is not None and len(ai_frame) > 0: | |||
await self.ai_video_file.write(ai_frame) | |||
logger.info("重新写入离线分析后视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.__logger.exception("重新写入原视频视频到本地,重试失败, requestId: {}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.or_video_file.write(frame) | |||
self.__logger.info("重新写入原视频视到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.requestId) | |||
return True | |||
break | |||
except Exception as e: | |||
logger.exception("重新写入离线分析后视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
self.__logger.exception("重新写入原视频视到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
ai_retry_num, self.requestId) | |||
def build_write(self): | |||
def video_ai_write(self, frame): | |||
try: | |||
if self.fps is None or self.width is None or self.height is None: | |||
raise ServiceException(ExceptionType.VIDEO_CONFIG_EXCEPTION.value[0], | |||
ExceptionType.VIDEO_CONFIG_EXCEPTION.value[1]) | |||
if self.orFilePath is not None and self.or_video_file is None: | |||
self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(int(self.wn), int(self.hn))) | |||
if self.or_video_file is None: | |||
raise ServiceException(ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[0], | |||
ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[1]) | |||
if self.aiFilePath is not None and self.ai_video_file is None: | |||
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, | |||
(int(self.wn * 2), int(self.hn))) | |||
if self.ai_video_file is None: | |||
raise ServiceException(ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[0], | |||
ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[1]) | |||
if self.ai_video_file is None: | |||
self.build_ai_write() | |||
self.ai_video_file.write(frame) | |||
except ServiceException as s: | |||
logger.exception("构建文件写对象异常: {}, requestId:{}", s, self.requestId) | |||
raise s | |||
except Exception as e: | |||
logger.exception("构建文件写对象异常: {}, requestId:{}", e, self.requestId) | |||
raise e | |||
except Exception as ex: | |||
ai_retry_num = 0 | |||
while True: | |||
try: | |||
ai_retry_num += 1 | |||
if ai_retry_num > 3: | |||
self.__logger.exception("重新写入分析后的视频到本地,重试失败, requestId: {}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.ai_video_file.write(frame) | |||
self.__logger.info("重新写入分析后的视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num, | |||
self.requestId) | |||
break | |||
except Exception as e: | |||
self.__logger.exception("重新写入分析后的视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e, | |||
ai_retry_num, self.requestId) | |||
def video_merge(self, frame1, frame2): | |||
# frameLeft = cv2.resize(frame1, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR) | |||
@@ -486,28 +558,21 @@ class Cv2Util(): | |||
def getP(self): | |||
if self.p is None: | |||
logger.error("获取管道为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[0], | |||
ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[1]) | |||
self.__logger.error("获取管道为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
return self.p | |||
def getCap(self): | |||
if self.cap is None: | |||
logger.error("获取cv2为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.CV2_IS_NULL_EXCEPTION.value[0], | |||
ExceptionType.CV2_IS_NULL_EXCEPTION.value[1]) | |||
return self.cap | |||
def getOrVideoFile(self): | |||
if self.or_video_file is None: | |||
logger.error("获取原视频写入对象为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[0], | |||
ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[1]) | |||
self.__logger.error("获取原视频写入对象为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
return self.or_video_file | |||
def getAiVideoFile(self): | |||
if self.ai_video_file is None: | |||
logger.error("获取AI视频写入对象为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[0], | |||
ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[1]) | |||
self.__logger.error("获取AI视频写入对象为空, requestId:{}", self.requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
return self.ai_video_file |
@@ -8,7 +8,6 @@ from loguru import logger | |||
def create_dir_not_exist(path): | |||
logger.info("检查文件夹是否存在: {}", path) | |||
if not os.path.exists(path): | |||
logger.info("开始创建文件夹: {}", path) | |||
os.makedirs(path) |
@@ -1,5 +1,9 @@ | |||
import GPUtil | |||
from common import YmlConstant | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
# order- 确定返回可用 GPU 设备 ID 的顺序。order应指定为以下字符串之一: | |||
# 'first'- 按升序排列可用的 GPU 设备 ID(默认) | |||
@@ -15,16 +19,31 @@ import GPUtil | |||
# excludeUUIDexcludeID-除了它使用 UUID 之外,其他相同。(默认 = []) | |||
# 输出 | |||
# deviceIDs - 所有可用 GPU 设备 ID 的列表。如果当前负载和内存使用量分别小于maxLoad和maxMemory,则认为 GPU 可用。该列表是根据 排序的order。返回的设备 ID 的最大数量由 限制limit。 | |||
def get_gpu_ids(content): | |||
deviceIDs = GPUtil.getAvailable(order=content["gpu"]["order"], | |||
limit=int(content["gpu"]["limit"]), | |||
maxLoad=float(content["gpu"]["maxLoad"]), | |||
maxMemory=float(content["gpu"]["maxMemory"]), | |||
includeNan=content["gpu"]["includeNan"], | |||
excludeID=content["gpu"]["excludeID"], | |||
excludeUUID=content["gpu"]["excludeUUID"]) | |||
def get_gpu_ids(context): | |||
deviceIDs = GPUtil.getAvailable(order=YmlConstant.get_gpu_order(context), | |||
limit=int(YmlConstant.get_gpu_limit(context)), | |||
maxLoad=float(YmlConstant.get_gpu_maxLoad(context)), | |||
maxMemory=float(YmlConstant.get_gpu_maxMemory(context)), | |||
includeNan=YmlConstant.get_gpu_includeNan(context), | |||
excludeID=YmlConstant.get_gpu_excludeID(context), | |||
excludeUUID=YmlConstant.get_gpu_excludeUUID(context)) | |||
return deviceIDs | |||
def get_all_gpu_ids(): | |||
return GPUtil.getGPUs() | |||
def get_first_gpu_name(): | |||
gps = GPUtil.getGPUs() | |||
if gps is None or len(gps) == 0: | |||
raise Exception("未获取到gpu资源, 先检测服务器是否已经配置GPU资源!") | |||
return gps[0].name | |||
def check_gpu_resource(context): | |||
gpu_ids = get_gpu_ids(context) | |||
if gpu_ids is None or len(gpu_ids) == 0 or (0 not in gpu_ids and str(0) not in gpu_ids): | |||
raise ServiceException(ExceptionType.NO_GPU_RESOURCES.value[0], | |||
ExceptionType.NO_GPU_RESOURCES.value[1]) | |||
return gpu_ids |