@@ -1,58 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="PublishConfigData" serverName="外网" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||
<serverData> | |||
<paths name="10.21"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/chenyukun/dev/algSch" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="10.22"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/th/tuo_heng/prod/tuoheng_alg" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="192.168.11.7"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/th/tuo_heng/test/tuoheng_alg" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="192.168.11.8"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/th/tuo_heng/dev/tuoheng_alg" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="66"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/opt/ai/tuoheng_alg" local="$PROJECT_DIR$" web="/" /> | |||
<mapping local="" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="chenyukun"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/opt/ai/algSch" local="$PROJECT_DIR$" web="/" /> | |||
<mapping deploy="" local="" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
<paths name="外网"> | |||
<serverdata> | |||
<mappings> | |||
<mapping deploy="/home/thsw/chenyukun/tuoheng_alg" local="$PROJECT_DIR$" web="/" /> | |||
</mappings> | |||
</serverdata> | |||
</paths> | |||
</serverData> | |||
</component> | |||
</project> |
@@ -1,6 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="Encoding"> | |||
<file url="PROJECT" charset="UTF-8" /> | |||
</component> | |||
</project> |
@@ -1,88 +0,0 @@ | |||
<component name="InspectionProjectProfileManager"> | |||
<profile version="1.0"> | |||
<option name="myName" value="Project Default" /> | |||
<inspection_tool class="JavaDoc" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="TOP_LEVEL_CLASS_OPTIONS"> | |||
<value> | |||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||
<option name="REQUIRED_TAGS" value="" /> | |||
</value> | |||
</option> | |||
<option name="INNER_CLASS_OPTIONS"> | |||
<value> | |||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||
<option name="REQUIRED_TAGS" value="" /> | |||
</value> | |||
</option> | |||
<option name="METHOD_OPTIONS"> | |||
<value> | |||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||
<option name="REQUIRED_TAGS" value="@return@param@throws or @exception" /> | |||
</value> | |||
</option> | |||
<option name="FIELD_OPTIONS"> | |||
<value> | |||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||
<option name="REQUIRED_TAGS" value="" /> | |||
</value> | |||
</option> | |||
<option name="IGNORE_DEPRECATED" value="false" /> | |||
<option name="IGNORE_JAVADOC_PERIOD" value="true" /> | |||
<option name="IGNORE_DUPLICATED_THROWS" value="false" /> | |||
<option name="IGNORE_POINT_TO_ITSELF" value="false" /> | |||
<option name="myAdditionalJavadocTags" value="date" /> | |||
</inspection_tool> | |||
<inspection_tool class="JavadocDeclaration" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="ADDITIONAL_TAGS" value="date" /> | |||
</inspection_tool> | |||
<inspection_tool class="MissingJavadoc" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="PACKAGE_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="MODULE_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="TOP_LEVEL_CLASS_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="INNER_CLASS_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="METHOD_SETTINGS"> | |||
<Options> | |||
<option name="REQUIRED_TAGS" value="@return@param@throws or @exception" /> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
<option name="FIELD_SETTINGS"> | |||
<Options> | |||
<option name="ENABLED" value="false" /> | |||
</Options> | |||
</option> | |||
</inspection_tool> | |||
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true"> | |||
<option name="ignoredErrors"> | |||
<list> | |||
<option value="N806" /> | |||
<option value="N803" /> | |||
<option value="N802" /> | |||
</list> | |||
</option> | |||
</inspection_tool> | |||
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true"> | |||
<option name="ignoredIdentifiers"> | |||
<list> | |||
<option value="str.*" /> | |||
</list> | |||
</option> | |||
</inspection_tool> | |||
</profile> | |||
</component> |
@@ -1,4 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Remote Python 3.8.15 (sftp://th@192.168.11.8:32178/home/th/anaconda3/envs/chenyukun/bin/python3.8)" project-jdk-type="Python SDK" /> | |||
</project> |
@@ -1,8 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="ProjectModuleManager"> | |||
<modules> | |||
<module fileurl="file://$PROJECT_DIR$/tuoheng_alg.iml" filepath="$PROJECT_DIR$/tuoheng_alg.iml" /> | |||
</modules> | |||
</component> | |||
</project> |
@@ -1,13 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="SshConfigs"> | |||
<configs> | |||
<sshConfig authType="PASSWORD" host="192.168.10.66" id="aa89844a-f7c0-47b6-9359-30d13fa76380" port="22" nameFormat="DESCRIPTIVE" username="thsw2" /> | |||
<sshConfig authType="PASSWORD" host="192.168.10.21" id="adf5e1da-4910-4668-bfbb-432f4e2ae77c" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" host="192.168.10.22" id="ac18a75e-ff42-4875-a5da-ad98d2d695ea" port="22" nameFormat="DESCRIPTIVE" username="th" /> | |||
<sshConfig authType="PASSWORD" connectionConfig="{"serverAliveInterval":300}" host="192.168.10.66" id="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" port="32782" nameFormat="DESCRIPTIVE" username="root" /> | |||
<sshConfig authType="PASSWORD" connectionConfig="{"proxyParams":{"proxyHost":"","proxyPort":-1,"proxyType":"IDE_WIDE_PROXY"}}" host="192.168.11.7" id="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" port="22" nameFormat="DESCRIPTIVE" username="th" useOpenSSHConfig="true" /> | |||
<sshConfig authType="PASSWORD" host="221.226.114.142" id="2af8cb49-06d5-499e-85f2-e22072c6c979" port="1011" nameFormat="DESCRIPTIVE" username="thsw" useOpenSSHConfig="true" /> | |||
</configs> | |||
</component> | |||
</project> |
@@ -1,6 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="VcsDirectoryMappings"> | |||
<mapping directory="$PROJECT_DIR$" vcs="Git" /> | |||
</component> | |||
</project> |
@@ -1,56 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="WebServers"> | |||
<option name="servers"> | |||
<webServer id="630d5d4a-219c-4d57-bb0b-44534517b306" name="chenyukun"> | |||
<fileTransfer accessType="SFTP" host="192.168.10.66" port="32782" sshConfigId="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" sshConfig="root@192.168.10.66:32782 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="cc246223-f324-4e86-9e18-4b309f3a6500" name="66"> | |||
<fileTransfer accessType="SFTP" host="192.168.10.66" port="32782" sshConfigId="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" sshConfig="root@192.168.10.66:32782 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="c24476df-a574-465f-9529-a8e029b84f34" name="10.21"> | |||
<fileTransfer accessType="SFTP" host="192.168.10.22" port="22" sshConfigId="ac18a75e-ff42-4875-a5da-ad98d2d695ea" sshConfig="th@192.168.10.22:22 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="575fb0f8-1aa4-4ab8-8952-1657964a0673" name="10.22"> | |||
<fileTransfer accessType="SFTP" host="192.168.10.22" port="22" sshConfigId="ac18a75e-ff42-4875-a5da-ad98d2d695ea" sshConfig="th@192.168.10.22:22 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="b761b5c5-5f66-4c6a-ad49-4783ff5df619" name="192.168.11.8"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.8" port="32178" sshConfigId="080a8ea2-04ef-404c-8202-a30cad7668a2" sshConfig="th@192.168.11.8:32178 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="d52d4eb1-ad07-4dd6-adac-d5e84d4a0f0c" name="192.168.11.7"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.7" port="22" sshConfigId="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" sshConfig="th@192.168.11.7:22 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
<webServer id="e0e06591-e01f-4d76-88e9-9c8ee17b919f" name="外网"> | |||
<fileTransfer accessType="SFTP" host="192.168.11.7" port="22" sshConfigId="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" sshConfig="th@192.168.11.7:22 password"> | |||
<advancedOptions> | |||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||
</advancedOptions> | |||
</fileTransfer> | |||
</webServer> | |||
</option> | |||
</component> | |||
</project> |
@@ -1,687 +0,0 @@ | |||
<?xml version="1.0" encoding="UTF-8"?> | |||
<project version="4"> | |||
<component name="AutoImportSettings"> | |||
<option name="autoReloadType" value="SELECTIVE" /> | |||
</component> | |||
<component name="ChangeListManager"> | |||
<list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes"> | |||
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FeedbackThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FeedbackThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/FileUploadThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FileUploadThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ModelTypeEnum.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/service/Dispatcher.py" beforeDir="false" afterPath="$PROJECT_DIR$/service/Dispatcher.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/test/线程/Test.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/线程/Test.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/Cv2Utils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/Cv2Utils.py" afterDir="false" /> | |||
<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" /> | |||
</list> | |||
<option name="SHOW_DIALOG" value="false" /> | |||
<option name="HIGHLIGHT_CONFLICTS" value="true" /> | |||
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" /> | |||
<option name="LAST_RESOLUTION" value="IGNORE" /> | |||
</component> | |||
<component name="FileTemplateManagerImpl"> | |||
<option name="RECENT_TEMPLATES"> | |||
<list> | |||
<option value="Python Script" /> | |||
</list> | |||
</option> | |||
</component> | |||
<component name="Git.Settings"> | |||
<excluded-from-favorite> | |||
<branch-storage> | |||
<map> | |||
<entry type="LOCAL"> | |||
<value> | |||
<list> | |||
<branch-info repo="$PROJECT_DIR$" source="master" /> | |||
</list> | |||
</value> | |||
</entry> | |||
<entry type="REMOTE"> | |||
<value> | |||
<list> | |||
<branch-info repo="$PROJECT_DIR$" source="origin/master" /> | |||
</list> | |||
</value> | |||
</entry> | |||
</map> | |||
</branch-storage> | |||
</excluded-from-favorite> | |||
<favorite-branches> | |||
<branch-storage> | |||
<map> | |||
<entry type="LOCAL"> | |||
<value> | |||
<list> | |||
<branch-info repo="$PROJECT_DIR$" source="develop" /> | |||
</list> | |||
</value> | |||
</entry> | |||
<entry type="REMOTE"> | |||
<value> | |||
<list> | |||
<branch-info repo="$PROJECT_DIR$" source="origin/develop" /> | |||
</list> | |||
</value> | |||
</entry> | |||
</map> | |||
</branch-storage> | |||
</favorite-branches> | |||
<option name="RECENT_BRANCH_BY_REPOSITORY"> | |||
<map> | |||
<entry key="$PROJECT_DIR$" value="master" /> | |||
</map> | |||
</option> | |||
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" /> | |||
</component> | |||
<component name="GitSEFilterConfiguration"> | |||
<file-type-list> | |||
<filtered-out-file-type name="LOCAL_BRANCH" /> | |||
<filtered-out-file-type name="REMOTE_BRANCH" /> | |||
<filtered-out-file-type name="TAG" /> | |||
<filtered-out-file-type name="COMMIT_BY_MESSAGE" /> | |||
</file-type-list> | |||
</component> | |||
<component name="GitToolBoxStore"> | |||
<option name="recentBranches"> | |||
<RecentBranches> | |||
<option name="branchesForRepo"> | |||
<list> | |||
<RecentBranchesForRepo> | |||
<option name="branches"> | |||
<list> | |||
<RecentBranch> | |||
<option name="branchName" value="develop" /> | |||
<option name="lastUsedInstant" value="1668736215" /> | |||
</RecentBranch> | |||
<RecentBranch> | |||
<option name="branchName" value="master" /> | |||
<option name="lastUsedInstant" value="1668668084" /> | |||
</RecentBranch> | |||
</list> | |||
</option> | |||
<option name="repositoryRootUrl" value="file://$PROJECT_DIR$" /> | |||
</RecentBranchesForRepo> | |||
</list> | |||
</option> | |||
</RecentBranches> | |||
</option> | |||
</component> | |||
<component name="MarkdownSettingsMigration"> | |||
<option name="stateVersion" value="1" /> | |||
</component> | |||
<component name="MavenImportPreferences"> | |||
<option name="generalSettings"> | |||
<MavenGeneralSettings> | |||
<option name="mavenHome" value="C:/learn/maven/apache-maven-3.6.3-bin/apache-maven-3.6.3" /> | |||
<option name="userSettingsFile" value="C:\learn\maven\apache-maven-3.6.3-bin\apache-maven-3.6.3\conf\settings.xml" /> | |||
</MavenGeneralSettings> | |||
</option> | |||
<option name="importingSettings"> | |||
<MavenImportingSettings> | |||
<option name="jdkForImporter" value="11" /> | |||
</MavenImportingSettings> | |||
</option> | |||
</component> | |||
<component name="MavenRunner"> | |||
<option name="jreName" value="11" /> | |||
</component> | |||
<component name="ProjectId" id="2DTRMTxJTz5BhFzI55HkZIMBcy5" /> | |||
<component name="ProjectViewState"> | |||
<option name="hideEmptyMiddlePackages" value="true" /> | |||
<option name="showLibraryContents" value="true" /> | |||
</component> | |||
<component name="PropertiesComponent">{ | |||
"keyToString": { | |||
"RunOnceActivity.OpenProjectViewOnStart": "true", | |||
"RunOnceActivity.ShowReadmeOnStart": "true", | |||
"WebServerToolWindowFactoryState": "true", | |||
"WebServerToolWindowPanel.toolwindow.highlight.mappings": "true", | |||
"WebServerToolWindowPanel.toolwindow.highlight.symlinks": "true", | |||
"WebServerToolWindowPanel.toolwindow.show.date": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.permissions": "false", | |||
"WebServerToolWindowPanel.toolwindow.show.size": "false", | |||
"last_opened_file_path": "D:/tuoheng/codenew/tuoheng_alg", | |||
"node.js.detected.package.eslint": "true", | |||
"node.js.detected.package.tslint": "true", | |||
"node.js.selected.package.eslint": "(autodetect)", | |||
"node.js.selected.package.tslint": "(autodetect)", | |||
"nodejs_package_manager_path": "npm", | |||
"project.structure.last.edited": "SDK", | |||
"project.structure.proportion": "0.15", | |||
"project.structure.side.proportion": "0.2816092", | |||
"settings.editor.selected.configurable": "preferences.pluginManager", | |||
"vue.rearranger.settings.migration": "true" | |||
} | |||
}</component> | |||
<component name="RecentsManager"> | |||
<key name="CopyFile.RECENT_KEYS"> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\enums" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\entity" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\读写" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\config" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\算法" /> | |||
</key> | |||
<key name="MoveFile.RECENT_KEYS"> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\config" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\设计模式\单例" /> | |||
<recent name="D:\tuoheng\codenew\tuoheng_alg\font" /> | |||
</key> | |||
</component> | |||
<component name="RunManager" selected="Python.Test"> | |||
<configuration name="CpuUtils" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/util" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/util/CpuUtils.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="Test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/线程" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/线程/Test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="csv_test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/读写" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/读写/csv_test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" /> | |||
<option name="IS_MODULE_SDK" value="false" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="mysqltest" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/mysqltest.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="test (1)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/内存优化/slots" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/内存优化/slots/test.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<configuration name="test1" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||
<module name="tuoheng_alg" /> | |||
<option name="INTERPRETER_OPTIONS" value="" /> | |||
<option name="PARENT_ENVS" value="true" /> | |||
<envs> | |||
<env name="PYTHONUNBUFFERED" value="1" /> | |||
</envs> | |||
<option name="SDK_HOME" value="" /> | |||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/内存优化/slots" /> | |||
<option name="IS_MODULE_SDK" value="true" /> | |||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/内存优化/slots/test1.py" /> | |||
<option name="PARAMETERS" value="" /> | |||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||
<option name="EMULATE_TERMINAL" value="false" /> | |||
<option name="MODULE_MODE" value="false" /> | |||
<option name="REDIRECT_INPUT" value="false" /> | |||
<option name="INPUT_FILE" value="" /> | |||
<method v="2" /> | |||
</configuration> | |||
<list> | |||
<item itemvalue="Python.editImage" /> | |||
<item itemvalue="Python.mysqltest" /> | |||
<item itemvalue="Python.Test" /> | |||
<item itemvalue="Python.test1" /> | |||
<item itemvalue="Python.CpuUtils" /> | |||
<item itemvalue="Python.csv_test" /> | |||
<item itemvalue="Python.test (1)" /> | |||
</list> | |||
<recent_temporary> | |||
<list> | |||
<item itemvalue="Python.Test" /> | |||
<item itemvalue="Python.test1" /> | |||
<item itemvalue="Python.test (1)" /> | |||
<item itemvalue="Python.csv_test" /> | |||
<item itemvalue="Python.CpuUtils" /> | |||
</list> | |||
</recent_temporary> | |||
</component> | |||
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" /> | |||
<component name="SshConsoleOptionsProvider"> | |||
<option name="myEncoding" value="UTF-8" /> | |||
</component> | |||
<component name="TaskManager"> | |||
<task active="true" id="Default" summary="Default task"> | |||
<changelist id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="" /> | |||
<created>1660721040418</created> | |||
<option name="number" value="Default" /> | |||
<option name="presentableId" value="Default" /> | |||
<updated>1660721040418</updated> | |||
<workItem from="1660721041939" duration="5378000" /> | |||
<workItem from="1660742200263" duration="758000" /> | |||
<workItem from="1660781586599" duration="12677000" /> | |||
<workItem from="1660821003279" duration="3079000" /> | |||
<workItem from="1660831418060" duration="2591000" /> | |||
<workItem from="1660867353831" duration="14213000" /> | |||
<workItem from="1661125394679" duration="2000000" /> | |||
<workItem from="1661212127373" duration="12131000" /> | |||
<workItem from="1661228338772" duration="10683000" /> | |||
<workItem from="1661263812380" duration="582000" /> | |||
<workItem from="1661298710414" duration="3633000" /> | |||
<workItem from="1661385517494" duration="6862000" /> | |||
<workItem from="1661474047536" duration="3841000" /> | |||
<workItem from="1661506480813" duration="579000" /> | |||
<workItem from="1661753711797" duration="4495000" /> | |||
<workItem from="1661847814441" duration="5437000" /> | |||
<workItem from="1661864932477" duration="11602000" /> | |||
<workItem from="1661903556894" duration="23425000" /> | |||
<workItem from="1661956938136" duration="695000" /> | |||
<workItem from="1661989919031" duration="25723000" /> | |||
<workItem from="1662039810210" duration="419000" /> | |||
<workItem from="1662076586600" duration="25491000" /> | |||
<workItem from="1662335184832" duration="5150000" /> | |||
<workItem from="1662348891112" duration="7581000" /> | |||
<workItem from="1662421409878" duration="15047000" /> | |||
<workItem from="1663472604061" duration="19071000" /> | |||
<workItem from="1663515200540" duration="648000" /> | |||
<workItem from="1663545195142" duration="10121000" /> | |||
<workItem from="1666136820911" duration="26264000" /> | |||
<workItem from="1666223126104" duration="11494000" /> | |||
<workItem from="1666269871579" duration="3963000" /> | |||
<workItem from="1666351797324" duration="835000" /> | |||
<workItem from="1666436589395" duration="2588000" /> | |||
<workItem from="1666568450522" duration="695000" /> | |||
<workItem from="1666658084006" duration="458000" /> | |||
<workItem from="1668557891343" duration="19808000" /> | |||
<workItem from="1668667255748" duration="5947000" /> | |||
<workItem from="1668729965000" duration="4848000" /> | |||
<workItem from="1668992475879" duration="14053000" /> | |||
<workItem from="1669163433580" duration="26647000" /> | |||
<workItem from="1669251179588" duration="805000" /> | |||
<workItem from="1669276335478" duration="1020000" /> | |||
<workItem from="1669280017428" duration="1441000" /> | |||
<workItem from="1669628332462" duration="4397000" /> | |||
<workItem from="1669680423418" duration="5107000" /> | |||
<workItem from="1669778128579" duration="17564000" /> | |||
<workItem from="1669860048099" duration="1865000" /> | |||
<workItem from="1670294320960" duration="9059000" /> | |||
<workItem from="1670484573639" duration="7495000" /> | |||
<workItem from="1670545327661" duration="1911000" /> | |||
<workItem from="1670573239165" duration="15919000" /> | |||
<workItem from="1670893670201" duration="9457000" /> | |||
<workItem from="1670976721564" duration="5457000" /> | |||
<workItem from="1671067024950" duration="576000" /> | |||
<workItem from="1671427932628" duration="2256000" /> | |||
<workItem from="1671445227735" duration="582000" /> | |||
<workItem from="1671606515022" duration="4520000" /> | |||
<workItem from="1672047085940" duration="72000" /> | |||
<workItem from="1672119186060" duration="628000" /> | |||
<workItem from="1672192765984" duration="1002000" /> | |||
<workItem from="1672273700875" duration="1315000" /> | |||
<workItem from="1672295805200" duration="19000" /> | |||
<workItem from="1672709979593" duration="2445000" /> | |||
<workItem from="1672797232144" duration="25138000" /> | |||
<workItem from="1672877597405" duration="5633000" /> | |||
<workItem from="1672967214543" duration="2590000" /> | |||
<workItem from="1673483697794" duration="13972000" /> | |||
<workItem from="1674003653469" duration="751000" /> | |||
<workItem from="1674089698944" duration="1180000" /> | |||
<workItem from="1674174312546" duration="591000" /> | |||
<workItem from="1674953245041" duration="2374000" /> | |||
<workItem from="1675038738781" duration="20967000" /> | |||
<workItem from="1675126111623" duration="4395000" /> | |||
<workItem from="1675158655221" duration="5508000" /> | |||
<workItem from="1675298111671" duration="1710000" /> | |||
<workItem from="1675388395566" duration="5304000" /> | |||
<workItem from="1675643763842" duration="771000" /> | |||
<workItem from="1676269822235" duration="1954000" /> | |||
<workItem from="1676362382024" duration="821000" /> | |||
<workItem from="1676424351744" duration="4050000" /> | |||
<workItem from="1676506502236" duration="585000" /> | |||
<workItem from="1676871078953" duration="337000" /> | |||
<workItem from="1676895744433" duration="4418000" /> | |||
<workItem from="1676944131792" duration="515000" /> | |||
<workItem from="1677036599171" duration="4605000" /> | |||
<workItem from="1677112353743" duration="588000" /> | |||
<workItem from="1677574708616" duration="34000" /> | |||
<workItem from="1677632498068" duration="4279000" /> | |||
<workItem from="1677654510460" duration="2082000" /> | |||
<workItem from="1677727307545" duration="438000" /> | |||
<workItem from="1678153491396" duration="9573000" /> | |||
<workItem from="1678253386456" duration="45394000" /> | |||
<workItem from="1678668097364" duration="2754000" /> | |||
<workItem from="1678760898640" duration="1320000" /> | |||
<workItem from="1678791733686" duration="531000" /> | |||
<workItem from="1678839507873" duration="595000" /> | |||
<workItem from="1678885439785" duration="444000" /> | |||
<workItem from="1678925915104" duration="595000" /> | |||
<workItem from="1678927031601" duration="987000" /> | |||
<workItem from="1678928413253" duration="6728000" /> | |||
<workItem from="1679013228398" duration="17427000" /> | |||
<workItem from="1679039229464" duration="9832000" /> | |||
<workItem from="1679118299629" duration="17688000" /> | |||
<workItem from="1679289612196" duration="5820000" /> | |||
<workItem from="1679297557058" duration="1333000" /> | |||
<workItem from="1679359163976" duration="1997000" /> | |||
<workItem from="1679444345433" duration="1190000" /> | |||
<workItem from="1679633582926" duration="1979000" /> | |||
<workItem from="1679876991879" duration="1396000" /> | |||
<workItem from="1680136325711" duration="24199000" /> | |||
<workItem from="1680250415691" duration="1353000" /> | |||
<workItem from="1680486532876" duration="8132000" /> | |||
<workItem from="1680502907387" duration="10960000" /> | |||
<workItem from="1680527121128" duration="3411000" /> | |||
<workItem from="1680577929248" duration="5512000" /> | |||
<workItem from="1680741123267" duration="14728000" /> | |||
<workItem from="1680826640176" duration="21580000" /> | |||
<workItem from="1680914030055" duration="14971000" /> | |||
<workItem from="1680952718810" duration="967000" /> | |||
<workItem from="1681086404430" duration="27714000" /> | |||
<workItem from="1681170492379" duration="39568000" /> | |||
<workItem from="1681220684404" duration="2140000" /> | |||
<workItem from="1681258113350" duration="32577000" /> | |||
<workItem from="1681301257655" duration="429000" /> | |||
<workItem from="1681344786746" duration="5993000" /> | |||
<workItem from="1681363389283" duration="5626000" /> | |||
<workItem from="1681431288218" duration="25974000" /> | |||
<workItem from="1681690599771" duration="2894000" /> | |||
<workItem from="1681696465772" duration="30396000" /> | |||
<workItem from="1681826261843" duration="1474000" /> | |||
<workItem from="1681863254347" duration="13207000" /> | |||
<workItem from="1681950317514" duration="23460000" /> | |||
<workItem from="1682036333722" duration="651000" /> | |||
<workItem from="1682405963588" duration="37651000" /> | |||
<workItem from="1682554149580" duration="33878000" /> | |||
<workItem from="1682640444831" duration="10674000" /> | |||
<workItem from="1683244481879" duration="9171000" /> | |||
<workItem from="1683332505792" duration="23325000" /> | |||
<workItem from="1683506530261" duration="919000" /> | |||
<workItem from="1683507482567" duration="15434000" /> | |||
<workItem from="1683591783960" duration="1186000" /> | |||
<workItem from="1683677260592" duration="21750000" /> | |||
<workItem from="1683762579964" duration="23871000" /> | |||
<workItem from="1683851036596" duration="51000" /> | |||
<workItem from="1683851900729" duration="83000" /> | |||
<workItem from="1683851995142" duration="24789000" /> | |||
<workItem from="1684110880642" duration="5895000" /> | |||
<workItem from="1684197638479" duration="9103000" /> | |||
<workItem from="1684284520362" duration="13345000" /> | |||
<workItem from="1684379357818" duration="22600000" /> | |||
<workItem from="1684456296559" duration="11147000" /> | |||
<workItem from="1684653340859" duration="1199000" /> | |||
<workItem from="1684715657250" duration="6747000" /> | |||
<workItem from="1684801865053" duration="16900000" /> | |||
<workItem from="1684887585997" duration="21179000" /> | |||
<workItem from="1685069170536" duration="5199000" /> | |||
<workItem from="1685318330589" duration="16451000" /> | |||
<workItem from="1685367595669" duration="1105000" /> | |||
<workItem from="1685405545435" duration="5540000" /> | |||
<workItem from="1685929597469" duration="1586000" /> | |||
<workItem from="1686009758832" duration="4033000" /> | |||
<workItem from="1686099127317" duration="8648000" /> | |||
<workItem from="1686181421528" duration="9733000" /> | |||
<workItem from="1686530580527" duration="10215000" /> | |||
<workItem from="1686708793889" duration="28856000" /> | |||
<workItem from="1686787483987" duration="42321000" /> | |||
<workItem from="1686882826411" duration="32824000" /> | |||
<workItem from="1686963632234" duration="27367000" /> | |||
<workItem from="1687046210304" duration="54489000" /> | |||
<workItem from="1687141700932" duration="30282000" /> | |||
<workItem from="1687219517554" duration="39842000" /> | |||
<workItem from="1687306657563" duration="921000" /> | |||
<workItem from="1687307950930" duration="44000" /> | |||
<workItem from="1687308509659" duration="25425000" /> | |||
<workItem from="1687652018398" duration="8524000" /> | |||
<workItem from="1687736740408" duration="603000" /> | |||
<workItem from="1687737713032" duration="3837000" /> | |||
<workItem from="1687779451916" duration="5176000" /> | |||
<workItem from="1687933838564" duration="4146000" /> | |||
<workItem from="1687954592393" duration="1199000" /> | |||
<workItem from="1687997778160" duration="3792000" /> | |||
<workItem from="1688021144565" duration="1972000" /> | |||
<workItem from="1688083600084" duration="65000" /> | |||
<workItem from="1688083679443" duration="5459000" /> | |||
<workItem from="1688344638833" duration="1197000" /> | |||
<workItem from="1688441104396" duration="5401000" /> | |||
<workItem from="1688524387384" duration="5018000" /> | |||
<workItem from="1688611646979" duration="3976000" /> | |||
<workItem from="1688688564237" duration="1253000" /> | |||
<workItem from="1688953948444" duration="13288000" /> | |||
<workItem from="1689120808268" duration="5671000" /> | |||
<workItem from="1689297784810" duration="1254000" /> | |||
<workItem from="1689341342536" duration="14000" /> | |||
<workItem from="1689378621763" duration="831000" /> | |||
<workItem from="1689554206797" duration="16635000" /> | |||
<workItem from="1689644925650" duration="19947000" /> | |||
</task> | |||
<servers /> | |||
</component> | |||
<component name="TypeScriptGeneratedFilesManager"> | |||
<option name="version" value="3" /> | |||
</component> | |||
<component name="Vcs.Log.Tabs.Properties"> | |||
<option name="TAB_STATES"> | |||
<map> | |||
<entry key="MAIN"> | |||
<value> | |||
<State /> | |||
</value> | |||
</entry> | |||
</map> | |||
</option> | |||
</component> | |||
<component name="XDebuggerManager"> | |||
<breakpoint-manager> | |||
<breakpoints> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/test/ffmpeg11/ffmpeg33.py</url> | |||
<line>24</line> | |||
<option name="timeStamp" value="1" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/test/aliyun/ossdemo.py</url> | |||
<line>4</line> | |||
<option name="timeStamp" value="4" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/test/collections/deque.py</url> | |||
<line>134</line> | |||
<option name="timeStamp" value="6" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/util/ModelUtils.py</url> | |||
<line>1</line> | |||
<option name="timeStamp" value="7" /> | |||
</line-breakpoint> | |||
<line-breakpoint enabled="true" suspend="THREAD" type="python-line"> | |||
<url>file://$PROJECT_DIR$/dsp_master.py</url> | |||
<line>1</line> | |||
<option name="timeStamp" value="8" /> | |||
</line-breakpoint> | |||
</breakpoints> | |||
</breakpoint-manager> | |||
</component> | |||
<component name="XSLT-Support.FileAssociations.UIState"> | |||
<expand /> | |||
<select /> | |||
</component> | |||
<component name="com.intellij.coverage.CoverageDataManagerImpl"> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$color_test.coverage" NAME="color_test 覆盖结果" MODIFIED="1683683775604" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/color" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo1.coverage" NAME="demo1 覆盖结果" MODIFIED="1685325611032" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg33.coverage" NAME="ffmpeg33 覆盖结果" MODIFIED="1670489109246" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo4.coverage" NAME="demo4 覆盖结果" MODIFIED="1684809818971" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$minio.coverage" NAME="minio 覆盖结果" MODIFIED="1667465702864" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/minio1" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$OrderedDict.coverage" NAME="OrderedDict 覆盖结果" MODIFIED="1684897161191" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$SnakeGame.coverage" NAME="SnakeGame 覆盖结果" MODIFIED="1684825356565" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/游戏" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$wraps.coverage" NAME="wraps 覆盖结果" MODIFIED="1684913804419" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$CpuUtils.coverage" NAME="CpuUtils 覆盖结果" MODIFIED="1686972304076" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg12.coverage" NAME="ffmpeg12 覆盖结果" MODIFIED="1675391366890" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__2_.coverage" NAME="Test (2) 覆盖结果" MODIFIED="1681796501563" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/路径" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test1.coverage" NAME="test1 覆盖结果" MODIFIED="1687661266628" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/内存优化/slots" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ossdemo.coverage" NAME="ossdemo 覆盖结果" MODIFIED="1681715255761" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Counter.coverage" NAME="Counter 覆盖结果" MODIFIED="1684894898737" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test__1_.coverage" NAME="test (1) 覆盖结果" MODIFIED="1687056062763" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/内存优化/slots" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$singledispatch.coverage" NAME="singledispatch 覆盖结果" MODIFIED="1684912905741" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$editImage.coverage" NAME="editImage 覆盖结果" MODIFIED="1678348350574" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/editimage" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$2.coverage" NAME="协程2 覆盖结果" MODIFIED="1668066168428" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng/algSch/test/协程/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImgBaiduSdk.coverage" NAME="ImgBaiduSdk 覆盖结果" MODIFIED="1678355024003" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo2.coverage" NAME="demo2 覆盖结果" MODIFIED="1684808407865" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ChainMap.coverage" NAME="ChainMap 覆盖结果" MODIFIED="1684905474944" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master 覆盖结果" MODIFIED="1686926216806" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$IntelligentRecognitionProcess.coverage" NAME="IntelligentRecognitionProcess 覆盖结果" MODIFIED="1682651444560" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/concurrency" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo3.coverage" NAME="demo3 覆盖结果" MODIFIED="1684809071819" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1686930120727" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/集合" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test.coverage" NAME="Test 覆盖结果" MODIFIED="1689663111360" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$csv_test.coverage" NAME="csv_test 覆盖结果" MODIFIED="1687000802518" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test2.coverage" NAME="test2 覆盖结果" MODIFIED="1669178077956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$csv.coverage" NAME="csv 覆盖结果" MODIFIED="1685331143094" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa.coverage" NAME="aa 覆盖结果" MODIFIED="1684461916527" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cmp_to_key.coverage" NAME="cmp_to_key 覆盖结果" MODIFIED="1684910406140" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils__1_.coverage" NAME="KafkaUtils (1) Coverage Results" MODIFIED="1663464961001" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$__init____1_.coverage" NAME="__init__ (1) 覆盖结果" MODIFIED="1684918690445" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$voddemo.coverage" NAME="voddemo 覆盖结果" MODIFIED="1681722102430" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1668522825199" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1668437822632" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$re.coverage" NAME="re 覆盖结果" MODIFIED="1684221962919" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/正则" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$deque.coverage" NAME="deque 覆盖结果" MODIFIED="1684896079231" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start__1_.coverage" NAME="producer_start (1) 覆盖结果" MODIFIED="1665832569996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$ffmpeg11.coverage" NAME="ffmpeg11 覆盖结果" MODIFIED="1668410004435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$__init__.coverage" NAME="__init__ 覆盖结果" MODIFIED="1686535860174" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo.coverage" NAME="demo 覆盖结果" MODIFIED="1686927940237" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1670999187123" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test__3_.coverage" NAME="test (3) 覆盖结果" MODIFIED="1686902851380" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/内存优化/slots" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$numpy_test.coverage" NAME="numpy_test 覆盖结果" MODIFIED="1684205019028" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/numpy" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$namedtuple.coverage" NAME="namedtuple 覆盖结果" MODIFIED="1684898422076" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="冒泡 覆盖结果" MODIFIED="1685368101589" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/算法" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$gputest.coverage" NAME="gputest 覆盖结果" MODIFIED="1681950938970" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/gpu" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$3.coverage" NAME="协程3 覆盖结果" MODIFIED="1668147029048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$AliyunSdk.coverage" NAME="AliyunSdk 覆盖结果" MODIFIED="1683803902993" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="全局变量1 覆盖结果" MODIFIED="1685322476342" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/语法" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$csv_test__1_.coverage" NAME="csv_test (1) 覆盖结果" MODIFIED="1685331476413" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$5.coverage" NAME="视频添加图片水印5 Coverage Results" MODIFIED="1661905982885" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$read.coverage" NAME="read Coverage Results" MODIFIED="1663640070956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1__1_.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665820653649" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$TimeUtils.coverage" NAME="TimeUtils Coverage Results" MODIFIED="1661222768678" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1671428635702" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo5.coverage" NAME="demo5 覆盖结果" MODIFIED="1684810002359" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$demo__1_.coverage" NAME="demo (1) 覆盖结果" MODIFIED="1685086704735" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/语法/for" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_stop.coverage" NAME="producer_stop 覆盖结果" MODIFIED="1668522920533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__1_.coverage" NAME="Test (1) 覆盖结果" MODIFIED="1683865962957" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$pa.coverage" NAME="pa 覆盖结果" MODIFIED="1684217734590" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/pachong" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$defaultdict.coverage" NAME="defaultdict 覆盖结果" MODIFIED="1684900122612" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg13.coverage" NAME="ffmpeg13 覆盖结果" MODIFIED="1675394160900" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||
<SUITE FILE_PATH="coverage/tuoheng_alg$test__2_.coverage" NAME="test (2) 覆盖结果" MODIFIED="1686824265048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" /> | |||
</component> | |||
</project> |
@@ -1,6 +1,4 @@ | |||
# -*- coding: utf-8 -*- | |||
# Configuration file name
APPLICATION_CONFIG = "dsp_application.json"
# Encoding
UTF_8 = "utf-8"
@@ -18,28 +16,428 @@ success_progess = "1.0000" | |||
width = 1400 | |||
COLOR = ( | |||
[0, 0, 255], | |||
[255, 0, 0], | |||
[211, 0, 148], | |||
[0, 127, 0], | |||
[0, 69, 255], | |||
[0, 255, 0], | |||
[255, 0, 255], | |||
[0, 0, 127], | |||
[127, 0, 255], | |||
[255, 129, 0], | |||
[139, 139, 0], | |||
[255, 255, 0], | |||
[127, 255, 0], | |||
[0, 127, 255], | |||
[0, 255, 127], | |||
[255, 127, 255], | |||
[8, 101, 139], | |||
[171, 130, 255], | |||
[139, 112, 74], | |||
[205, 205, 180]) | |||
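# Illustrative usage sketch: COLOR is a small palette of BGR triples (OpenCV channel
# order), presumably indexed per detection class when boxes are drawn. The
# cv2.rectangle call, canvas and class_index below are assumptions for illustration.
import cv2
import numpy as np

canvas = np.zeros((1080, 1920, 3), dtype=np.uint8)
class_index = 4
cv2.rectangle(canvas, (100, 100), (400, 300), COLOR[class_index % len(COLOR)], 2)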
ONLINE = "online" | |||
OFFLINE = "offline" | |||
PHOTO = "photo" | |||
RECORDING = "recording" | |||
ONLINE_START_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["start"] | |||
}, | |||
"pull_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
"push_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
"logo_url": { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
"models": { | |||
'type': 'list', | |||
'required': True, | |||
'nullable': False, | |||
'minlength': 1, | |||
'maxlength': 3, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"code": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "categories", | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
"is_video": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "code", | |||
'allowed': ["0", "1"] | |||
}, | |||
"is_image": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "code", | |||
'allowed': ["0", "1"] | |||
}, | |||
"categories": { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': "code", | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{0,255}$'}, | |||
"config": { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': "id", | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
ONLINE_STOP_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["stop"] | |||
} | |||
} | |||
OFFLINE_START_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["start"] | |||
}, | |||
"push_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
"pull_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
"logo_url": { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
"models": { | |||
'type': 'list', | |||
'required': True, | |||
'maxlength': 3, | |||
'minlength': 1, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"code": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "categories", | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
"is_video": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "code", | |||
'allowed': ["0", "1"] | |||
}, | |||
"is_image": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "code", | |||
'allowed': ["0", "1"] | |||
}, | |||
"categories": { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': "code", | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{0,255}$'}, | |||
"config": { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': "id", | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
OFFLINE_STOP_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["stop"] | |||
} | |||
} | |||
IMAGE_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["start"] | |||
}, | |||
"logo_url": { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
"image_urls": { | |||
'type': 'list', | |||
'required': True, | |||
'minlength': 1, | |||
'schema': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'maxlength': 5000 | |||
} | |||
}, | |||
"models": { | |||
'type': 'list', | |||
'required': True, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"code": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "categories", | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
"is_video": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "code", | |||
'allowed': ["0", "1"] | |||
}, | |||
"is_image": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "code", | |||
'allowed': ["0", "1"] | |||
}, | |||
"categories": { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': "code", | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{0,255}$'}, | |||
"config": { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': "id", | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
RECORDING_START_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["start"] | |||
}, | |||
"pull_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
"push_url": { | |||
'type': 'string', | |||
'required': False, | |||
'empty': True, | |||
'maxlength': 255 | |||
}, | |||
"logo_url": { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
} | |||
} | |||
RECORDING_STOP_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["stop"] | |||
} | |||
} | |||
PULL2PUSH_START_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["start"] | |||
}, | |||
"video_urls": { | |||
'type': 'list', | |||
'required': True, | |||
'nullable': False, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
"id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "pull_url", | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
"pull_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "push_url", | |||
'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$' | |||
}, | |||
"push_url": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': "id", | |||
'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$' | |||
} | |||
} | |||
} | |||
} | |||
} | |||
PULL2PUSH_STOP_SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
"command": { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': ["start", "stop"] | |||
}, | |||
"video_ids": { | |||
'type': 'list', | |||
'required': False, | |||
'nullable': True, | |||
'schema': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
} | |||
} | |||
} |
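# Illustrative usage sketch: these dict schemas follow cerberus-style validation rules
# ('type', 'required', 'regex', 'allowed', 'dependencies', ...), so an incoming task
# message can presumably be checked as shown below. The cerberus import and the
# validate_message helper are assumptions, not part of the original module.
from cerberus import Validator

def validate_message(msg, schema):
    validator = Validator(schema)
    if not validator.validate(msg):
        # validator.errors maps each failing field to its error messages
        raise ValueError("invalid message: %s" % validator.errors)

validate_message(
    {
        "request_id": "abc123",
        "command": "start",
        "pull_url": "rtsp://example.com/stream",
        "push_url": "rtmp://example.com/push",
        "models": [{"code": "001", "is_video": "1", "is_image": "0",
                    "categories": [{"id": "0"}]}],
    },
    ONLINE_START_SCHEMA,
)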
@@ -1,256 +0,0 @@ | |||
# -*- coding: utf-8 -*- | |||
model = { | |||
'type': 'list', | |||
'required': True, | |||
'nullable': False, | |||
'minlength': 1, | |||
'maxlength': 3, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
'code': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'dependencies': 'categories', | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
'categories': { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': 'code', | |||
'minlength': 1, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
'id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,255}$'}, | |||
'config': { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': 'id', | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
# Online parameter validation
ONLINE_START_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
}, | |||
'pull_url': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'maxlength': 255 | |||
}, | |||
'push_url': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
'results_base_dir': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{0,36}$' | |||
}, | |||
'models': model | |||
} | |||
# Online stop parameter validation
ONLINE_STOP_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
} | |||
} | |||
# Offline start parameter validation
OFFLINE_START_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
}, | |||
'original_url': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
'push_url': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
'original_type': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
'results_base_dir': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'models': model | |||
} | |||
# Offline stop parameter validation
OFFLINE_STOP_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
} | |||
} | |||
# Image start parameter validation
IMAGE_START_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
}, | |||
'image_urls': { | |||
'type': 'list', | |||
'required': True, | |||
'minlength': 1, | |||
'schema': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 5000 | |||
} | |||
}, | |||
'results_base_dir': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'models': model | |||
} | |||
# Screen-recording parameter validation
RECORDING_START_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
}, | |||
'pull_url': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 255 | |||
}, | |||
'push_url': { | |||
'type': 'string', | |||
'required': True, | |||
'nullable': False, | |||
'empty': False, | |||
'maxlength': 255 | |||
} | |||
} | |||
# Screen-recording stop parameter validation
RECORDING_STOP_SCHEMA = { | |||
'request_id': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
'command': { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'nullable': False, | |||
'allowed': ['start', 'stop'] | |||
} | |||
} |
@@ -1,404 +1,10 @@ | |||
# -*- coding: utf-8 -*- | |||
BASE_DIR = 'base_dir' | |||
GPU_CODES = ['3090', '2080', '4090', 'A10'] | |||
GPU_NAME = 'gpu_name' | |||
GPU_2080 = '2080' | |||
GPU_2080_Ti = '2080Ti' | |||
KAFKA_ACKS = "acks" | |||
KAFKA_RETRIES = "retries" | |||
KAFKA_LINGER_MS = "linger_ms" | |||
KAFKA_RETRY_BACKOFF_MS = "retry_backoff_ms" | |||
KAFKA_MAX_IN_FLIGHT_REQUESTS = "max_in_flight_requests_per_connection" | |||
KAFKA_CLIENT_ID = "client_id" | |||
KAFKA_GROUP_ID = "group_id" | |||
KAFKA_AUTO_OFFSET_RESET = "auto_offset_reset" | |||
KAFKA_ENABLE_AUTO_COMMIT = "enable_auto_commit" | |||
KAFKA_MAX_POLL_RECORDS = "max_poll_records" | |||
REQUEST_ID = "request_id"
FEEDBACK = "feedback" | |||
RECORDING = "recording" | |||
FBQUEUE = "fbQueue" | |||
CONTEXT = "context" | |||
MODEL = 'model' | |||
MSG = "msg" | |||
GPU_IDS = "gpu_ids" | |||
ANALYSE_TYPE = "analyse_type" | |||
COMMAND = "command" | |||
START = "start" | |||
STOP = "stop" | |||
SERVICE = "service" | |||
FRAME_SCORE = "frame_score" | |||
PULL_URL = "pull_url" | |||
PUSH_URL = "push_url" | |||
ORIGINAL_URL = "original_url" | |||
ORIGINAL_TYPE = "original_type" | |||
IMAGE_URLS = "image_urls" | |||
RESULTS_BASE_DIR = "results_base_dir" | |||
MODELS = "models" | |||
CODE = 'code' | |||
CATEGORIES = "categories" | |||
ID = 'id' | |||
CONFIG = "config" | |||
VIDEO = "video" | |||
FILE_PATH = "file_path" | |||
KAFKA = "kafka" | |||
TOPIC = "topic" | |||
DSP_ALG_ONLINE_TASKS_TOPIC = "dsp-alg-online-tasks-topic" | |||
DSP_ALG_OFFLINE_TASKS_TOPIC = "dsp-alg-offline-tasks-topic" | |||
DSP_ALG_IMAGE_TASKS_TOPI = "dsp-alg-image-tasks-topic" | |||
DSP_RECORDING_TASKS_TOPI = "dsp-recording-task-topic" | |||
DSP_ALG_RESULTS_TOPIC = "dsp-alg-results-topic" | |||
DSP_RECORDING_RESULTS_TOPIC = "dsp-recording-result-topic" | |||
DSP = "dsp" | |||
ACTIVE = "active" | |||
PRODUCER = "producer" | |||
CONSUMER = "consumer" | |||
BOOTSTRAP_SERVERS = "bootstrap_servers" | |||
ALIYUN = "aliyun" | |||
ACCESS_KEY = "access_key" | |||
ACCESS_SECRET = "access_secret" | |||
OSS = "oss" | |||
ENDPOINT = "endpoint" | |||
BUCKET = "bucket" | |||
CONNECT_TIMEOUT = "connect_timeout" | |||
VOD = "vod" | |||
ECSREGIONID = "ecsRegionId" | |||
CATEID = "CateId" | |||
GPU = "gpu" | |||
ORDER = "order" | |||
LIMIT = "limit" | |||
MAXLOAD = "maxLoad" | |||
MAXMEMORY = "maxMemory" | |||
INCLUDENAN = "includeNan" | |||
EXCLUDEID = "excludeID" | |||
EXCLUDEUUID = "excludeUUID" | |||
BAIDU = "baidu" | |||
VEHICLE = "vehicle" | |||
APP_ID = "APP_ID" | |||
API_KEY = "API_KEY" | |||
SECRET_KEY = "SECRET_KEY" | |||
PERSON = "person" | |||
ORC = "orc" | |||
LOG = 'log' | |||
IS_VIDEO = "is_video" | |||
IS_IMAGE = "is_image" | |||
# Validation schema definition
SCHEMA = { | |||
"request_id": { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{1,36}$' | |||
}, | |||
COMMAND: { | |||
'type': 'string', | |||
'required': True, | |||
'allowed': [START, STOP] | |||
}, | |||
PULL_URL: { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
PUSH_URL: { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
ORIGINAL_URL: { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
ORIGINAL_TYPE: { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
"logo_url": { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'maxlength': 255 | |||
}, | |||
IMAGE_URLS: { | |||
'type': 'list', | |||
'required': False, | |||
'schema': { | |||
'type': 'string', | |||
'empty': False, | |||
'maxlength': 5000 | |||
} | |||
}, | |||
RESULTS_BASE_DIR: { | |||
'type': 'string', | |||
'required': False, | |||
'nullable': True, | |||
'regex': r'^[a-zA-Z0-9]{0,36}$' | |||
}, | |||
MODELS: { | |||
'type': 'list', | |||
'required': False, | |||
'nullable': True, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
CODE: { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': CATEGORIES, | |||
'regex': r'^[a-zA-Z0-9]{1,255}$' | |||
}, | |||
IS_VIDEO: { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': CODE, | |||
'allowed': ["0", "1"] | |||
}, | |||
IS_IMAGE: { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'dependencies': CODE, | |||
'allowed': ["0", "1"] | |||
}, | |||
CATEGORIES: { | |||
'type': 'list', | |||
'required': True, | |||
'dependencies': CODE, | |||
'schema': { | |||
'type': 'dict', | |||
'required': True, | |||
'schema': { | |||
ID: { | |||
'type': 'string', | |||
'required': True, | |||
'empty': False, | |||
'regex': r'^[a-zA-Z0-9]{0,255}$'}, | |||
CONFIG: { | |||
'type': 'dict', | |||
'required': False, | |||
'dependencies': ID, | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
} | |||
def get_file_path(context): | |||
return context[VIDEO][FILE_PATH] | |||
def get_video_add_water(context): | |||
return context[VIDEO]["video_add_water"] | |||
def get_online_tasks_topic(context): | |||
return context["kafka"]["topic"]["dsp-alg-online-tasks-topic"] | |||
def get_offline_tasks_topic(context): | |||
return context[KAFKA][TOPIC][DSP_ALG_OFFLINE_TASKS_TOPIC] | |||
def get_image_tasks_topic(context): | |||
return context[KAFKA][TOPIC][DSP_ALG_IMAGE_TASKS_TOPI] | |||
def get_recording_tasks_topic(context): | |||
return context[KAFKA][TOPIC][DSP_RECORDING_TASKS_TOPI] | |||
def get_kafka_producer_config(context): | |||
return context[KAFKA][context[DSP][ACTIVE]][PRODUCER] | |||
def get_kafka_consumer_config(context): | |||
return context[KAFKA][context[DSP][ACTIVE]][CONSUMER] | |||
def get_kafka_bootstrap_servers(context): | |||
return context[KAFKA][context[DSP][ACTIVE]][BOOTSTRAP_SERVERS] | |||
def get_kafka_results_topic(context): | |||
return context[KAFKA][TOPIC][DSP_ALG_RESULTS_TOPIC] | |||
def get_kafka_recording_result_topic(context): | |||
return context[KAFKA][TOPIC][DSP_RECORDING_RESULTS_TOPIC] | |||
def get_aliyun_access_key(context): | |||
return context[ALIYUN][ACCESS_KEY] | |||
def get_aliyun_access_secret(context): | |||
return context[ALIYUN][ACCESS_SECRET] | |||
def get_aliyun_oss_endpoint(context): | |||
return context[ALIYUN][OSS][ENDPOINT] | |||
def get_aliyun_oss_bucket(context): | |||
return context[ALIYUN][OSS][BUCKET] | |||
def get_aliyun_oss_connect_timeout(context): | |||
return context[ALIYUN][OSS][CONNECT_TIMEOUT] | |||
def get_aliyun_vod_ecsRegionId(context): | |||
return context[ALIYUN][VOD][ECSREGIONID] | |||
def get_aliyun_vod_cateId(context): | |||
return context[ALIYUN][VOD][context[DSP][ACTIVE]][CATEID] | |||
def get_gpu_order(context): | |||
return context[GPU][ORDER] | |||
def get_gpu_limit(context): | |||
return context[GPU][LIMIT] | |||
def get_gpu_maxLoad(context): | |||
return context[GPU][MAXLOAD] | |||
def get_gpu_maxMemory(context): | |||
return context[GPU][MAXMEMORY] | |||
def get_gpu_includeNan(context): | |||
return context[GPU][INCLUDENAN] | |||
def get_gpu_excludeID(context): | |||
return context[GPU][EXCLUDEID] | |||
def get_gpu_excludeUUID(context): | |||
return context[GPU][EXCLUDEUUID] | |||
def get_baidu_vehicle_APP_ID(context): | |||
return context[BAIDU][VEHICLE][APP_ID] | |||
def get_baidu_vehicle_API_KEY(context): | |||
return context[BAIDU][VEHICLE][API_KEY] | |||
def get_baidu_vehicle_SECRET_KEY(context): | |||
return context[BAIDU][VEHICLE][SECRET_KEY] | |||
def get_baidu_person_APP_ID(context): | |||
return context[BAIDU][PERSON][APP_ID] | |||
def get_baidu_person_API_KEY(context): | |||
return context[BAIDU][PERSON][API_KEY] | |||
def get_baidu_person_SECRET_KEY(context): | |||
return context[BAIDU][PERSON][SECRET_KEY] | |||
def get_baidu_ocr_APP_ID(context): | |||
return context[BAIDU][ORC][APP_ID] | |||
def get_baidu_ocr_API_KEY(context): | |||
return context[BAIDU][ORC][API_KEY] | |||
def get_baidu_ocr_SECRET_KEY(context): | |||
return context[BAIDU][ORC][SECRET_KEY] | |||
def get_log_base_path(context): | |||
return context[LOG]["base_path"] | |||
def get_log_enable_file(context): | |||
return context[LOG]["enable_file_log"] | |||
def get_log_log_name(context): | |||
return context[LOG]["log_name"] | |||
def get_log_rotation(context): | |||
return context[LOG]["rotation"] | |||
def get_log_retention(context): | |||
return context[LOG]["retention"] | |||
def get_log_log_fmt(context): | |||
return context[LOG]["log_fmt"] | |||
def get_log_level(context): | |||
return context[LOG]["level"] | |||
def get_log_enqueue(context): | |||
return context[LOG]["enqueue"] | |||
def get_log_encoding(context): | |||
return context[LOG]["encoding"] | |||
def get_log_enable_stderr(context): | |||
return context[LOG]["enable_stderr"] | |||
CV2_PULL_STREAM_TIMEOUT = "cv2_pull_stream_timeout" | |||
CV2_READ_STREAM_TIMEOUT = "cv2_read_stream_timeout" | |||
def get_pull_stream_timeout(context): | |||
return int(context[SERVICE][CV2_PULL_STREAM_TIMEOUT]) | |||
def get_read_stream_timeout(context): | |||
return int(context[SERVICE][CV2_READ_STREAM_TIMEOUT]) | |||
def get_service_timeout(context): | |||
return int(context[SERVICE]["timeout"]) | |||
FILTER = "filter" | |||
def get_similarity(context): | |||
return context[SERVICE][FILTER]["similarity"] | |||
def get_picture_similarity(context): | |||
return context[SERVICE][FILTER]["picture_similarity"] | |||
def get_frame_step(context): | |||
return int(context[SERVICE][FILTER]["frame_step"]) | |||
# Service config path
service_yml_path = 'config/service/dsp_%s_service.yml'
# Kafka config path
kafka_yml_path = 'config/kafka/dsp_%s_kafka.yml'
# Aliyun config path
aliyun_yml_path = "config/aliyun/dsp_%s_aliyun.yml"
# Baidu config path
baidu_yml_path = 'config/baidu/dsp_%s_baidu.yml'
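# Illustrative usage sketch: the %s placeholders in the config paths above are
# presumably filled with the active environment name (e.g. "dev", "test", "prod") and
# the resulting yml files merged into the context dict read by the get_* helpers.
# The load_context helper and the PyYAML loader are assumptions for illustration.
import yaml

def load_context(env):
    context = {}
    for path in (service_yml_path, kafka_yml_path, aliyun_yml_path, baidu_yml_path):
        with open(path % env, encoding="utf-8") as f:
            context.update(yaml.safe_load(f) or {})
    return context

# e.g. get_kafka_bootstrap_servers(load_context("dev"))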
@@ -4,24 +4,17 @@ from loguru import logger | |||
class Common(Thread): | |||
__slots__ = [ | |||
'__context', | |||
'__func', | |||
'__param1', | |||
'__param2', | |||
'__result', | |||
] | |||
__slots__ = ('__func', '__param1', '__param2', '__result') | |||
def __init__(self, context, func, param1, param2): | |||
def __init__(self, func, param1, param2): | |||
super(Common, self).__init__() | |||
self.__context = context | |||
self.__func = func | |||
self.__param1 = param1 | |||
self.__param2 = param2 | |||
self.__result = None | |||
def get_result(self): | |||
self.join(60 * 60 * 12) | |||
self.join() | |||
return self.__result | |||
def run(self): |
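# Illustrative usage sketch: with the slimmed-down constructor, Common just runs
# func(param1, param2) on a background thread and exposes the return value through
# get_result(), which now joins without the old 12-hour cap. The run() body is
# assumed to store the call's return value; the add() function is made up.
def add(a, b):
    return a + b

worker = Common(add, 1, 2)
worker.start()
print(worker.get_result())  # 3, once the thread has finished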
@@ -13,15 +13,12 @@ from util.KafkaUtils import CustomerKafkaProducer | |||
class FeedbackThread(Thread): | |||
__slots__ = [ | |||
'__fbQueue', | |||
'__context' | |||
] | |||
__slots__ = ('__fbQueue', '__kafka_config') | |||
def __init__(self, fbQueue, context): | |||
def __init__(self, fbQueue, kafka_config): | |||
super().__init__() | |||
self.__fbQueue = fbQueue | |||
self.__context = context | |||
self.__kafka_config = kafka_config | |||
''' | |||
Blocking fetch of feedback messages
@@ -32,24 +29,30 @@ class FeedbackThread(Thread): | |||
def run(self): | |||
logger.info("启动问题反馈线程") | |||
kafkaProducer = CustomerKafkaProducer(self.__context) | |||
dsp_alg_results_topic = self.__context["kafka"]["topic"]["dsp-alg-results-topic"] | |||
dsp_recording_result_topic = self.__context["kafka"]["topic"]["dsp-recording-result-topic"] | |||
kafkaProducer = CustomerKafkaProducer(self.__kafka_config) | |||
dsp_alg_results_topic = self.__kafka_config["topic"]["dsp-alg-results-topic"] | |||
dsp_recording_result_topic = self.__kafka_config["topic"]["dsp-recording-result-topic"] | |||
dsp_push_stream_result_topic = self.__kafka_config["topic"]["dsp-push-stream-result-topic"] | |||
while True: | |||
logger.info("问题反馈发送消息循环") | |||
feedback = None | |||
recording = None | |||
pull_stream = None | |||
try: | |||
fb = self.getFeedback() | |||
if fb is not None and len(fb) > 0: | |||
feedback = fb.get("feedback") | |||
recording = fb.get("recording") | |||
pull_stream = fb.get("pull_stream") | |||
if feedback is not None and len(feedback) > 0: | |||
kafkaProducer.sender(dsp_alg_results_topic, feedback["request_id"], feedback, 1) | |||
feedback = None | |||
if recording is not None and len(recording) > 0: | |||
kafkaProducer.sender(dsp_recording_result_topic, recording["request_id"], recording, 1) | |||
recording = None | |||
if pull_stream is not None and len(pull_stream) > 0: | |||
kafkaProducer.sender(dsp_push_stream_result_topic, pull_stream["request_id"], pull_stream, 1) | |||
pull_stream = None | |||
else: | |||
time.sleep(1) | |||
except Exception: | |||
@@ -57,6 +60,8 @@ class FeedbackThread(Thread): | |||
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), feedback.get("request_id")) | |||
elif recording and recording.get("request_id"): | |||
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), recording.get("request_id")) | |||
elif pull_stream and pull_stream.get("request_id"): | |||
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), pull_stream.get("request_id")) | |||
else: | |||
logger.error("问题反馈异常:{}", format_exc()) | |||
logger.info("问题反馈线程执行完成") |
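# Illustrative usage sketch: producers hand FeedbackThread a dict keyed by message
# kind; the thread routes "feedback", "recording" and "pull_stream" payloads to their
# Kafka topics, using each payload's request_id as the record key. The queue handle
# and any payload fields beyond request_id are assumptions for illustration.
fbQueue.put({"feedback": {"request_id": "abc123"}})      # -> dsp-alg-results-topic
fbQueue.put({"recording": {"request_id": "abc123"}})     # -> dsp-recording-result-topic
fbQueue.put({"pull_stream": {"request_id": "abc123"}})   # -> dsp-push-stream-result-topic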
@@ -1,202 +1,276 @@ | |||
from concurrent.futures import ThreadPoolExecutor, as_completed | |||
# -*- coding: utf-8 -*- | |||
from concurrent.futures import ThreadPoolExecutor | |||
from threading import Thread | |||
from time import sleep | |||
from time import sleep, time | |||
from traceback import format_exc | |||
from loguru import logger | |||
import cv2 | |||
from entity.FeedBack import message_feedback | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util.AliyunSdk import AliyunOssSdk | |||
from util import TimeUtils, ImageUtils | |||
from entity import FeedBack | |||
from util import TimeUtils | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from util.PlotsUtils import draw_painting_joint | |||
from util.QueUtil import put_queue, get_no_block_queue, clear_queue | |||
class FileUpload(Thread): | |||
__slots__ = ('_fbQueue', '_context', '_imageQueue', '_analyse_type', '_msg', '_base_dir') | |||
__slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg') | |||
def __init__(self, fbQueue, context, msg, imageQueue, analyse_type, base_dir): | |||
def __init__(self, *args): | |||
super().__init__() | |||
self._fbQueue = fbQueue | |||
self._context = context | |||
self._imageQueue = imageQueue | |||
self._analyse_type = analyse_type | |||
self._msg = msg | |||
self._base_dir = base_dir | |||
''' | |||
Image upload thread
''' | |||
self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args | |||
class ImageFileUpload(FileUpload): | |||
__slots__ = () | |||
@staticmethod | |||
def handle_image(frame_msg, frame_step): | |||
# (high_score_image["code"], all_frames, draw_config["font_config"]) | |||
# high_score_image["code"][code][cls] = (frame, frame_index_list[i], cls_list) | |||
det_xywh, frame, current_frame, all_frames, font_config = frame_msg | |||
''' | |||
det_xywh:{ | |||
'code':{ | |||
1: [[detect_targets_code, box, score, label_array, color]] | |||
} | |||
} | |||
Model code: modeCode
Detected target code: detectTargetCode
''' | |||
model_info = [] | |||
# Parse the detections grouped by model code
for code, det_list in det_xywh.items(): | |||
if len(det_list) > 0: | |||
for cls, target_list in det_list.items(): | |||
if len(target_list) > 0: | |||
aFrame = frame.copy() | |||
for target in target_list: | |||
draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config) | |||
model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame}) | |||
if len(model_info) > 0: | |||
image_result = { | |||
"or_frame": frame, | |||
"model_info": model_info, | |||
"current_frame": current_frame, | |||
"last_frame": current_frame + frame_step | |||
} | |||
return image_result | |||
return None | |||
def run(self): | |||
requestId = self._msg.get("request_id") | |||
logger.info("启动图片上传线程, requestId:{}", requestId) | |||
# Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(self._base_dir, requestId) | |||
aliyunOssSdk.get_oss_bucket() | |||
high_score_image = {} | |||
similarity = self._context["service"]["filter"]["similarity"] | |||
picture_similarity = bool(self._context["service"]["filter"]["picture_similarity"]) | |||
frame_step = int(self._context["service"]["filter"]["frame_step"]) | |||
image_queue = self._imageQueue | |||
analyse_type = self._analyse_type | |||
results_base_dir = self._msg.get("results_base_dir") | |||
fb_queue = self._fbQueue | |||
with ThreadPoolExecutor(max_workers=5) as t: | |||
try: | |||
msg, context = self._msg, self._context | |||
service = context["service"] | |||
base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"] | |||
logger.info("启动图片上传线程, requestId: {}", request_id) | |||
image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type | |||
service_timeout = int(service["timeout"]) | |||
frame_step = int(service["filter"]["frame_step"]) + 120 | |||
try: | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
# Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id) | |||
start_time = time() | |||
while True: | |||
try: | |||
if time() - start_time > service_timeout: | |||
logger.error("图片上线线程运行超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) | |||
# Fetch a message from the queue
image_msg = image_queue.get(timeout=43200) | |||
if image_msg is not None and len(image_msg) > 0: | |||
image_dict = image_msg.get("image") | |||
command = image_msg.get("command") | |||
if command == "stop": | |||
break | |||
if image_dict is not None and len(image_dict) > 0: | |||
image_result = handle_image(high_score_image, image_dict, picture_similarity, | |||
similarity, frame_step, analyse_type) | |||
del image_dict | |||
if image_result: | |||
# Encode the image frame
image_msg = get_no_block_queue(image_queue) | |||
if image_msg is not None: | |||
if image_msg[0] == 2: | |||
if 'stop' == image_msg[1]: | |||
logger.info("开始停止图片上传线程, requestId:{}", request_id) | |||
break | |||
if image_msg[0] == 1: | |||
image_result = self.handle_image(image_msg[1], frame_step) | |||
if image_result is not None: | |||
task = [] | |||
or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame")) | |||
or_image_name = build_image_name(str(image_result.get("current_frame")), | |||
str(image_result.get("last_frame")), | |||
image_result.get("mode_service"), | |||
"OR", "O", results_base_dir, requestId) | |||
or_future = t.submit(aliyunOssSdk.sync_upload_file, or_image_name, | |||
or_image.tobytes()) | |||
or_image = cv2.imencode(".jpg", image_result["or_frame"])[1] | |||
or_image_name = build_image_name(image_result["current_frame"], | |||
image_result["last_frame"], | |||
analyse_type, | |||
"OR", "0", "0", request_id) | |||
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes()) | |||
task.append(or_future) | |||
model_info_list = image_result.get("model_info") | |||
model_info_list = image_result["model_info"] | |||
msg_list = [] | |||
for model_info in model_info_list: | |||
ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame")) | |||
ai_image_name = build_image_name(str(image_result.get("current_frame")), | |||
str(image_result.get("last_frame")), | |||
image_result.get("mode_service"), | |||
"AI", model_info.get("detectTargetCode"), | |||
results_base_dir, requestId) | |||
ai_future = t.submit(aliyunOssSdk.sync_upload_file, ai_image_name, | |||
ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1] | |||
ai_image_name = build_image_name(image_result["current_frame"], | |||
image_result["last_frame"], | |||
analyse_type, | |||
"AI", | |||
model_info["modelCode"], | |||
model_info["detectTargetCode"], | |||
request_id) | |||
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, | |||
ai_image.tobytes()) | |||
task.append(ai_future) | |||
msg_list.append( | |||
{"feedback": FeedBack.message_feedback(requestId, | |||
AnalysisStatus.RUNNING.value, | |||
analyse_type, "", "", | |||
image_result.get("progress"), | |||
or_image_name, | |||
ai_image_name, | |||
model_info.get('modelCode'), | |||
model_info.get('detectTargetCode'), | |||
TimeUtils.now_date_to_str())}) | |||
for thread_result in as_completed(task): | |||
thread_result.result() | |||
msg_list.append(message_feedback(request_id, | |||
AnalysisStatus.RUNNING.value, | |||
analyse_type, "", "", "", | |||
or_image_name, | |||
ai_image_name, | |||
model_info['modelCode'], | |||
model_info['detectTargetCode'])) | |||
for tk in task: | |||
tk.result() | |||
for msg in msg_list: | |||
sendResult(fb_queue, msg, requestId) | |||
put_queue(fb_queue, msg, timeout=2, is_ex=False) | |||
del task, msg_list | |||
else: | |||
sleep(1) | |||
del image_msg | |||
except Exception: | |||
logger.error("图片上传异常:{}, requestId:{}", format_exc(), requestId) | |||
finally: | |||
high_score_image.clear() | |||
logger.info("停止图片上传线程, requestId:{}", requestId) | |||
def sendResult(fbQueue, result, requestId): | |||
try: | |||
fbQueue.put(result, timeout=10) | |||
except Exception: | |||
logger.error("添加反馈到队列超时异常:{}, requestId:{}", format_exc(), requestId) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id) | |||
finally: | |||
logger.info("停止图片上传线程0, requestId:{}", request_id) | |||
clear_queue(image_queue) | |||
logger.info("停止图片上传线程1, requestId:{}", request_id) | |||
def build_image_name(current_frame, last_frame, mode_type, image_type, target, results_base_dir, requestId): | |||
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}" \ | |||
"-{target}-{requestId}_{image_type}.jpg" | |||
def build_image_name(*args): | |||
""" | |||
{requestId}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{modeCode}-{target}_{image_type}.jpg
""" | |||
current_frame, last_frame, mode_type, image_type, modeCode, target, request_id = args | |||
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF) | |||
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S") | |||
image_name = image_format.format( | |||
base_dir=results_base_dir, | |||
time_now=time_now, | |||
current_frame=current_frame, | |||
last_frame=last_frame, | |||
random_num=random_num, | |||
mode_type=mode_type, | |||
target=target, | |||
requestId=requestId, | |||
image_type=image_type) | |||
return image_name | |||
def handle_image(high_score_image, frame_all, picture_similarity, similarity, frame_step, analyse_type): | |||
flag = True | |||
if picture_similarity and len(high_score_image) > 0: | |||
hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
hash2 = ImageUtils.dHash(frame_all[0][1]) | |||
dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
similarity_1 = 1 - dist * 1.0 / 64 | |||
if similarity_1 >= similarity: | |||
flag = False | |||
if len(high_score_image) > 0: | |||
diff_frame_num = frame_all[0][2] - high_score_image.get("current_frame") | |||
if diff_frame_num < frame_step: | |||
flag = False | |||
# if diff_frame_num >= frame_step: | |||
# hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
# hash2 = ImageUtils.dHash(frame_all[0][1]) | |||
# dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
# similarity_1 = 1 - dist * 1.0 / 64 | |||
# if similarity_1 != 1: | |||
# flag = True | |||
det_result = frame_all[1] | |||
model_info = [] | |||
if flag and det_result is not None and len(det_result) > 0: | |||
''' | |||
det_xywh:{ | |||
'code':{ | |||
1: [[detect_targets_code, box, score, label_array, color]] | |||
return "%s/%s_frame-%s-%s_type_%s-%s-%s-%s_%s.jpg" % (request_id, time_now, current_frame, last_frame, | |||
random_num, mode_type, modeCode, target, image_type) | |||
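# Illustrative usage sketch: example of the object key produced by build_image_name().
# The timestamp and random_num parts come from TimeUtils at call time, so the value in
# the comment is only indicative; all argument values here are made up.
key = build_image_name(120, 270, "online", "AI", "001", "3", "abc123")
# e.g. "abc123/2023-06-01-12-00-00_frame-120-270_type_20230601120000123456-online-001-3_AI.jpg"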
class ImageTypeImageFileUpload(Thread): | |||
__slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg') | |||
def __init__(self, *args): | |||
super().__init__() | |||
self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args | |||
@staticmethod | |||
def handle_image(det_xywh, copy_frame, font_config): | |||
""" | |||
det_xywh:{ | |||
'code':{ | |||
1: [[detect_targets_code, box, score, label_array, color]] | |||
} | |||
} | |||
} | |||
模型编号:modeCode | |||
检测目标:detectTargetCode | |||
''' | |||
Model code: modeCode
Detected target code: detectTargetCode
""" | |||
model_info = [] | |||
# Parse the detections grouped by model code
for modelCode in list(det_result.keys()): | |||
# 模型编号下面的检测目标对象 | |||
det_info = det_result.get(modelCode) | |||
for code, det_info in det_xywh.items(): | |||
if det_info is not None and len(det_info) > 0: | |||
for detectTargetCode in list(det_info.keys()): | |||
target_list = det_info.get(detectTargetCode) | |||
for cls, target_list in det_info.items(): | |||
if target_list is not None and len(target_list) > 0: | |||
# orFrame = loads(dumps(frame_all.get("frame"))) | |||
orFrame = frame_all[0][1].copy() | |||
aiFrame = copy_frame.copy() | |||
for target in target_list: | |||
draw_painting_joint(target[1], orFrame, target[3], target[2], target[4], "leftTop") | |||
draw_painting_joint(target[1], aiFrame, target[3], target[2], target[4], font_config) | |||
model_info.append({ | |||
"modelCode": str(modelCode), | |||
"detectTargetCode": str(detectTargetCode), | |||
"frame": orFrame | |||
"modelCode": str(code), | |||
"detectTargetCode": str(cls), | |||
"frame": aiFrame | |||
}) | |||
if len(model_info) > 0: | |||
high_score_image["or_frame"] = frame_all[0][1] | |||
high_score_image["current_frame"] = frame_all[0][2] | |||
image_result = { | |||
"or_frame": frame_all[0][1], | |||
"or_frame": copy_frame, | |||
"model_info": model_info, | |||
"current_frame": frame_all[0][2], | |||
"last_frame": frame_all[0][2] + frame_step, | |||
"progress": "", | |||
"mode_service": analyse_type, | |||
"current_frame": 0, | |||
"last_frame": 0 | |||
} | |||
return image_result | |||
return None | |||
return None | |||
def run(self): | |||
context, msg = self._context, self._msg | |||
base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"] | |||
logger.info("启动图片识别图片上传线程, requestId: {}", request_id) | |||
image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type | |||
service_timeout = int(context["service"]["timeout"]) | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
try: | |||
# Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id) | |||
start_time = time() | |||
while True: | |||
try: | |||
if time() - start_time > service_timeout: | |||
logger.error("拉流进程运行超时, requestId: {}", request_id) | |||
break | |||
# Fetch a message from the queue
image_msg = image_queue.get() | |||
if image_msg is not None: | |||
if image_msg[0] == 2: | |||
if 'stop' == image_msg[1]: | |||
logger.info("开始停止图片上传线程, requestId:{}", request_id) | |||
break | |||
if image_msg[0] == 1: | |||
task, msg_list = [], [] | |||
det_xywh, image_url, copy_frame, font_config, result = image_msg[1] | |||
if det_xywh is None: | |||
ai_image_name = build_image_name(0, 0, analyse_type, "AI", result.get("modelCode"), | |||
result.get("type"), request_id) | |||
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, copy_frame) | |||
task.append(ai_future) | |||
msg_list.append(message_feedback(request_id, | |||
AnalysisStatus.RUNNING.value, | |||
analyse_type, "", "", "", | |||
image_url, | |||
ai_image_name, | |||
result.get("modelCode"), | |||
result.get("type"), | |||
analyse_results=result)) | |||
else: | |||
image_result = self.handle_image(det_xywh, copy_frame, font_config) | |||
if image_result: | |||
# Encode the image frame
if image_url is None: | |||
or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame")) | |||
image_url = build_image_name(image_result.get("current_frame"), | |||
image_result.get("last_frame"), | |||
analyse_type, | |||
"OR", "0", "O", request_id) | |||
or_future = t.submit(aliyunOssSdk.put_object, image_url, | |||
or_image.tobytes()) | |||
task.append(or_future) | |||
model_info_list = image_result.get("model_info") | |||
for model_info in model_info_list: | |||
ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame")) | |||
ai_image_name = build_image_name(image_result.get("current_frame"), | |||
image_result.get("last_frame"), | |||
analyse_type, | |||
"AI", | |||
model_info.get("modelCode"), | |||
model_info.get("detectTargetCode"), | |||
request_id) | |||
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, | |||
ai_image.tobytes()) | |||
task.append(ai_future) | |||
msg_list.append(message_feedback(request_id, | |||
AnalysisStatus.RUNNING.value, | |||
analyse_type, "", "", "", | |||
image_url, | |||
ai_image_name, | |||
model_info.get('modelCode'), | |||
model_info.get('detectTargetCode'), | |||
analyse_results=result)) | |||
for thread_result in task: | |||
thread_result.result() | |||
for msg in msg_list: | |||
put_queue(fb_queue, msg, timeout=2, is_ex=False) | |||
else: | |||
sleep(1) | |||
except Exception as e: | |||
logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id) | |||
finally: | |||
clear_queue(image_queue) | |||
logger.info("停止图片识别图片上传线程, requestId:{}", request_id) |
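# Illustrative usage sketch: both upload threads consume tagged tuples from the image
# queue, where (1, payload) carries work and (2, 'stop') shuts the thread down. For
# ImageFileUpload the payload unpacks as (det_xywh, frame, current_frame, all_frames,
# font_config), matching handle_image(); the queue and the placeholder values below
# are assumptions for illustration.
from queue import Queue
from util.QueUtil import put_queue

image_queue = Queue()
payload = ({}, None, 0, 0, "leftTop")  # det_xywh, frame, current_frame, all_frames, font_config
put_queue(image_queue, (1, payload), timeout=2, is_ex=False)
put_queue(image_queue, (2, 'stop'), timeout=2, is_ex=False)  # ask the thread to exit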
@@ -1,84 +1,57 @@ | |||
# -*- coding: utf-8 -*- | |||
from threading import Thread | |||
import time | |||
from time import sleep, time | |||
from traceback import format_exc | |||
from loguru import logger | |||
from common.Constant import init_progess | |||
from common.YmlConstant import FEEDBACK | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util import TimeUtils | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from entity.FeedBack import message_feedback | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util.QueUtil import get_no_block_queue, put_queue, clear_queue | |||
class Heartbeat(Thread): | |||
__slots__ = ('__fbQueue', '__hbQueue', '__request_id', '__analyse_type', '__progress') | |||
__slots__ = ('__fb_queue', '__hb_queue', '__request_id', '__analyse_type', "_context") | |||
def __init__(self, fbQueue, hbQueue, request_id, analyse_type): | |||
def __init__(self, *args): | |||
super().__init__() | |||
self.__fbQueue = fbQueue | |||
self.__hbQueue = hbQueue | |||
self.__request_id = request_id | |||
self.__analyse_type = analyse_type | |||
self.__progress = init_progess | |||
def getHbQueue(self): | |||
eBody = None | |||
try: | |||
eBody = self.__hbQueue.get(block=False) | |||
except Exception as e: | |||
pass | |||
return eBody | |||
# Push the execution result
def sendResult(self, result): | |||
try: | |||
self.__fbQueue.put(result, timeout=10) | |||
except Exception: | |||
logger.error("添加反馈到队列超时异常:{}, requestId:{}", format_exc(), self.__request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
def sendHbQueue(self, result): | |||
try: | |||
self.__hbQueue.put(result, timeout=10) | |||
except Exception: | |||
logger.error("添加心跳到队列超时异常:{}, requestId:{}", format_exc(), self.__request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
def sendhbMessage(self, analysisStatus): | |||
self.sendResult({FEEDBACK: message_feedback(self.__request_id, | |||
analysisStatus, | |||
self.__analyse_type, | |||
progress=self.__progress, | |||
analyse_time=TimeUtils.now_date_to_str())}) | |||
self.__fb_queue, self.__hb_queue, self.__request_id, self.__analyse_type, self._context = args | |||
def run(self): | |||
request_id, hb_queue, progress = self.__request_id, self.__hb_queue, init_progess | |||
analyse_type, fb_queue = self.__analyse_type, self.__fb_queue | |||
service_timeout = int(self._context["service"]["timeout"]) + 120 | |||
try: | |||
logger.info("开始启动心跳线程!requestId:{}", self.__request_id) | |||
logger.info("开始启动心跳线程!requestId:{}", request_id) | |||
start_time = time() | |||
hb_init_num = 0 | |||
start_time = time.time() | |||
while True: | |||
if time.time() - start_time > 43200: | |||
logger.info("心跳线程运行超时!!!!requestId:{}", self.__request_id) | |||
break | |||
time.sleep(3) | |||
hb_msg = self.getHbQueue() | |||
if hb_msg is not None and len(hb_msg) > 0: | |||
if time() - start_time > service_timeout: | |||
logger.error("心跳运行超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) | |||
sleep(3) | |||
hb_msg = get_no_block_queue(hb_queue) | |||
if hb_msg is not None: | |||
command = hb_msg.get("command") | |||
hb_value = hb_msg.get("hb_value") | |||
if 'stop' == command: | |||
logger.info("开始终止心跳线程, requestId:{}", self.__request_id) | |||
logger.info("开始终止心跳线程, requestId:{}", request_id) | |||
break | |||
if hb_value is not None: | |||
self.__progress = hb_value | |||
progress = hb_value | |||
if hb_init_num % 30 == 0: | |||
self.sendhbMessage(AnalysisStatus.RUNNING.value) | |||
hb_init_num = 0 | |||
put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.RUNNING.value, analyse_type, | |||
progress=progress), timeout=3, is_ex=True) | |||
hb_init_num += 3 | |||
del hb_msg | |||
except Exception: | |||
logger.error("心跳线程异常:{}, requestId:{}", format_exc(), self.__request_id) | |||
logger.info("心跳线程停止完成!requestId:{}", self.__request_id) | |||
logger.error("心跳线程异常:{}, requestId:{}", format_exc(), request_id) | |||
finally: | |||
clear_queue(hb_queue) | |||
logger.info("心跳线程停止完成!requestId:{}", request_id) |
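# Illustrative usage sketch: the analysis loop drives Heartbeat through hb_queue with
# progress updates shaped as {"hb_value": ...} and a final {"command": "stop"}; roughly
# every 30 seconds (ten 3-second polls) the latest progress is forwarded to fb_queue as
# a RUNNING feedback message. The hb_queue handle below is an assumption.
from util.QueUtil import put_queue

put_queue(hb_queue, {"hb_value": "0.2500"}, timeout=2, is_ex=False)  # report 25% progress
put_queue(hb_queue, {"command": "stop"}, timeout=2, is_ex=False)     # end the heartbeat thread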
@@ -0,0 +1,153 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
from traceback import format_exc | |||
from multiprocessing import Process, Queue | |||
from loguru import logger | |||
from concurrency.Pull2PushStreamThread import PushSteamThread | |||
from enums.StatusEnum import PushStreamStatus, ExecuteStatus | |||
from util.LogUtils import init_log | |||
from enums.ExceptionEnum import ExceptionType | |||
from entity.FeedBack import pull_stream_feedback | |||
from exception.CustomerException import ServiceException | |||
from util.QueUtil import get_no_block_queue, put_queue | |||
class PushStreamProcess(Process): | |||
__slots__ = ('_fb_queue', 'event_queue', '_context', '_msg', '_analysisType') | |||
def __init__(self, *args): | |||
super().__init__() | |||
self._fb_queue, self._context, self._msg, self._analysisType = args | |||
self.event_queue = Queue() | |||
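    # sendEvent lets the parent process push a control event (e.g. {"command": "stop", "videoIds": [...]}) into this push-stream process.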
def sendEvent(self, eBody): | |||
try: | |||
self.event_queue.put(eBody, timeout=2) | |||
except Exception: | |||
logger.error("添加事件到队列超时异常:{}, requestId:{}", format_exc(), self._msg["request_id"]) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
def run(self): | |||
msg, context = self._msg, self._context | |||
requestId, videoUrls = msg["request_id"], msg["video_urls"] | |||
base_dir, env = context['base_dir'], context['env'] | |||
fb_queue = self._fb_queue | |||
task, videoStatus = {}, {} | |||
ex = None | |||
try: | |||
init_log(base_dir, env) | |||
if videoUrls is None or len(videoUrls) == 0: | |||
raise ServiceException(ExceptionType.PUSH_STREAM_URL_IS_NULL.value[0], | |||
ExceptionType.PUSH_STREAM_URL_IS_NULL.value[1]) | |||
if len(videoUrls) > 5: | |||
logger.error("推流数量超过限制, 当前推流数量: {}, requestId:{}", len(videoUrls), requestId) | |||
raise ServiceException(ExceptionType.PULL_STREAM_NUM_LIMIT_EXCEPTION.value[0], | |||
ExceptionType.PULL_STREAM_NUM_LIMIT_EXCEPTION.value[1]) | |||
videoInfo = [{"id": url["id"], "status": PushStreamStatus.WAITING.value[0]} for url in videoUrls if | |||
url.get("id")] | |||
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.WAITING.value[0], "", "", videoInfo)) | |||
for videoUrl in videoUrls: | |||
pushThread = PushSteamThread(videoUrl["pull_url"], videoUrl["push_url"], requestId, videoUrl["id"]) | |||
pushThread.start() | |||
task[videoUrl["id"]] = pushThread | |||
enable_time = time.time() | |||
for video in videoInfo: | |||
videoStatus[video.get("id")] = video.get("status") | |||
count = 0 | |||
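            # Supervision loop: watch each push thread, report per-stream status, and handle stop and timeout conditions.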
while True: | |||
# 整个推流任务超时时间 | |||
if time.time() - enable_time > 43200: | |||
logger.error("任务执行超时, requestId:{}", requestId) | |||
for t in list(task.keys()): | |||
if task[t].is_alive(): | |||
task[t].status = False | |||
task[t].pushStreamUtil.close_push_stream_sp() | |||
task[t].join(120) | |||
videoStatus[t] = PushStreamStatus.TIMEOUT.value[0] | |||
videoInfo_timeout = [{"id": k, "status": v} for k, v in videoStatus.items()] | |||
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1], | |||
videoInfo_timeout)) | |||
break | |||
# 接受停止指令 | |||
event_result = get_no_block_queue(self.event_queue) | |||
if event_result is not None: | |||
command = event_result.get("command") | |||
videoIds = event_result.get("videoIds") | |||
if "stop" == command: | |||
# 如果videoIds是空停止所有任务 | |||
if videoIds is None or len(videoIds) == 0: | |||
logger.info("停止所有执行的推流任务, requestId:{}", requestId) | |||
for t in list(task.keys()): | |||
if task[t].is_alive(): | |||
task[t].status = False | |||
task[t].pushStreamUtil.close_push_stream_sp() | |||
task[t].join(120) | |||
videoStatus[t] = PushStreamStatus.SUCCESS.value[0] | |||
videoInfo_sucess = [{"id": k, "status": v} for k, v in videoStatus.items()] | |||
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.SUCCESS.value[0], "", "", | |||
videoInfo_sucess)) | |||
break | |||
else: | |||
logger.info("停止指定的推流任务, requestId:{}", requestId) | |||
alive_thread = 0 | |||
for t in list(task.keys()): | |||
if task[t].is_alive(): | |||
if t in videoIds: | |||
task[t].status = False | |||
task[t].pushStreamUtil.close_push_stream_sp() | |||
task[t].join(120) | |||
videoStatus[t] = PushStreamStatus.SUCCESS.value[0] | |||
else: | |||
alive_thread += 1 | |||
if alive_thread == 0: | |||
videoInfo_sucess = [{"id": k, "status": v} for k, v in videoStatus.items()] | |||
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.SUCCESS.value[0], "", | |||
"", videoInfo_sucess)) | |||
break | |||
for t in list(task.keys()): | |||
if task[t].status and not task[t].is_alive(): | |||
videoStatus[t] = PushStreamStatus.FAILED.value[0] | |||
logger.error("检测到推流线程异常停止!videoId:{}, requestId:{}", t, requestId) | |||
if task[t].ex: | |||
raise task[t].ex | |||
raise Exception("检测到推流线程异常停止!") | |||
if task[t].is_alive(): | |||
videoStatus[t] = task[t].excute_status | |||
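                # Push a status heartbeat roughly every 10 seconds (the loop sleeps 1s per iteration).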
if count % 10 == 0: | |||
videoInfo_hb = [{"id": k, "status": v} for k, v in videoStatus.items()] | |||
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.RUNNING.value[0], "", "", | |||
videoInfo_hb)) | |||
count = 0 | |||
count += 1 | |||
time.sleep(1) | |||
except ServiceException as s: | |||
ex = s.code, s.msg | |||
logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, requestId) | |||
except Exception: | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
logger.error("服务异常: {}, requestId: {},", format_exc(), requestId) | |||
finally: | |||
if ex: | |||
errorCode, errorMsg = ex | |||
for t in list(task.keys()): | |||
if task[t].is_alive(): | |||
task[t].status = False | |||
task[t].pushStreamUtil.close_push_stream_sp() | |||
task[t].join(120) | |||
videoStatus[t] = PushStreamStatus.FAILED.value[0] | |||
videoInfo_ex = [{"id": k, "status": v} for k, v in videoStatus.items()] | |||
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.FAILED.value[0], errorCode, errorMsg, | |||
videoInfo_ex)) | |||
for t in list(task.keys()): | |||
if task[t].is_alive(): | |||
task[t].status = False | |||
task[t].pushStreamUtil.close_push_stream_sp() | |||
task[t].join(120) | |||
logger.info("推流任务完成, requestId: {}", requestId) |
@@ -0,0 +1,55 @@ | |||
# -*- coding: utf-8 -*- | |||
from threading import Thread | |||
import time | |||
from traceback import format_exc | |||
from loguru import logger | |||
from enums.StatusEnum import PushStreamStatus | |||
from exception.CustomerException import ServiceException | |||
from util.PushStreamUtils import PushStreamUtil | |||
class PushSteamThread(Thread): | |||
__slots__ = ("pushStreamUtil", "requestId", "videoId", "status", "ex") | |||
def __init__(self, pullUrl, pushUrl, requestId, videoId): | |||
super().__init__() | |||
self.pushStreamUtil = PushStreamUtil(pullUrl, pushUrl, requestId) | |||
self.requestId = requestId | |||
self.videoId = videoId | |||
self.status = True | |||
self.excute_status = PushStreamStatus.WAITING.value[0] | |||
self.ex = None | |||
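    # run() keeps restarting the push subprocess until the thread is stopped manually (status=False) or an unrecoverable error is recorded in self.ex.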
def run(self): | |||
logger.info("开始启动推流线程, 视频id: {}, requestId:{}", self.videoId, self.requestId) | |||
while True: | |||
try: | |||
self.pushStreamUtil.start_push_stream() | |||
self.excute_status = PushStreamStatus.RUNNING.value[0] | |||
out, err = self.pushStreamUtil.push_stream_sp.communicate() | |||
# 异常断流 | |||
if self.status: | |||
logger.warning("推流异常,请检测拉流地址和推流地址是否正常!") | |||
if self.pushStreamUtil.push_stream_sp.returncode != 0: | |||
logger.error("推流异常:{}, 视频id: {}, requestId:{}", err.decode(), self.videoId, | |||
self.requestId) | |||
self.excute_status = PushStreamStatus.RETRYING.value[0] | |||
self.pushStreamUtil.close_push_stream_sp() | |||
time.sleep(5) | |||
# 手动断流 | |||
if not self.status: | |||
self.pushStreamUtil.close_push_stream_sp() | |||
break | |||
except ServiceException as s: | |||
logger.error("异常: {}, 视频id: {}, requestId:{}", s.msg, self.videoId, self.requestId) | |||
self.pushStreamUtil.close_push_stream_sp() | |||
self.ex = s | |||
break | |||
except Exception as e: | |||
logger.error("异常:{}, 视频id: {}, requestId:{}", format_exc(), self.videoId, self.requestId) | |||
self.pushStreamUtil.close_push_stream_sp() | |||
self.ex = e | |||
break | |||
logger.info("结束推流线程, 视频id: {}, requestId:{}", self.videoId, self.requestId) |
@@ -1,122 +1,182 @@ | |||
# -*- coding: utf-8 -*- | |||
from queue import Queue | |||
from threading import Thread | |||
from time import time, sleep | |||
from traceback import format_exc | |||
from loguru import logger | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.RecordingStatusEnum import RecordingStatus | |||
from exception.CustomerException import ServiceException | |||
from util.Cv2Utils import check_video_stream, clear_pull_p, build_video_info2, pull_read_video_stream2 | |||
from util.QueUtil import put_queue, get_no_block_queue, clear_queue, put_queue_result | |||
class PullStreamThread(Thread): | |||
    __slots__ = ('_command', '_pull_queue', '_hb_queue', '_fb_queue', '_msg', '_context', '_frame_num')
def __init__(self, *args): | |||
super().__init__() | |||
self._msg, self._context, self._pull_queue, self._hb_queue, self._fb_queue, self._frame_num = args | |||
self._command = Queue() | |||
def sendEvent(self, result): | |||
put_queue(self._command, result, timeout=10, is_ex=False) | |||
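# Recording pull-stream thread: reads frames from the source, buffers them into batches and forwards each batch to the consumer through pull_queue.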
class RecordingPullStreamThread(PullStreamThread): | |||
def run(self): | |||
msg, context, frame_num = self._msg, self._context, self._frame_num | |||
request_id, pull_url = msg["request_id"], msg['pull_url'] | |||
service = context["service"] | |||
pull_stream_timeout = int(service["recording_pull_stream_timeout"]) | |||
read_stream_timeout = int(service["cv2_read_stream_timeout"]) | |||
service_timeout = int(service["timeout"]) | |||
command_queue, pull_queue, fb_queue, hb_queue = self._command, self._pull_queue, self._fb_queue, self._hb_queue | |||
width, height, width_height_3, all_frames, w, h = None, None, None, 0, None, None | |||
read_start_time, pull_p, ex = None, None, None | |||
frame_list, frame_index_list = [], [] | |||
stop_ex = True | |||
pull_stream_start_time = time() | |||
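        # frame_list buffers decoded frames; once frame_num frames are collected the batch is pushed to pull_queue as a type-4 message.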
try: | |||
logger.info("录屏任务, 开启拉流, requestId:{}", self.msg.get("request_id")) | |||
gpu_ids = GPUtils.get_gpu_ids(self.content) | |||
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"), content=self.content, | |||
gpu_ids=gpu_ids, log=logger) | |||
cv2_init_num = 1 | |||
init_pull_num = 1 | |||
start_time = time.time() | |||
start_time_2 = time.time() | |||
concurrent_frame = 1 | |||
cv2tool.get_recording_video_info() | |||
logger.info("录屏拉流线程开始启动, requestId: {}", request_id) | |||
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1 | |||
start_time = time() | |||
while True: | |||
# 检查任务是否超时 | |||
if time() - start_time > service_timeout: | |||
logger.error("录屏拉流超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) | |||
# 最终停止拉流 | |||
event = get_no_block_queue(command_queue) | |||
if event is not None: | |||
# 当接收到停止指令,说明不会再处理视频帧了, 直接退出 | |||
if 'stop' == event.get("command"): | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
logger.info("录屏拉流线程开始停止, requestId: {}", request_id) | |||
break | |||
# 主进程异常,停止子线程 | |||
if 'stop_ex' == event.get("command"): | |||
logger.info("录屏异常拉开始停止拉流线程, requestId: {}", request_id) | |||
stop_ex = False | |||
break | |||
# 如果是离线拉流 | |||
if pull_url.startswith('http'): | |||
if check_video_stream(width, height): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id) | |||
# 当是离线地址重试3次还是拉取不到视频流,关闭拉流管道,返回失败信息 | |||
if cv2_init_num > 3: | |||
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id) | |||
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
width, height, width_height_3, all_frames, w, h = build_video_info2(pull_url, request_id) | |||
if width is not None: | |||
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=2) | |||
else: | |||
if cv2_init_num < 2: | |||
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=2) | |||
continue | |||
# 当离线视频时, 队列满了, 等待1秒后再试 | |||
if pull_queue.full(): | |||
logger.info("pull拉流队列满了: {}, requestId: {}", pull_queue.qsize(), request_id) | |||
sleep(1) | |||
continue | |||
# 如果是实时拉流 | |||
else: | |||
if check_video_stream(width, height): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id) | |||
pull_stream_init_timeout = time() - pull_stream_start_time | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
if pull_stream_init_timeout > pull_stream_timeout: | |||
logger.error("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id) | |||
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
width, height, width_height_3, all_frames, w, h = build_video_info2(pull_url, request_id) | |||
if width is not None: | |||
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=1) | |||
else: | |||
if cv2_init_num < 3: | |||
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=1) | |||
sleep(1) | |||
continue | |||
pull_stream_start_time = time() | |||
cv2_init_num = 1 | |||
frame, pull_p, width, height = pull_read_video_stream2(pull_p, pull_url, width, height, | |||
width_height_3, w, h, request_id) | |||
if frame is None: | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id")) | |||
pull_stream_read_timeout = time.time() - start_time_2 | |||
if pull_stream_read_timeout > self.recording_pull_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
self.msg.get("request_id")) | |||
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
if cv2tool.all_frames is not None and len(cv2tool.all_frames) > 0: | |||
if concurrent_frame < cv2tool.all_frames - 100: | |||
logger.info("流异常结束:requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "3"}) | |||
break | |||
logger.info("拉流线程结束, requestId: {}", self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "2"}) | |||
if pull_url.startswith('http'): | |||
clear_pull_p(pull_p, request_id) | |||
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id) | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
if concurrent_frame < all_frames - 100: | |||
logger.info("离线拉流异常结束:requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
logger.info("离线拉流线程结束, requestId: {}", request_id) | |||
break | |||
else: | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id) | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
if read_start_time is None: | |||
read_start_time = time() | |||
pull_stream_read_timeout = time() - read_start_time | |||
if pull_stream_read_timeout > read_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
request_id) | |||
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
init_pull_num += 1 | |||
continue | |||
init_pull_num = 1 | |||
read_start_time = None | |||
if pull_queue.full(): | |||
sleep(1) | |||
logger.info("pull拉流队列满了:{}, requestId: {}", pull_queue.qsize(), request_id) | |||
continue | |||
frame_list.append(frame) | |||
frame_index_list.append(concurrent_frame) | |||
if len(frame_list) >= frame_num: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
concurrent_frame += 1 | |||
del frame | |||
except ServiceException as s: | |||
self.sendPullQueue({"status": "1", "error": {"code": s.code, "msg": s.msg}}) | |||
except Exception as e: | |||
logger.exception("实时拉流异常: {}, requestId:{}", e, self.msg.get("request_id")) | |||
self.sendPullQueue({"status": "1", "error": {"code": ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
"msg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1]}}) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.exception("实时拉流异常: {}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
clear_pull_p(pull_p, request_id) | |||
if stop_ex: | |||
if ex: | |||
error_code, error_msg = ex | |||
result = put_queue_result(pull_queue, (1, error_code, error_msg), timeout=3) | |||
else: | |||
result = put_queue_result(pull_queue, (2,), timeout=3) | |||
if result: | |||
# 3分钟超时时间 | |||
cr_time = time() | |||
while time() - cr_time < 180: | |||
event = get_no_block_queue(command_queue) | |||
if event is not None: | |||
# 当接收到停止指令,说明不会再处理视频帧了, 直接退出 | |||
if 'stop' == event.get("command"): | |||
logger.info("录屏拉流线程开始停止, requestId: {}", request_id) | |||
break | |||
sleep(1) | |||
clear_queue(command_queue) | |||
clear_queue(pull_queue) | |||
clear_queue(hb_queue) | |||
del frame_list, frame_index_list | |||
logger.info("录屏拉流线程结束, requestId: {}", request_id) |
@@ -1,8 +1,8 @@ | |||
# -*- coding: utf-8 -*- | |||
import os | |||
from multiprocessing import Process, Queue | |||
from os import getpid | |||
from time import time, sleep | |||
from traceback import format_exc | |||
import psutil | |||
@@ -10,397 +10,321 @@ from loguru import logger | |||
from util.LogUtils import init_log | |||
from concurrency.FileUploadThread import ImageFileUpload | |||
from entity.FeedBack import message_feedback | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p | |||
from util.QueUtil import get_no_block_queue, put_queue, clear_queue, put_queue_result | |||
class PullVideoStreamProcess(Process): | |||
__slots__ = ("_command_queue", "_msg", "_context", "_fb_queue", "_pull_queue", "_image_queue", "_analyse_type", | |||
"_frame_num") | |||
def __init__(self, *args): | |||
super().__init__() | |||
# 自带参数 | |||
self._command_queue = Queue() | |||
# 传参 | |||
self._msg, self._context, self._fb_queue, self._pull_queue, self._image_queue, self._analyse_type, \ | |||
self._frame_num = args | |||
def sendCommand(self, result): | |||
put_queue(self._command_queue, result, timeout=2, is_ex=True) | |||
@staticmethod | |||
def start_File_upload(fb_queue, context, msg, image_queue, analyse_type): | |||
image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type) | |||
image_thread.setDaemon(True) | |||
image_thread.start() | |||
return image_thread | |||
@staticmethod | |||
def check(start_time, service_timeout, request_id, image_thread): | |||
if time() - start_time > service_timeout: | |||
logger.error("拉流进程运行超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) | |||
# 检测图片上传线程是否正常运行 | |||
if image_thread and not image_thread.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", request_id) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
class OnlinePullVideoStreamProcess(PullVideoStreamProcess): | |||
__slots__ = () | |||
def run(self): | |||
# 避免循环调用性能影响, 优先赋值 | |||
context, msg, analyse_type, frame_num = self._context, self._msg, self._analyse_type, self._frame_num | |||
base_dir, env, service = context['base_dir'], context['env'], context["service"] | |||
request_id, pull_url = msg["request_id"], msg["pull_url"] | |||
pull_stream_timeout, read_stream_timeout, service_timeout = int(service["cv2_pull_stream_timeout"]), \ | |||
int(service["cv2_read_stream_timeout"]), int(service["timeout"]) + 120 | |||
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \ | |||
self._fb_queue | |||
image_thread, ex = None, None | |||
width, height, width_height_3, all_frames, w_2, h_2, pull_p = None, None, None, 0, None, None, None | |||
frame_list, frame_index_list = [], [] | |||
ex_status = True | |||
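        # ex_status is set to False when the parent requests 'stop_ex' or the parent process has died; only while it stays True does the finally block push a final status into pull_queue.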
try: | |||
# 初始化日志 | |||
init_log(base_dir, env) | |||
logger.info("开启启动实时视频拉流进程, requestId:{}", request_id) | |||
# 开启图片上传线程 | |||
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type) | |||
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1 | |||
start_time, pull_stream_start_time, read_start_time, full_timeout = time(), None, None, None | |||
while True: | |||
# 检测任务执行是否超时、图片上传线程是否正常 | |||
self.check(start_time, service_timeout, request_id, image_thread) | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
break | |||
if 'stop_ex' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
ex_status = False | |||
break | |||
# 检测视频信息是否存在或拉流对象是否存在 | |||
if check_video_stream(width, height): | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id) | |||
if pull_stream_start_time is None: | |||
pull_stream_start_time = time() | |||
pull_stream_init_timeout = time() - pull_stream_start_time | |||
if pull_stream_init_timeout > pull_stream_timeout: | |||
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, requestId) | |||
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id) | |||
# 如果超时了, 将异常信息发送给主进程,如果队列满了,抛出异常 | |||
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id) | |||
if width is None: | |||
sleep(1) | |||
continue | |||
pull_stream_start_time, cv2_init_num = None, 1 | |||
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3, | |||
w_2, h_2, request_id) | |||
if pull_queue.full(): | |||
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id) | |||
if full_timeout is None: | |||
full_timeout = time() | |||
if time() - full_timeout > 180: | |||
logger.error("拉流队列阻塞超时, 请检查父进程是否正常!requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if psutil.Process(getpid()).ppid() == 1: | |||
clear_pull_p(pull_p, request_id) | |||
ex_status = False | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
image_thread.join(120) | |||
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id) | |||
put_queue(self._fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value, | |||
self._analyse_type, | |||
ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]), timeout=2) | |||
break | |||
del frame | |||
continue | |||
full_timeout = None | |||
if frame is None: | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, requestId) | |||
pull_stream_read_timeout = time.time() - pull_stream_read_start_time | |||
clear_pull_p(pull_p, request_id) | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id) | |||
if read_start_time is None: | |||
read_start_time = time() | |||
pull_stream_read_timeout = time() - read_start_time | |||
if pull_stream_read_timeout > read_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
request_id) | |||
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
init_pull_num += 1 | |||
continue | |||
init_pull_num, read_start_time = 1, None | |||
frame_list.append(frame) | |||
frame_index_list.append(concurrent_frame) | |||
if len(frame_list) >= frame_num: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True) | |||
frame_list, frame_index_list = [], [] | |||
concurrent_frame += 1 | |||
del frame | |||
except ServiceException as s: | |||
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), requestId) | |||
putQueue(pull_queue, ("1", s.code, s.msg), requestId, enable_ex=False) | |||
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("实时拉流异常: {}, requestId:{}", format_exc(), requestId) | |||
putQueue(pull_queue, ("1", ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), requestId, enable_ex=False) | |||
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", format_exc(), pull_queue.qsize(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
clear_pull_p(pull_p, request_id) | |||
del frame_list, frame_index_list | |||
if ex_status: | |||
if ex: | |||
code, msg = ex | |||
r = put_queue_result(pull_queue, (1, code, msg), timeout=10) | |||
else: | |||
r = put_queue_result(pull_queue, (2,), timeout=10) | |||
if r: | |||
c_time = time() | |||
while time() - c_time < 60: | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
break | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
logger.info("实时拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}", | |||
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id) | |||
class OfflinePullVideoStreamProcess(PullVideoStreamProcess): | |||
__slots__ = () | |||
def run(self): | |||
msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type | |||
request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"] | |||
ex, service_timeout, full_timeout = None, int(context["service"]["timeout"]) + 120, None | |||
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \ | |||
self._fb_queue | |||
image_thread, pull_p = None, None | |||
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None | |||
frame_list, frame_index_list = [], [] | |||
ex_status = True | |||
try: | |||
# 初始化日志 | |||
init_log(base_dir, env) | |||
logger.info("开启离线视频拉流进程, requestId:{}", request_id) | |||
# 开启图片上传线程 | |||
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type) | |||
# 初始化拉流工具类 | |||
cv2_init_num, concurrent_frame = 0, 1 | |||
start_time = time() | |||
while True: | |||
# 检测任务执行是否超时、图片上传线程是否正常 | |||
self.check(start_time, service_timeout, request_id, image_thread) | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止离线拉流进程, requestId:{}", request_id) | |||
break | |||
if 'stop_ex' == command_msg.get("command"): | |||
logger.info("开始停止离线拉流进程, requestId:{}", request_id) | |||
ex_status = False | |||
break | |||
# 检测视频信息是否存在或拉流对象是否存在 | |||
if check_video_stream(width, height): | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id) | |||
if cv2_init_num > 3: | |||
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, requestId) | |||
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id) | |||
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
sleep(1) | |||
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id) | |||
continue | |||
if pull_queue.full(): | |||
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id) | |||
if full_timeout is None: | |||
full_timeout = time() | |||
if time() - full_timeout > 180: | |||
logger.error("pull队列阻塞超时,请检测父进程是否正常!requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if psutil.Process(getpid()).ppid() == 1: | |||
clear_pull_p(pull_p, request_id) | |||
ex_status = False | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
image_thread.join(120) | |||
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id) | |||
put_queue(self._fb_queue, message_feedback(request_id, | |||
AnalysisStatus.FAILED.value, | |||
self._analyse_type, | |||
ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]), timeout=2) | |||
break | |||
continue | |||
full_timeout = None | |||
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, | |||
width_height_3, w_2, h_2, request_id) | |||
if frame is None: | |||
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, requestId) | |||
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id) | |||
clear_pull_p(pull_p, request_id) | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
# 允许100帧的误差 | |||
if concurrent_frame < all_frames - 100: | |||
logger.info("离线拉流异常结束:requestId: {}", requestId) | |||
putQueue(pull_queue, ("3",), requestId) | |||
stop_pull_stream_step = True | |||
continue | |||
logger.info("离线拉流线程结束, requestId: {}", requestId) | |||
putQueue(pull_queue, ("2",), requestId) | |||
stop_pull_stream_step = True | |||
continue | |||
putQueue(pull_queue, ("4", frame, concurrent_frame, w_2, h_2, all_frames), requestId) | |||
logger.info("离线拉流异常结束:requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
logger.info("离线拉流线程结束, requestId: {}", request_id) | |||
break | |||
frame_list.append(frame) | |||
frame_index_list.append(concurrent_frame) | |||
if len(frame_list) >= frame_num: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True) | |||
frame_list, frame_index_list = [], [] | |||
concurrent_frame += 1 | |||
del frame | |||
except ServiceException as s: | |||
logger.error("离线任务拉流出现异常:{}, requestId:{}", s.msg, requestId) | |||
putQueue(pull_queue, ("1", s.code, s.msg), requestId, enable_ex=False) | |||
logger.error("离线拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("离线拉流异常: {}, requestId:{}", format_exc(), requestId) | |||
putQueue(pull_queue, ("1", ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), requestId, enable_ex=False) | |||
logger.error("离线拉流异常: {}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
clear_pull_p(pull_p, request_id) | |||
del frame_list, frame_index_list | |||
if ex_status: | |||
if ex: | |||
code, msg = ex | |||
r = put_queue_result(pull_queue, (1, code, msg), timeout=10) | |||
else: | |||
r = put_queue_result(pull_queue, (2,), timeout=10) | |||
if r: | |||
c_time = time() | |||
while time() - c_time < 180: | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
break | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
logger.info("离线拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}", | |||
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id) |
@@ -0,0 +1,349 @@ | |||
# -*- coding: utf-8 -*- | |||
import os | |||
from multiprocessing import Process, Queue | |||
from os import getpid | |||
from time import time, sleep | |||
from traceback import format_exc | |||
import psutil | |||
from loguru import logger | |||
from util.LogUtils import init_log | |||
from concurrency.FileUploadThread import ImageFileUpload | |||
from entity.FeedBack import message_feedback | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p | |||
from util.QueUtil import get_no_block_queue, put_queue, clear_queue, put_queue_result | |||
class PullVideoStreamProcess2(Process): | |||
__slots__ = ("_command_queue", "_msg", "_context", "_fb_queue", "_pull_queue", "_image_queue", "_analyse_type", | |||
"_frame_num") | |||
def __init__(self, *args): | |||
super().__init__() | |||
# 自带参数 | |||
self._command_queue = Queue() | |||
# 传参 | |||
self._msg, self._context, self._fb_queue, self._pull_queue, self._image_queue, self._analyse_type, \ | |||
self._frame_num = args | |||
def sendCommand(self, result): | |||
try: | |||
self._command_queue.put(result, timeout=10) | |||
except Exception: | |||
logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), self._msg.get("request_id")) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
@staticmethod | |||
def start_File_upload(*args): | |||
fb_queue, context, msg, image_queue, analyse_type = args | |||
image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type) | |||
image_thread.setDaemon(True) | |||
image_thread.start() | |||
return image_thread | |||
@staticmethod | |||
def check(start_time, service_timeout, request_id, image_thread): | |||
if time() - start_time > service_timeout: | |||
logger.error("分析超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1]) | |||
# 检测图片上传线程是否正常运行 | |||
if image_thread is not None and not image_thread.is_alive(): | |||
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", request_id) | |||
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!") | |||
class OnlinePullVideoStreamProcess2(PullVideoStreamProcess2): | |||
__slots__ = () | |||
def run(self): | |||
# 避免循环调用性能影响, 优先赋值 | |||
context, msg, analyse_type = self._context, self._msg, self._analyse_type | |||
request_id, base_dir, env = msg["request_id"], context['base_dir'], context['env'] | |||
pull_url, frame_num = msg["pull_url"], self._frame_num | |||
pull_stream_timeout = int(context["service"]["cv2_pull_stream_timeout"]) | |||
read_stream_timeout = int(context["service"]["cv2_read_stream_timeout"]) | |||
service_timeout = int(context["service"]["timeout"]) | |||
command_queue, pull_queue, image_queue = self._command_queue, self._pull_queue, self._image_queue | |||
fb_queue = self._fb_queue | |||
image_thread, pull_p = None, None | |||
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None | |||
frame_list, frame_index_list = [], [] | |||
ex = None | |||
ex_status = True | |||
full_timeout = None | |||
try: | |||
# 初始化日志 | |||
init_log(base_dir, env) | |||
logger.info("开启实时视频拉流进程, requestId:{}", request_id) | |||
# 开启图片上传线程 | |||
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type) | |||
# 初始化拉流工具类 | |||
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1 | |||
start_time, pull_start_time, read_start_time = time(), None, None | |||
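            # Main pull loop: rebuild the stream info whenever it is missing, batch frames into pull_queue, and honour stop commands and timeouts.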
while True: | |||
# 检测任务执行是否超时、图片上传线程是否正常 | |||
self.check(start_time, service_timeout, request_id, image_thread) | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
break | |||
if 'stop_ex' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
ex_status = False | |||
break | |||
# 检测视频信息是否存在或拉流对象是否存在 | |||
if check_video_stream(width, height): | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id) | |||
if pull_start_time is None: | |||
pull_start_time = time() | |||
pull_stream_init_timeout = time() - pull_start_time | |||
if pull_stream_init_timeout > pull_stream_timeout: | |||
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id) | |||
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id) | |||
if width is None: | |||
sleep(1) | |||
continue | |||
pull_start_time, cv2_init_num = None, 1 | |||
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3, | |||
w_2, h_2, request_id) | |||
if frame is None: | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id) | |||
if read_start_time is None: | |||
read_start_time = time() | |||
pull_stream_read_timeout = time() - read_start_time | |||
if pull_stream_read_timeout > read_stream_timeout: | |||
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, | |||
request_id) | |||
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
init_pull_num += 1 | |||
continue | |||
init_pull_num, read_start_time = 1, None | |||
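                # Back-pressure handling: if pull_queue stays full for more than 180 seconds, or the parent process has died (ppid == 1), give up and report failure.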
if pull_queue.full(): | |||
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id) | |||
if full_timeout is None: | |||
full_timeout = time() | |||
if time() - full_timeout > 180: | |||
logger.error("拉流队列阻塞异常, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if psutil.Process(getpid()).ppid() == 1: | |||
clear_pull_p(pull_p, request_id) | |||
ex_status = False | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
image_thread.join(120) | |||
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id) | |||
put_queue(self._fb_queue, message_feedback(request_id, | |||
AnalysisStatus.FAILED.value, | |||
self._analyse_type, | |||
ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1])) | |||
break | |||
del frame | |||
continue | |||
full_timeout = None | |||
frame_list.append(frame) | |||
frame_index_list.append(concurrent_frame) | |||
if len(frame_list) >= frame_num: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True) | |||
frame_list, frame_index_list = [], [] | |||
concurrent_frame += 1 | |||
del frame | |||
except ServiceException as s: | |||
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", format_exc(), pull_queue.qsize(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
clear_pull_p(pull_p, request_id) | |||
del frame_list, frame_index_list | |||
if ex_status: | |||
if ex: | |||
code, msg = ex | |||
r = put_queue_result(pull_queue, (1, code, msg), timeout=10) | |||
else: | |||
r = put_queue_result(pull_queue, (2,), timeout=10) | |||
if r: | |||
c_time = time() | |||
while time() - c_time < 180: | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止实时拉流进程, requestId:{}", request_id) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
break | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
logger.info("实时拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}", | |||
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id) | |||
class OfflinePullVideoStreamProcess2(PullVideoStreamProcess2): | |||
__slots__ = () | |||
def run(self): | |||
msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type | |||
request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"] | |||
ex, service_timeout = None, int(context["service"]["timeout"]) | |||
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \ | |||
self._fb_queue | |||
image_thread, pull_p = None, None | |||
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None | |||
frame_list, frame_index_list = [], [] | |||
ex_status = True | |||
full_timeout = None | |||
try: | |||
            # Initialize logging
            init_log(base_dir, env)
            logger.info("开启离线视频拉流进程, requestId:{}", request_id)
            # Start the image upload thread
            image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
            # Initialize the stream-pulling counters
cv2_init_num = 0 | |||
concurrent_frame = 1 | |||
start_time = time() | |||
while True: | |||
                # Check whether the task has timed out and whether the image upload thread is still alive
self.check(start_time, service_timeout, request_id, image_thread) | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止离线拉流进程, requestId:{}", request_id) | |||
break | |||
if 'stop_ex' == command_msg.get("command"): | |||
logger.info("开始停止离线拉流进程, requestId:{}", request_id) | |||
ex_status = False | |||
break | |||
                # Check whether the video info has been obtained / the pull object exists
if check_video_stream(width, height): | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1) | |||
frame_list, frame_index_list = [], [] | |||
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id) | |||
if cv2_init_num > 3: | |||
clear_pull_p(pull_p, request_id) | |||
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id) | |||
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0], | |||
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1]) | |||
cv2_init_num += 1 | |||
sleep(1) | |||
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id) | |||
continue | |||
if pull_queue.full(): | |||
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id) | |||
if full_timeout is None: | |||
full_timeout = time() | |||
if time() - full_timeout > 300: | |||
logger.error("拉流队列阻塞超时, 请检查父进程是否正常!requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
if psutil.Process(getpid()).ppid() == 1: | |||
clear_pull_p(pull_p, request_id) | |||
ex_status = False | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
image_thread.join(120) | |||
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id) | |||
put_queue(self._fb_queue, message_feedback(request_id, | |||
AnalysisStatus.FAILED.value, | |||
self._analyse_type, | |||
ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1])) | |||
break | |||
continue | |||
full_timeout = None | |||
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3, | |||
w_2, h_2, request_id) | |||
if frame is None: | |||
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id) | |||
clear_pull_p(pull_p, request_id) | |||
if len(frame_list) > 0: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=2, is_ex=False) | |||
frame_list, frame_index_list = [], [] | |||
                    # Allow a tolerance of 100 frames
if concurrent_frame < all_frames - 100: | |||
logger.info("离线拉流异常结束:requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) | |||
logger.info("离线拉流线程结束, requestId: {}", request_id) | |||
break | |||
frame_list.append(frame) | |||
frame_index_list.append(concurrent_frame) | |||
if len(frame_list) >= frame_num: | |||
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True) | |||
frame_list, frame_index_list = [], [] | |||
concurrent_frame += 1 | |||
del frame | |||
except ServiceException as s: | |||
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("实时拉流异常: {}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
clear_pull_p(pull_p, request_id) | |||
del frame_list, frame_index_list | |||
if ex_status: | |||
if ex: | |||
code, msg = ex | |||
r = put_queue_result(pull_queue, (1, code, msg), timeout=10) | |||
else: | |||
r = put_queue_result(pull_queue, (2,), timeout=10) | |||
if r: | |||
c_time = time() | |||
while time() - c_time < 180: | |||
command_msg = get_no_block_queue(command_queue) | |||
if command_msg is not None: | |||
if 'stop' == command_msg.get("command"): | |||
logger.info("开始停止离线拉流进程, requestId:{}", request_id) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
break | |||
for q in [command_queue, pull_queue, image_queue]: | |||
clear_queue(q) | |||
if image_thread and image_thread.is_alive(): | |||
put_queue(image_queue, (2, "stop"), timeout=1) | |||
logger.info("停止图片上传线程, requestId:{}", request_id) | |||
image_thread.join(120) | |||
logger.info("停止图片上传线程结束, requestId:{}", request_id) | |||
logger.info("离线拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}", | |||
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id) |
@@ -0,0 +1,181 @@ | |||
# -*- coding: utf-8 -*- | |||
from concurrent.futures import ThreadPoolExecutor | |||
from os.path import join | |||
from threading import Thread | |||
from traceback import format_exc | |||
import cv2 | |||
import numpy as np | |||
from loguru import logger | |||
from util.Cv2Utils import write_or_video, write_ai_video, push_video_stream, close_all_p, video_conjuncing | |||
from util.ImageUtils import url2Array, add_water_pic | |||
from util.PlotsUtils import draw_painting_joint | |||
from util.QueUtil import put_queue | |||
class OnPushStreamThread(Thread): | |||
__slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue') | |||
def __init__(self, *args): | |||
super().__init__() | |||
        # Constructor arguments
        self._msg, self._push_queue, self._image_queue, self._context = args
        # Internal state
self.ex = None | |||
self._logo = None | |||
if self._context["video"]["video_add_water"]: | |||
self._logo = self._msg.get("logo_url") | |||
if self._logo: | |||
self._logo = url2Array(self._logo, enable_ex=False) | |||
            if self._logo is None:  # url2Array returns an ndarray, so compare to None instead of truth-testing
self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1) | |||
def run(self): | |||
request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue | |||
orFilePath, aiFilePath, logo = self._context.get("orFilePath"), self._context.get("aiFilePath"), self._logo | |||
or_video_file, ai_video_file, push_p = None, None, None | |||
push_url = self._msg.get("push_url") | |||
try: | |||
logger.info("开始启动推流线程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0] | |||
while True: | |||
push_parm = push_queue.get() | |||
if push_parm is not None: | |||
                        # Message formats:
                        # (1, payload): frame message, where payload is
                        #   [frame, current frame index, total frames, [(detections, code, allowedList, label_arraylist, rainbows)]]
                        #   e.g. res = (1, (pull_frame[1], pull_frame[2], pull_frame[3], []))
                        # (2, command): control instruction
                        if push_parm[0] == 1:  # frame message
frame, current_frame, all_frames, ques_list = push_parm[1] | |||
copy_frame = frame.copy() | |||
det_xywh = {} | |||
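                            # det_xywh maps model code -> {target class -> [[cls, box, score, label, color], ...]}
                            # and is later handed to the image upload queue.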
if len(ques_list) > 0: | |||
for qs in ques_list: | |||
det_xywh[qs[1]] = {} | |||
detect_targets_code = int(qs[0][0]) | |||
score = qs[0][-1] | |||
label_array = qs[3][detect_targets_code] | |||
color = qs[4][detect_targets_code] | |||
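                                    # A plain (x1, y1, x2, y2) box is expanded into a 4-point polygon;
                                    # otherwise qs[0][1] appears to already carry the polygon points.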
if not isinstance(qs[0][1], (list, tuple, np.ndarray)): | |||
xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4]) | |||
box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)] | |||
else: | |||
box = qs[0][1] | |||
draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop") | |||
cd = det_xywh[qs[1]].get(detect_targets_code) | |||
if cd is None: | |||
det_xywh[qs[1]][detect_targets_code] = [ | |||
[detect_targets_code, box, score, label_array, color]] | |||
else: | |||
det_xywh[qs[1]][detect_targets_code].append( | |||
[detect_targets_code, box, score, label_array, color]) | |||
                            if logo is not None:  # logo may be an ndarray; truth-testing it would raise
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                            # Write the original video to local disk
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file, | |||
or_write_status, request_id) | |||
                            # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file, | |||
ai_write_status, request_id) | |||
if len(det_xywh) > 0: | |||
put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames))) | |||
push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id) | |||
ai_video_file = write_ai_video_result.result() | |||
or_video_file = write_or_video_result.result() | |||
if push_parm[0] == 2: | |||
if 'stop' == push_parm[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
close_all_p(push_p, or_video_file, ai_video_file, request_id) | |||
or_video_file, ai_video_file, push_p = None, None, None | |||
break | |||
except Exception as e: | |||
logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id) | |||
self.ex = e | |||
finally: | |||
close_all_p(push_p, or_video_file, ai_video_file, request_id) | |||
logger.info("推流线程停止完成!requestId:{}", request_id) | |||
class OffPushStreamThread(Thread): | |||
__slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue') | |||
def __init__(self, *args): | |||
super().__init__() | |||
        # Constructor arguments
        self._msg, self._push_queue, self._image_queue, self._context = args
        # Internal state
self.ex = None | |||
self._logo = None | |||
if self._context["video"]["video_add_water"]: | |||
self._logo = self._msg.get("logo_url") | |||
if self._logo: | |||
self._logo = url2Array(self._logo, enable_ex=False) | |||
            if self._logo is None:
self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1) | |||
def run(self): | |||
request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue | |||
aiFilePath, logo = self._context.get("aiFilePath"), self._logo | |||
ai_video_file, push_p = None, None | |||
push_url = self._msg.get("push_url") | |||
try: | |||
logger.info("开始启动推流线程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=1) as t: | |||
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0] | |||
while True: | |||
push_parm = push_queue.get() | |||
if push_parm is not None: | |||
                        # Message formats:
                        # (1, payload): frame message, where payload is
                        #   [frame, current frame index, total frames, [(detections, code, allowedList, label_arraylist, rainbows)]]
                        #   e.g. res = (1, (pull_frame[1], pull_frame[2], pull_frame[3], []))
                        # (2, command): control instruction
                        if push_parm[0] == 1:  # frame message
frame, current_frame, all_frames, ques_list = push_parm[1] | |||
copy_frame = frame.copy() | |||
det_xywh = {} | |||
if len(ques_list) > 0: | |||
for qs in ques_list: | |||
det_xywh[qs[1]] = {} | |||
detect_targets_code = int(qs[0][0]) | |||
score = qs[0][-1] | |||
label_array = qs[3][detect_targets_code] | |||
color = qs[4][detect_targets_code] | |||
if not isinstance(qs[0][1], (list, tuple, np.ndarray)): | |||
xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4]) | |||
box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)] | |||
else: | |||
box = qs[0][1] | |||
draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop") | |||
cd = det_xywh[qs[1]].get(detect_targets_code) | |||
if cd is None: | |||
det_xywh[qs[1]][detect_targets_code] = [ | |||
[detect_targets_code, box, score, label_array, color]] | |||
else: | |||
det_xywh[qs[1]][detect_targets_code].append( | |||
[detect_targets_code, box, score, label_array, color]) | |||
                            if logo is not None:
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                            # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file, | |||
ai_write_status, request_id) | |||
if len(det_xywh) > 0: | |||
put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames))) | |||
push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id) | |||
ai_video_file = write_ai_video_result.result() | |||
if push_parm[0] == 2: | |||
if 'stop' == push_parm[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
close_all_p(push_p, None, ai_video_file, request_id) | |||
ai_video_file, push_p = None, None | |||
break | |||
except Exception as e: | |||
logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id) | |||
self.ex = e | |||
finally: | |||
close_all_p(push_p, None, ai_video_file, request_id) | |||
logger.info("推流线程停止完成!requestId:{}", request_id) |
@@ -0,0 +1,201 @@ | |||
# -*- coding: utf-8 -*- | |||
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED | |||
from os.path import join | |||
from threading import Thread | |||
from traceback import format_exc | |||
import cv2 | |||
import numpy as np | |||
from loguru import logger | |||
from util.Cv2Utils import write_or_video, write_ai_video, push_video_stream, close_all_p, video_conjuncing | |||
from util.ImageUtils import url2Array, add_water_pic | |||
from util.PlotsUtils import draw_painting_joint | |||
from util.QueUtil import put_queue | |||
class OnPushStreamThread2(Thread): | |||
__slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue') | |||
def __init__(self, *args): | |||
super().__init__() | |||
        # Constructor arguments
        self._msg, self._push_queue, self._image_queue, self._context = args
        # Internal state
self.ex = None | |||
self._logo = None | |||
if self._context["video"]["video_add_water"]: | |||
self._logo = self._msg.get("logo_url") | |||
if self._logo: | |||
self._logo = url2Array(self._logo, enable_ex=False) | |||
            if self._logo is None:
self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1) | |||
def run(self): | |||
request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue | |||
orFilePath, aiFilePath, logo = self._context.get("orFilePath"), self._context.get("aiFilePath"), self._logo | |||
or_video_file, ai_video_file, push_p = None, None, None | |||
push_url = self._msg.get("push_url") | |||
try: | |||
logger.info("开始启动推流线程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
with ThreadPoolExecutor(max_workers=5) as tt: | |||
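                    # Pool `t` handles video writing, pool `tt` parallelises per-box drawing.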
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0] | |||
while True: | |||
push_r = push_queue.get() | |||
if push_r is not None: | |||
                            # Message formats:
                            # (1, ...): frame batch message
                            # [(code, retResults[2])]
                            # (2, command): control instruction
                            if push_r[0] == 1:  # frame message
frame_list, frame_index_list, all_frames = push_r[1] | |||
allowedList, rainbows, label_arrays, font_config = push_r[2] | |||
for i, frame in enumerate(frame_list): | |||
copy_frame = frame.copy() | |||
det_xywh = {} | |||
                                    # Each frame may carry results from multiple models; handle them per model
thread_p = [] | |||
for det in push_r[3]: | |||
code, retResults = det | |||
det_xywh[code] = {} | |||
                                        # If any detection targets were recognised
if len(retResults[i]) > 0: | |||
for qs in retResults[i]: | |||
detect_targets_code = int(qs[6]) | |||
if detect_targets_code not in allowedList: | |||
logger.warning("当前检测目标不在检测目标中: {}, requestId: {}", detect_targets_code, request_id) | |||
continue | |||
score = qs[5] | |||
label_array = label_arrays[detect_targets_code] | |||
color = rainbows[detect_targets_code] | |||
if not isinstance(qs[1], (list, tuple, np.ndarray)): | |||
xc, yc, x2, y2 = int(qs[1]), int(qs[2]), int(qs[3]), int(qs[4]) | |||
box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)] | |||
else: | |||
box = qs[1] | |||
# box, img, label_array, score=0.5, color=None, config=None | |||
dp = tt.submit(draw_painting_joint, box, copy_frame, label_array, score, | |||
color, font_config) | |||
thread_p.append(dp) | |||
cd = det_xywh[code].get(detect_targets_code) | |||
if cd is None: | |||
det_xywh[code][detect_targets_code] = [ | |||
[detect_targets_code, box, score, label_array, color]] | |||
else: | |||
det_xywh[code][detect_targets_code].append( | |||
[detect_targets_code, box, score, label_array, color]) | |||
                                    if logo is not None:
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
if len(thread_p) > 0: | |||
completed_results = wait(thread_p, timeout=60, return_when=ALL_COMPLETED) | |||
completed_futures = completed_results.done | |||
for r in completed_futures: | |||
if r.exception(): | |||
raise r.exception() | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                                    # Write the original video to local disk
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file, | |||
or_write_status, request_id) | |||
                                    # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file, | |||
ai_write_status, request_id) | |||
if len(det_xywh) > 0: | |||
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames, | |||
font_config))) | |||
push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id) | |||
ai_video_file = write_ai_video_result.result() | |||
or_video_file = write_or_video_result.result() | |||
if push_r[0] == 2: | |||
if 'stop' == push_r[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
close_all_p(push_p, or_video_file, ai_video_file, request_id) | |||
or_video_file, ai_video_file, push_p = None, None, None | |||
break | |||
except Exception as e: | |||
logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id) | |||
self.ex = e | |||
finally: | |||
close_all_p(push_p, or_video_file, ai_video_file, request_id) | |||
logger.info("推流线程停止完成!requestId:{}", request_id) | |||
# class OffPushStreamThread(Thread): | |||
# __slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue') | |||
# | |||
# def __init__(self, *args): | |||
# super().__init__() | |||
# # 传参 | |||
# self._msg, self._push_queue, self._image_queue, self._context = args | |||
# # 自带参数 | |||
# self.ex = None | |||
# self._logo = None | |||
# if self._context["video"]["video_add_water"]: | |||
# self._logo = self._msg.get("logo_url") | |||
# if self._logo: | |||
# self._logo = url2Array(self._logo, enable_ex=False) | |||
# if not self._logo: | |||
# self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1) | |||
# | |||
# def run(self): | |||
# request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue | |||
# aiFilePath, logo = self._context.get("aiFilePath"), self._logo | |||
# ai_video_file, push_p = None, None | |||
# push_url = self._msg.get("push_url") | |||
# try: | |||
# logger.info("开始启动推流线程!requestId:{}", request_id) | |||
# with ThreadPoolExecutor(max_workers=1) as t: | |||
# p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0] | |||
# while True: | |||
# push_parm = push_queue.get() | |||
# if push_parm is not None: | |||
# # [(1, 原视频帧, 分析视频帧)] | |||
# # # [视频帧、当前帧数、 总帧数、 [(问题数组、code、allowedList、label_arraylist、rainbows)]] | |||
# # res = (1, (pull_frame[1], pull_frame[2], pull_frame[3], [])) | |||
# # [(2, 操作指令)] | |||
# if push_parm[0] == 1: # 视频帧操作 | |||
# frame, current_frame, all_frames, ques_list = push_parm[1] | |||
# copy_frame = frame.copy() | |||
# det_xywh = {} | |||
# if len(ques_list) > 0: | |||
# for qs in ques_list: | |||
# det_xywh[qs[1]] = {} | |||
# detect_targets_code = int(qs[0][0]) | |||
# score = qs[0][-1] | |||
# label_array = qs[3][detect_targets_code] | |||
# color = qs[4][detect_targets_code] | |||
# if not isinstance(qs[0][1], (list, tuple, np.ndarray)): | |||
# xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4]) | |||
# box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)] | |||
# else: | |||
# box = qs[0][1] | |||
# draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop") | |||
# cd = det_xywh[qs[1]].get(detect_targets_code) | |||
# if cd is None: | |||
# det_xywh[qs[1]][detect_targets_code] = [ | |||
# [detect_targets_code, box, score, label_array, color]] | |||
# else: | |||
# det_xywh[qs[1]][detect_targets_code].append( | |||
# [detect_targets_code, box, score, label_array, color]) | |||
# if logo: | |||
# frame = add_water_pic(frame, logo, request_id) | |||
# copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
# frame_merge = video_conjuncing(frame, copy_frame) | |||
# # 写识别视频到本地 | |||
# write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file, | |||
# ai_write_status, request_id) | |||
# if len(det_xywh) > 0: | |||
# put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames))) | |||
# push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id) | |||
# ai_video_file = write_ai_video_result.result() | |||
# if push_parm[0] == 2: | |||
# if 'stop' == push_parm[1]: | |||
# logger.info("停止推流线程, requestId: {}", request_id) | |||
# close_all_p(push_p, None, ai_video_file, request_id) | |||
# ai_video_file, push_p = None, None | |||
# break | |||
# except Exception as e: | |||
# logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id) | |||
# self.ex = e | |||
# finally: | |||
# close_all_p(push_p, None, ai_video_file, request_id) | |||
# logger.info("推流线程停止完成!requestId:{}", request_id) |
@@ -0,0 +1,362 @@ | |||
# -*- coding: utf-8 -*- | |||
from concurrent.futures import ThreadPoolExecutor | |||
from multiprocessing import Process | |||
from os import getpid | |||
from os.path import join | |||
from time import time, sleep | |||
from traceback import format_exc | |||
import cv2 | |||
import psutil | |||
from loguru import logger | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util import ImageUtils | |||
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p | |||
from util.ImageUtils import url2Array, add_water_pic | |||
from util.LogUtils import init_log | |||
from util.PlotsUtils import draw_painting_joint, xywh2xyxy2 | |||
from util.QueUtil import get_no_block_queue, put_queue, clear_queue | |||
class PushStreamProcess(Process): | |||
__slots__ = ("_msg", "_push_queue", "_image_queue", '_push_ex_queue', '_hb_queue', "_context") | |||
def __init__(self, *args): | |||
super().__init__() | |||
        # Constructor arguments
self._msg, self._push_queue, self._image_queue, self._push_ex_queue, self._hb_queue, self._context = args | |||
def build_logo_url(self): | |||
logo = None | |||
if self._context["video"]["video_add_water"]: | |||
logo = self._msg.get("logo_url") | |||
if logo: | |||
logo = url2Array(logo, enable_ex=False) | |||
if logo is None: | |||
logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1) | |||
self._context["logo"] = logo | |||
@staticmethod | |||
def handle_image(det_xywh, det, frame_score, copy_frame, draw_config, code_list): | |||
code, det_result = det | |||
        # Handle each model separately
        # model code, all detections for the batch of frames, target classes, colors, label images
if len(det_result) > 0: | |||
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"] | |||
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] | |||
for qs in det_result: | |||
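                # xywh2xyxy2 presumably converts a (center-x, center-y, w, h, score, cls) row
                # into corner coordinates plus score and class id.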
box, score, cls = xywh2xyxy2(qs) | |||
if cls not in allowedList or score < frame_score: | |||
continue | |||
label_array, color = label_arrays[cls], rainbows[cls] | |||
draw_painting_joint(box, copy_frame, label_array, score, color, font_config) | |||
if det_xywh.get(code) is None: | |||
det_xywh[code], code_list[code] = {}, {} | |||
cd = det_xywh[code].get(cls) | |||
if cd is None: | |||
code_list[code][cls] = 1 | |||
det_xywh[code][cls] = [[cls, box, score, label_array, color]] | |||
else: | |||
code_list[code][cls] += 1 | |||
det_xywh[code][cls].append([cls, box, score, label_array, color]) | |||
class OnPushStreamProcess(PushStreamProcess): | |||
__slots__ = () | |||
def run(self): | |||
self.build_logo_url() | |||
msg, context = self._msg, self._context | |||
base_dir, env, orFilePath, aiFilePath, logo, service_timeout, frame_score = context["base_dir"], \ | |||
context['env'], context["orFilePath"], context["aiFilePath"], context["logo"], \ | |||
int(context["service"]["timeout"]) + 120, context["service"]["filter"]["frame_score"] | |||
request_id, push_url = msg["request_id"], msg["push_url"] | |||
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \ | |||
self._hb_queue | |||
or_video_file, ai_video_file, push_p, ex = None, None, None, None | |||
ex_status = True | |||
high_score_image = {} | |||
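        # Cache of the last uploaded frame and its index, used below to suppress near-duplicate uploads.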
        # Similarity threshold, default 0.65
        similarity = context["service"]["filter"]["similarity"]
        # Switch for picture-similarity filtering
picture_similarity = bool(context["service"]["filter"]["picture_similarity"]) | |||
frame_step = int(context["service"]["filter"]["frame_step"]) | |||
try: | |||
init_log(base_dir, env) | |||
logger.info("开始实时启动推流进程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
                # Status trackers for pushing the stream, writing the original video and writing the AI video
                # (first element: timestamp, second element: retry count)
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0] | |||
start_time = time() | |||
while True: | |||
                    # Guard against push-stream timeouts: 1. the task itself running too long 2. the parent dying and the child running on
if time() - start_time > service_timeout: | |||
logger.error("推流超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) | |||
                    # The OS may kill the memory-heavy parent; if so, shut this process down as well
if psutil.Process(getpid()).ppid() == 1: | |||
logger.info("推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id) | |||
ex_status = False | |||
for q in [push_queue, image_queue, push_ex_queue, hb_queue]: | |||
clear_queue(q) | |||
break | |||
                    # Fetch video frames to push
push_r = get_no_block_queue(push_queue) | |||
if push_r is not None: | |||
if push_r[0] == 1: | |||
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1] | |||
for i, frame in enumerate(frame_list): | |||
                                # Copy the frame for drawing
copy_frame = frame.copy() | |||
det_xywh, thread_p = {}, [] | |||
for det in push_objs[i]: | |||
code, det_result = det | |||
                                    # Handle each model separately
                                    # model code, all detections for the batch of frames, target classes, colors, label images
if len(det_result) > 0: | |||
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"] | |||
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] | |||
for qs in det_result: | |||
box, score, cls = xywh2xyxy2(qs) | |||
if cls not in allowedList or score < frame_score: | |||
continue | |||
label_array, color = label_arrays[cls], rainbows[cls] | |||
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config) | |||
thread_p.append(rr) | |||
if det_xywh.get(code) is None: | |||
det_xywh[code] = {} | |||
cd = det_xywh[code].get(cls) | |||
if cd is None: | |||
det_xywh[code][cls] = [[cls, box, score, label_array, color]] | |||
else: | |||
det_xywh[code][cls].append([cls, box, score, label_array, color]) | |||
                                if logo is not None:
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
if len(thread_p) > 0: | |||
for r in thread_p: | |||
r.result() | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                                # Write the original video to local disk
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file, | |||
or_write_status, request_id) | |||
                                # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, | |||
ai_video_file, ai_write_status, request_id) | |||
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url, | |||
p_push_status, request_id) | |||
                                # If any problems were detected, run the upload logic below
if len(det_xywh) > 0: | |||
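                                    # Duplicate suppression: dHash appears to yield a 64-bit perceptual hash
                                    # (the Hamming distance is normalised by 64), so similarity = 1 - dist / 64;
                                    # frames that are too similar or closer than frame_step frames are not uploaded again.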
flag = True | |||
if picture_similarity and len(high_score_image) > 0: | |||
hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
hash2 = ImageUtils.dHash(frame) | |||
dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
similarity_1 = 1 - dist * 1.0 / 64 | |||
if similarity_1 >= similarity: | |||
flag = False | |||
if len(high_score_image) > 0: | |||
diff_frame_num = frame_index_list[i] - high_score_image.get("current_frame") | |||
if diff_frame_num < frame_step: | |||
flag = False | |||
if flag: | |||
high_score_image["or_frame"] = frame | |||
high_score_image["current_frame"] = frame_index_list[i] | |||
put_queue(image_queue, (1, [det_xywh, frame, frame_index_list[i], all_frames, draw_config["font_config"]])) | |||
push_p = push_stream_result.result(timeout=60) | |||
ai_video_file = write_ai_video_result.result(timeout=60) | |||
or_video_file = write_or_video_result.result(timeout=60) | |||
                        # Handle stop commands
if push_r[0] == 2: | |||
if 'stop' == push_r[1]: | |||
logger.info("停止推流进程, requestId: {}", request_id) | |||
break | |||
if 'stop_ex' == push_r[1]: | |||
ex_status = False | |||
logger.info("停止推流进程, requestId: {}", request_id) | |||
break | |||
del push_r | |||
else: | |||
sleep(1) | |||
except ServiceException as s: | |||
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
            # Close the push-stream pipe and the original/AI video writers
close_all_p(push_p, or_video_file, ai_video_file, request_id) | |||
if ex: | |||
code, msg = ex | |||
put_queue(push_ex_queue, (1, code, msg), timeout=2) | |||
else: | |||
if ex_status: | |||
                    # When shutting down, give the image queue up to one minute to drain; after that,
                    # clear it and drop any images that were not uploaded
c_time = time() | |||
while time() - c_time < 60: | |||
if image_queue.qsize() == 0 or image_queue.empty(): | |||
break | |||
sleep(2) | |||
for q in [push_queue, image_queue, hb_queue]: | |||
clear_queue(q) | |||
logger.info("推流进程停止完成!图片队列大小: {}, requestId:{}", image_queue.qsize(), request_id) | |||
class OffPushStreamProcess(PushStreamProcess): | |||
__slots__ = () | |||
def run(self): | |||
self.build_logo_url() | |||
msg, context = self._msg, self._context | |||
request_id = msg["request_id"] | |||
base_dir, env = context["base_dir"], context['env'] | |||
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \ | |||
self._hb_queue | |||
aiFilePath, logo = context["aiFilePath"], context["logo"] | |||
ai_video_file, push_p, push_url = None, None, msg["push_url"] | |||
service_timeout = int(context["service"]["timeout"]) + 120 | |||
frame_score = context["service"]["filter"]["frame_score"] | |||
ex = None | |||
ex_status = True | |||
high_score_image = {} | |||
        # Similarity threshold, default 0.65
        similarity = context["service"]["filter"]["similarity"]
        # Switch for picture-similarity filtering
picture_similarity = bool(context["service"]["filter"]["picture_similarity"]) | |||
frame_step = int(context["service"]["filter"]["frame_step"]) | |||
try: | |||
init_log(base_dir, env) | |||
logger.info("开始启动离线推流进程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
                # Status trackers for pushing the stream and writing the AI video
                # (first element: timestamp, second element: retry count)
p_push_status, ai_write_status = [0, 0], [0, 0] | |||
start_time = time() | |||
while True: | |||
                    # Guard against push-stream timeouts
if time() - start_time > service_timeout: | |||
logger.error("离线推流超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], | |||
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) | |||
                    # The OS may kill the memory-heavy parent; if so, shut this process down as well
if psutil.Process(getpid()).ppid() == 1: | |||
logger.info("离线推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id) | |||
ex_status = False | |||
for q in [push_queue, image_queue, push_ex_queue, hb_queue]: | |||
clear_queue(q) | |||
break | |||
                    # Fetch video frames to push
push_r = get_no_block_queue(push_queue) | |||
if push_r is not None: | |||
                        # (1, ...): frame batch message
                        # (2, command): control instruction
if push_r[0] == 1: | |||
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1] | |||
                            # Process every frame in the batch
for i, frame in enumerate(frame_list): | |||
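                                # Every 300 frames, report task progress (processed / total) as a heartbeat value.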
if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames: | |||
task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames)) | |||
put_queue(hb_queue, {"hb_value": task_process}, timeout=2) | |||
                                # Copy the frame for drawing
copy_frame = frame.copy() | |||
                                # Dictionary recording all detected problems
det_xywh, thread_p = {}, [] | |||
for det in push_objs[i]: | |||
code, det_result = det | |||
                                    # Handle each model separately
                                    # model code, all detections for the batch of frames, target classes, colors, label images
if len(det_result) > 0: | |||
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"] | |||
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] | |||
for qs in det_result: | |||
box, score, cls = xywh2xyxy2(qs) | |||
if cls not in allowedList or score < frame_score: | |||
continue | |||
label_array, color = label_arrays[cls], rainbows[cls] | |||
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config) | |||
thread_p.append(rr) | |||
if det_xywh.get(code) is None: | |||
det_xywh[code] = {} | |||
cd = det_xywh[code].get(cls) | |||
if cd is None: | |||
det_xywh[code][cls] = [[cls, box, score, label_array, color]] | |||
else: | |||
det_xywh[code][cls].append([cls, box, score, label_array, color]) | |||
                                if logo is not None:
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
if len(thread_p) > 0: | |||
for r in thread_p: | |||
r.result() | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                                # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, | |||
ai_video_file, | |||
ai_write_status, request_id) | |||
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url, | |||
p_push_status, request_id) | |||
if len(det_xywh) > 0: | |||
flag = True | |||
if picture_similarity and len(high_score_image) > 0: | |||
hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
hash2 = ImageUtils.dHash(frame) | |||
dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
similarity_1 = 1 - dist * 1.0 / 64 | |||
if similarity_1 >= similarity: | |||
flag = False | |||
if len(high_score_image) > 0: | |||
diff_frame_num = frame_index_list[i] - high_score_image.get("current_frame") | |||
if diff_frame_num < frame_step: | |||
flag = False | |||
if flag: | |||
high_score_image["or_frame"] = frame | |||
high_score_image["current_frame"] = frame_index_list[i] | |||
put_queue(image_queue, (1, [det_xywh, frame, frame_index_list[i], all_frames, draw_config["font_config"]])) | |||
push_p = push_stream_result.result(timeout=60) | |||
ai_video_file = write_ai_video_result.result(timeout=60) | |||
                        # Handle stop commands
if push_r[0] == 2: | |||
if 'stop' == push_r[1]: | |||
logger.info("停止推流进程, requestId: {}", request_id) | |||
break | |||
if 'stop_ex' == push_r[1]: | |||
logger.info("停止推流进程, requestId: {}", request_id) | |||
ex_status = False | |||
break | |||
del push_r | |||
else: | |||
sleep(1) | |||
except ServiceException as s: | |||
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
            # Close the push-stream pipe and the AI video writer
close_all_p(push_p, None, ai_video_file, request_id) | |||
if ex: | |||
code, msg = ex | |||
put_queue(push_ex_queue, (1, code, msg), timeout=2) | |||
else: | |||
if ex_status: | |||
                    # When shutting down, give the image queue up to one minute to drain; after that,
                    # clear it and drop any images that were not uploaded
c_time = time() | |||
while time() - c_time < 60: | |||
if image_queue.qsize() == 0 or image_queue.empty(): | |||
break | |||
sleep(2) | |||
for q in [push_queue, image_queue, hb_queue]: | |||
clear_queue(q) | |||
logger.info("推流进程停止完成!requestId:{}", request_id) |
@@ -0,0 +1,342 @@ | |||
# -*- coding: utf-8 -*- | |||
from concurrent.futures import ThreadPoolExecutor | |||
from multiprocessing import Process | |||
from os import getpid | |||
from os.path import join | |||
from time import time, sleep | |||
from traceback import format_exc | |||
import cv2 | |||
import psutil | |||
from loguru import logger | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
from util import ImageUtils | |||
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p | |||
from util.ImageUtils import url2Array, add_water_pic | |||
from util.LogUtils import init_log | |||
from util.PlotsUtils import draw_painting_joint, xywh2xyxy2 | |||
from util.QueUtil import get_no_block_queue, put_queue, clear_queue | |||
class PushStreamProcess2(Process): | |||
__slots__ = ("_msg", "_push_queue", "_image_queue", '_push_ex_queue', '_hb_queue', "_context") | |||
def __init__(self, *args): | |||
super().__init__() | |||
        # Constructor arguments
self._msg, self._push_queue, self._image_queue, self._push_ex_queue, self._hb_queue, self._context = args | |||
def build_logo_url(self): | |||
logo = None | |||
if self._context["video"]["video_add_water"]: | |||
logo = self._msg.get("logo_url") | |||
if logo: | |||
logo = url2Array(logo, enable_ex=False) | |||
if logo is None: | |||
logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1) | |||
self._context["logo"] = logo | |||
class OnPushStreamProcess2(PushStreamProcess2): | |||
__slots__ = () | |||
def run(self): | |||
msg, context = self._msg, self._context | |||
self.build_logo_url() | |||
request_id = msg["request_id"] | |||
base_dir, env = context["base_dir"], context['env'] | |||
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \ | |||
self._hb_queue | |||
orFilePath, aiFilePath, logo = context["orFilePath"], context["aiFilePath"], context["logo"] | |||
or_video_file, ai_video_file, push_p, push_url = None, None, None, msg["push_url"] | |||
service_timeout = int(context["service"]["timeout"]) + 120 | |||
frame_score = context["service"]["filter"]["frame_score"] | |||
ex = None | |||
ex_status = True | |||
high_score_image = {} | |||
        # Similarity threshold, default 0.65
        similarity = context["service"]["filter"]["similarity"]
        # Switch for picture-similarity filtering
picture_similarity = bool(context["service"]["filter"]["picture_similarity"]) | |||
frame_step = int(context["service"]["filter"]["frame_step"]) | |||
try: | |||
init_log(base_dir, env) | |||
logger.info("开始启动推流进程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=3) as t: | |||
                # Status trackers for pushing the stream, writing the original video and writing the AI video
                # (first element: timestamp, second element: retry count)
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0] | |||
start_time = time() | |||
while True: | |||
                    # Guard against push-stream timeouts
if time() - start_time > service_timeout: | |||
logger.error("推流超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1]) | |||
                    # The OS may kill the memory-heavy parent; if so, shut this process down as well
if psutil.Process(getpid()).ppid() == 1: | |||
ex_status = False | |||
logger.info("推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id) | |||
for q in [push_queue, image_queue, push_ex_queue, hb_queue]: | |||
clear_queue(q) | |||
break | |||
                    # Fetch video frames to push
push_r = get_no_block_queue(push_queue) | |||
if push_r is not None: | |||
                        # (1, ...): frame batch message
                        # (2, command): control instruction
if push_r[0] == 1: | |||
                            # With multiple models, push_objs may contain [model-1 results, model-2 results, model-3 results]
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1] | |||
                            # Process every frame in the batch
for i, frame in enumerate(frame_list): | |||
                                # Copy the frame for drawing
copy_frame = frame.copy() | |||
                                # Dictionary recording all detected problems
det_xywh, thread_p = {}, [] | |||
                                # [model-1 results, model-2 results, model-3 results]
for s_det_list in push_objs: | |||
code, det_result = s_det_list[0], s_det_list[1][i] | |||
if len(det_result) > 0: | |||
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"] | |||
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] | |||
for qs in det_result: | |||
box, score, cls = xywh2xyxy2(qs) | |||
if cls not in allowedList or score < frame_score: | |||
continue | |||
label_array, color = label_arrays[cls], rainbows[cls] | |||
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config) | |||
thread_p.append(rr) | |||
if det_xywh.get(code) is None: | |||
det_xywh[code] = {} | |||
cd = det_xywh[code].get(cls) | |||
if cd is None: | |||
det_xywh[code][cls] = [[cls, box, score, label_array, color]] | |||
else: | |||
det_xywh[code][cls].append([cls, box, score, label_array, color]) | |||
                                if logo is not None:
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
if len(thread_p) > 0: | |||
for r in thread_p: | |||
r.result() | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                                # Write the original video to local disk
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file, | |||
or_write_status, request_id) | |||
                                # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, | |||
ai_video_file, ai_write_status, request_id) | |||
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url, | |||
p_push_status, | |||
request_id) | |||
if len(det_xywh) > 0: | |||
flag = True | |||
if picture_similarity and len(high_score_image) > 0: | |||
hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
hash2 = ImageUtils.dHash(frame) | |||
dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
similarity_1 = 1 - dist * 1.0 / 64 | |||
if similarity_1 >= similarity: | |||
flag = False | |||
if len(high_score_image) > 0: | |||
diff_frame_num = frame_index_list[i] - high_score_image.get("current_frame") | |||
if diff_frame_num < frame_step: | |||
flag = False | |||
if flag: | |||
high_score_image["or_frame"] = frame | |||
high_score_image["current_frame"] = frame_index_list[i] | |||
put_queue(image_queue, (1, [det_xywh, frame, frame_index_list[i], all_frames, draw_config["font_config"]])) | |||
push_p = push_p_result.result(timeout=60) | |||
ai_video_file = write_ai_video_result.result(timeout=60) | |||
or_video_file = write_or_video_result.result(timeout=60) | |||
                        # Handle stop commands
if push_r[0] == 2: | |||
if 'stop' == push_r[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
break | |||
if 'stop_ex' == push_r[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
ex_status = False | |||
break | |||
del push_r | |||
else: | |||
sleep(1) | |||
except ServiceException as s: | |||
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
            # Close the push-stream pipe and the original/AI video writers
close_all_p(push_p, or_video_file, ai_video_file, request_id) | |||
if ex: | |||
code, msg = ex | |||
put_queue(push_ex_queue, (1, code, msg), timeout=2) | |||
else: | |||
if ex_status: | |||
                    # When shutting down, give the image queue up to one minute to drain; after that,
                    # clear it and drop any images that were not uploaded
c_time = time() | |||
while time() - c_time < 60: | |||
if image_queue.qsize() == 0 or image_queue.empty(): | |||
break | |||
sleep(2) | |||
for q in [push_queue, image_queue, hb_queue]: | |||
clear_queue(q) | |||
logger.info("推流进程停止完成!requestId:{}", request_id) | |||
class OffPushStreamProcess2(PushStreamProcess2): | |||
__slots__ = () | |||
def run(self): | |||
self.build_logo_url() | |||
msg, context = self._msg, self._context | |||
request_id = msg["request_id"] | |||
base_dir, env = context["base_dir"], context['env'] | |||
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \ | |||
self._hb_queue | |||
aiFilePath, logo = context["aiFilePath"], context["logo"] | |||
ai_video_file, push_p, push_url = None, None, msg["push_url"] | |||
service_timeout = int(context["service"]["timeout"]) + 120 | |||
frame_score = context["service"]["filter"]["frame_score"] | |||
ex = None | |||
ex_status = True | |||
high_score_image = {} | |||
        # Similarity threshold, default 0.65
        similarity = context["service"]["filter"]["similarity"]
        # Switch for picture-similarity filtering
picture_similarity = bool(context["service"]["filter"]["picture_similarity"]) | |||
frame_step = int(context["service"]["filter"]["frame_step"]) | |||
try: | |||
init_log(base_dir, env) | |||
logger.info("开始启动离线推流进程!requestId:{}", request_id) | |||
with ThreadPoolExecutor(max_workers=2) as t: | |||
                # Status trackers for pushing the stream and writing the AI video
                # (first element: timestamp, second element: retry count)
p_push_status, ai_write_status = [0, 0], [0, 0] | |||
start_time = time() | |||
while True: | |||
                    # Guard against push-stream timeouts
if time() - start_time > service_timeout: | |||
logger.error("离线推流超时, requestId: {}", request_id) | |||
raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0], | |||
ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1]) | |||
                    # The OS may kill the memory-heavy parent; if so, shut this process down as well
if psutil.Process(getpid()).ppid() == 1: | |||
ex_status = False | |||
logger.info("离线推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id) | |||
for q in [push_queue, image_queue, push_ex_queue, hb_queue]: | |||
clear_queue(q) | |||
break | |||
                    # Fetch video frames to push
push_r = get_no_block_queue(push_queue) | |||
if push_r is not None: | |||
                        # (1, ...): frame batch message
                        # (2, command): control instruction
if push_r[0] == 1: | |||
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1] | |||
                            # Process every frame in the batch
for i, frame in enumerate(frame_list): | |||
if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames: | |||
task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames)) | |||
put_queue(hb_queue, {"hb_value": task_process}, timeout=2) | |||
                                # Copy the frame for drawing
copy_frame = frame.copy() | |||
                                # Dictionary recording all detected problems
det_xywh, thread_p = {}, [] | |||
for s_det_list in push_objs: | |||
code, det_result = s_det_list[0], s_det_list[1][i] | |||
if len(det_result) > 0: | |||
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"] | |||
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] | |||
for qs in det_result: | |||
box, score, cls = xywh2xyxy2(qs) | |||
if cls not in allowedList or score < frame_score: | |||
continue | |||
label_array, color = label_arrays[cls], rainbows[cls] | |||
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config) | |||
thread_p.append(rr) | |||
if det_xywh.get(code) is None: | |||
det_xywh[code] = {} | |||
cd = det_xywh[code].get(cls) | |||
if cd is None: | |||
det_xywh[code][cls] = [[cls, box, score, label_array, color]] | |||
else: | |||
det_xywh[code][cls].append([cls, box, score, label_array, color]) | |||
                                if logo is not None:
frame = add_water_pic(frame, logo, request_id) | |||
copy_frame = add_water_pic(copy_frame, logo, request_id) | |||
if len(thread_p) > 0: | |||
for r in thread_p: | |||
r.result() | |||
frame_merge = video_conjuncing(frame, copy_frame) | |||
                                # Write the annotated (AI) video to local disk
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, | |||
ai_video_file, | |||
ai_write_status, request_id) | |||
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url, | |||
p_push_status, | |||
request_id) | |||
if len(det_xywh) > 0: | |||
flag = True | |||
if picture_similarity and len(high_score_image) > 0: | |||
hash1 = ImageUtils.dHash(high_score_image.get("or_frame")) | |||
hash2 = ImageUtils.dHash(frame) | |||
dist = ImageUtils.Hamming_distance(hash1, hash2) | |||
similarity_1 = 1 - dist * 1.0 / 64 | |||
if similarity_1 >= similarity: | |||
flag = False | |||
if len(high_score_image) > 0: | |||
diff_frame_num = frame_index_list[i] - high_score_image.get("current_frame") | |||
if diff_frame_num < frame_step: | |||
flag = False | |||
if flag: | |||
high_score_image["or_frame"] = frame | |||
high_score_image["current_frame"] = frame_index_list[i] | |||
put_queue(image_queue, (1, [det_xywh, frame, frame_index_list[i], all_frames, draw_config["font_config"]])) | |||
push_p = push_p_result.result(timeout=60) | |||
ai_video_file = write_ai_video_result.result(timeout=60) | |||
                        # Handle stop commands
if push_r[0] == 2: | |||
if 'stop' == push_r[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
break | |||
if 'stop_ex' == push_r[1]: | |||
logger.info("停止推流线程, requestId: {}", request_id) | |||
ex_status = False | |||
break | |||
del push_r | |||
else: | |||
sleep(1) | |||
except ServiceException as s: | |||
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id) | |||
ex = s.code, s.msg | |||
except Exception: | |||
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id) | |||
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1] | |||
finally: | |||
            # Close the push-stream pipe and the AI video writer
close_all_p(push_p, None, ai_video_file, request_id) | |||
if ex: | |||
code, msg = ex | |||
put_queue(push_ex_queue, (1, code, msg), timeout=2) | |||
else: | |||
if ex_status: | |||
                    # When shutting down, give the image queue up to one minute to drain; after that,
                    # clear it and drop any images that were not uploaded
c_time = time() | |||
while time() - c_time < 60: | |||
if image_queue.qsize() == 0 or image_queue.empty(): | |||
break | |||
sleep(2) | |||
for q in [push_queue, image_queue, hb_queue]: | |||
clear_queue(q) | |||
logger.info("推流进程停止完成!requestId:{}", request_id) |
@@ -1,51 +1,50 @@ | |||
# -*- coding: utf-8 -*- | |||
from threading import Thread | |||
import time | |||
from traceback import format_exc | |||
from loguru import logger | |||
from entity.FeedBack import recording_feedback | |||
from enums.RecordingStatusEnum import RecordingStatus | |||
from util.QueUtil import get_no_block_queue, put_queue, clear_queue | |||
class RecordingHeartbeat(Thread): | |||
def __init__(self, fbQueue, hbQueue, request_id): | |||
super().__init__() | |||
self.fbQueue = fbQueue | |||
self.hbQueue = hbQueue | |||
self.request_id = request_id | |||
def getHbQueue(self): | |||
eBody = None | |||
try: | |||
eBody = self.hbQueue.get(block=False) | |||
except Exception as e: | |||
pass | |||
return eBody | |||
__slots__ = ('_fb_queue', '_hb_queue', '_request_id') | |||
    # Push execution results
def sendResult(self, result): | |||
self.fbQueue.put(result) | |||
def sendHbQueue(self, result): | |||
self.hbQueue.put(result) | |||
def sendhbMessage(self, statusl): | |||
self.sendResult({"recording": recording_feedback(self.request_id, statusl)}) | |||
def __init__(self, fb_queue, hb_queue, request_id): | |||
super().__init__() | |||
self._fb_queue = fb_queue | |||
self._hb_queue = hb_queue | |||
self._request_id = request_id | |||
def run(self): | |||
logger.info("开始启动录屏心跳线程!requestId:{}", self.request_id) | |||
hb_init_num = 0 | |||
while True: | |||
try: | |||
request_id = self._request_id | |||
hb_queue, fb_queue = self._hb_queue, self._fb_queue | |||
logger.info("开始启动录屏心跳线程!requestId:{}", request_id) | |||
hb_init_num, progress = 0, '0.0000' | |||
status = RecordingStatus.RECORDING_WAITING.value[0] | |||
try: | |||
while True: | |||
time.sleep(3) | |||
hb_msg = self.getHbQueue() | |||
hb_msg = get_no_block_queue(hb_queue) | |||
if hb_msg is not None and len(hb_msg) > 0: | |||
command = hb_msg.get("command") | |||
if 'stop' == command: | |||
logger.info("开始终止心跳线程, requestId:{}", self.request_id) | |||
command_que = hb_msg.get("command") | |||
progress_que = hb_msg.get("progress") | |||
status_que = hb_msg.get("status") | |||
if progress_que is not None: | |||
progress = progress_que | |||
if status_que is not None: | |||
status = status_que | |||
if 'stop' == command_que: | |||
logger.info("开始终止心跳线程, requestId:{}", request_id) | |||
break | |||
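                # The loop sleeps 3 s per iteration and hb_init_num advances by 3,
                # so a recording heartbeat is reported roughly every 30 seconds.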
if hb_init_num % 30 == 0: | |||
self.sendhbMessage(RecordingStatus.RECORDING_RUNNING.value[0]) | |||
put_queue(fb_queue, recording_feedback(request_id, status, progress=progress), timeout=5, is_ex=True) | |||
hb_init_num += 3 | |||
except Exception as e: | |||
logger.exception("心跳线程异常:{}, requestId:{}", e, self.request_id) | |||
logger.info("心跳线程停止完成!requestId:{}", self.request_id) | |||
except Exception: | |||
logger.error("心跳线程异常:{}, requestId:{}", format_exc(), request_id) | |||
finally: | |||
clear_queue(hb_queue) | |||
logger.info("心跳线程停止完成!requestId:{}", request_id) |
@@ -0,0 +1,10 @@ | |||
access_key: "LTAI5tSJ62TLMUb4SZuf285A" | |||
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7" | |||
oss: | |||
endpoint: "http://oss-cn-shanghai.aliyuncs.com" | |||
bucket: "ta-tech-image" | |||
connect_timeout: 30 | |||
vod: | |||
host_address: "https://vod.play.t-aaron.com/" | |||
ecsRegionId: "cn-shanghai" | |||
cateId: 1000468341 |
@@ -0,0 +1,10 @@ | |||
access_key: "LTAI5tSJ62TLMUb4SZuf285A" | |||
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7" | |||
oss: | |||
endpoint: "http://oss-cn-shanghai.aliyuncs.com" | |||
bucket: "ta-tech-image" | |||
connect_timeout: 30 | |||
vod: | |||
host_address: "https://vod.play.t-aaron.com/" | |||
ecsRegionId: "cn-shanghai" | |||
cateId: 1000468340 |
@@ -0,0 +1,11 @@ | |||
access_key: "LTAI5tSJ62TLMUb4SZuf285A" | |||
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7" | |||
oss: | |||
endpoint: "http://oss-cn-shanghai.aliyuncs.com" | |||
bucket: "ta-tech-image" | |||
connect_timeout: 30 | |||
vod: | |||
host_address: "https://vod.play.t-aaron.com/" | |||
ecsRegionId: "cn-shanghai" | |||
cateId: 1000468338 | |||
@@ -0,0 +1,12 @@ | |||
orc: | |||
APP_ID: 28173504 | |||
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi" | |||
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf" | |||
vehicle: | |||
APP_ID: 31096670 | |||
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7" | |||
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa" | |||
person: | |||
APP_ID: 31096755 | |||
API_KEY: "CiWrt4iyxOly36n3kR7utiAG" | |||
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v" |
@@ -0,0 +1,12 @@ | |||
orc: | |||
APP_ID: 28173504 | |||
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi" | |||
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf" | |||
vehicle: | |||
APP_ID: 31096670 | |||
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7" | |||
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa" | |||
person: | |||
APP_ID: 31096755 | |||
API_KEY: "CiWrt4iyxOly36n3kR7utiAG" | |||
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v" |
@@ -0,0 +1,12 @@ | |||
orc: | |||
APP_ID: 28173504 | |||
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi" | |||
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf" | |||
vehicle: | |||
APP_ID: 31096670 | |||
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7" | |||
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa" | |||
person: | |||
APP_ID: 31096755 | |||
API_KEY: "CiWrt4iyxOly36n3kR7utiAG" | |||
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v" |
@@ -1,22 +0,0 @@ | |||
{ | |||
"access_key": "LTAI5tSJ62TLMUb4SZuf285A", | |||
"access_secret": "MWYynm30filZ7x0HqSHlU3pdLVNeI7", | |||
"oss": { | |||
"endpoint": "http://oss-cn-shanghai.aliyuncs.com", | |||
"bucket": "ta-tech-image", | |||
"connect_timeout": 30 | |||
}, | |||
"vod": { | |||
"host_address": "https://vod.play.t-aaron.com/", | |||
"ecsRegionId": "cn-shanghai", | |||
"dev": { | |||
"CateId": 1000468341 | |||
}, | |||
"test": { | |||
"CateId": 1000468338 | |||
}, | |||
"prod": { | |||
"CateId": 1000468340 | |||
} | |||
} | |||
} |
@@ -1,116 +0,0 @@ | |||
{ | |||
"dsp": { | |||
"active": "dev" | |||
}, | |||
"kafka": { | |||
"topic": { | |||
"dsp-alg-online-tasks-topic": "dsp-alg-online-tasks", | |||
"dsp-alg-offline-tasks-topic": "dsp-alg-offline-tasks", | |||
"dsp-alg-image-tasks-topic": "dsp-alg-image-tasks", | |||
"dsp-alg-results-topic": "dsp-alg-task-results", | |||
"dsp-recording-task-topic": "dsp-recording-task", | |||
"dsp-recording-result-topic": "dsp-recording-result" | |||
}, | |||
"dev": { | |||
"bootstrap_servers": ["192.168.11.13:9092"], | |||
"dsp-alg-online-tasks": { | |||
"partition": [0] | |||
}, | |||
"dsp-alg-offline-tasks": { | |||
"partition": [0] | |||
}, | |||
"dsp-alg-task-results": { | |||
"partition": [0] | |||
}, | |||
"producer": { | |||
"acks": -1, | |||
"retries": 3, | |||
"linger_ms": 50, | |||
"retry_backoff_ms": 1000, | |||
"max_in_flight_requests_per_connection": 5 | |||
}, | |||
"consumer": { | |||
"client_id": "dsp_ai_server", | |||
"group_id": "dsp-ai-dev", | |||
"auto_offset_reset": "latest", | |||
"enable_auto_commit": 0, | |||
"max_poll_records": 1 | |||
} | |||
}, | |||
"test": { | |||
"bootstrap_servers": ["106.14.96.218:19092"], | |||
"dsp-alg-online-tasks": { | |||
"partition": [0] | |||
}, | |||
"dsp-alg-offline-tasks": { | |||
"partition": [0] | |||
}, | |||
"dsp-alg-task-results": { | |||
"partition": [0] | |||
}, | |||
"producer": { | |||
"acks": -1, | |||
"retries": 3, | |||
"linger_ms": 50, | |||
"retry_backoff_ms": 1000, | |||
"max_in_flight_requests_per_connection": 5 | |||
}, | |||
"consumer": { | |||
"client_id": "dsp_ai_server", | |||
"group_id": "dsp-ai-test", | |||
"auto_offset_reset": "latest", | |||
"enable_auto_commit": 0, | |||
"max_poll_records": 1 | |||
} | |||
}, | |||
"prod": { | |||
"bootstrap_servers": ["101.132.127.1:19094"], | |||
"dsp-alg-online-tasks": { | |||
"partition": [0] | |||
}, | |||
"dsp-alg-offline-tasks": { | |||
"partition": [0] | |||
}, | |||
"dsp-alg-task-results": { | |||
"partition": [0] | |||
}, | |||
"producer": { | |||
"acks": -1, | |||
"retries": 3, | |||
"linger_ms": 50, | |||
"retry_backoff_ms": 1000, | |||
"max_in_flight_requests_per_connection": 5 | |||
}, | |||
"consumer": { | |||
"client_id": "dsp_ai_server", | |||
"group_id": "dsp-ai-prod", | |||
"auto_offset_reset": "latest", | |||
"enable_auto_commit": 0, | |||
"max_poll_records": 1 | |||
} | |||
} | |||
}, | |||
"video": { | |||
"file_path": "../dsp/video/", | |||
"video_add_water": 0 | |||
}, | |||
"service": { | |||
"frame_score": 0.4, | |||
"filter": { | |||
"picture_similarity": 1, | |||
"similarity": 0.65, | |||
"frame_step": 160 | |||
}, | |||
"timeout": 21600, | |||
"cv2_pull_stream_timeout": 1000, | |||
"cv2_read_stream_timeout": 1000, | |||
"recording_pull_stream_timeout": 600 | |||
}, | |||
"model": { | |||
"limit": 3 | |||
}, | |||
"task": { | |||
"limit": 5 | |||
} | |||
} | |||
@@ -1,17 +0,0 @@ | |||
{ | |||
"orc": { | |||
"APP_ID": 28173504, | |||
"API_KEY": "kqrFE7VuygIaFer7z6cRxzoi", | |||
"SECRET_KEY": "yp7xBokyl4TItyGhay7skAN1cMwfvEXf" | |||
}, | |||
"vehicle": { | |||
"APP_ID": 31096670, | |||
"API_KEY": "Dam3O4tgPRN3qh4OYE82dbg7", | |||
"SECRET_KEY": "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa" | |||
}, | |||
"person": { | |||
"APP_ID": 31096755, | |||
"API_KEY": "CiWrt4iyxOly36n3kR7utiAG", | |||
"SECRET_KEY": "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v" | |||
} | |||
} |
@@ -1,12 +0,0 @@ | |||
{ | |||
"enable_file_log": 1, | |||
"enable_stderr": 1, | |||
"base_path": "../dsp/logs", | |||
"log_name": "dsp.log", | |||
"log_fmt": "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}", | |||
"level": "INFO", | |||
"rotation": "00:00", | |||
"retention": "7 days", | |||
"encoding": "utf8" | |||
} | |||
@@ -0,0 +1,25 @@ | |||
bootstrap_servers: ["192.168.11.13:9092"] | |||
topic: | |||
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks" | |||
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks" | |||
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks" | |||
dsp-alg-results-topic: "dsp-alg-task-results" | |||
dsp-recording-task-topic: "dsp-recording-task" | |||
dsp-recording-result-topic: "dsp-recording-result" | |||
dsp-push-stream-task-topic: "dsp-push-stream-task" | |||
dsp-push-stream-result-topic: "dsp-push-stream-result" | |||
producer: | |||
acks: -1 | |||
retries: 3 | |||
linger_ms: 50 | |||
retry_backoff_ms: 1000 | |||
max_in_flight_requests_per_connection: 5 | |||
consumer: | |||
client_id: "dsp_ai_server" | |||
group_id: "dsp-ai-dev" | |||
auto_offset_reset: "latest" | |||
enable_auto_commit: false | |||
max_poll_records: 1 | |||
@@ -0,0 +1,22 @@ | |||
bootstrap_servers: ["101.132.127.1:19094"] | |||
topic: | |||
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks" | |||
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks" | |||
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks" | |||
dsp-alg-results-topic: "dsp-alg-task-results" | |||
dsp-recording-task-topic: "dsp-recording-task" | |||
dsp-recording-result-topic: "dsp-recording-result" | |||
dsp-push-stream-task-topic: "dsp-push-stream-task" | |||
dsp-push-stream-result-topic: "dsp-push-stream-result" | |||
producer: | |||
acks: -1 | |||
retries: 3 | |||
linger_ms: 50 | |||
retry_backoff_ms: 1000 | |||
max_in_flight_requests_per_connection: 5 | |||
consumer: | |||
client_id: "dsp_ai_server" | |||
group_id: "dsp-ai-prod" | |||
auto_offset_reset: "latest" | |||
enable_auto_commit: false | |||
max_poll_records: 1 |
@@ -0,0 +1,24 @@ | |||
bootstrap_servers: ["106.14.96.218:19092"] | |||
topic: | |||
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks" | |||
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks" | |||
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks" | |||
dsp-alg-results-topic: "dsp-alg-task-results" | |||
dsp-recording-task-topic: "dsp-recording-task" | |||
dsp-recording-result-topic: "dsp-recording-result" | |||
dsp-push-stream-task-topic: "dsp-push-stream-task" | |||
dsp-push-stream-result-topic: "dsp-push-stream-result" | |||
producer: | |||
acks: -1 | |||
retries: 3 | |||
linger_ms: 50 | |||
retry_backoff_ms: 1000 | |||
max_in_flight_requests_per_connection: 5 | |||
consumer: | |||
client_id: "dsp_ai_server" | |||
group_id: "dsp-ai-test" | |||
auto_offset_reset: "latest" | |||
enable_auto_commit: false | |||
max_poll_records: 1 | |||
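The three Kafka YAML files above (dev, prod, test) share the same topics and producer/consumer settings and differ only in bootstrap servers and group id. A hypothetical consumer built from one of them with kafka-python; the project's own CustomerKafkaConsumer wrapper, referenced later in this diff, is not reproduced here:

import json
from kafka import KafkaConsumer

def build_consumer(kafka_cfg, topics):
    c = kafka_cfg["consumer"]
    return KafkaConsumer(*topics,
                         bootstrap_servers=kafka_cfg["bootstrap_servers"],
                         client_id=c["client_id"],
                         group_id=c["group_id"],
                         auto_offset_reset=c["auto_offset_reset"],
                         enable_auto_commit=c["enable_auto_commit"],
                         max_poll_records=c["max_poll_records"],
                         value_deserializer=lambda b: json.loads(b.decode("utf-8")))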
@@ -0,0 +1,10 @@ | |||
enable_file_log: true | |||
enable_stderr: true | |||
base_path: "../dsp/logs" | |||
log_name: "dsp.log" | |||
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}" | |||
level: "INFO" | |||
rotation: "00:00" | |||
retention: "1 days" | |||
encoding: "utf8" | |||
@@ -0,0 +1,10 @@ | |||
enable_file_log: true | |||
enable_stderr: false | |||
base_path: "../dsp/logs" | |||
log_name: "dsp.log" | |||
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}" | |||
level: "INFO" | |||
rotation: "00:00" | |||
retention: "7 days" | |||
encoding: "utf8" | |||
@@ -0,0 +1,10 @@ | |||
enable_file_log: true | |||
enable_stderr: false | |||
base_path: "../dsp/logs" | |||
log_name: "dsp.log" | |||
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}" | |||
level: "INFO" | |||
rotation: "00:00" | |||
retention: "3 days" | |||
encoding: "utf8" | |||
@@ -0,0 +1,30 @@ | |||
video: | |||
# 视频本地保存地址 | |||
file_path: "../dsp/video/" | |||
# 是否添加水印 | |||
video_add_water: false | |||
service: | |||
filter: | |||
# 图片得分多少分以上返回图片 | |||
frame_score: 0.4 | |||
# 图片相似度过滤 | |||
picture_similarity: true | |||
similarity: 0.65 | |||
frame_step: 160 | |||
timeout: 21600 | |||
cv2_pull_stream_timeout: 1000 | |||
cv2_read_stream_timeout: 1000 | |||
recording_pull_stream_timeout: 600 | |||
model: | |||
# 使用哪种识别方式 | |||
# 1 普通方式 | |||
# 2 模型追踪 | |||
model_type: 1 | |||
limit: 3 | |||
task: | |||
# 任务限制5个 | |||
limit: 5 | |||
image: | |||
limit: 20 | |||
@@ -0,0 +1,29 @@ | |||
video: | |||
# 视频本地保存地址 | |||
file_path: "../dsp/video/" | |||
# 是否添加水印 | |||
video_add_water: false | |||
service: | |||
filter: | |||
# 图片得分多少分以上返回图片 | |||
frame_score: 0.4 | |||
# 图片相似度过滤 | |||
picture_similarity: true | |||
similarity: 0.65 | |||
frame_step: 160 | |||
timeout: 21600 | |||
cv2_pull_stream_timeout: 1000 | |||
cv2_read_stream_timeout: 1000 | |||
recording_pull_stream_timeout: 600 | |||
model: | |||
# 使用哪种识别方式 | |||
# 1 普通方式 | |||
# 2 模型追踪 | |||
model_type: 1 | |||
limit: 3 | |||
task: | |||
# 任务限制5个 | |||
limit: 5 | |||
image: | |||
limit: 20 | |||
@@ -0,0 +1,29 @@ | |||
video: | |||
# 视频本地保存地址 | |||
file_path: "../dsp/video/" | |||
# 是否添加水印 | |||
video_add_water: false | |||
service: | |||
filter: | |||
# 图片得分多少分以上返回图片 | |||
frame_score: 0.4 | |||
# 图片相似度过滤 | |||
picture_similarity: true | |||
similarity: 0.65 | |||
frame_step: 160 | |||
timeout: 21600 | |||
cv2_pull_stream_timeout: 1000 | |||
cv2_read_stream_timeout: 1000 | |||
recording_pull_stream_timeout: 600 | |||
model: | |||
# 使用哪种识别方式 | |||
# 1 普通方式 | |||
# 2 模型追踪 | |||
model_type: 1 | |||
limit: 3 | |||
task: | |||
# 任务限制5个 | |||
limit: 5 | |||
image: | |||
limit: 20 | |||
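The model.model_type switch above (1 = normal recognition, 2 = model tracking) corresponds to the ModelMethodTypeEnum added later in this diff; a hedged bit of glue, with the surrounding service code assumed:

def resolve_method(service_cfg):
    from enums.ModelTypeEnum import ModelMethodTypeEnum
    return ModelMethodTypeEnum.TRACE if service_cfg["model"]["model_type"] == 2 \
        else ModelMethodTypeEnum.NORMAL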
@@ -14,11 +14,13 @@ from util.LogUtils import init_log | |||
if __name__ == '__main__': | |||
multiprocessing.set_start_method('spawn') | |||
base_dir = dirname(realpath(__file__)) | |||
init_log(base_dir) | |||
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙") | |||
# 获取主程序执行根路径 | |||
arg = argv | |||
logger.info("脚本启动参数: {}", arg) | |||
print("脚本启动参数: ", arg) | |||
envs = ('dev', 'test', 'prod') | |||
env = envs[0] | |||
active = [env for env in envs if env in arg] | |||
DispatcherService(base_dir, active) | |||
if len(active) != 0: | |||
env = active[0] | |||
init_log(base_dir, env) | |||
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙") | |||
DispatcherService(base_dir, env) |
@@ -1,30 +1,53 @@ | |||
def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="", | |||
modelCode="", detectTargetCode="", analyse_time="", analyse_results=""): | |||
taskfb = {} | |||
results = [] | |||
result_msg = {} | |||
taskfb["request_id"] = requestId | |||
taskfb["status"] = status | |||
taskfb["type"] = type | |||
taskfb["error_code"] = error_code | |||
taskfb["error_msg"] = error_msg | |||
taskfb["progress"] = progress | |||
result_msg["original_url"] = original_url | |||
result_msg["sign_url"] = sign_url | |||
result_msg["analyse_results"] = analyse_results | |||
result_msg["model_code"] = modelCode | |||
result_msg["detect_targets_code"] = detectTargetCode | |||
result_msg["analyse_time"] = analyse_time | |||
results.append(result_msg) | |||
taskfb["results"] = results | |||
return taskfb | |||
from json import dumps | |||
from util.TimeUtils import now_date_to_str | |||
def recording_feedback(requestId, status, error_code="", error_msg="", recording_video_url=""): | |||
rdfb = {} | |||
rdfb["request_id"] = requestId | |||
rdfb["status"] = status | |||
rdfb["error_code"] = error_code | |||
rdfb["error_msg"] = error_msg | |||
rdfb["recording_video_url"] = recording_video_url | |||
return rdfb | |||
def message_feedback(requestId, status, analyse_type, error_code="", error_msg="", progress="", original_url="", | |||
sign_url="", modelCode="", detectTargetCode="", analyse_results="", video_url="", ai_video_url=""): | |||
if len(analyse_results) > 0: | |||
analyse_results = dumps(analyse_results) | |||
taskbar = { | |||
"request_id": requestId, | |||
"status": status, | |||
"type": analyse_type, | |||
"video_url": video_url, | |||
"ai_video_url": ai_video_url, | |||
"error_code": error_code, | |||
"error_msg": error_msg, | |||
"progress": progress, | |||
"results": [ | |||
{ | |||
"original_url": original_url, | |||
"sign_url": sign_url, | |||
"analyse_results": analyse_results, | |||
"model_code": modelCode, | |||
"detect_targets_code": detectTargetCode, | |||
"analyse_time": now_date_to_str() | |||
} | |||
] | |||
} | |||
return {"feedback": taskbar} | |||
def recording_feedback(requestId, status, error_code="", error_msg="", progress="", video_url=""): | |||
rdf = { | |||
"request_id": requestId, | |||
"status": status, | |||
"error_code": error_code, | |||
"error_msg": error_msg, | |||
"progress": progress, | |||
"video_url": video_url | |||
} | |||
return {"recording": rdf} | |||
def pull_stream_feedback(requestId, status, error_code="", error_msg="", videoInfo=[]): | |||
return {"pull_stream": { | |||
"request_id": requestId, | |||
"video_info_list": videoInfo, | |||
"push_stream_status": status, | |||
"error_code": error_code, | |||
"error_msg": error_msg, | |||
"current_time": now_date_to_str() | |||
}} |
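For reference, illustrative calls to the builders above (request ids and status codes are placeholders):

fb_recording = recording_feedback("req-0001", "10", progress="50")
# => {"recording": {"request_id": "req-0001", "status": "10", "error_code": "",
#                   "error_msg": "", "progress": "50", "video_url": ""}}
fb_push = pull_stream_feedback("req-0002", "15",
                               videoInfo=[{"id": "cam-01", "status": "15"}])
# => {"pull_stream": {"request_id": "req-0002", "video_info_list": [...],
#                     "push_stream_status": "15", ..., "current_time": now_date_to_str()}}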
@@ -1,12 +0,0 @@ | |||
class Param: | |||
__slots__ = ('fbqueue', 'msg', 'analyse_type', 'base_dir', 'context', 'gpu_name') | |||
def __init__(self, fbqueue, msg, analyse_type, base_dir, context, gpu_name): | |||
self.fbqueue = fbqueue | |||
self.msg = msg | |||
self.analyse_type = analyse_type | |||
self.base_dir = base_dir | |||
self.context = context | |||
self.gpu_name = gpu_name |
@@ -16,6 +16,9 @@ class AnalysisType(Enum): | |||
# 录屏 | |||
RECORDING = "9999" | |||
# 转推流 | |||
PULLTOPUSH = "10000" | |||
@@ -63,9 +63,17 @@ class ExceptionType(Enum): | |||
PUSH_STREAM_EXCEPTION = ("SP028", "推流异常!") | |||
NOT_REQUESTID_TASK_EXCEPTION = ("SP993", "未查询到该任务,无法停止任务!") | |||
MODEL_DUPLICATE_EXCEPTION = ("SP029", "存在重复模型配置!") | |||
DETECTION_TARGET_NOT_SUPPORT = ("SP031", "存在不支持的检测目标!") | |||
TASK_EXCUTE_TIMEOUT = ("SP032", "任务执行超时!") | |||
GPU_EXCEPTION = ("SP994", "GPU出现异常!") | |||
PUSH_STREAM_URL_IS_NULL = ("SP033", "拉流、推流地址不能为空!") | |||
PULL_STREAM_NUM_LIMIT_EXCEPTION = ("SP034", "转推流数量超过限制!") | |||
NOT_REQUESTID_TASK_EXCEPTION = ("SP993", "未查询到该任务,无法停止任务!") | |||
NO_RESOURCES = ("SP995", "服务器暂无资源可以使用,请稍后30秒后再试!") | |||
@@ -1,50 +1,452 @@ | |||
import sys | |||
from enum import Enum, unique | |||
from common.Constant import COLOR | |||
sys.path.extend(['..', '../AIlib2']) | |||
from DMPR import DMPRModel | |||
from DMPRUtils.jointUtil import dmpr_yolo | |||
from segutils.segmodel import SegModel | |||
from utilsK.queRiver import riverDetSegMixProcess | |||
from segutils.trafficUtils import tracfficAccidentMixFunction | |||
from utilsK.drownUtils import mixDrowing_water_postprocess | |||
from utilsK.noParkingUtils import mixNoParking_road_postprocess | |||
from utilsK.illParkingUtils import illParking_postprocess | |||
''' | |||
参数说明 | |||
1. 编号 | |||
2. 模型编号 | |||
3. 模型名称 | |||
4. 选用的模型名称 | |||
5. 模型配置 | |||
6. 模型引用配置[Detweights文件, Segweights文件, 引用计数] | |||
''' | |||
@unique | |||
class ModelType(Enum): | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river') | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"], | |||
'seg_nclass': 2, | |||
'trtFlag_seg': True, | |||
'trtFlag_det': True, | |||
'segRegionCnt': 1, | |||
'segPar': { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': riverDetSegMixProcess, | |||
'pars': { | |||
'slopeIndex': [5, 6, 7], | |||
'riverIou': 0.1 | |||
} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Detweights': "../AIlib2/weights/river/yolov5_%s_fp16.engine" % gpuName, | |||
'Segweights': '../AIlib2/weights/river/stdc_360X640_%s_fp16.engine' % gpuName | |||
}) | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2') | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾"], | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3] | |||
'Detweights': "../AIlib2/weights/forest2/yolov5_%s_fp16.engine" % gpuName, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'slopeIndex': [], | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2') | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: { | |||
'device': str(device), | |||
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"], | |||
'trtFlag_seg': True, | |||
'trtFlag_det': True, | |||
'seg_nclass': 3, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': tracfficAccidentMixFunction, | |||
'pars': { | |||
'modelSize': (640, 360), | |||
'RoadArea': 16000, | |||
'roadVehicleAngle': 15, | |||
'speedRoadVehicleAngleMax': 75, | |||
'roundness': 1.0, | |||
'cls': 9, | |||
'vehicleFactor': 0.1, | |||
'confThres': 0.25, | |||
'roadIou': 0.6, | |||
'radius': 50, | |||
'vehicleFlag': False, | |||
'distanceFlag': False | |||
} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3] | |||
'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName, | |||
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3] | |||
'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName | |||
}) | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None) | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, None) | |||
PLATE_MODEL = ("5", "005", "车牌模型", None) | |||
PLATE_MODEL = ("5", "005", "车牌模型", None, None) | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle') | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["车辆"], | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'slopeIndex': [], | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
'Detweights': "../AIlib2/weights/vehicle/yolov5_%s_fp16.engine" % gpuName, | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian') | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["行人"], | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
'Detweights': "../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine" % gpuName, | |||
'slopeIndex': [], | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire') | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["烟雾", "火焰"], | |||
'seg_nclass': 2, # 分割模型类别数目,默认2类 | |||
'segRegionCnt': 0, | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
'Detweights': "../AIlib2/weights/smogfire/yolov5_%s_fp16.engine" % gpuName, | |||
'slopeIndex': [], | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer') | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["钓鱼", "游泳"], | |||
'seg_nclass': 2, # 分割模型类别数目,默认2类 | |||
'segRegionCnt': 0, | |||
'slopeIndex': [], | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
'Detweights': "../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine" % gpuName, | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad') | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["违法种植"], | |||
'seg_nclass': 2, # 分割模型类别数目,默认2类 | |||
'segRegionCnt': 0, | |||
'slopeIndex': [], | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
'Detweights': "../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine" % gpuName, | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship2') | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship2', lambda device, gpuName: { | |||
'model_size': (608, 608), | |||
'K': 100, | |||
'conf_thresh': 0.18, | |||
'device': 'cuda:%s' % device, | |||
'down_ratio': 4, | |||
'num_classes': 15, | |||
'weights': '../AIlib2/weights/ship2/obb_608X608_%s_fp16.engine' % gpuName, | |||
'dataset': 'dota', | |||
'half': False, | |||
'mean': (0.5, 0.5, 0.5), | |||
'std': (1, 1, 1), | |||
'heads': {'hm': None, 'wh': 10, 'reg': 2, 'cls_theta': 1}, | |||
'decoder': None, | |||
'test_flag': True, | |||
"rainbows": COLOR, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'drawBox': False, | |||
'label_array': None, | |||
'labelnames': ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"), | |||
}) | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None) | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, None) | |||
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency') | |||
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["人"], | |||
'seg_nclass': 2, # 分割模型类别数目,默认2类 | |||
'segRegionCnt': 0, | |||
'slopeIndex': [], | |||
'trtFlag_det': True, | |||
'trtFlag_seg': False, | |||
'Detweights': "../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine" % gpuName, | |||
'segPar': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': None | |||
}) | |||
RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2') | |||
RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只", | |||
"蓝藻"], | |||
'trtFlag_seg': True, | |||
'trtFlag_det': True, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 1, | |||
'segPar': { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': riverDetSegMixProcess, | |||
'pars': { | |||
'slopeIndex': [1, 3, 4, 7], | |||
'riverIou': 0.1 | |||
} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.3, | |||
"ovlap_thres_crossCategory": 0.65, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3] | |||
'Detweights': "../AIlib2/weights/river2/yolov5_%s_fp16.engine" % gpuName, | |||
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3] | |||
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName | |||
}) | |||
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement') | |||
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: { | |||
'device': device, | |||
'gpu_name': gpuName, | |||
'labelnames': ["车辆", "垃圾", "商贩", "违停"], | |||
'seg_nclass': 4, # 分割模型类别数目,默认2类 | |||
'segRegionCnt': 2, | |||
'trtFlag_det': True, | |||
'trtFlag_seg': True, | |||
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName, | |||
'segPar': { | |||
'depth_factor': 32, | |||
'NUM_FEATURE_MAP_CHANNEL': 6, | |||
'dmpr_thresh': 0.3, | |||
'dmprimg_size': 640, | |||
'mixFunction': { | |||
'function': dmpr_yolo, | |||
'pars': {'carCls': 0, 'illCls': 3} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName | |||
}) | |||
DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning') | |||
DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["人头", "人", "船只"], | |||
'trtFlag_seg': True, | |||
'trtFlag_det': True, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': mixDrowing_water_postprocess, | |||
'pars': { | |||
'modelSize': (640, 360) | |||
} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3] | |||
'Detweights': "../AIlib2/weights/drowning/yolov5_%s_fp16.engine" % gpuName, | |||
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3] | |||
'Segweights': '../AIlib2/weights/drowning/stdc_360X640_%s_fp16.engine' % gpuName | |||
}) | |||
NOPARKING_MODEL = ("18", "018", "城市违章模型", 'noParking') | |||
NOPARKING_MODEL = ( | |||
"18", "018", "城市违章模型", 'noParking', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["车辆", "违停"], | |||
'trtFlag_seg': True, | |||
'trtFlag_det': True, | |||
'seg_nclass': 4, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True, ###分割模型预处理参数 | |||
'mixFunction': { | |||
'function': mixNoParking_road_postprocess, | |||
'pars': { | |||
'modelSize': (640, 360), | |||
'roundness': 0.3, | |||
'cls': 9, | |||
'laneArea': 10, | |||
'laneAngleCha': 5, | |||
'RoadArea': 16000 | |||
} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
'Detweights': "../AIlib2/weights/noParking/yolov5_%s_fp16.engine" % gpuName, | |||
'Segweights': '../AIlib2/weights/noParking/stdc_360X640_%s_fp16.engine' % gpuName | |||
}) | |||
ILLPARKING_MODEL = ("19", "019", "车辆违停模型", 'illParking', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["车", "T角点", "L角点", "违停"], | |||
'trtFlag_seg': False, | |||
'trtFlag_det': True, | |||
'seg_nclass': 4, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'mixFunction': { | |||
'function': illParking_postprocess, | |||
'pars': {} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
'Detweights': "../AIlib2/weights/illParking/yolov5_%s_fp16.engine" % gpuName, | |||
'Segweights': None | |||
}) | |||
@staticmethod | |||
def checkCode(code): | |||
for model in ModelType: | |||
if model.value[1] == code: | |||
@@ -75,3 +477,15 @@ BAIDU_MODEL_TARGET_CONFIG = { | |||
BaiduModelTarget.HUMAN_DETECTION.value[1]: BaiduModelTarget.HUMAN_DETECTION, | |||
BaiduModelTarget.PEOPLE_COUNTING.value[1]: BaiduModelTarget.PEOPLE_COUNTING | |||
} | |||
EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"} | |||
# 模型分析方式 | |||
@unique | |||
class ModelMethodTypeEnum(Enum): | |||
# 方式一: 正常识别方式 | |||
NORMAL = 1 | |||
# 方式二: 追踪识别方式 | |||
TRACE = 2 |
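The enum above now carries a fifth tuple element: a lambda over (device, gpuName) that builds the model's parameter dictionary. A hypothetical lookup; the device index and GPU name are placeholders, the dispatcher derives the real values from its runtime context elsewhere in this diff:

model = ModelType.WATER_SURFACE_MODEL
assert ModelType.checkCode(model.value[1])      # "001"
if model.value[4] is not None:
    conf = model.value[4]('0', '2080Ti')        # build the per-GPU parameter dict
    print(model.value[2], conf['Detweights'], conf['Segweights'])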
@@ -0,0 +1,688 @@ | |||
import sys | |||
from enum import Enum, unique | |||
from common.Constant import COLOR | |||
sys.path.extend(['..', '../AIlib2']) | |||
from segutils.segmodel import SegModel | |||
from utilsK.queRiver import riverDetSegMixProcess | |||
from segutils.trafficUtils import tracfficAccidentMixFunction | |||
from utilsK.drownUtils import mixDrowing_water_postprocess | |||
from utilsK.noParkingUtils import mixNoParking_road_postprocess | |||
from utilsK.illParkingUtils import illParking_postprocess | |||
from DMPR import DMPRModel | |||
from DMPRUtils.jointUtil import dmpr_yolo | |||
''' | |||
参数说明 | |||
1. 编号 | |||
2. 模型编号 | |||
3. 模型名称 | |||
4. 选用的模型名称 | |||
''' | |||
@unique | |||
class ModelType2(Enum): | |||
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/river/yolov5_%s_fp16.engine" % gpuName, | |||
'detModelpara': [], | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': { | |||
'trtFlag_seg': True, | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), 'numpy': False, | |||
'RGB_convert_first': True, # 分割模型预处理参数 | |||
'mixFunction': { | |||
'function': riverDetSegMixProcess, | |||
'pars': { | |||
'slopeIndex': [5, 6, 7], | |||
'riverIou': 0.1 | |||
} | |||
} | |||
}, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'Segweights': '../AIlib2/weights/river/stdc_360X640_%s_fp16.engine' % gpuName, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 80, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/forest2/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 80, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName, | |||
'seg_nclass': 3, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'trtFlag_seg': True, | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': tracfficAccidentMixFunction, | |||
'pars': { | |||
'RoadArea': 16000, | |||
'modelSize': (640, 360), | |||
'vehicleArea': 10, | |||
'roadVehicleAngle': 15, | |||
'speedRoadVehicleAngleMax': 75, | |||
'roundness': 1.0, | |||
'cls': 9, | |||
'vehicleFactor': 0.1, | |||
'confThres': 0.25, | |||
'roadIou': 0.6, | |||
'radius': 50, | |||
'vehicleFlag': False, | |||
'distanceFlag': False | |||
} | |||
} | |||
}, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 20, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': 2 | |||
} | |||
}) | |||
EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, None) | |||
PLATE_MODEL = ("5", "005", "车牌模型", None, None) | |||
VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["车辆"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/vehicle/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 40, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["行人"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["烟雾", "火焰"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/smogfire/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 40, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["钓鱼", "游泳"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 40, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
}, | |||
}) | |||
COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["违法种植"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 40, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
SHIP_MODEL = ("11", "011", "船只模型", 'ship2', lambda device, gpuName: { | |||
'obbModelPar': { | |||
'labelnames': ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"], | |||
'model_size': (608, 608), | |||
'K': 100, | |||
'conf_thresh': 0.3, | |||
'down_ratio': 4, | |||
'num_classes': 15, | |||
'dataset': 'dota', | |||
'heads': { | |||
'hm': None, | |||
'wh': 10, | |||
'reg': 2, | |||
'cls_theta': 1 | |||
}, | |||
'mean': (0.5, 0.5, 0.5), | |||
'std': (1, 1, 1), | |||
'half': False, | |||
'test_flag': True, | |||
'decoder': None, | |||
'weights': '../AIlib2/weights/ship2/obb_608X608_%s_fp16.engine' % gpuName | |||
}, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'device': "cuda:%s" % device, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'drawBox': False, | |||
'drawPar': { | |||
"rainbows": COLOR, | |||
'digitWordFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'wordSize': 40, | |||
'fontSize': 1.0, | |||
'label_location': 'leftTop' | |||
} | |||
}, | |||
'labelnames': ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"] | |||
}) | |||
BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, None) | |||
CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["人"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': None, | |||
'Segweights': None, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 40, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只", | |||
"蓝藻"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/river2/yolov5_%s_fp16.engine" % gpuName, | |||
'seg_nclass': 2, | |||
'segRegionCnt': 0, | |||
'segPar': { | |||
'trtFlag_seg': True, | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), 'numpy': False, | |||
'RGB_convert_first': True, # 分割模型预处理参数 | |||
'mixFunction': { | |||
'function': riverDetSegMixProcess, | |||
'pars': { | |||
'slopeIndex': [1, 3, 4, 7], | |||
'riverIou': 0.1 | |||
} | |||
} | |||
}, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.3, | |||
"ovlap_thres_crossCategory": 0.65, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 80, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 3 | |||
} | |||
}) | |||
CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement2', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["车辆", "垃圾", "商贩", "违停"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/cityMangement2/yolov5_%s_fp16.engine" % gpuName, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'seg_nclass': 4, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'trtFlag_seg': True, | |||
'depth_factor': 32, | |||
'NUM_FEATURE_MAP_CHANNEL': 6, | |||
'dmpr_thresh': 0.3, | |||
'dmprimg_size': 640, | |||
'mixFunction': { | |||
'function': dmpr_yolo, | |||
'pars': {'carCls': 0, 'illCls': 3} | |||
} | |||
}, | |||
'Segweights': '../AIlib2/weights/cityMangement2/dmpr_%s.engine' % gpuName, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 20, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'segLineShow': False, | |||
'waterLineColor': (0, 255, 255), | |||
'waterLineWidth': 2 | |||
} | |||
}) | |||
DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["人头", "人", "船只"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/drowning/yolov5_%s_fp16.engine" % gpuName, | |||
'seg_nclass': 4, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'trtFlag_seg': True, | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': mixDrowing_water_postprocess, | |||
'pars': { | |||
'modelSize': (640, 360), | |||
} | |||
} | |||
}, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'Segweights': '../AIlib2/weights/drowning/stdc_360X640_%s_fp16.engine' % gpuName, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 20, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': 2 | |||
} | |||
}) | |||
NOPARKING_MODEL = ( | |||
"18", "018", "城市违章模型", 'noParking', lambda device, gpuName: { | |||
'device': device, | |||
'labelnames': ["车辆", "违停"], | |||
'half': True, | |||
'trtFlag_det': True, | |||
'Detweights': "../AIlib2/weights/noParking/yolov5_%s_fp16.engine" % gpuName, | |||
'seg_nclass': 4, | |||
'segRegionCnt': 2, | |||
'segPar': { | |||
'trtFlag_seg': True, | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'predResize': True, | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': mixNoParking_road_postprocess, | |||
'pars': {'modelSize': (640, 360), 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5, | |||
'RoadArea': 16000} | |||
} | |||
}, | |||
'trackPar': { | |||
'sort_max_age': 2, # 跟踪链断裂时允许目标消失最大的次数。超过之后,会认为是新的目标。 | |||
'sort_min_hits': 3, # 每隔目标连续出现的次数,超过这个次数才认为是一个目标。 | |||
'sort_iou_thresh': 0.2, # 检测最小的置信度。 | |||
'det_cnt': 5, # 每隔几次做一个跟踪和检测,默认10。 | |||
'windowsize': 25, # 轨迹平滑长度,一定是奇数,表示每隔几帧做一平滑,默认29。一个目标在多个帧中出现,每一帧中都有一个位置,这些位置的连线交轨迹。 | |||
'patchCnt': 100, # 每次送入图像的数量,不宜少于100帧。 | |||
}, | |||
'Segweights': '../AIlib2/weights/noParking/stdc_360X640_%s_fp16.engine' % gpuName, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.25, | |||
"classes": 9, | |||
"rainbows": COLOR | |||
}, | |||
'txtFontSize': 20, | |||
'digitFont': { | |||
'line_thickness': 2, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.0, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': 2 | |||
} | |||
} | |||
) | |||
@staticmethod | |||
def checkCode(code): | |||
for model in ModelType2: | |||
if model.value[1] == code: | |||
return True | |||
return False | |||
''' | |||
参数1: 检测目标名称 | |||
参数2: 检测目标 | |||
参数3: 初始化百度检测客户端 | |||
''' | |||
@unique | |||
class BaiduModelTarget2(Enum): | |||
VEHICLE_DETECTION = ( | |||
"车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id)) | |||
HUMAN_DETECTION = ( | |||
"人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id)) | |||
PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id)) | |||
BAIDU_MODEL_TARGET_CONFIG2 = { | |||
BaiduModelTarget2.VEHICLE_DETECTION.value[1]: BaiduModelTarget2.VEHICLE_DETECTION, | |||
BaiduModelTarget2.HUMAN_DETECTION.value[1]: BaiduModelTarget2.HUMAN_DETECTION, | |||
BaiduModelTarget2.PEOPLE_COUNTING.value[1]: BaiduModelTarget2.PEOPLE_COUNTING | |||
} | |||
EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"} | |||
# 模型分析方式 | |||
@unique | |||
class ModelMethodTypeEnum2(Enum): | |||
# 方式一: 正常识别方式 | |||
NORMAL = 1 | |||
# 方式二: 追踪识别方式 | |||
TRACE = 2 |
@@ -7,10 +7,12 @@ class RecordingStatus(Enum): | |||
RECORDING_WAITING = ("5", "待录制") | |||
RECORDING_RUNNING = ("10", "录制中") | |||
RECORDING_RETRYING = ("10", "重试中") | |||
RECORDING_SUCCESS = ("15", "录制完成") | |||
RECORDING_RUNNING = ("15", "录制中") | |||
RECORDING_TIMEOUT = ("20", "录制超时") | |||
RECORDING_SUCCESS = ("20", "录制完成") | |||
RECORDING_FAILED = ("25", "录制失败") | |||
RECORDING_TIMEOUT = ("25", "录制超时") | |||
RECORDING_FAILED = ("30", "录制失败") |
@@ -0,0 +1,33 @@ | |||
from enum import Enum, unique | |||
@unique | |||
class PushStreamStatus(Enum): | |||
WAITING = (5, "待推流") | |||
RETRYING = (10, "重试中") | |||
RUNNING = (15, "推流中") | |||
STOPPING = (20, "停止中") | |||
SUCCESS = (25, "完成") | |||
TIMEOUT = (30, "超时") | |||
FAILED = (35, "失败") | |||
@unique | |||
class ExecuteStatus(Enum): | |||
WAITING = (5, "待执行") | |||
RUNNING = (10, "执行中") | |||
STOPPING = (15, "停止中") | |||
SUCCESS = (20, "执行完成") | |||
TIMEOUT = (25, "超时") | |||
FAILED = (30, "失败") |
@@ -1,29 +1,34 @@ | |||
# -*- coding: utf-8 -*- | |||
import time | |||
from os.path import join | |||
from traceback import format_exc | |||
from cerberus import Validator | |||
from torch.cuda import is_available | |||
from common.YmlConstant import SCHEMA | |||
from common.Constant import ONLINE_START_SCHEMA, ONLINE_STOP_SCHEMA, OFFLINE_START_SCHEMA, OFFLINE_STOP_SCHEMA, \ | |||
IMAGE_SCHEMA, RECORDING_START_SCHEMA, RECORDING_STOP_SCHEMA, PULL2PUSH_START_SCHEMA, PULL2PUSH_STOP_SCHEMA | |||
from common.YmlConstant import service_yml_path, kafka_yml_path | |||
from concurrency.FeedbackThread import FeedbackThread | |||
from entity.FeedBack import message_feedback, recording_feedback | |||
from entity.TaskParam import Param | |||
from concurrency.IntelligentRecognitionProcess2 import OnlineIntelligentRecognitionProcess2, \ | |||
OfflineIntelligentRecognitionProcess2, PhotosIntelligentRecognitionProcess2 | |||
from concurrency.Pull2PushStreamProcess import PushStreamProcess | |||
from entity.FeedBack import message_feedback, recording_feedback, pull_stream_feedback | |||
from enums.AnalysisStatusEnum import AnalysisStatus | |||
from enums.AnalysisTypeEnum import AnalysisType | |||
from enums.ExceptionEnum import ExceptionType | |||
from enums.ModelTypeEnum import ModelMethodTypeEnum, ModelType | |||
from enums.RecordingStatusEnum import RecordingStatus | |||
from enums.StatusEnum import PushStreamStatus, ExecuteStatus | |||
from exception.CustomerException import ServiceException | |||
from util import TimeUtils | |||
from loguru import logger | |||
from multiprocessing import Queue | |||
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, \ | |||
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess, ScreenRecordingProcess | |||
from util import GPUtils | |||
from util.CpuUtils import check_cpu, print_cpu_ex_status | |||
from util.CpuUtils import print_cpu_ex_status | |||
from util.FileUtils import create_dir_not_exist | |||
from util.GPUtils import get_first_gpu_name, print_gpu_ex_status | |||
from util.GPUtils import get_first_gpu_name, print_gpu_ex_status, check_cude_is_available | |||
from util.KafkaUtils import CustomerKafkaConsumer | |||
from util.QueUtil import put_queue | |||
from util.RWUtils import getConfigs | |||
''' | |||
@@ -32,44 +37,37 @@ from util.RWUtils import getConfigs | |||
class DispatcherService: | |||
__slots__ = ( | |||
'__base_dir', | |||
'__context', | |||
'__feedbackThread', | |||
'__listeningProcesses', | |||
'__fbQueue', | |||
'__topics', | |||
'__analysisType', | |||
'__gpu_name', | |||
'__resource_status' | |||
) | |||
""" | |||
初始化 | |||
""" | |||
def __init__(self, base_dir, active): | |||
if not is_available(): | |||
raise Exception("cuda不在活动状态, 请检测显卡驱动是否正常!!!!") | |||
self.__context = getConfigs(base_dir, 'config/dsp_application.json') | |||
create_dir_not_exist(base_dir, self.__context["video"]["file_path"]) | |||
self.__base_dir = base_dir | |||
if len(active) > 0: | |||
self.__context["dsp"]["active"] = active[0] | |||
self.__resource_status = False | |||
self.__feedbackThread = None # 初始化反馈线程对象 | |||
__slots__ = ('__context', '__feedbackThread', '__listeningProcesses', '__fbQueue', '__topics', '__task_type', | |||
'__kafka_config', '__recordingProcesses', '__pull2PushProcesses') | |||
def __init__(self, base_dir, env): | |||
# 检测cuda是否活动 | |||
check_cude_is_available() | |||
# 获取全局上下文配置 | |||
self.__context = getConfigs(join(base_dir, service_yml_path % env)) | |||
# 创建任务执行, 视频保存路径 | |||
create_dir_not_exist(join(base_dir, self.__context["video"]["file_path"])) | |||
# 将根路径和环境设置到上下文中 | |||
self.__context["base_dir"], self.__context["env"] = base_dir, env | |||
# 问题反馈线程 | |||
self.__feedbackThread, self.__fbQueue = None, Queue() | |||
# 实时、离线、图片任务进程字典 | |||
self.__listeningProcesses = {} | |||
self.__fbQueue = Queue() | |||
# 录屏任务进程字典 | |||
self.__recordingProcesses = {} | |||
# 转推流任务进程字典 | |||
self.__pull2PushProcesses = {} | |||
self.__kafka_config = getConfigs(join(base_dir, kafka_yml_path % env)) | |||
self.__topics = ( | |||
self.__context["kafka"]["topic"]["dsp-alg-online-tasks-topic"], | |||
self.__context["kafka"]["topic"]["dsp-alg-offline-tasks-topic"], | |||
self.__context["kafka"]["topic"]["dsp-alg-image-tasks-topic"], | |||
self.__context["kafka"]["topic"]["dsp-recording-task-topic"] | |||
self.__kafka_config["topic"]["dsp-alg-online-tasks-topic"], # 实时监听topic | |||
self.__kafka_config["topic"]["dsp-alg-offline-tasks-topic"], # 离线监听topic | |||
self.__kafka_config["topic"]["dsp-alg-image-tasks-topic"], # 图片监听topic | |||
self.__kafka_config["topic"]["dsp-recording-task-topic"], # 录屏监听topic | |||
self.__kafka_config["topic"]["dsp-push-stream-task-topic"] # 推流监听topic | |||
) | |||
self.__analysisType = { | |||
# 对应topic的各个lambda表达式 | |||
self.__task_type = { | |||
self.__topics[0]: (AnalysisType.ONLINE.value, lambda x, y: self.online(x, y), | |||
lambda x, y, z: self.identify_method(x, y, z)), | |||
self.__topics[1]: (AnalysisType.OFFLINE.value, lambda x, y: self.offline(x, y), | |||
@@ -77,242 +75,197 @@ class DispatcherService: | |||
self.__topics[2]: (AnalysisType.IMAGE.value, lambda x, y: self.image(x, y), | |||
lambda x, y, z: self.identify_method(x, y, z)), | |||
self.__topics[3]: (AnalysisType.RECORDING.value, lambda x, y: self.recording(x, y), | |||
lambda x, y, z: self.recording_method(x, y, z)) | |||
lambda x, y, z: self.recording_method(x, y, z)), | |||
self.__topics[4]: (AnalysisType.PULLTOPUSH.value, lambda x, y: self.pullStream(x, y), | |||
lambda x, y, z: self.push_stream_method(x, y, z)) | |||
} | |||
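For orientation, here is a minimal self-contained sketch of the dispatch pattern used above, where each topic maps to a tuple of (analysis type, per-topic handler, shared wrapper); the names below are illustrative stand-ins, not the service's real handlers.
# Minimal sketch of the topic -> (type, handler, wrapper) dispatch pattern; illustrative names only.
def wrap(handler, message, analysis_type):
    # shared error handling would go here (see identify_method further below)
    handler(message, analysis_type)
def handle_online(message, analysis_type):
    print("online", analysis_type, message["command"])
task_type = {
    "dsp-alg-online-tasks-topic": ("ONLINE", handle_online, wrap),
}
topic, message = "dsp-alg-online-tasks-topic", {"request_id": "1", "command": "start"}
a_type, handler, wrapper = task_type[topic]
wrapper(handler, message, a_type)  # same call shape as topic_method[2](topic_method[1], message, topic_method[0])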
gpu_name_array = get_first_gpu_name() | |||
gpu_codes = ('3090', '2080', '4090', 'A10') | |||
gpu_array = [g for g in gpu_codes if g in gpu_name_array] | |||
self.__gpu_name = '2080Ti' | |||
gpu_array = [g for g in ('3090', '2080', '4090', 'A10') if g in gpu_name_array] | |||
gpu_name = '2080Ti' | |||
if len(gpu_array) > 0: | |||
if gpu_array[0] != '2080': | |||
self.__gpu_name = gpu_array[0] | |||
gpu_name = gpu_array[0] | |||
else: | |||
raise Exception("GPU资源不在提供的模型所支持的范围内!请先提供对应的GPU模型!") | |||
logger.info("当前服务环境为: {}, 服务器GPU使用型号: {}", self.__context["dsp"]["active"], self.__gpu_name) | |||
logger.info("当前服务环境为: {}, 服务器GPU使用型号: {}", env, gpu_name) | |||
self.__context["gpu_name"] = gpu_name | |||
self.start_service() | |||
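The constructor ends by calling start_service(), which blocks in the polling loop, so the process entry point only needs to construct the service. A minimal sketch, assuming dsp_master.py receives the environment name (dev/test/prod) as its first argument, as the start script further below suggests:
# Hypothetical dsp_master.py entry point; the argument handling is an assumption.
import sys
from os.path import abspath, dirname
if __name__ == "__main__":
    base_dir = dirname(abspath(__file__))
    env = sys.argv[1] if len(sys.argv) > 1 else "dev"
    DispatcherService(base_dir, env)  # DispatcherService is the class defined above; this call blocks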
# 服务调用启动方法 | |||
def start_service(self): | |||
# 初始化kafka监听者 | |||
customerKafkaConsumer = CustomerKafkaConsumer(self.__context, topics=self.__topics) | |||
customerKafkaConsumer = CustomerKafkaConsumer(self.__kafka_config, topics=self.__topics) | |||
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙") | |||
# 循环消息处理 | |||
start_time = time.time() | |||
# persistent_time = time.time() | |||
# full_count = 0 | |||
while True: | |||
try: | |||
# 检查任务进程运行情况,去除活动的任务 | |||
# 检查任务进程运行情况,去除结束的任务 | |||
self.check_process_task() | |||
start_time = self.check_service_resource(start_time) | |||
# if len(self.__listeningProcesses) > 0: | |||
# now = time.time() | |||
# requestIds = list(self.__listeningProcesses.keys()) | |||
# requestId = requestIds[-1] | |||
# task_process = self.__listeningProcesses.get(requestId) | |||
# end_time = now - task_process.start_proccess_time | |||
# if end_time > 80 and task_process.pullQueue.full() and time.time() - persistent_time < 10: | |||
# full_count += 1 | |||
# if full_count > 4: | |||
# logger.error("服务器资源限制, 暂无资源可以使用! requestId:{}", requestId) | |||
# task_process.sendEvent({"command": "stop_ex"}) | |||
# full_count = 0 | |||
# persistent_time = time.time() | |||
# if end_time > 80 and task_process.pullQueue.full() and time.time() - persistent_time >= 10: | |||
# full_count = 0 | |||
# persistent_time = time.time() | |||
# 启动反馈线程 | |||
self.start_feedback_thread() | |||
msg = customerKafkaConsumer.poll() | |||
time.sleep(1) | |||
if msg is not None and len(msg) > 0: | |||
for k, v in msg.items(): | |||
for m in v: | |||
message = m.value | |||
customerKafkaConsumer.commit_offset(m) | |||
requestId = self.getRequestId(message.get("request_id")) | |||
requestId = message.get("request_id") | |||
if requestId is None: | |||
logger.error("请求参数格式错误, 请检查请求体格式是否正确!") | |||
continue | |||
customerKafkaConsumer.commit_offset(m, requestId) | |||
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}", | |||
m.topic, m.offset, m.partition, message, requestId) | |||
topic_method = self.__analysisType.get(m.topic) | |||
topic_method[2](m.topic, message, topic_method[0]) | |||
topic_method = self.__task_type[m.topic] | |||
topic_method[2](topic_method[1], message, topic_method[0]) | |||
else: | |||
print_gpu_ex_status() | |||
print_cpu_ex_status(self.__context["base_dir"]) | |||
time.sleep(1) | |||
except Exception: | |||
logger.exception("主线程异常:{}", format_exc()) | |||
logger.error("主线程异常:{}", format_exc()) | |||
''' | |||
Handle the case where requestId is empty
''' | |||
@staticmethod | |||
def getRequestId(request_id): | |||
if not request_id: | |||
return '1' | |||
return request_id | |||
def identify_method(self, topic, message, analysisType): | |||
""" | |||
Real-time, offline and image recognition logic
1. topic        the Kafka topic
2. message      the request message body
3. analysisType the analysis type: real-time, offline or image
""" | |||
def identify_method(self, handle_method, message, analysisType): | |||
try: | |||
check_cude_is_available() | |||
handle_method(message, analysisType) | |||
except ServiceException as s: | |||
logger.error("消息监听异常:{}, requestId: {}", s.msg, message["request_id"]) | |||
put_queue(self.__fbQueue, message_feedback(message["request_id"], AnalysisStatus.FAILED.value, analysisType, | |||
s.code, s.msg), timeout=1) | |||
except Exception: | |||
logger.error("消息监听异常:{}, requestId: {}", format_exc(), message["request_id"]) | |||
put_queue(self.__fbQueue, message_feedback(message["request_id"], AnalysisStatus.FAILED.value, analysisType, | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), timeout=1) | |||
finally: | |||
del message | |||
def push_stream_method(self, handle_method, message, analysisType): | |||
try: | |||
# 校验参数 | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
if not is_available(): | |||
raise ServiceException(ExceptionType.GPU_EXCEPTION.value[0], | |||
ExceptionType.GPU_EXCEPTION.value[1]) | |||
self.__analysisType.get(topic)[1](message, analysisType) | |||
check_cude_is_available() | |||
handle_method(message, analysisType) | |||
except ServiceException as s: | |||
logger.error("消息监听异常:{}, requestId: {}", s.msg, | |||
self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.__fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
s.code, | |||
s.msg, | |||
analyse_time=TimeUtils.now_date_to_str())}, timeout=10) | |||
logger.error("消息监听异常:{}, requestId: {}", s.msg, message['request_id']) | |||
videoInfo = [{"id": url.get("id"), "status": PushStreamStatus.FAILED.value[0]} for url in | |||
message.get("video_urls", []) if url.get("id") is not None] | |||
put_queue(self.__fbQueue, pull_stream_feedback(message['request_id'], ExecuteStatus.FAILED.value[0], | |||
s.code, s.msg, videoInfo), timeout=1) | |||
except Exception: | |||
logger.error("消息监听异常:{}, requestId: {}", format_exc(), | |||
self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.__fbQueue.put({ | |||
"feedback": message_feedback(message.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}, timeout=10) | |||
def recording_method(self, topic, message, analysisType): | |||
logger.error("消息监听异常:{}, requestId: {}", format_exc(), message['request_id']) | |||
videoInfo = [{"id": url.get("id"), "status": PushStreamStatus.FAILED.value[0]} for url in | |||
message.get("video_urls", []) if url.get("id") is not None] | |||
put_queue(self.__fbQueue, pull_stream_feedback(message.get("request_id"), ExecuteStatus.FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], videoInfo), | |||
timeout=1) | |||
finally: | |||
del message | |||
def recording_method(self, handle_method, message, analysisType): | |||
try: | |||
# 校验参数 | |||
check_result = self.check_msg(message) | |||
if not check_result: | |||
return | |||
self.__analysisType.get(topic)[1](message, analysisType) | |||
check_cude_is_available() | |||
handle_method(message, analysisType) | |||
except ServiceException as s: | |||
logger.error("消息监听异常:{}, requestId: {}", s.msg, | |||
self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.__fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
error_code=s.code, | |||
error_msg=s.msg)}, timeout=10) | |||
logger.error("消息监听异常:{}, requestId: {}", s.msg, message["request_id"]) | |||
put_queue(self.__fbQueue, | |||
recording_feedback(message["request_id"], RecordingStatus.RECORDING_FAILED.value[0], | |||
error_code=s.code, error_msg=s.msg), timeout=1) | |||
except Exception: | |||
logger.error("消息监听异常:{}, requestId: {}", format_exc(), | |||
self.getRequestId(message.get("request_id"))) | |||
if message.get("request_id"): | |||
self.__fbQueue.put({ | |||
"recording": recording_feedback(message.get("request_id"), | |||
RecordingStatus.RECORDING_FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])}, timeout=10) | |||
logger.error("消息监听异常:{}, requestId: {}", format_exc(), message["request_id"]) | |||
put_queue(self.__fbQueue, | |||
recording_feedback(message["request_id"], RecordingStatus.RECORDING_FAILED.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), timeout=1) | |||
finally: | |||
del message | |||
# 开启实时进程 | |||
def startOnlineProcess(self, msg, analysisType): | |||
if self.__listeningProcesses.get(msg.get("request_id")): | |||
logger.warning("实时重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
if self.__listeningProcesses.get(msg["request_id"]): | |||
logger.warning("实时重复任务,请稍后再试!requestId:{}", msg["request_id"]) | |||
return | |||
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name) | |||
# 创建在线识别进程并启动 | |||
coir = OnlineIntelligentRecognitionProcess(param) | |||
model_type = self.__context["service"]["model"]["model_type"] | |||
codes = [model.get("code") for model in msg["models"] if model.get("code")] | |||
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes: | |||
coir = OnlineIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context) | |||
else: | |||
coir = OnlineIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context) | |||
coir.start() | |||
# 记录请求与进程映射 | |||
self.__listeningProcesses[msg.get("request_id")] = coir | |||
self.__listeningProcesses[msg["request_id"]] = coir | |||
# 结束实时进程 | |||
def stopOnlineProcess(self, msg, analysisType): | |||
ps = self.__listeningProcesses.get(msg.get("request_id")) | |||
def stopOnlineProcess(self, msg): | |||
ps = self.__listeningProcesses.get(msg["request_id"]) | |||
if ps is None: | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id")) | |||
putQueue(self.__fbQueue, { | |||
"feedback": message_feedback(msg.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[0], | |||
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}, msg.get("request_id")) | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"]) | |||
return | |||
ps.sendEvent({"command": "stop"}) | |||
def check_service_resource(self, start_time, requestId=None): | |||
if len(self.__listeningProcesses) > 0: | |||
gpu_result = print_gpu_ex_status(requestId) | |||
cpu_result = print_cpu_ex_status(self.__base_dir, requestId) | |||
if gpu_result or cpu_result: | |||
self.__resource_status = True | |||
return time.time() | |||
if not gpu_result and not cpu_result and time.time() - start_time > 30: | |||
self.__resource_status = False | |||
return time.time() | |||
return start_time | |||
@staticmethod | |||
def check_process(listeningProcess): | |||
for requestId in list(listeningProcess.keys()): | |||
if not listeningProcess[requestId].is_alive(): | |||
del listeningProcess[requestId] | |||
def check_process_task(self): | |||
for requestId in list(self.__listeningProcesses.keys()): | |||
if not self.__listeningProcesses[requestId].is_alive(): | |||
del self.__listeningProcesses[requestId] | |||
self.check_process(self.__listeningProcesses) | |||
self.check_process(self.__recordingProcesses) | |||
self.check_process(self.__pull2PushProcesses) | |||
# 开启离线进程 | |||
def startOfflineProcess(self, msg, analysisType): | |||
if self.__listeningProcesses.get(msg.get("request_id")): | |||
logger.warning("离线重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
if self.__listeningProcesses.get(msg["request_id"]): | |||
logger.warning("离线重复任务,请稍后再试!requestId:{}", msg["request_id"]) | |||
return | |||
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name) | |||
first = OfflineIntelligentRecognitionProcess(param) | |||
model_type = self.__context["service"]["model"]["model_type"] | |||
codes = [model.get("code") for model in msg["models"] if model.get("code")] | |||
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes: | |||
first = OfflineIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context) | |||
else: | |||
first = OfflineIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context) | |||
first.start() | |||
self.__listeningProcesses[msg.get("request_id")] = first | |||
self.__listeningProcesses[msg["request_id"]] = first | |||
# 结束离线进程 | |||
def stopOfflineProcess(self, msg, analysisType): | |||
ps = self.__listeningProcesses.get(msg.get("request_id")) | |||
def stopOfflineProcess(self, msg): | |||
ps = self.__listeningProcesses.get(msg["request_id"]) | |||
if ps is None: | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id")) | |||
putQueue(self.__fbQueue, { | |||
"feedback": message_feedback(msg.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[0], | |||
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}, msg.get("request_id")) | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"]) | |||
return | |||
ps.sendEvent({"command": "stop"}) | |||
# 开启图片分析进程 | |||
def startImageProcess(self, msg, analysisType): | |||
pp = self.__listeningProcesses.get(msg.get("request_id")) | |||
pp = self.__listeningProcesses.get(msg["request_id"]) | |||
if pp is not None: | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg["request_id"]) | |||
return | |||
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name) | |||
model_type = self.__context["service"]["model"]["model_type"] | |||
codes = [model.get("code") for model in msg["models"] if model.get("code")] | |||
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes: | |||
imaged = PhotosIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context) | |||
else: | |||
imaged = PhotosIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context) | |||
# 创建在线识别进程并启动 | |||
imaged = PhotosIntelligentRecognitionProcess(param) | |||
imaged.start() | |||
self.__listeningProcesses[msg.get("request_id")] = imaged | |||
self.__listeningProcesses[msg["request_id"]] = imaged | |||
''' | |||
Validate the Kafka message body
''' | |||
@staticmethod | |||
def check_msg(msg): | |||
def check_msg(msg, schema): | |||
try: | |||
v = Validator(SCHEMA, allow_unknown=True) | |||
v = Validator(schema, allow_unknown=True) | |||
result = v.validate(msg) | |||
if not result: | |||
logger.error("参数校验异常: {}", v.errors) | |||
if msg.get("request_id"): | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], v.errors) | |||
return result | |||
logger.error("参数校验异常: {}, requestId: {}", v.errors, msg["request_id"]) | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
except ServiceException as s: | |||
raise s | |||
except Exception: | |||
logger.error("参数校验异常: {}", format_exc()) | |||
logger.error("参数校验异常: {}, requestId: {}", format_exc(), msg["request_id"]) | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
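check_msg delegates to the cerberus Validator. A small self-contained sketch of the same pattern with a toy schema; the field rules here are illustrative, and the real ONLINE_START_SCHEMA and related schemas are defined elsewhere in the project.
# Toy cerberus schema; not the project's real ONLINE_START_SCHEMA.
from cerberus import Validator
toy_schema = {
    "request_id": {"type": "string", "required": True, "empty": False},
    "command": {"type": "string", "allowed": ["start", "stop"]},
}
v = Validator(toy_schema, allow_unknown=True)
print(v.validate({"request_id": "abc", "command": "start"}))  # True
print(v.validate({"command": "restart"}), v.errors)           # False, plus the error details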
@@ -322,29 +275,16 @@ class DispatcherService: | |||
def start_feedback_thread(self): | |||
if self.__feedbackThread is None: | |||
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__context) | |||
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__kafka_config) | |||
self.__feedbackThread.setDaemon(True) | |||
self.__feedbackThread.start() | |||
start_time = time.time() | |||
retry_count = 0 | |||
while True: | |||
if self.__feedbackThread.is_alive(): | |||
break | |||
retry_count += 1 | |||
if retry_count > 8: | |||
self.__feedbackThread = None | |||
logger.error("反馈线程异常重试失败!!!!!!") | |||
break | |||
if time.time() - start_time <= 3: | |||
logger.error("反馈线程异常等待中") | |||
time.sleep(1) | |||
continue | |||
logger.error("反馈线程异常重启中") | |||
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__context) | |||
time.sleep(1) | |||
if self.__feedbackThread and not self.__feedbackThread.is_alive(): | |||
logger.error("反馈线程异常停止, 开始重新启动反馈线程!!!!!") | |||
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__kafka_config) | |||
self.__feedbackThread.setDaemon(True) | |||
self.__feedbackThread.start() | |||
start_time = time.time() | |||
continue | |||
time.sleep(1) | |||
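The block above is a simple thread watchdog: if the feedback thread has died, a new one is built and started again. A self-contained sketch of the same pattern with a dummy worker (not the real FeedbackThread):
# Generic restart-on-death watchdog; dummy worker thread for illustration only.
import threading
import time
def start_worker():
    t = threading.Thread(target=lambda: time.sleep(2), daemon=True)
    t.start()
    return t
worker = start_worker()
for _ in range(5):
    if not worker.is_alive():
        worker = start_worker()  # restart once the worker has stopped
    time.sleep(1)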
''' | |||
Online analysis logic
@@ -352,74 +292,101 @@ class DispatcherService: | |||
def online(self, message, analysisType): | |||
if "start" == message.get("command"): | |||
if self.__resource_status or len(self.__listeningProcesses) >= int(self.__context["task"]["limit"]): | |||
self.check_msg(message, ONLINE_START_SCHEMA) | |||
if len(self.__listeningProcesses) >= int(self.__context['service']["task"]["limit"]): | |||
raise ServiceException(ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]) | |||
self.startOnlineProcess(message, analysisType) | |||
elif "stop" == message.get("command"): | |||
self.stopOnlineProcess(message, analysisType) | |||
self.check_msg(message, ONLINE_STOP_SCHEMA) | |||
self.stopOnlineProcess(message) | |||
else: | |||
pass | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
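For context, a hedged example of the command messages this method handles; only request_id, command and models[].code are confirmed by the code in this diff, so any other field would be an assumption.
# Hypothetical online task messages; fields beyond request_id/command/models are not confirmed here.
start_msg = {"request_id": "req-001", "command": "start", "models": [{"code": "001"}]}
stop_msg = {"request_id": "req-001", "command": "stop"}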
def offline(self, message, analysisType): | |||
if "start" == message.get("command"): | |||
if self.__resource_status or len(self.__listeningProcesses) >= int(self.__context["task"]["limit"]): | |||
self.check_msg(message, OFFLINE_START_SCHEMA) | |||
if len(self.__listeningProcesses) >= int(self.__context['service']["task"]["limit"]): | |||
raise ServiceException(ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]) | |||
self.startOfflineProcess(message, analysisType) | |||
elif "stop" == message.get("command"): | |||
self.stopOfflineProcess(message, analysisType) | |||
self.check_msg(message, OFFLINE_STOP_SCHEMA) | |||
self.stopOfflineProcess(message) | |||
else: | |||
pass | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
def image(self, message, analysisType): | |||
if "start" == message.get("command"): | |||
self.check_msg(message, IMAGE_SCHEMA) | |||
if len(self.__listeningProcesses) >= int(self.__context['service']["task"]["image"]["limit"]): | |||
raise ServiceException(ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]) | |||
self.startImageProcess(message, analysisType) | |||
else: | |||
pass | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
def recording(self, message, analysisType): | |||
if "start" == message.get("command"): | |||
logger.info("开始录屏") | |||
check_cpu(self.__base_dir, message.get("request_id")) | |||
GPUtils.check_gpu_resource(message.get("request_id")) | |||
self.check_msg(message, RECORDING_START_SCHEMA) | |||
if len(self.__recordingProcesses) >= int(self.__context['service']["task"]["limit"]): | |||
raise ServiceException(ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]) | |||
self.startRecordingProcess(message, analysisType) | |||
elif "stop" == message.get("command"): | |||
self.stopRecordingProcess(message, analysisType) | |||
self.check_msg(message, RECORDING_STOP_SCHEMA) | |||
self.stopRecordingProcess(message) | |||
else: | |||
pass | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
# 开启录屏进程 | |||
def startRecordingProcess(self, msg, analysisType): | |||
if self.__listeningProcesses.get(msg.get("request_id")): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id")) | |||
if self.__listeningProcesses.get(msg["request_id"]): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg["request_id"]) | |||
return | |||
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name) | |||
srp = ScreenRecordingProcess(param) | |||
srp = ScreenRecordingProcess(self.__fbQueue, self.__context, msg, analysisType) | |||
srp.start() | |||
self.__listeningProcesses[msg.get("request_id")] = srp | |||
self.__recordingProcesses[msg["request_id"]] = srp | |||
# 结束录屏进程 | |||
def stopRecordingProcess(self, msg, analysisType): | |||
rdp = self.__listeningProcesses.get(msg.get("request_id")) | |||
def stopRecordingProcess(self, msg): | |||
rdp = self.__recordingProcesses.get(msg["request_id"]) | |||
if rdp is None: | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id")) | |||
putQueue(self.__fbQueue, { | |||
"recording": message_feedback(msg.get("request_id"), | |||
AnalysisStatus.FAILED.value, | |||
analysisType, | |||
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[0], | |||
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[1], | |||
analyse_time=TimeUtils.now_date_to_str())}, msg.get("request_id")) | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"]) | |||
return | |||
rdp.sendEvent({"command": "stop"}) | |||
def pullStream(self, message, analysisType): | |||
if "start" == message.get("command"): | |||
self.check_msg(message, PULL2PUSH_START_SCHEMA) | |||
if len(self.__pull2PushProcesses) >= int(self.__context['service']["task"]["limit"]): | |||
raise ServiceException(ExceptionType.NO_RESOURCES.value[0], | |||
ExceptionType.NO_RESOURCES.value[1]) | |||
def putQueue(queue, result, requestId, enable_ex=True): | |||
try: | |||
queue.put(result, timeout=10) | |||
except Exception: | |||
logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), requestId) | |||
if enable_ex: | |||
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0], | |||
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]) | |||
self.startPushStreamProcess(message, analysisType) | |||
elif "stop" == message.get("command"): | |||
self.check_msg(message, PULL2PUSH_STOP_SCHEMA) | |||
self.stopPushStreamProcess(message) | |||
else: | |||
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], | |||
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1]) | |||
def startPushStreamProcess(self, msg, analysisType): | |||
if self.__pull2PushProcesses.get(msg["request_id"]): | |||
logger.warning("重复任务,请稍后再试!requestId:{}", msg["request_id"]) | |||
return | |||
srp = PushStreamProcess(self.__fbQueue, self.__context, msg, analysisType) | |||
srp.start() | |||
self.__pull2PushProcesses[msg["request_id"]] = srp | |||
# 结束转推流进程
def stopPushStreamProcess(self, msg): | |||
srp = self.__pull2PushProcesses.get(msg["request_id"]) | |||
if srp is None: | |||
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"]) | |||
return | |||
srp.sendEvent({"command": "stop", "videoIds": msg.get("video_ids", [])}) |
@@ -0,0 +1,40 @@ | |||
#!/bin/bash | |||
current_dir=$(cd "$(dirname "$0")"; pwd) | |||
active=$(basename "$(dirname "$current_dir")") | |||
conda_env="alg" | |||
echo "当前程序所在目录: $current_dir, 当前程序启动环境: $active" | |||
if [[ "a${active}" != "adev" && "a${active}" != "atest" && "a${active}" != "aprod" ]]; then | |||
echo "###############################################################"; | |||
echo "启动失败, 当前环境只支持dev、test、prod"; | |||
echo "环境是根据程序所在目录自动匹配的, 请检测程序路径配置是否正确!"; | |||
echo "###############################################################"; | |||
exit 1 | |||
fi | |||
cd $current_dir | |||
pid=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'` | |||
if [ -n "$pid" ]; then | |||
echo "alg进程已存在, 进程id: $pid" | |||
kill -9 ${pid}; | |||
echo "杀掉当前alg进程, 进程号:$pid" | |||
sleep 1 | |||
pid_1=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'` | |||
if [ -n "$pid_1" ]; then | |||
echo "###############################################################"; | |||
echo "杀掉alg进程失败!" | |||
echo "###############################################################"; | |||
exit 1 | |||
else | |||
echo "杀掉alg进程成功!!" | |||
fi | |||
fi | |||
nohup /home/th/anaconda3/envs/${conda_env}/bin/python3.8 dsp_master.py ${active} > /dev/null 2>&1 & | |||
sleep 1 | |||
pid_end=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'` | |||
if [ -n "$pid_end" ]; then | |||
echo "alg启动成功, $pid_end" | |||
else | |||
echo "###############################################################"; | |||
echo "alg启动失败!!!!!!!!!!!!!!!!!!!!!!" | |||
echo "###############################################################"; | |||
exit 1 | |||
fi |
@@ -0,0 +1,19 @@ | |||
#!/bin/bash | |||
conda_env="alg" | |||
pid=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'` | |||
if [ -n "$pid" ]; then | |||
kill -9 ${pid}; | |||
echo "杀掉当前alg进程, 进程号:$pid" | |||
fi | |||
sleep 1 | |||
pid_end=`ps x | grep "/home/th/anaconda3/envs/${conda_env}/bin/python3.8" | grep -v grep | awk '{print $1}'` | |||
if [ -n "$pid_end" ]; then | |||
echo "###############################################################"; | |||
echo "alg停止失败!!!!!, $pid_end" | |||
echo "###############################################################"; | |||
exit 1 | |||
else | |||
echo "###############################################################"; | |||
echo "alg停止成功!!!!!!!!!!!!!!!!!!!!!!" | |||
echo "###############################################################"; | |||
fi |
@@ -0,0 +1,15 @@ | |||
import multiprocessing as mp | |||
import time | |||
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor | |||
from multiprocessing import Queue, shared_memory | |||
import tensorrt as trt | |||
# multiprocessing.set_start_method('spawn') | |||
Detweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/yolov5_2080Ti_fp16.engine" | |||
with open(Detweights, "rb") as f: | |||
model = f.read() | |||
Segweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/stdc_360X640_2080Ti_fp16.engine" | |||
with open(Segweights, "rb") as f: | |||
segmodel = f.read() | |||
print(type(model), type(segmodel)) |
@@ -0,0 +1,32 @@ | |||
import multiprocessing as mp | |||
import time | |||
from concurrent.futures import ProcessPoolExecutor | |||
from multiprocessing import Queue, shared_memory | |||
import tensorrt as trt | |||
# multiprocessing.set_start_method('spawn') | |||
Detweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/yolov5_2080Ti_fp16.engine" | |||
start = time.time() | |||
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
model = runtime.deserialize_cuda_engine(f.read()) | |||
print(time.time() - start) | |||
start1 = time.time() | |||
Segweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/stdc_360X640_2080Ti_fp16.engine" | |||
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
segmodel = runtime.deserialize_cuda_engine(f.read()) | |||
print(time.time() - start1) | |||
def aa(buf): | |||
print(id(buf[0]), id(buf[1])) | |||
shm = shared_memory.SharedMemory(name='share', create=True, size=10000) | |||
buf = shm.buf | |||
# NOTE: a 10000-byte shared memory block cannot hold the serialized engines, and a memoryview
# cannot store Python objects, so collect both engine buffers in a list and let the child
# processes print their ids.
buf = [model, segmodel]
print(id(model), id(segmodel)) | |||
p = mp.Process(target=aa, args=(buf,)) | |||
p1 = mp.Process(target=aa, args=(buf,)) | |||
p.start() | |||
p1.start() | |||
time.sleep(10) | |||
@@ -0,0 +1,16 @@ | |||
import multiprocessing as mp | |||
def aa1(aa2): | |||
print("1111111111", id(aa2), aa2) | |||
aa = [1, 2, 3, 4, 5, 6] | |||
print(id(aa)) | |||
# num = mp.Array('i', aa) | |||
p = mp.Process(target=aa1, args=(aa,)) | |||
p1 = mp.Process(target=aa1, args=(aa,)) | |||
p2 = mp.Process(target=aa1, args=(aa,)) | |||
p.start() | |||
p1.start() | |||
p2.start() |
@@ -0,0 +1,25 @@ | |||
import multiprocessing | |||
from multiprocessing import Process, Lock, Queue | |||
import time | |||
# Detweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/yolov5_2080Ti_fp16.engine" | |||
# with open(Detweights, "rb") as f: | |||
# model = f.read() | |||
# Segweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/stdc_360X640_2080Ti_fp16.engine" | |||
# with open(Segweights, "rb") as f: | |||
# segmodel = f.read() | |||
def add_one(aaa): | |||
aaa.put("111111111") | |||
aaa.cancel_join_thread() | |||
if __name__ == '__main__': | |||
aa = Queue() | |||
p1 = Process(target=add_one, args=(aa,)) | |||
p1.start() | |||
time.sleep(2) | |||
print(aa.get()) | |||
@@ -0,0 +1,167 @@ | |||
import multiprocessing as mp | |||
import sys | |||
import time | |||
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, wait, ALL_COMPLETED | |||
from multiprocessing import Queue | |||
import cv2 | |||
import tensorrt as trt | |||
sys.path.extend(['/home/th/tuo_heng/dev/tuoheng_alg', '/home/th/tuo_heng/dev/tuoheng_alg/util']) | |||
from util.PlotsUtils import get_label_arrays | |||
from util.TorchUtils import select_device | |||
sys.path.extend(['/home/th/tuo_heng/', '/home/th/tuo_heng/dev', '/home/th/tuo_heng/dev/AIlib2', '/home/th/tuo_heng/dev/AIlib2/segutils']) | |||
from segutils.segmodel import SegModel | |||
from models.experimental import attempt_load | |||
from AI import AI_process | |||
from utilsK.queRiver import riverDetSegMixProcess | |||
COLOR = ( | |||
[0, 0, 255], | |||
[255, 0, 0], | |||
[211, 0, 148], | |||
[0, 127, 0], | |||
[0, 69, 255], | |||
[0, 255, 0], | |||
[255, 0, 255], | |||
[0, 0, 127], | |||
[127, 0, 255], | |||
[255, 129, 0], | |||
[139, 139, 0], | |||
[255, 255, 0], | |||
[127, 255, 0], | |||
[0, 127, 255], | |||
[0, 255, 127], | |||
[255, 127, 255], | |||
[8, 101, 139], | |||
[171, 130, 255], | |||
[139, 112, 74], | |||
[205, 205, 180]) | |||
par = { | |||
'device': '0', | |||
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"], | |||
'seg_nclass': 2, | |||
'trtFlag_seg': True, | |||
'trtFlag_det': True, | |||
'segRegionCnt': 1, | |||
'segPar': { | |||
'modelSize': (640, 360), | |||
'mean': (0.485, 0.456, 0.406), | |||
'std': (0.229, 0.224, 0.225), | |||
'numpy': False, | |||
'RGB_convert_first': True, | |||
'mixFunction': { | |||
'function': riverDetSegMixProcess, | |||
'pars': { | |||
'slopeIndex': [5, 6, 7], | |||
'riverIou': 0.1 | |||
} | |||
} | |||
}, | |||
'postFile': { | |||
"name": "post_process", | |||
"conf_thres": 0.25, | |||
"iou_thres": 0.45, | |||
"classes": 5, | |||
"rainbows": COLOR | |||
}, | |||
'Detweights': "/home/th/tuo_heng/dev/AIlib2/weights/river/yolov5_2080Ti_fp16.engine", | |||
'Segweights': '/home/th/tuo_heng/dev/AIlib2/weights/river/stdc_360X640_2080Ti_fp16.engine' | |||
} | |||
mode, postPar, segPar = par.get('mode', 'others'), par.get('postPar'), par.get('segPar') | |||
new_device = select_device(par.get('device')) | |||
names = par['labelnames'] | |||
half = new_device.type != 'cpu' | |||
Detweights = par['Detweights'] | |||
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
model = runtime.deserialize_cuda_engine(f.read()) | |||
Segweights = par['Segweights'] | |||
if Segweights: | |||
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
segmodel = runtime.deserialize_cuda_engine(f.read()) | |||
else: | |||
segmodel = None | |||
postFile = par['postFile'] | |||
rainbows = postFile["rainbows"] | |||
objectPar = { | |||
'half': half, | |||
'device': new_device, | |||
'conf_thres': postFile["conf_thres"], | |||
'ovlap_thres_crossCategory': postFile.get("ovlap_thres_crossCategory"), | |||
'iou_thres': postFile["iou_thres"], | |||
'allowedList': [], | |||
'segRegionCnt': par['segRegionCnt'], | |||
'trtFlag_det': par['trtFlag_det'], | |||
'trtFlag_seg': par['trtFlag_seg'] | |||
} | |||
Detweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/yolov5_2080Ti_fp16.engine" | |||
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
model = runtime.deserialize_cuda_engine(f.read()) | |||
Segweights = "/home/th/tuo_heng/dev/AIlib2/weights/river2/stdc_360X640_2080Ti_fp16.engine" | |||
with open(Segweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: | |||
segmodel = runtime.deserialize_cuda_engine(f.read()) | |||
allowedList = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] | |||
def one_label(width, height, model_param): | |||
names = model_param[4] | |||
rainbows = model_param[6] | |||
digitFont, label_arraylist, font_config = get_label_arraylist(width, height, names, rainbows) | |||
""" | |||
font_config, frame, model, segmodel, names, label_arraylist, rainbows, objectPar, font, segPar, mode, postPar, | |||
requestId | |||
""" | |||
model_param[5] = label_arraylist | |||
model_param[8] = digitFont | |||
model_param[0] = font_config | |||
def get_label_arraylist(*args): | |||
width, height, names, rainbows = args | |||
# line = int(round(0.002 * (height + width) / 2) + 1) | |||
line = int(width / 1920 * 3 - 1) | |||
label = ' 0.95' | |||
tf = max(line, 1) | |||
fontScale = line * 0.33 | |||
text_width, text_height = cv2.getTextSize(label, 0, fontScale=fontScale, thickness=tf)[0] | |||
fontsize = int(width / 1920 * 40) | |||
numFontSize = float(format(width / 1920 * 1.1, '.1f')) | |||
digitFont = {'line_thickness': line, | |||
'boxLine_thickness': line, | |||
'fontSize': numFontSize, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': line} | |||
label_arraylist = get_label_arrays(names, rainbows, text_height, fontSize=fontsize, | |||
fontPath="/home/th/tuo_heng/dev/AIlib2/conf/platech.ttf") | |||
return digitFont, label_arraylist, (line, text_width, text_height, fontScale, tf) | |||
image = cv2.imread("/home/th/tuo_heng/dev/ompv2fn94m_1687259193110.jpg") | |||
start_time1 = time.time() | |||
with ThreadPoolExecutor(max_workers=3) as t: | |||
rs = [] | |||
for i in range(500): | |||
rr = t.submit(AI_process, [image], model, segmodel, names, None, rainbows, objectPar=objectPar, | |||
font={'line_thickness': 1, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.1, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': 1}, segPar=segPar, mode=mode, postPar=postPar) | |||
rs.append(rr) | |||
for i in rs: | |||
i.result() | |||
print(time.time() - start_time1) | |||
start_time = time.time() | |||
for i in range(500): | |||
AI_process([image], model, segmodel, names, None, rainbows, objectPar=objectPar, | |||
font={'line_thickness': 1, | |||
'boxLine_thickness': 1, | |||
'fontSize': 1.1, | |||
'waterLineColor': (0, 255, 255), | |||
'segLineShow': False, | |||
'waterLineWidth': 1}, segPar=segPar, mode=mode, postPar=postPar) | |||
print(time.time() - start_time) |
@@ -0,0 +1,10 @@ | |||
import cv2 | |||
from PIL import ImageFont | |||
label = ' 0.95' | |||
# fontScale=fontScale, thickness=tf | |||
text_width, text_height = cv2.getTextSize(label, 0, fontScale=1, thickness=1)[0] | |||
print(text_height) | |||
font = ImageFont.truetype("/home/th/tuo_heng/dev/AIlib2/conf/platech.ttf", 22, encoding='utf-8') | |||
x, y, width, height = font.getbbox("植被") | |||
print(x, y, width, height) |
@@ -1,27 +1,843 @@ | |||
import asyncio | |||
import aiohttp | |||
import json | |||
import sys, yaml | |||
from easydict import EasyDict as edict | |||
from concurrent.futures import ThreadPoolExecutor | |||
sys.path.extend(['..','../AIlib2' ]) | |||
from AI import AI_process,AI_process_forest,get_postProcess_para,get_postProcess_para_dic,ocr_process,AI_det_track,AI_det_track_batch | |||
import cv2,os,time | |||
from segutils.segmodel import SegModel | |||
from segutils.segmodel import SegModel | |||
from segutils.trafficUtils import tracfficAccidentMixFunction | |||
from models.experimental import attempt_load | |||
from utils.torch_utils import select_device | |||
from utilsK.queRiver import get_labelnames,get_label_arrays,save_problem_images,riverDetSegMixProcess | |||
from ocrUtils.ocrUtils import CTCLabelConverter,AlignCollate | |||
from trackUtils.sort import Sort,track_draw_boxAndTrace,track_draw_trace_boxes,moving_average_wang,drawBoxTraceSimplied | |||
from trackUtils.sort_obb import OBB_Sort,obbTohbb,track_draw_all_boxes,track_draw_trace | |||
from obbUtils.shipUtils import OBB_infer,OBB_tracker,draw_obb,OBB_tracker_batch | |||
from utilsK.noParkingUtils import mixNoParking_road_postprocess | |||
from obbUtils.load_obb_model import load_model_decoder_OBB | |||
import numpy as np | |||
import torch,glob | |||
import tensorrt as trt | |||
from utilsK.masterUtils import get_needed_objectsIndex | |||
from copy import deepcopy | |||
from scipy import interpolate | |||
from utilsK.drownUtils import mixDrowing_water_postprocess | |||
#import warnings | |||
#warnings.filterwarnings("error") | |||
def view_bar(num, total,time1,prefix='prefix'): | |||
rate = num / total | |||
time_n=time.time() | |||
rate_num = int(rate * 30) | |||
rate_nums = np.round(rate * 100) | |||
r = '\r %s %d / %d [%s%s] %.2f s'%(prefix,num,total, ">" * rate_num, " " * (30 - rate_num), time_n-time1 ) | |||
sys.stdout.write(r) | |||
sys.stdout.flush() | |||
''' | |||
Multithreading
''' | |||
def process_v1(frame): | |||
#try: | |||
print('demo.py beging to :',frame[8]) | |||
time00 = time.time() | |||
H,W,C = frame[0][0].shape | |||
p_result,timeOut = AI_process(frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],objectPar=frame[6],font=frame[7],segPar=frame[9],mode=frame[10],postPar=frame[11]) | |||
time11 = time.time() | |||
image_array = p_result[1] | |||
cv2.imwrite(os.path.join('images/results/',frame[8] ) ,image_array) | |||
bname = frame[8].split('.')[0] | |||
if len(p_result)==5: | |||
image_mask = p_result[4] | |||
cv2.imwrite(os.path.join('images/results/',bname+'_mask.png' ) , (image_mask).astype(np.uint8)) | |||
boxes=p_result[2] | |||
with open( os.path.join('images/results/',bname+'.txt' ),'w' ) as fp: | |||
for box in boxes: | |||
box_str=[str(x) for x in box] | |||
out_str=','.join(box_str)+'\n' | |||
fp.write(out_str) | |||
time22 = time.time() | |||
print('%s,%d*%d,AI-process: %.1f,image save:%.1f , %s'%(frame[8],H,W, (time11 - time00) * 1000.0, (time22-time11)*1000.0,timeOut), boxes) | |||
return 'success' | |||
#except Exception as e: | |||
# return 'failed:'+str(e) | |||
def process_video(video,par0,mode='detSeg'): | |||
cap=cv2.VideoCapture(video) | |||
if not cap.isOpened(): | |||
print('#####error url:',video) | |||
return False | |||
bname=os.path.basename(video).split('.')[0] | |||
fps = int(cap.get(cv2.CAP_PROP_FPS)+0.5) | |||
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH )+0.5) | |||
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)+0.5) | |||
framecnt=int(cap.get(7)+0.5) | |||
save_path_AI = os.path.join(par0['outpth'],os.path.basename(video)) | |||
problem_image_dir= os.path.join( par0['outpth'], 'probleImages' ) | |||
os.makedirs(problem_image_dir,exist_ok=True) | |||
vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width,height)) | |||
num=0 | |||
iframe=0;post_results=[];fpsample=30*10 | |||
imgarray_list = []; iframe_list = [] | |||
patch_cnt = par0['trackPar']['patchCnt'] | |||
##windowsize 对逐帧插值后的结果做平滑,windowsize为平滑的长度,没隔det_cnt帧做一次跟踪。 | |||
trackPar={'det_cnt':10,'windowsize':29 } | |||
##track_det_result_update= np.empty((0,8)) ###每100帧跑出来的结果,放在track_det_result_update,只保留当前100帧里有的tracker Id. | |||
while cap.isOpened(): | |||
ret, imgarray = cap.read() #读取摄像头画面 | |||
iframe +=1 | |||
if not ret:break | |||
if mode=='detSeg': | |||
p_result,timeOut = AI_process([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],objectPar=par0['objectPar'],font=par0['digitFont'],segPar=par0['segPar']) | |||
elif mode == 'track': | |||
#sampleCount=10 | |||
imgarray_list.append( imgarray ) | |||
iframe_list.append(iframe ) | |||
if iframe%patch_cnt==0: | |||
time_patch0 = time.time() | |||
retResults,timeInfos = AI_det_track_batch(imgarray_list, iframe_list ,par0['modelPar'],par0['processPar'],par0['sort_tracker'] ,par0['trackPar'],segPar=par0['segPar']) | |||
#print('###line111:',retResults[2]) | |||
###需要保存成一个二维list,每一个list是一帧检测结果。 | |||
###track_det_result 内容格式:x1, y1, x2, y2, conf, cls,iframe,trackId | |||
time_patch2 = time.time() | |||
frame_min = iframe_list[0];frame_max=iframe_list[-1] | |||
for iiframe in range(frame_min,frame_max+1): | |||
img_draw = imgarray_list[ iiframe- frame_min ] | |||
img_draw = drawBoxTraceSimplied(retResults[1] ,iiframe, img_draw,rainbows=par0['drawPar']['rainbows'],boxFlag=True,traceFlag=True,names=par0['drawPar']['names'] ) | |||
ret = vid_writer_AI.write(img_draw) | |||
view_bar(iiframe, framecnt,time.time(),prefix=os.path.basename(video)) | |||
imgarray_list=[];iframe_list=[] | |||
elif mode =='obbTrack': | |||
imgarray_list.append( imgarray ) | |||
iframe_list.append(iframe ) | |||
if iframe%patch_cnt==0: | |||
time_patch0 = time.time() | |||
track_det_results, timeInfos = OBB_tracker_batch(imgarray_list,iframe_list,par0['modelPar'],par0['obbModelPar'],par0['sort_tracker'],par0['trackPar'],segPar=None) | |||
print( timeInfos ) | |||
#对结果画图 | |||
track_det_np = track_det_results[1] | |||
frame_min = iframe_list[0];frame_max=iframe_list[-1] | |||
for iiframe in range(frame_min,frame_max+1): | |||
img_draw = imgarray_list[ iiframe- frame_min ] | |||
if len( track_det_results[2][ iiframe- frame_min]) > 0: | |||
img_draw = draw_obb( track_det_results[2][iiframe- frame_min ] ,img_draw,par0['drawPar']) | |||
if True: | |||
frameIdex=12;trackIdex=13; | |||
boxes_oneFrame = track_det_np[ track_det_np[:,frameIdex]==iiframe ] | |||
###在某一帧上,画上轨迹 | |||
track_ids = boxes_oneFrame[:,trackIdex].tolist() | |||
boxes_before_oneFrame = track_det_np[ track_det_np[:,frameIdex]<=iiframe ] | |||
for trackId in track_ids: | |||
boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:,trackIdex]==trackId] | |||
xcs = boxes_before_oneFrame_oneId[:,8] | |||
ycs = boxes_before_oneFrame_oneId[:,9] | |||
[cv2.line(img_draw, ( int(xcs[i]) , int(ycs[i]) ), | |||
( int(xcs[i+1]),int(ycs[i+1]) ),(255,0,0), thickness=2) | |||
for i,_ in enumerate(xcs) if i < len(xcs)-1 ] | |||
ret = vid_writer_AI.write(img_draw) | |||
#sys.exit(0) | |||
#print('vide writer ret:',ret) | |||
imgarray_list=[];iframe_list=[] | |||
view_bar(iframe, framecnt,time.time(),prefix=os.path.basename(video)) | |||
else: | |||
p_result,timeOut = AI_process_forest([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],par0['half'],par0['device'],par0['conf_thres'], par0['iou_thres'],par0['allowedList'],font=par0['digitFont'],trtFlag_det=par0['trtFlag_det']) | |||
if mode not in [ 'track','obbTrack']: | |||
image_array = p_result[1];num+=1 | |||
ret = vid_writer_AI.write(image_array) | |||
view_bar(num, framecnt,time.time(),prefix=os.path.basename(video)) | |||
##每隔 fpsample帧处理一次,如果有问题就保存图片 | |||
if (iframe % fpsample == 0) and (len(post_results)>0) : | |||
parImage=save_problem_images(post_results,iframe,par0['names'],streamName=bname,outImaDir=problem_image_dir,imageTxtFile=False) | |||
post_results=[] | |||
if len(p_result[2] )>0: | |||
post_results.append(p_result) | |||
vid_writer_AI.release(); | |||
def det_track_demo(business, videopaths): | |||
''' | |||
Tracking parameter notes:
 'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}
sort_max_age   -- maximum number of frames a target may disappear before the broken track is treated as a new target.
sort_min_hits  -- number of consecutive detections required before something is accepted as a target.
sort_iou_thresh -- minimum detection confidence.
det_cnt        -- tracking/detection is run once every det_cnt frames; default 10.
windowsize     -- trajectory smoothing window length; must be odd; smoothing is applied over this many frames; default 29.
patchCnt       -- number of frames fed in per batch; should not be fewer than 100.
''' | |||
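The same parameters written out as a plain dict, with comments restating the values quoted in the note above:
# trackPar as quoted in the note above; comments restate the parameter meanings.
trackPar = {
    'sort_max_age': 2,       # frames a target may disappear before it is considered a new target
    'sort_min_hits': 3,      # consecutive detections needed before a target is accepted
    'sort_iou_thresh': 0.2,  # per the note above: minimum detection confidence
    'det_cnt': 10,           # run tracking/detection once every det_cnt frames
    'windowsize': 29,        # trajectory smoothing window; must be odd
    'patchCnt': 100,         # frames per batch; should not be fewer than 100
}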
''' The following are tracking models based on detection plus segmentation; segmentation is used to refine the detection results '''
####河道巡检的跟踪模型参数 | |||
if opt['business'] == 'river' or opt['business'] == 'river2' : | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表 | |||
'gpuname':'2080Ti',###显卡名称 | |||
'max_workers':1, ###并行线程数 | |||
'half':True, | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出 | |||
'seg_nclass':2,###分割模型类别数目,默认2类 | |||
'segRegionCnt':0,###分割模型结果需要保留的等值线数目 | |||
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,#分割模型预处理参数 | |||
'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} #分割和检测混合处理的函数 | |||
}, | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/%s/para.json'%( opt['business'] ),###后处理参数文件 | |||
'txtFontSize':80,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置 | |||
#'testImgPath':'images/videos/river',###测试图像的位置 | |||
'testImgPath':'images/tt',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
async def get_video_info(url: str) -> dict: | |||
async with aiohttp.ClientSession() as session: | |||
params = { | |||
'format': 'json', | |||
'url': url | |||
} | |||
async with session.get('rtmp://192.168.10.101:19350/rlive/stream_107?sign=NQe66OXS') as resp: | |||
if resp.status == 200: | |||
text = await resp.text() | |||
return json.loads(text) | |||
return {} | |||
if opt['business'] == 'highWay2': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%( opt['business'] ), ###检测类别对照表 | |||
'half':True, | |||
'gpuname':'3090',###显卡名称 | |||
'max_workers':1, ###并行线程数 | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
#'Detweights':"../AIlib2/weights/conf/highWay2/yolov5.pt", | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出 | |||
'seg_nclass':3,###分割模型类别数目,默认2类 | |||
'segRegionCnt':2,###分割模型结果需要保留的等值线数目 | |||
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数 | |||
'mixFunction':{'function':tracfficAccidentMixFunction, | |||
'pars':{ 'RoadArea': 16000, 'vehicleArea': 10, 'roadVehicleAngle': 15, 'speedRoadVehicleAngleMax': 75,'radius': 50 , 'roundness': 1.0, 'cls': 9, 'vehicleFactor': 0.1,'cls':9, 'confThres':0.25,'roadIou':0.6,'vehicleFlag':False,'distanceFlag': False } | |||
} | |||
}, | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'mode':'highWay3.0', | |||
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###后处理参数文件 | |||
'txtFontSize':20,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置 | |||
#'testImgPath':'images/trafficAccident/8.png',###测试图像的位置 | |||
'testImgPath':'images/noParking/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize'] | |||
if opt['business'] == 'noParking': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%( opt['business'] ), ###检测类别对照表 | |||
'half':True, | |||
'gpuname':'3090',###显卡名称 | |||
'max_workers':1, ###并行线程数 | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
#'Detweights':"../AIlib2/weights/conf/highWay2/yolov5.pt", | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出 | |||
'seg_nclass':4,###分割模型类别数目,默认2类 | |||
'segRegionCnt':2,###分割模型结果需要保留的等值线数目 | |||
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数 | |||
'mixFunction':{'function':mixNoParking_road_postprocess, | |||
'pars': | |||
#{ 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5 ,'RoadArea': 16000, } | |||
{'RoadArea': 16000, 'roadVehicleAngle': 15,'radius': 50, 'distanceFlag': False, 'vehicleFlag': False} | |||
} | |||
}, | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'mode':'highWay3.0', | |||
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/%s/para.json'%('highWay2' ),###后处理参数文件 | |||
'txtFontSize':20,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置 | |||
'testImgPath':'images/noParking/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize'] | |||
if opt['business'] == 'drowning': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%( opt['business'] ), ###检测类别对照表 | |||
'half':True, | |||
'gpuname':'3090',###显卡名称 | |||
'max_workers':1, ###并行线程数 | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
#'Detweights':"../AIlib2/weights/conf/highWay2/yolov5.pt", | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出 | |||
'seg_nclass':2,###分割模型类别数目,默认2类 | |||
'segRegionCnt':2,###分割模型结果需要保留的等值线数目 | |||
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数 | |||
'mixFunction':{'function':mixDrowing_water_postprocess, | |||
'pars':{ } | |||
} | |||
}, | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/%s/para.json'%('highWay2' ),###后处理参数文件 | |||
'txtFontSize':20,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置 | |||
'testImgPath':'images/drowning/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize'] | |||
''' 以下是基于检测的跟踪模型,只有检测没有分割 ''' | |||
if opt['business'] == 'forest2': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/forest2/labelnames.json", ###检测类别对照表 | |||
'gpuname':opt['gpu'],###显卡名称 | |||
'max_workers':1, ###并行线程数 | |||
'half':True, | |||
'trtFlag_det':True,###检测模型是否采用TRT | |||
'trtFlag_seg':False,###分割模型是否采用TRT | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
#'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出 | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ], | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'seg_nclass':2,###分割模型类别数目,默认2类 | |||
'segRegionCnt':0,###分割模型结果需要保留的等值线数目 | |||
'segPar':None,###分割模型预处理参数 | |||
'Segweights' : None,###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/forest/para.json',###后处理参数文件 | |||
'txtFontSize':80,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置 | |||
'testImgPath':'../AIdemo2/images/forest2/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
###车辆巡检参数 | |||
if opt['business'] == 'vehicle': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/vehicle/labelnames.json", ###检测类别对照表 | |||
'gpuname':'2080T',###显卡名称 | |||
'half':True, | |||
'max_workers':1, ###并行线程数 | |||
'trtFlag_det':True,###检测模型是否采用TRT | |||
'trtFlag_seg':False,###分割模型是否采用TRT | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出 | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'seg_nclass':2,###分割模型类别数目,默认2类 | |||
'segRegionCnt':0,###分割模型结果需要保留的等值线数目 | |||
'segPar':None,###分割模型预处理参数 | |||
'Segweights' : None,###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/vehicle/para.json',###后处理参数文件 | |||
'txtFontSize':40,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置 | |||
'testImgPath':'images/videos/vehicle/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
###行人检测模型 | |||
if opt['business'] == 'pedestrian': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/pedestrian/labelnames.json", ###检测类别对照表 | |||
'gpuname':'2080T',###显卡名称 | |||
'half':True, | |||
'max_workers':1, ###并行线程数 | |||
'trtFlag_det':True,###检测模型是否采用TRT | |||
'trtFlag_seg':False,###分割模型是否采用TRT | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出 | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'seg_nclass':2,###分割模型类别数目,默认2类 | |||
'segRegionCnt':0,###分割模型结果需要保留的等值线数目 | |||
'segPar':None,###分割模型预处理参数 | |||
'Segweights' : None,###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/pedestrian/para.json',###后处理参数文件 | |||
'txtFontSize':40,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置 | |||
'testImgPath':'../AIdemo2/images/pedestrian/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
if opt['business'] == 'smogfire': | |||
par={ | |||
'device':'0', ###显卡号,如果用TRT模型,只支持0(单显卡) | |||
'labelnames':"../AIlib2/weights/conf/smogfire/labelnames.json", ###检测类别对照表 | |||
'gpuname':'2080T',###显卡名称 | |||
'half':True, | |||
'max_workers':1, ###并行线程数 | |||
'trtFlag_det':True,###检测模型是否采用TRT | |||
'trtFlag_seg':False,###分割模型是否采用TRT | |||
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径 | |||
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出 | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'seg_nclass':2,###没有分割模型,此处不用 | |||
'segRegionCnt':0,###没有分割模型,此处不用 | |||
'segPar':None,###分割模型预处理参数 | |||
'Segweights' : None,###分割模型权重位置 | |||
'postFile': '../AIlib2/weights/conf/smogfire/para.json',###后处理参数文件 | |||
'txtFontSize':40,###文本字符的大小 | |||
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置 | |||
'testImgPath':'../AIdemo2/images/smogfire/',###测试图像的位置 | |||
'testOutPath':'images/results/',###输出测试图像位置 | |||
} | |||
###angler and swimmer detection
if opt['business'] == 'AnglerSwimmer':
par={
'device':'0', ###GPU index; the TRT model only supports 0 (single GPU)
'labelnames':"../AIlib2/weights/conf/AnglerSwimmer/labelnames.json", ###detection class name mapping
'gpuname':'2080T',###GPU name
'half':True,
'max_workers':1, ###number of worker threads
'trtFlag_det':True,###whether the detection model uses TensorRT
'trtFlag_seg':False,###whether the segmentation model uses TensorRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###detection model path
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###controls which detection classes are displayed/output
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'seg_nclass':2,###no segmentation model; unused here
'segRegionCnt':0,###no segmentation model; unused here
'segPar':None,###segmentation preprocessing parameters
'Segweights' : None,###segmentation model weights path
'postFile': '../AIlib2/weights/conf/AnglerSwimmer/para.json',###post-processing parameter file
'txtFontSize':40,###text font size
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###box/line display settings
'testImgPath':'../AIdemo2/images/AnglerSwimmer/',###test image directory
'testOutPath':'images/results/',###output directory for result images
} | |||
###waterway emergency: detection of people who have fallen into the water (channelEmergency)
if opt['business'] == 'channelEmergency':
par={
'device':'0', ###GPU index; the TRT model only supports 0 (single GPU)
'labelnames':"../AIlib2/weights/conf/channelEmergency/labelnames.json", ###detection class name mapping
'gpuname':'2080T',###GPU name
'half':True,
'max_workers':1, ###number of worker threads
'trtFlag_det':True,###whether the detection model uses TensorRT
'trtFlag_seg':False,###whether the segmentation model uses TensorRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###detection model path
#'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###controls which detection classes are displayed/output
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],###controls which detection classes are displayed/output
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'seg_nclass':2,###no segmentation model; unused here
'segRegionCnt':0,###no segmentation model; unused here
'segPar':None,###segmentation preprocessing parameters
'Segweights' : None,###segmentation model weights path
'postFile': '../AIlib2/weights/conf/channelEmergency/para.json',###post-processing parameter file
'txtFontSize':40,###text font size
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###box/line display settings
'testImgPath':'../AIdemo2/images/channelEmergency/',###test image directory
'testOutPath':'images/results/',###output directory for result images
} | |||
###illegal planting along rural roads
if opt['business'] == 'countryRoad':
par={
'device':'0', ###GPU index; the TRT model only supports 0 (single GPU)
'labelnames':"../AIlib2/weights/conf/countryRoad/labelnames.json", ###detection class name mapping
'gpuname':'2080T',###GPU name
'half':True,
'max_workers':1, ###number of worker threads
'trtFlag_det':True,###whether the detection model uses TensorRT
'trtFlag_seg':False,###whether the segmentation model uses TensorRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###detection model path
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###controls which detection classes are displayed/output
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'seg_nclass':2,###no segmentation model; unused here
'segRegionCnt':0,###no segmentation model; unused here
'segPar':None,###segmentation preprocessing parameters
'Segweights' : None,###segmentation model weights path
'postFile': '../AIlib2/weights/conf/countryRoad/para.json',###post-processing parameter file
'txtFontSize':40,###text font size
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###box/line display settings
'testImgPath':'../AIdemo2/images/countryRoad/',###test image directory
'testOutPath':'images/results/',###output directory for result images
} | |||
###urban-management project: detect city litter and vehicles
if opt['business'] == 'cityMangement':
par={
'device':'0', ###GPU index; the TRT model only supports 0 (single GPU)
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business']), ###detection class name mapping
'gpuname':'2080Ti',###GPU name
'half':True,
'max_workers':1, ###number of worker threads
'trtFlag_det':True,###whether the detection model uses TensorRT
'trtFlag_seg':False,###whether the segmentation model uses TensorRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###detection model path
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###controls which detection classes are displayed/output
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100},
'seg_nclass':2,###no segmentation model; unused here
'segRegionCnt':0,###no segmentation model; unused here
'segPar':None,###segmentation preprocessing parameters
'Segweights' : None,###segmentation model weights path
'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business']),###post-processing parameter file
'txtFontSize':40,###text font size
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###box/line display settings
'testImgPath':'images/cityMangement',###test image directory
'testOutPath':'images/results/',###output directory for result images
} | |||
par['trtFlag_det'] = par['Detweights'].endswith('.engine')
if par['Segweights']:
par['segPar']['trtFlag_seg'] = par['Segweights'].endswith('.engine')
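# a '.engine' suffix denotes a serialized TensorRT engine; any other file is treated as a PyTorch checkpoint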
##the forest/road models are selected via business: ['forest','road']
##preset parameters
gpuname=par['gpuname']#required when using TRT; only "3090" or "2080Ti" are supported
device_=par['device'] ##device selection: 'cpu', '0' or '1'
device = select_device(device_)
half = device.type != 'cpu' # half precision only supported on CUDA
trtFlag_det=par['trtFlag_det'] ###whether to use the TRT model for acceleration
##the parameters below should not be changed for now
imageW=1080 ####road model
digitFont= par['digitFont']
####load the detection model
if trtFlag_det: | |||
Detweights=par['Detweights'] | |||
logger = trt.Logger(trt.Logger.ERROR) | |||
with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime: | |||
model=runtime.deserialize_cuda_engine(f.read())# read the local TRT engine file and return an ICudaEngine object
print('####load TRT model :%s'%(Detweights)) | |||
else: | |||
Detweights=par['Detweights'] | |||
model = attempt_load(Detweights, map_location=device) # load FP32 model | |||
if half: model.half() | |||
####load the segmentation model
seg_nclass = par['seg_nclass'] | |||
segPar=par['segPar'] | |||
if par['Segweights']: | |||
if par['segPar']['trtFlag_seg']: | |||
Segweights = par['Segweights'] | |||
logger = trt.Logger(trt.Logger.ERROR) | |||
with open(Segweights, "rb") as f, trt.Runtime(logger) as runtime: | |||
segmodel=runtime.deserialize_cuda_engine(f.read())# read the local TRT engine file and return an ICudaEngine object
print('############load seg model trt success: ',Segweights)
else: | |||
Segweights = par['Segweights'] | |||
segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device) | |||
print('############load seg model pth success:',Segweights)
else: | |||
segmodel=None | |||
trackPar=par['trackPar'] | |||
sort_tracker = Sort(max_age=trackPar['sort_max_age'], | |||
min_hits=trackPar['sort_min_hits'], | |||
iou_threshold=trackPar['sort_iou_thresh']) | |||
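# SORT tracker settings: max_age = frames a lost track is kept alive,
# min_hits = matches required before a track is reported,
# iou_threshold = IoU used for detection-to-track association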
labelnames = par['labelnames'] | |||
postFile= par['postFile'] | |||
print( Detweights,labelnames ) | |||
conf_thres,iou_thres,classes,rainbows=get_postProcess_para(postFile) | |||
detPostPar = get_postProcess_para_dic(postFile) | |||
conf_thres,iou_thres,classes,rainbows = detPostPar["conf_thres"],detPostPar["iou_thres"],detPostPar["classes"],detPostPar["rainbows"] | |||
if 'ovlap_thres_crossCategory' in detPostPar.keys(): iou2nd=detPostPar['ovlap_thres_crossCategory'] | |||
else:iou2nd = None | |||
####model-selection parameters:
mode_paras=par['detModelpara'] | |||
allowedList,allowedList_string=get_needed_objectsIndex(mode_paras) | |||
#slopeIndex = par['slopeIndex'] | |||
##only the detection model is loaded; prepare the label glyphs for display
names=get_labelnames(labelnames)
#imageW=4915;###default is 1920; it is 4920 for the high-resolution forest-inspection images
outfontsize=int(imageW/1920*40);###
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
##image and video tests
outpth = par['testOutPath'] | |||
impth = par['testImgPath'] | |||
imgpaths=[]###all images found in the folder
videopaths=videopaths###videos to process (passed in by the caller); videos found in the folder are appended below
img_postfixs = ['.jpg','.JPG','.PNG','.png']; | |||
vides_postfixs= ['.MP4','.mp4','.avi'] | |||
if os.path.isdir(impth): | |||
for postfix in img_postfixs: | |||
imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) ) | |||
for postfix in vides_postfixs:
videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) ) | |||
else: | |||
postfix = os.path.splitext(impth)[-1] | |||
if postfix in img_postfixs: imgpaths=[ impth ] | |||
if postfix in vides_postfixs: videopaths = [impth ] | |||
imgpaths.sort() | |||
modelPar={ 'det_Model': model,'seg_Model':segmodel } | |||
processPar={'half':par['half'],'device':device,'conf_thres':conf_thres,'iou_thres':iou_thres,'trtFlag_det':trtFlag_det,'iou2nd':iou2nd} | |||
drawPar={'names':names,'label_arraylist':label_arraylist,'rainbows':rainbows,'font': par['digitFont'],'allowedList':allowedList} | |||
for i in range(len(imgpaths)): | |||
#for i in range(2): | |||
#imgpath = os.path.join(impth, folders[i]) | |||
imgpath = imgpaths[i] | |||
bname = os.path.basename(imgpath ) | |||
im0s=[cv2.imread(imgpath)] | |||
time00 = time.time() | |||
retResults,timeOut = AI_det_track_batch(im0s, [i] ,modelPar,processPar,sort_tracker ,trackPar,segPar) | |||
#print('###line627:',retResults[2]) | |||
#retResults,timeInfos = AI_det_track_batch(imgarray_list, iframe_list ,par0['modelPar'],par0['processPar'],par0['sort_tracker'] ,par0['trackPar'],segPar=par0['segPar']) | |||
if len(retResults[1])>0: | |||
retResults[0][0] = drawBoxTraceSimplied(retResults[1],i, retResults[0][0],rainbows=rainbows,boxFlag=True,traceFlag=False,names=drawPar['names']) | |||
time11 = time.time() | |||
image_array = retResults[0][0] | |||
'''
retResults[2] is a list with one entry per frame; each entry is a list of detected boxes,
and each box has the format [cls, x0, y0, x1, y1, conf, iframe, trackId],
e.g. retResults[2][j][k] is the k-th box of frame j.
'''
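# A minimal sketch of consuming retResults[2] (assumes at least one frame with detections):
#   for box in retResults[2][0]:
#       cls, x0, y0, x1, y1, conf_box, iframe, trackId = box
#       print('cls %d track %d conf %.2f box (%d, %d, %d, %d)' % (int(cls), int(trackId), conf_box, x0, y0, x1, y1))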
cv2.imwrite( os.path.join( outpth,bname ) ,image_array ) | |||
print('----image:%s, process:%s ( %s ),save:%s'%(bname,(time11-time00) * 1000, timeOut,(time.time() - time11) * 1000) ) | |||
##process video | |||
print('##begin to process videos, total %d videos'%( len(videopaths))) | |||
for i,video in enumerate(videopaths): | |||
print('process video%d :%s '%(i,video)) | |||
par0={'modelPar':modelPar,'processPar':processPar,'drawPar':drawPar,'outpth':par['testOutPath'], 'sort_tracker':sort_tracker,'trackPar':trackPar,'segPar':segPar} | |||
process_video(video,par0,mode='track') | |||
def OCR_demo2(opt): | |||
from ocrUtils2 import crnn_model | |||
from ocrUtils2.ocrUtils import get_cfg,recognition_ocr,strLabelConverter | |||
if opt['business'] == 'ocr2': | |||
par={ | |||
'image_dir':'images/ocr_en', | |||
'outtxt':'images/results', | |||
'weights':'../AIlib2/weights/conf/ocr2/crnn_448X32.pth', | |||
#'weights':'../weights/2080Ti/AIlib2/ocr2/crnn_2080Ti_fp16_448X32.engine', | |||
'device':'cuda:0', | |||
'cfg':'../AIlib2/weights/conf/ocr2/360CC_config.yaml', | |||
'char_file':'../AIlib2/weights/conf/ocr2/chars.txt', | |||
'imgH':32, | |||
'imgW':448, | |||
'workers':1 | |||
} | |||
image_dir=par['image_dir'] | |||
outtxt=par['outtxt'] | |||
workers=par['workers'] | |||
weights= par['weights'] | |||
device=par['device'] | |||
char_file=par['char_file'] | |||
imgH=par['imgH'] | |||
imgW=par['imgW'] | |||
cfg = par['cfg'] | |||
config = get_cfg(cfg, char_file) | |||
par['contextFlag']=False | |||
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu') | |||
if weights.endswith('.pth'): | |||
model = crnn_model.get_crnn(config,weights=weights).to(device) | |||
par['model_mode']='pth' | |||
else: | |||
logger = trt.Logger(trt.Logger.ERROR) | |||
with open(weights, "rb") as f, trt.Runtime(logger) as runtime: | |||
model = runtime.deserialize_cuda_engine(f.read())# read the local TRT engine file and return an ICudaEngine object
print('#####load TRT file:',weights,'success #####') | |||
context = model.create_execution_context() | |||
par['model_mode']='trt';par['contextFlag']=context | |||
converter = strLabelConverter(config.DATASET.ALPHABETS) | |||
img_urls=glob.glob('%s/*.jpg'%( image_dir )) | |||
img_urls.extend( glob.glob('%s/*.png'%( image_dir )) ) | |||
cnt=len(img_urls) | |||
print('%s has %d images'%(image_dir ,len(img_urls) ) ) | |||
# prepare the data
parList=[] | |||
for i in range(cnt): | |||
img_patch=cv2.imread( img_urls[i] , cv2.IMREAD_GRAYSCALE) | |||
started = time.time() | |||
img = cv2.imread(img_urls[i]) | |||
sim_pred = recognition_ocr(config, img, model, converter, device,par=par) | |||
finished = time.time() | |||
print('{0}: elapsed time: {1} prd:{2} '.format( os.path.basename( img_urls[i] ), finished - started, sim_pred )) | |||
def OBB_track_demo(opt): | |||
###oriented-bounding-box (OBB) ship detection
''' | |||
par={ | |||
'model_size':(608,608), #width,height | |||
'K':100, #Maximum of objects' | |||
'conf_thresh':0.18,##Confidence threshold, 0.1 for general evaluation | |||
'device':"cuda:0", | |||
'down_ratio':4,'num_classes':15, | |||
#'weights':'../AIlib2/weights/conf/ship2/obb_608X608.engine', | |||
'weights':'../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine'%(opt['gpu'],opt['business'],opt['gpu']), | |||
'dataset':'dota', | |||
'test_dir': '/mnt/thsw2/DSP2/videos/obbShips', | |||
'outpth': 'images/results', | |||
'half': False, | |||
'mean':(0.5, 0.5, 0.5), | |||
'std':(1, 1, 1), | |||
'model_size':(608,608),##width,height | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1}, | |||
'decoder':None, | |||
'test_flag':True, | |||
'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###post-processing parameter file
'drawBox':True,#####whether to draw boxes
'digitWordFont': { 'line_thickness':2,'boxLine_thickness':1,'wordSize':40, 'fontSize':1.0,'label_location':'leftTop'},
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business'] ), ###detection class name mapping
} | |||
''' | |||
par={ | |||
'obbModelPar':{ | |||
'model_size':(608,608),'K':100,'conf_thresh':0.3, 'down_ratio':4,'num_classes':15,'dataset':'dota', | |||
'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1}, | |||
'mean':(0.5, 0.5, 0.5),'std':(1, 1, 1), 'half': False,'decoder':None, | |||
'weights':'../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine'%(opt['gpu'],opt['business'],opt['gpu']), | |||
}, | |||
'outpth': 'images/results', | |||
'trackPar':{'sort_max_age':2,'sort_min_hits':3,'sort_iou_thresh':0.2,'det_cnt':10,'windowsize':29,'patchCnt':100}, | |||
'device':"cuda:0", | |||
#'test_dir': '/mnt/thsw2/DSP2/videos/obbShips/DJI_20230208110806_0001_W_6M.MP4', | |||
'test_dir':'/mnt/thsw2/DSP2/videos/obbShips/freighter2.mp4', | |||
'test_flag':True, | |||
'postFile': '../AIlib2/weights/conf/%s/para.json'%(opt['business'] ),###post-processing parameter file
'drawBox':True,#####whether to draw boxes
'drawPar': { 'digitWordFont' :{'line_thickness':2,'boxLine_thickness':1,'wordSize':40, 'fontSize':1.0,'label_location':'leftTop'}} ,
'labelnames':"../AIlib2/weights/conf/%s/labelnames.json"%(opt['business'] ), ###detection class name mapping
} | |||
#par['model_size'],par['mean'],par['std'],par['half'],par['saveType'],par['heads'],par['labelnames'],par['decoder'],par['down_ratio'],par['drawBox'] | |||
#par['rainbows'],par['label_array'],par['digitWordFont'] | |||
obbModelPar = par['obbModelPar'] | |||
####load the model
model,decoder2=load_model_decoder_OBB(obbModelPar) | |||
obbModelPar['decoder']=decoder2 | |||
names=get_labelnames(par['labelnames']);obbModelPar['labelnames']=names | |||
_,_,_,rainbows=get_postProcess_para(par['postFile']);par['drawPar']['rainbows']=rainbows | |||
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['drawPar']['digitWordFont']['wordSize'],fontpath="../AIlib2/conf/platech.ttf") | |||
#par['label_array']=label_arraylist | |||
trackPar=par['trackPar'] | |||
sort_tracker = OBB_Sort(max_age=trackPar['sort_max_age'], | |||
min_hits=trackPar['sort_min_hits'], | |||
iou_threshold=trackPar['sort_iou_thresh']) | |||
##image and video tests
impth = par['test_dir']
img_urls=[]###all images found in the folder
video_urls=[]###all videos found in the folder
img_postfixs = ['.jpg','.JPG','.PNG','.png']; | |||
vides_postfixs= ['.MP4','.mp4','.avi'] | |||
if os.path.isdir(impth): | |||
for postfix in img_postfixs: | |||
img_urls.extend(glob.glob('%s/*%s'%(impth,postfix )) ) | |||
for postfix in vides_postfixs:
video_urls.extend(glob.glob('%s/*%s'%(impth,postfix )) ) | |||
else: | |||
postfix = os.path.splitext(impth)[-1] | |||
if postfix in img_postfixs: img_urls=[ impth ] | |||
if postfix in vides_postfixs: video_urls = [impth ] | |||
parIn = {'obbModelPar':obbModelPar,'modelPar':{'obbmodel': model},'sort_tracker':sort_tracker,'outpth':par['outpth'],'trackPar':trackPar,'drawPar':par['drawPar']} | |||
par['drawPar']['label_array']=label_arraylist | |||
for img_url in img_urls: | |||
#print(img_url) | |||
ori_image=cv2.imread(img_url) | |||
#ori_image_list,infos = OBB_infer(model,ori_image,obbModelPar) | |||
ori_image_list,infos = OBB_tracker_batch([ori_image],[0],parIn['modelPar'],parIn['obbModelPar'],None,parIn['trackPar'],None) | |||
ori_image_list[1] = draw_obb(ori_image_list[2] ,ori_image_list[1],par['drawPar']) | |||
imgName = os.path.basename(img_url) | |||
saveFile = os.path.join(par['outpth'], imgName) | |||
ret=cv2.imwrite(saveFile, ori_image_list[1]) | |||
if not ret: | |||
print(saveFile, ' not created ') | |||
print( os.path.basename(img_url),':',infos,ori_image_list[2]) | |||
###process videos
for video_url in video_urls:
process_video(video_url, parIn ,mode='obbTrack')
async def main():
# probe a list of RTMP stream URLs concurrently (get_video_info must be available in this module)
urls = ['rtmp://192.168.10.101:19350/rlive/stream_107?sign=NQe66OXS']
tasks = [get_video_info(url) for url in urls]
results = await asyncio.gather(*tasks)
for result in results:
print(result)
if __name__=="__main__":
asyncio.run(main())
#jkm_demo() | |||
#businessAll=['river', 'river2','highWay2','noParking','drowning','forest2','vehicle','pedestrian','smogfire' , 'AnglerSwimmer','channelEmergency', 'countryRoad','cityMangement','ship2'] | |||
businessAll = ['river2'] | |||
videopaths = ['/home/th/tuo_heng/dev/DJI_20211229100908_0002_S.mp4'] | |||
for busi in businessAll: | |||
print('-'*40,'begin to test:',busi,'-'*40)
opt={'gpu':'2080Ti','business':busi} | |||
if busi in ['ship2']: | |||
OBB_track_demo(opt) | |||
else: | |||
#if opt['business'] in ['river','highWay2','noParking','drowning','']: | |||
det_track_demo(opt, videopaths) |
@@ -0,0 +1,122 @@ | |||
import json | |||
import time | |||
import subprocess as sp | |||
from concurrent.futures import ThreadPoolExecutor | |||
from traceback import format_exc | |||
import ffmpeg | |||
import sys | |||
import numpy as np | |||
""" | |||
Fetch basic information about a video stream via ffprobe.
""" | |||
def get_video_info(in_file): | |||
try: | |||
probe = ffmpeg.probe(in_file)
# e.g. in_file = 'https://vod.play.t-aaron.com/customerTrans/edc96ea2115a0723a003730956208134/55547af9-184f0827dae-0004-f90c-f2c-7ec68.mp4'
# format = probe['format'] | |||
# size = int(format['size'])/1024/1024 | |||
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None) | |||
if video_stream is None: | |||
print('No video stream found', file=sys.stderr) | |||
return | |||
width = int(video_stream['width']) | |||
height = int(video_stream['height']) | |||
num_frames = int(video_stream['nb_frames']) | |||
up, down = str(video_stream['r_frame_rate']).split('/') | |||
fps = int(up) / int(down)
print("fps:", fps)
# duration = float(video_stream['duration']) | |||
bit_rate = int(video_stream['bit_rate']) / 1000 | |||
print('width: {}'.format(width)) | |||
print('height: {}'.format(height)) | |||
# print('num_frames: {}'.format(num_frames)) | |||
print('bit_rate: {}k'.format(bit_rate)) | |||
# print('fps: {}'.format(fps)) | |||
# print('size: {}MB'.format(size)) | |||
# print('duration: {}'.format(duration)) | |||
return video_stream | |||
except Exception as err: | |||
if isinstance(err, ffmpeg._run.Error): | |||
print(err.stderr.decode(encoding='utf-8')) | |||
raise err | |||
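# Minimal usage sketch (assumes the URL is reachable):
#   stream = get_video_info('rtsp://localhost:8554/live')
#   if stream is not None:
#       print(stream['width'], stream['height'], stream['r_frame_rate'])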
def aa(p1, in_bytes): | |||
try: | |||
p1.stdin.write(in_bytes) | |||
except Exception: | |||
print(format_exc()) | |||
if __name__ == '__main__': | |||
file_path = 'rtsp://localhost:8554/live' | |||
command = ['ffmpeg', | |||
'-c:v', 'h264_cuvid', | |||
'-i', file_path, | |||
'-f', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-an', | |||
'-'] | |||
p = sp.Popen(command, stdout=sp.PIPE) | |||
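# The command above decodes with h264_cuvid on the GPU and writes raw BGR frames to stdout;
# one 1280x720 BGR frame is 1280*720*3 = 2764800 bytes, which is exactly what the read loop below consumes.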
command1 = ['ffmpeg', | |||
'-y', | |||
"-an", | |||
'-f', 'rawvideo', | |||
'-vcodec', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-thread_queue_size', '1024', | |||
'-s', "{}x{}".format(1280, 720), | |||
'-i', '-', # read input from stdin
'-r', str(25), | |||
'-g', str(25), | |||
'-maxrate', '6000k', | |||
'-b:v', '4000k', | |||
'-c:v', 'h264_nvenc', # | |||
'-bufsize', '4000k', | |||
'-pix_fmt', 'yuv420p', | |||
'-preset', 'p6', | |||
'-tune', 'll', | |||
'-f', 'flv', | |||
"rtmp://192.168.10.101:19350/rlive/stream_124?sign=YJ8aBPFp"] | |||
# pipe configuration (push stream 1)
p1 = sp.Popen(command1, stdin=sp.PIPE, shell=False) | |||
command2 = ['ffmpeg', | |||
'-y', | |||
"-an", | |||
'-f', 'rawvideo', | |||
'-vcodec', 'rawvideo', | |||
'-pix_fmt', 'bgr24', | |||
'-thread_queue_size', '1024', | |||
'-s', "{}x{}".format(1280, 720), | |||
'-i', '-', # read input from stdin
'-r', str(25), | |||
'-g', str(25), | |||
'-maxrate', '6000k', | |||
'-b:v', '4000k', | |||
'-c:v', 'h264_nvenc', # | |||
'-bufsize', '4000k', | |||
'-pix_fmt', 'yuv420p', | |||
'-preset', 'p6', | |||
'-tune', 'll', | |||
'-f', 'flv', | |||
"rtmp://192.168.10.101:19350/rlive/stream_125?sign=uMdRHj9R"] | |||
# pipe configuration (push stream 2)
p2 = sp.Popen(command2, stdin=sp.PIPE, shell=False)
start1 = time.time() | |||
num = 0 | |||
with ThreadPoolExecutor(max_workers=100) as t: | |||
while True: | |||
in_bytes = p.stdout.read(1280 * 720 * 3) | |||
if in_bytes: | |||
img = (np.frombuffer(in_bytes, np.uint8)).reshape((720, 1280, 3))  # decoded BGR frame (not used further here)
t.submit(aa, p1, in_bytes)
t.submit(aa, p2, in_bytes)
else: | |||
break |
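# Cleanup sketch for when the decoder stops producing frames (assumes the processes are still alive):
#   p1.stdin.close(); p2.stdin.close()
#   p1.wait(); p2.wait()
#   p.terminate()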
@@ -0,0 +1,80 @@ | |||
from traceback import format_exc | |||
import cv2 | |||
import numpy as np | |||
from loguru import logger | |||
import subprocess as sp | |||
from common import Constant | |||
from enums.ExceptionEnum import ExceptionType | |||
from exception.CustomerException import ServiceException | |||
# close_all_p is called at the bottom of this file; it is assumed to live in util.Cv2Utils alongside the other helpers
from util.Cv2Utils import push_video_stream, clear_pull_p, close_all_p
pull_p = None
push_p = None
p_push_status = [0, 0]
def start_pull_p(pull_url, requestId): | |||
try: | |||
command = ['ffmpeg'] | |||
# if pull_url.startswith("rtsp://"): | |||
# command.extend(['-timeout', '20000000', '-rtsp_transport', 'tcp']) | |||
# if pull_url.startswith("http") or pull_url.startswith("rtmp"): | |||
# command.extend(['-rw_timeout', '20000000']) | |||
command.extend(['-re', | |||
'-y', | |||
'-an', | |||
# '-hwaccel', 'cuda', cuvid | |||
'-c:v', 'h264_cuvid', | |||
# '-resize', self.wah, | |||
'-i', pull_url, | |||
'-f', 'rawvideo', | |||
# '-pix_fmt', 'bgr24', | |||
'-r', '25', | |||
'-']) | |||
return sp.Popen(command, stdout=sp.PIPE) | |||
except ServiceException as s:
logger.error("Failed to build the pull-stream pipe: {}, requestId: {}", s.msg, requestId)
raise s
except Exception as e:
logger.error("Failed to build the pull-stream pipe: {}, requestId: {}", format_exc(), requestId)
raise e
def pull_read_video_stream(pull_p, pull_url, width, height, width_height_3, w_2, h_2, requestId): | |||
result = None | |||
try: | |||
if pull_p is None: | |||
pull_p = start_pull_p(pull_url, requestId) | |||
in_bytes = pull_p.stdout.read(width_height_3) | |||
if in_bytes is not None and len(in_bytes) > 0: | |||
try: | |||
# result = (np.frombuffer(in_bytes, np.uint8).reshape([height * 3 // 2, width, 3])) | |||
# ValueError: cannot reshape array of size 3110400 into shape (1080,1920) | |||
result = (np.frombuffer(in_bytes, np.uint8)).reshape((height, width)) | |||
result = cv2.cvtColor(result, cv2.COLOR_YUV2BGR_NV12) | |||
# result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR) | |||
if result.shape[1] > Constant.width: | |||
result = cv2.resize(result, (result.shape[1] // 2, result.shape[0] // 2), interpolation=cv2.INTER_LINEAR) | |||
except Exception: | |||
logger.error("Unexpected video format: {}, requestId: {}", format_exc(), requestId)
raise ServiceException(ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[0],
ExceptionType.VIDEO_RESOLUTION_EXCEPTION.value[1])
except ServiceException as s: | |||
clear_pull_p(pull_p, requestId) | |||
raise s | |||
except Exception: | |||
clear_pull_p(pull_p, requestId) | |||
pull_p = None | |||
width = None | |||
height = None | |||
width_height_3 = None | |||
logger.error("Stream read error: {}, requestId: {}", format_exc(), requestId)
return result, pull_p, width, height, width_height_3 | |||
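# Note on the NV12 sizing used by the caller below: a width x height NV12 frame occupies
# width*height*3//2 bytes and is read here as a (height*3//2, width) uint8 array before
# cv2.COLOR_YUV2BGR_NV12; for 1920x1080 that is 1920*1080*3//2 = 3110400 bytes per frame.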
while True: | |||
frame, pull_p, width, height, width_height_3 = pull_read_video_stream(pull_p, 'rtmp://live.play.t-aaron.com/live/THSAr', 1920, | |||
1080*3//2, 1920*1080*3//2, 960, 540, | |||
'111') | |||
if frame is not None: | |||
push_p = push_video_stream(frame, push_p, 'rtmp://live.push.t-aaron.com/live/THSAs', p_push_status, '11') | |||
clear_pull_p(pull_p, "11111") | |||
close_all_p(push_p, None, None, "11111") |
@@ -7,16 +7,20 @@ | |||
# v["1"] = "5" | |||
# | |||
# print(aaa) | |||
class base(object): | |||
__slots__=('x') | |||
var=8 | |||
def __init__(self): | |||
pass | |||
def aa(self): | |||
print("aa") | |||
b=base() | |||
b.x=88 | |||
print(b.aa()) | |||
# class base(object): | |||
# __slots__=('x') | |||
# var=8 | |||
# def __init__(self): | |||
# pass | |||
# | |||
# def aa(self): | |||
# print("aa") | |||
# | |||
# b=base() | |||
# b.x=88 | |||
# print(b.aa()) | |||
aa = [1,2,3] | |||
a,b,c = aa | |||
print(a) | |||
print(b) | |||
print(c) |
@@ -0,0 +1,51 @@ | |||
import time | |||
from multiprocessing import Queue | |||
from os import getpid | |||
import cv2 | |||
import psutil | |||
import requests | |||
from moviepy.editor import VideoFileClip | |||
from pymediainfo import MediaInfo | |||
# aa = time.time() | |||
# try: | |||
# clip = VideoFileClip("rtmp://192.168.10.101:19350/rlive/stream_107?sign=NQe66OXS") | |||
# print("分辨率:", clip.size) | |||
# print("帧率:", clip.fps) | |||
# print("持续时间:", clip.duration) | |||
# except: | |||
# pass | |||
# print(time.time() - aa) | |||
# aa = time.time() | |||
# cap = cv2.VideoCapture("rtmp://192.168.10.101:19350/rlive/stream_123?sign=w6RNKsFF") | |||
# width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) | |||
# height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) | |||
# total_frames = int(cap.get(7)) | |||
# fps = cap.get(cv2.CAP_PROP_FPS) | |||
# duration = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000 | |||
# cap.release() | |||
# print("分辨率:", (width, height)) | |||
# print("帧率:", fps) | |||
# print("持续时间:", duration) | |||
# print("总帧数:", total_frames) | |||
# print(time.time() - aa) | |||
# aa = Queue() | |||
# for i in range(1000): | |||
# aa.put("1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111" | |||
# "11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111" | |||
# "11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") | |||
# aa.cancel_join_thread() | |||
# aa.cancel_join_thread() | |||
# aa.close() | |||
# aa.cancel_join_thread() | |||
# aa.cancel_join_thread() | |||
# aa.close() | |||
# aa.cancel_join_thread() | |||
# aa.cancel_join_thread() | |||
# aa.get() | |||
# aa = time.time() | |||
# psutil.Process(getpid()).ppid() | |||
# print(time.time()-aa) | |||
@@ -0,0 +1,19 @@ | |||
import cv2 | |||
import numpy as np | |||
# Create a VideoCapture object and open the WebRTC video stream
cap = cv2.VideoCapture('webrtc://221.230.150.241:10800/rtc/stream_14') | |||
while True: | |||
print(cap.isOpened()) | |||
# read one frame from the stream
ret, frame = cap.read() | |||
if ret: | |||
# frame processing goes here,
# e.g. image processing or object detection on each frame
# display the processed frame
cv2.imshow('Frame', frame) | |||
# press 'q' to exit the loop
if cv2.waitKey(1) & 0xFF == ord('q'): | |||
break | |||
# release the VideoCapture object and close all windows
cap.release() | |||
cv2.destroyAllWindows() |