
Upload

tags/V2.8.3^2^2
chenyukun · 1 year ago
parent · commit 9658e88736
100 changed files with 0 additions and 6469 deletions
  1. +0 -40  .gitignore
  2. +0 -58  .idea/deployment.xml
  3. +0 -6  .idea/encodings.xml
  4. +0 -88  .idea/inspectionProfiles/Project_Default.xml
  5. +0 -4  .idea/misc.xml
  6. +0 -8  .idea/modules.xml
  7. +0 -13  .idea/sshConfigs.xml
  8. +0 -6  .idea/vcs.xml
  9. +0 -56  .idea/webServers.xml
  10. +0 -687  .idea/workspace.xml
  11. +0 -0  __init__.py
  12. +0 -45  common/Constant.py
  13. +0 -256  common/RequestSchema.py
  14. +0 -404  common/YmlConstant.py
  15. +0 -0  common/__init__.py
  16. BIN  common/__pycache__/Constant.cpython-310.pyc
  17. BIN  common/__pycache__/Constant.cpython-38.pyc
  18. BIN  common/__pycache__/YmlConstant.cpython-38.pyc
  19. BIN  common/__pycache__/__init__.cpython-310.pyc
  20. BIN  common/__pycache__/__init__.cpython-38.pyc
  21. +0 -30  concurrency/CommonThread.py
  22. +0 -62  concurrency/FeedbackThread.py
  23. +0 -202  concurrency/FileUploadThread.py
  24. +0 -84  concurrency/HeartbeatThread.py
  25. +0 -1520  concurrency/IntelligentRecognitionProcess.py
  26. +0 -122  concurrency/PullStreamThread.py
  27. +0 -406  concurrency/PullVideoStreamProcess.py
  28. +0 -51  concurrency/RecordingHeartbeatThread.py
  29. +0 -0  concurrency/__init__.py
  30. BIN  concurrency/__pycache__/CommonThread.cpython-38.pyc
  31. BIN  concurrency/__pycache__/FeedbackThread.cpython-38.pyc
  32. BIN  concurrency/__pycache__/FileUploadThread.cpython-38.pyc
  33. BIN  concurrency/__pycache__/HeartbeatThread.cpython-38.pyc
  34. BIN  concurrency/__pycache__/IntelligentRecognitionProcess.cpython-38.pyc
  35. BIN  concurrency/__pycache__/PullStreamThread.cpython-38.pyc
  36. BIN  concurrency/__pycache__/PullVideoStreamProcess.cpython-38.pyc
  37. BIN  concurrency/__pycache__/RecordingHeartbeatThread.cpython-38.pyc
  38. BIN  concurrency/__pycache__/__init__.cpython-38.pyc
  39. +0 -22  config/dsp_aliyun.json
  40. +0 -116  config/dsp_application.json
  41. +0 -17  config/dsp_baidu.json
  42. +0 -12  config/dsp_logger.json
  43. +0 -24  dsp_master.py
  44. +0 -30  entity/FeedBack.py
  45. +0 -14  entity/PullStreamDto.py
  46. +0 -12  entity/TaskParam.py
  47. +0 -0  entity/__init__.py
  48. BIN  entity/__pycache__/FeedBack.cpython-38.pyc
  49. BIN  entity/__pycache__/__init__.cpython-38.pyc
  50. +0 -21  enums/AnalysisStatusEnum.py
  51. +0 -22  enums/AnalysisTypeEnum.py
  52. +0 -188  enums/BaiduSdkEnum.py
  53. +0 -78  enums/ExceptionEnum.py
  54. +0 -77  enums/ModelTypeEnum.py
  55. +0 -16  enums/RecordingStatusEnum.py
  56. +0 -0  enums/__init__.py
  57. BIN  enums/__pycache__/AnalysisStatusEnum.cpython-38.pyc
  58. BIN  enums/__pycache__/AnalysisTypeEnum.cpython-38.pyc
  59. BIN  enums/__pycache__/BaiduSdkEnum.cpython-310.pyc
  60. BIN  enums/__pycache__/BaiduSdkEnum.cpython-38.pyc
  61. BIN  enums/__pycache__/ExceptionEnum.cpython-310.pyc
  62. BIN  enums/__pycache__/ExceptionEnum.cpython-38.pyc
  63. BIN  enums/__pycache__/ModelTypeEnum.cpython-38.pyc
  64. BIN  enums/__pycache__/RecordingStatusEnum.cpython-38.pyc
  65. BIN  enums/__pycache__/__init__.cpython-310.pyc
  66. BIN  enums/__pycache__/__init__.cpython-38.pyc
  67. +0 -22  exception/CustomerException.py
  68. +0 -0  exception/__init__.py
  69. BIN  exception/__pycache__/CustomerException.cpython-310.pyc
  70. BIN  exception/__pycache__/CustomerException.cpython-38.pyc
  71. BIN  exception/__pycache__/__init__.cpython-310.pyc
  72. BIN  exception/__pycache__/__init__.cpython-38.pyc
  73. +0 -0  font/__init__.py
  74. BIN  font/simsun.ttc
  75. BIN  image/logo.png
  76. +0 -425  service/Dispatcher.py
  77. +0 -0  service/__init__.py
  78. BIN  service/__pycache__/Dispatcher.cpython-310.pyc
  79. BIN  service/__pycache__/Dispatcher.cpython-38.pyc
  80. BIN  service/__pycache__/__init__.cpython-310.pyc
  81. BIN  service/__pycache__/__init__.cpython-38.pyc
  82. +0 -3  test/__init__.py
  83. BIN  test/__pycache__/__init__.cpython-38.pyc
  84. +0 -0  test/aliyun/__init__.py
  85. BIN  test/aliyun/aaa.jpeg
  86. +0 -119  test/aliyun/ossdemo.py
  87. +0 -128  test/aliyun/vod.py
  88. +0 -164  test/aliyun/vodTest.py
  89. +0 -130  test/aliyun/voddemo.py
  90. +0 -124  test/aliyun/vodtest1.py
  91. +0 -29  test/aliyun/vodtest2.py
  92. +0 -55  test/collections/ChainMap.py
  93. +0 -111  test/collections/Counter.py
  94. +0 -35  test/collections/OrderedDict.py
  95. +0 -30  test/collections/__init__.py
  96. +0 -66  test/collections/defaultdict.py
  97. +0 -148  test/collections/deque.py
  98. +0 -83  test/collections/namedtuple.py
  99. +0 -0  test/color/__init__.py
  100. +0 -0  test/color/color_test.py

.gitignore  +0 -40

@@ -1,40 +0,0 @@
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/

### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache

### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr

### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/

### VS Code ###
.vscode/

/.idea
/.vscode
/.svn
tuoheng-ui
target/
HELP.md

.idea/deployment.xml  +0 -58

@@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="PublishConfigData" serverName="外网" remoteFilesAllowedToDisappearOnAutoupload="false">
<serverData>
<paths name="10.21">
<serverdata>
<mappings>
<mapping deploy="/home/chenyukun/dev/algSch" local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
<paths name="10.22">
<serverdata>
<mappings>
<mapping deploy="/home/th/tuo_heng/prod/tuoheng_alg" local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
<paths name="192.168.11.7">
<serverdata>
<mappings>
<mapping deploy="/home/th/tuo_heng/test/tuoheng_alg" local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
<paths name="192.168.11.8">
<serverdata>
<mappings>
<mapping deploy="/home/th/tuo_heng/dev/tuoheng_alg" local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
<paths name="66">
<serverdata>
<mappings>
<mapping deploy="/opt/ai/tuoheng_alg" local="$PROJECT_DIR$" web="/" />
<mapping local="" />
</mappings>
</serverdata>
</paths>
<paths name="chenyukun">
<serverdata>
<mappings>
<mapping deploy="/opt/ai/algSch" local="$PROJECT_DIR$" web="/" />
<mapping deploy="" local="" />
</mappings>
</serverdata>
</paths>
<paths name="外网">
<serverdata>
<mappings>
<mapping deploy="/home/thsw/chenyukun/tuoheng_alg" local="$PROJECT_DIR$" web="/" />
</mappings>
</serverdata>
</paths>
</serverData>
</component>
</project>

.idea/encodings.xml  +0 -6

@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Encoding">
    <file url="PROJECT" charset="UTF-8" />
  </component>
</project>

.idea/inspectionProfiles/Project_Default.xml  +0 -88

@@ -1,88 +0,0 @@
<component name="InspectionProjectProfileManager">
<profile version="1.0">
<option name="myName" value="Project Default" />
<inspection_tool class="JavaDoc" enabled="true" level="WARNING" enabled_by_default="true">
<option name="TOP_LEVEL_CLASS_OPTIONS">
<value>
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" />
<option name="REQUIRED_TAGS" value="" />
</value>
</option>
<option name="INNER_CLASS_OPTIONS">
<value>
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" />
<option name="REQUIRED_TAGS" value="" />
</value>
</option>
<option name="METHOD_OPTIONS">
<value>
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" />
<option name="REQUIRED_TAGS" value="@return@param@throws or @exception" />
</value>
</option>
<option name="FIELD_OPTIONS">
<value>
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" />
<option name="REQUIRED_TAGS" value="" />
</value>
</option>
<option name="IGNORE_DEPRECATED" value="false" />
<option name="IGNORE_JAVADOC_PERIOD" value="true" />
<option name="IGNORE_DUPLICATED_THROWS" value="false" />
<option name="IGNORE_POINT_TO_ITSELF" value="false" />
<option name="myAdditionalJavadocTags" value="date" />
</inspection_tool>
<inspection_tool class="JavadocDeclaration" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ADDITIONAL_TAGS" value="date" />
</inspection_tool>
<inspection_tool class="MissingJavadoc" enabled="true" level="WARNING" enabled_by_default="true">
<option name="PACKAGE_SETTINGS">
<Options>
<option name="ENABLED" value="false" />
</Options>
</option>
<option name="MODULE_SETTINGS">
<Options>
<option name="ENABLED" value="false" />
</Options>
</option>
<option name="TOP_LEVEL_CLASS_SETTINGS">
<Options>
<option name="ENABLED" value="false" />
</Options>
</option>
<option name="INNER_CLASS_SETTINGS">
<Options>
<option name="ENABLED" value="false" />
</Options>
</option>
<option name="METHOD_SETTINGS">
<Options>
<option name="REQUIRED_TAGS" value="@return@param@throws or @exception" />
<option name="ENABLED" value="false" />
</Options>
</option>
<option name="FIELD_SETTINGS">
<Options>
<option name="ENABLED" value="false" />
</Options>
</option>
</inspection_tool>
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
<option name="ignoredErrors">
<list>
<option value="N806" />
<option value="N803" />
<option value="N802" />
</list>
</option>
</inspection_tool>
<inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
<option name="ignoredIdentifiers">
<list>
<option value="str.*" />
</list>
</option>
</inspection_tool>
</profile>
</component>

.idea/misc.xml  +0 -4

@@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Remote Python 3.8.15 (sftp://th@192.168.11.8:32178/home/th/anaconda3/envs/chenyukun/bin/python3.8)" project-jdk-type="Python SDK" />
</project>

.idea/modules.xml  +0 -8

@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/tuoheng_alg.iml" filepath="$PROJECT_DIR$/tuoheng_alg.iml" />
    </modules>
  </component>
</project>

.idea/sshConfigs.xml  +0 -13

@@ -1,13 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="SshConfigs">
<configs>
<sshConfig authType="PASSWORD" host="192.168.10.66" id="aa89844a-f7c0-47b6-9359-30d13fa76380" port="22" nameFormat="DESCRIPTIVE" username="thsw2" />
<sshConfig authType="PASSWORD" host="192.168.10.21" id="adf5e1da-4910-4668-bfbb-432f4e2ae77c" port="22" nameFormat="DESCRIPTIVE" username="th" />
<sshConfig authType="PASSWORD" host="192.168.10.22" id="ac18a75e-ff42-4875-a5da-ad98d2d695ea" port="22" nameFormat="DESCRIPTIVE" username="th" />
<sshConfig authType="PASSWORD" connectionConfig="{&quot;serverAliveInterval&quot;:300}" host="192.168.10.66" id="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" port="32782" nameFormat="DESCRIPTIVE" username="root" />
<sshConfig authType="PASSWORD" connectionConfig="{&quot;proxyParams&quot;:{&quot;proxyHost&quot;:&quot;&quot;,&quot;proxyPort&quot;:-1,&quot;proxyType&quot;:&quot;IDE_WIDE_PROXY&quot;}}" host="192.168.11.7" id="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" port="22" nameFormat="DESCRIPTIVE" username="th" useOpenSSHConfig="true" />
<sshConfig authType="PASSWORD" host="221.226.114.142" id="2af8cb49-06d5-499e-85f2-e22072c6c979" port="1011" nameFormat="DESCRIPTIVE" username="thsw" useOpenSSHConfig="true" />
</configs>
</component>
</project>

.idea/vcs.xml  +0 -6

@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

.idea/webServers.xml  +0 -56

@@ -1,56 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="WebServers">
<option name="servers">
<webServer id="630d5d4a-219c-4d57-bb0b-44534517b306" name="chenyukun">
<fileTransfer accessType="SFTP" host="192.168.10.66" port="32782" sshConfigId="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" sshConfig="root@192.168.10.66:32782 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
<webServer id="cc246223-f324-4e86-9e18-4b309f3a6500" name="66">
<fileTransfer accessType="SFTP" host="192.168.10.66" port="32782" sshConfigId="dcf03076-1bc5-4ce3-a4e4-38f7f00ea74a" sshConfig="root@192.168.10.66:32782 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
<webServer id="c24476df-a574-465f-9529-a8e029b84f34" name="10.21">
<fileTransfer accessType="SFTP" host="192.168.10.22" port="22" sshConfigId="ac18a75e-ff42-4875-a5da-ad98d2d695ea" sshConfig="th@192.168.10.22:22 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
<webServer id="575fb0f8-1aa4-4ab8-8952-1657964a0673" name="10.22">
<fileTransfer accessType="SFTP" host="192.168.10.22" port="22" sshConfigId="ac18a75e-ff42-4875-a5da-ad98d2d695ea" sshConfig="th@192.168.10.22:22 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
<webServer id="b761b5c5-5f66-4c6a-ad49-4783ff5df619" name="192.168.11.8">
<fileTransfer accessType="SFTP" host="192.168.11.8" port="32178" sshConfigId="080a8ea2-04ef-404c-8202-a30cad7668a2" sshConfig="th@192.168.11.8:32178 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
<webServer id="d52d4eb1-ad07-4dd6-adac-d5e84d4a0f0c" name="192.168.11.7">
<fileTransfer accessType="SFTP" host="192.168.11.7" port="22" sshConfigId="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" sshConfig="th@192.168.11.7:22 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
<webServer id="e0e06591-e01f-4d76-88e9-9c8ee17b919f" name="外网">
<fileTransfer accessType="SFTP" host="192.168.11.7" port="22" sshConfigId="5bb44c10-4e9c-4059-a0c0-9f2596b74bc0" sshConfig="th@192.168.11.7:22 password">
<advancedOptions>
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" />
</advancedOptions>
</fileTransfer>
</webServer>
</option>
</component>
</project>

.idea/workspace.xml  +0 -687

@@ -1,687 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes">
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/concurrency/FeedbackThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FeedbackThread.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/concurrency/FileUploadThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/FileUploadThread.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/PullVideoStreamProcess.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ModelTypeEnum.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/service/Dispatcher.py" beforeDir="false" afterPath="$PROJECT_DIR$/service/Dispatcher.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/test/线程/Test.py" beforeDir="false" afterPath="$PROJECT_DIR$/test/线程/Test.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/util/Cv2Utils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/Cv2Utils.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="Git.Settings">
<excluded-from-favorite>
<branch-storage>
<map>
<entry type="LOCAL">
<value>
<list>
<branch-info repo="$PROJECT_DIR$" source="master" />
</list>
</value>
</entry>
<entry type="REMOTE">
<value>
<list>
<branch-info repo="$PROJECT_DIR$" source="origin/master" />
</list>
</value>
</entry>
</map>
</branch-storage>
</excluded-from-favorite>
<favorite-branches>
<branch-storage>
<map>
<entry type="LOCAL">
<value>
<list>
<branch-info repo="$PROJECT_DIR$" source="develop" />
</list>
</value>
</entry>
<entry type="REMOTE">
<value>
<list>
<branch-info repo="$PROJECT_DIR$" source="origin/develop" />
</list>
</value>
</entry>
</map>
</branch-storage>
</favorite-branches>
<option name="RECENT_BRANCH_BY_REPOSITORY">
<map>
<entry key="$PROJECT_DIR$" value="master" />
</map>
</option>
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="GitSEFilterConfiguration">
<file-type-list>
<filtered-out-file-type name="LOCAL_BRANCH" />
<filtered-out-file-type name="REMOTE_BRANCH" />
<filtered-out-file-type name="TAG" />
<filtered-out-file-type name="COMMIT_BY_MESSAGE" />
</file-type-list>
</component>
<component name="GitToolBoxStore">
<option name="recentBranches">
<RecentBranches>
<option name="branchesForRepo">
<list>
<RecentBranchesForRepo>
<option name="branches">
<list>
<RecentBranch>
<option name="branchName" value="develop" />
<option name="lastUsedInstant" value="1668736215" />
</RecentBranch>
<RecentBranch>
<option name="branchName" value="master" />
<option name="lastUsedInstant" value="1668668084" />
</RecentBranch>
</list>
</option>
<option name="repositoryRootUrl" value="file://$PROJECT_DIR$" />
</RecentBranchesForRepo>
</list>
</option>
</RecentBranches>
</option>
</component>
<component name="MarkdownSettingsMigration">
<option name="stateVersion" value="1" />
</component>
<component name="MavenImportPreferences">
<option name="generalSettings">
<MavenGeneralSettings>
<option name="mavenHome" value="C:/learn/maven/apache-maven-3.6.3-bin/apache-maven-3.6.3" />
<option name="userSettingsFile" value="C:\learn\maven\apache-maven-3.6.3-bin\apache-maven-3.6.3\conf\settings.xml" />
</MavenGeneralSettings>
</option>
<option name="importingSettings">
<MavenImportingSettings>
<option name="jdkForImporter" value="11" />
</MavenImportingSettings>
</option>
</component>
<component name="MavenRunner">
<option name="jreName" value="11" />
</component>
<component name="ProjectId" id="2DTRMTxJTz5BhFzI55HkZIMBcy5" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">{
&quot;keyToString&quot;: {
&quot;RunOnceActivity.OpenProjectViewOnStart&quot;: &quot;true&quot;,
&quot;RunOnceActivity.ShowReadmeOnStart&quot;: &quot;true&quot;,
&quot;WebServerToolWindowFactoryState&quot;: &quot;true&quot;,
&quot;WebServerToolWindowPanel.toolwindow.highlight.mappings&quot;: &quot;true&quot;,
&quot;WebServerToolWindowPanel.toolwindow.highlight.symlinks&quot;: &quot;true&quot;,
&quot;WebServerToolWindowPanel.toolwindow.show.date&quot;: &quot;false&quot;,
&quot;WebServerToolWindowPanel.toolwindow.show.permissions&quot;: &quot;false&quot;,
&quot;WebServerToolWindowPanel.toolwindow.show.size&quot;: &quot;false&quot;,
&quot;last_opened_file_path&quot;: &quot;D:/tuoheng/codenew/tuoheng_alg&quot;,
&quot;node.js.detected.package.eslint&quot;: &quot;true&quot;,
&quot;node.js.detected.package.tslint&quot;: &quot;true&quot;,
&quot;node.js.selected.package.eslint&quot;: &quot;(autodetect)&quot;,
&quot;node.js.selected.package.tslint&quot;: &quot;(autodetect)&quot;,
&quot;nodejs_package_manager_path&quot;: &quot;npm&quot;,
&quot;project.structure.last.edited&quot;: &quot;SDK&quot;,
&quot;project.structure.proportion&quot;: &quot;0.15&quot;,
&quot;project.structure.side.proportion&quot;: &quot;0.2816092&quot;,
&quot;settings.editor.selected.configurable&quot;: &quot;preferences.pluginManager&quot;,
&quot;vue.rearranger.settings.migration&quot;: &quot;true&quot;
}
}</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="D:\tuoheng\codenew\tuoheng_alg\enums" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\entity" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\读写" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\config" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\算法" />
</key>
<key name="MoveFile.RECENT_KEYS">
<recent name="D:\tuoheng\codenew\tuoheng_alg\config" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\test" />
<recent name="D:\tuoheng\codenew\tuoheng_alg" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\test\设计模式\单例" />
<recent name="D:\tuoheng\codenew\tuoheng_alg\font" />
</key>
</component>
<component name="RunManager" selected="Python.Test">
<configuration name="CpuUtils" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/util" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/util/CpuUtils.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="Test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/线程" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/线程/Test.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="csv_test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/读写" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/读写/csv_test.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="editImage" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/editimage" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/editimage/editImage.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="mysqltest" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/mysqltest.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test (1)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/内存优化/slots" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/内存优化/slots/test.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test1" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="tuoheng_alg" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/内存优化/slots" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/内存优化/slots/test1.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<list>
<item itemvalue="Python.editImage" />
<item itemvalue="Python.mysqltest" />
<item itemvalue="Python.Test" />
<item itemvalue="Python.test1" />
<item itemvalue="Python.CpuUtils" />
<item itemvalue="Python.csv_test" />
<item itemvalue="Python.test (1)" />
</list>
<recent_temporary>
<list>
<item itemvalue="Python.Test" />
<item itemvalue="Python.test1" />
<item itemvalue="Python.test (1)" />
<item itemvalue="Python.csv_test" />
<item itemvalue="Python.CpuUtils" />
</list>
</recent_temporary>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
<component name="SshConsoleOptionsProvider">
<option name="myEncoding" value="UTF-8" />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="" />
<created>1660721040418</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1660721040418</updated>
<workItem from="1660721041939" duration="5378000" />
<workItem from="1660742200263" duration="758000" />
<workItem from="1660781586599" duration="12677000" />
<workItem from="1660821003279" duration="3079000" />
<workItem from="1660831418060" duration="2591000" />
<workItem from="1660867353831" duration="14213000" />
<workItem from="1661125394679" duration="2000000" />
<workItem from="1661212127373" duration="12131000" />
<workItem from="1661228338772" duration="10683000" />
<workItem from="1661263812380" duration="582000" />
<workItem from="1661298710414" duration="3633000" />
<workItem from="1661385517494" duration="6862000" />
<workItem from="1661474047536" duration="3841000" />
<workItem from="1661506480813" duration="579000" />
<workItem from="1661753711797" duration="4495000" />
<workItem from="1661847814441" duration="5437000" />
<workItem from="1661864932477" duration="11602000" />
<workItem from="1661903556894" duration="23425000" />
<workItem from="1661956938136" duration="695000" />
<workItem from="1661989919031" duration="25723000" />
<workItem from="1662039810210" duration="419000" />
<workItem from="1662076586600" duration="25491000" />
<workItem from="1662335184832" duration="5150000" />
<workItem from="1662348891112" duration="7581000" />
<workItem from="1662421409878" duration="15047000" />
<workItem from="1663472604061" duration="19071000" />
<workItem from="1663515200540" duration="648000" />
<workItem from="1663545195142" duration="10121000" />
<workItem from="1666136820911" duration="26264000" />
<workItem from="1666223126104" duration="11494000" />
<workItem from="1666269871579" duration="3963000" />
<workItem from="1666351797324" duration="835000" />
<workItem from="1666436589395" duration="2588000" />
<workItem from="1666568450522" duration="695000" />
<workItem from="1666658084006" duration="458000" />
<workItem from="1668557891343" duration="19808000" />
<workItem from="1668667255748" duration="5947000" />
<workItem from="1668729965000" duration="4848000" />
<workItem from="1668992475879" duration="14053000" />
<workItem from="1669163433580" duration="26647000" />
<workItem from="1669251179588" duration="805000" />
<workItem from="1669276335478" duration="1020000" />
<workItem from="1669280017428" duration="1441000" />
<workItem from="1669628332462" duration="4397000" />
<workItem from="1669680423418" duration="5107000" />
<workItem from="1669778128579" duration="17564000" />
<workItem from="1669860048099" duration="1865000" />
<workItem from="1670294320960" duration="9059000" />
<workItem from="1670484573639" duration="7495000" />
<workItem from="1670545327661" duration="1911000" />
<workItem from="1670573239165" duration="15919000" />
<workItem from="1670893670201" duration="9457000" />
<workItem from="1670976721564" duration="5457000" />
<workItem from="1671067024950" duration="576000" />
<workItem from="1671427932628" duration="2256000" />
<workItem from="1671445227735" duration="582000" />
<workItem from="1671606515022" duration="4520000" />
<workItem from="1672047085940" duration="72000" />
<workItem from="1672119186060" duration="628000" />
<workItem from="1672192765984" duration="1002000" />
<workItem from="1672273700875" duration="1315000" />
<workItem from="1672295805200" duration="19000" />
<workItem from="1672709979593" duration="2445000" />
<workItem from="1672797232144" duration="25138000" />
<workItem from="1672877597405" duration="5633000" />
<workItem from="1672967214543" duration="2590000" />
<workItem from="1673483697794" duration="13972000" />
<workItem from="1674003653469" duration="751000" />
<workItem from="1674089698944" duration="1180000" />
<workItem from="1674174312546" duration="591000" />
<workItem from="1674953245041" duration="2374000" />
<workItem from="1675038738781" duration="20967000" />
<workItem from="1675126111623" duration="4395000" />
<workItem from="1675158655221" duration="5508000" />
<workItem from="1675298111671" duration="1710000" />
<workItem from="1675388395566" duration="5304000" />
<workItem from="1675643763842" duration="771000" />
<workItem from="1676269822235" duration="1954000" />
<workItem from="1676362382024" duration="821000" />
<workItem from="1676424351744" duration="4050000" />
<workItem from="1676506502236" duration="585000" />
<workItem from="1676871078953" duration="337000" />
<workItem from="1676895744433" duration="4418000" />
<workItem from="1676944131792" duration="515000" />
<workItem from="1677036599171" duration="4605000" />
<workItem from="1677112353743" duration="588000" />
<workItem from="1677574708616" duration="34000" />
<workItem from="1677632498068" duration="4279000" />
<workItem from="1677654510460" duration="2082000" />
<workItem from="1677727307545" duration="438000" />
<workItem from="1678153491396" duration="9573000" />
<workItem from="1678253386456" duration="45394000" />
<workItem from="1678668097364" duration="2754000" />
<workItem from="1678760898640" duration="1320000" />
<workItem from="1678791733686" duration="531000" />
<workItem from="1678839507873" duration="595000" />
<workItem from="1678885439785" duration="444000" />
<workItem from="1678925915104" duration="595000" />
<workItem from="1678927031601" duration="987000" />
<workItem from="1678928413253" duration="6728000" />
<workItem from="1679013228398" duration="17427000" />
<workItem from="1679039229464" duration="9832000" />
<workItem from="1679118299629" duration="17688000" />
<workItem from="1679289612196" duration="5820000" />
<workItem from="1679297557058" duration="1333000" />
<workItem from="1679359163976" duration="1997000" />
<workItem from="1679444345433" duration="1190000" />
<workItem from="1679633582926" duration="1979000" />
<workItem from="1679876991879" duration="1396000" />
<workItem from="1680136325711" duration="24199000" />
<workItem from="1680250415691" duration="1353000" />
<workItem from="1680486532876" duration="8132000" />
<workItem from="1680502907387" duration="10960000" />
<workItem from="1680527121128" duration="3411000" />
<workItem from="1680577929248" duration="5512000" />
<workItem from="1680741123267" duration="14728000" />
<workItem from="1680826640176" duration="21580000" />
<workItem from="1680914030055" duration="14971000" />
<workItem from="1680952718810" duration="967000" />
<workItem from="1681086404430" duration="27714000" />
<workItem from="1681170492379" duration="39568000" />
<workItem from="1681220684404" duration="2140000" />
<workItem from="1681258113350" duration="32577000" />
<workItem from="1681301257655" duration="429000" />
<workItem from="1681344786746" duration="5993000" />
<workItem from="1681363389283" duration="5626000" />
<workItem from="1681431288218" duration="25974000" />
<workItem from="1681690599771" duration="2894000" />
<workItem from="1681696465772" duration="30396000" />
<workItem from="1681826261843" duration="1474000" />
<workItem from="1681863254347" duration="13207000" />
<workItem from="1681950317514" duration="23460000" />
<workItem from="1682036333722" duration="651000" />
<workItem from="1682405963588" duration="37651000" />
<workItem from="1682554149580" duration="33878000" />
<workItem from="1682640444831" duration="10674000" />
<workItem from="1683244481879" duration="9171000" />
<workItem from="1683332505792" duration="23325000" />
<workItem from="1683506530261" duration="919000" />
<workItem from="1683507482567" duration="15434000" />
<workItem from="1683591783960" duration="1186000" />
<workItem from="1683677260592" duration="21750000" />
<workItem from="1683762579964" duration="23871000" />
<workItem from="1683851036596" duration="51000" />
<workItem from="1683851900729" duration="83000" />
<workItem from="1683851995142" duration="24789000" />
<workItem from="1684110880642" duration="5895000" />
<workItem from="1684197638479" duration="9103000" />
<workItem from="1684284520362" duration="13345000" />
<workItem from="1684379357818" duration="22600000" />
<workItem from="1684456296559" duration="11147000" />
<workItem from="1684653340859" duration="1199000" />
<workItem from="1684715657250" duration="6747000" />
<workItem from="1684801865053" duration="16900000" />
<workItem from="1684887585997" duration="21179000" />
<workItem from="1685069170536" duration="5199000" />
<workItem from="1685318330589" duration="16451000" />
<workItem from="1685367595669" duration="1105000" />
<workItem from="1685405545435" duration="5540000" />
<workItem from="1685929597469" duration="1586000" />
<workItem from="1686009758832" duration="4033000" />
<workItem from="1686099127317" duration="8648000" />
<workItem from="1686181421528" duration="9733000" />
<workItem from="1686530580527" duration="10215000" />
<workItem from="1686708793889" duration="28856000" />
<workItem from="1686787483987" duration="42321000" />
<workItem from="1686882826411" duration="32824000" />
<workItem from="1686963632234" duration="27367000" />
<workItem from="1687046210304" duration="54489000" />
<workItem from="1687141700932" duration="30282000" />
<workItem from="1687219517554" duration="39842000" />
<workItem from="1687306657563" duration="921000" />
<workItem from="1687307950930" duration="44000" />
<workItem from="1687308509659" duration="25425000" />
<workItem from="1687652018398" duration="8524000" />
<workItem from="1687736740408" duration="603000" />
<workItem from="1687737713032" duration="3837000" />
<workItem from="1687779451916" duration="5176000" />
<workItem from="1687933838564" duration="4146000" />
<workItem from="1687954592393" duration="1199000" />
<workItem from="1687997778160" duration="3792000" />
<workItem from="1688021144565" duration="1972000" />
<workItem from="1688083600084" duration="65000" />
<workItem from="1688083679443" duration="5459000" />
<workItem from="1688344638833" duration="1197000" />
<workItem from="1688441104396" duration="5401000" />
<workItem from="1688524387384" duration="5018000" />
<workItem from="1688611646979" duration="3976000" />
<workItem from="1688688564237" duration="1253000" />
<workItem from="1688953948444" duration="13288000" />
<workItem from="1689120808268" duration="5671000" />
<workItem from="1689297784810" duration="1254000" />
<workItem from="1689341342536" duration="14000" />
<workItem from="1689378621763" duration="831000" />
<workItem from="1689554206797" duration="16635000" />
<workItem from="1689644925650" duration="19947000" />
</task>
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="Vcs.Log.Tabs.Properties">
<option name="TAB_STATES">
<map>
<entry key="MAIN">
<value>
<State />
</value>
</entry>
</map>
</option>
</component>
<component name="XDebuggerManager">
<breakpoint-manager>
<breakpoints>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/test/ffmpeg11/ffmpeg33.py</url>
<line>24</line>
<option name="timeStamp" value="1" />
</line-breakpoint>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/test/aliyun/ossdemo.py</url>
<line>4</line>
<option name="timeStamp" value="4" />
</line-breakpoint>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/test/collections/deque.py</url>
<line>134</line>
<option name="timeStamp" value="6" />
</line-breakpoint>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/util/ModelUtils.py</url>
<line>1</line>
<option name="timeStamp" value="7" />
</line-breakpoint>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/dsp_master.py</url>
<line>1</line>
<option name="timeStamp" value="8" />
</line-breakpoint>
</breakpoints>
</breakpoint-manager>
</component>
<component name="XSLT-Support.FileAssociations.UIState">
<expand />
<select />
</component>
<component name="com.intellij.coverage.CoverageDataManagerImpl">
<SUITE FILE_PATH="coverage/tuoheng_alg$color_test.coverage" NAME="color_test 覆盖结果" MODIFIED="1683683775604" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/color" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo1.coverage" NAME="demo1 覆盖结果" MODIFIED="1685325611032" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg33.coverage" NAME="ffmpeg33 覆盖结果" MODIFIED="1670489109246" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo4.coverage" NAME="demo4 覆盖结果" MODIFIED="1684809818971" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" />
<SUITE FILE_PATH="coverage/tuoheng_alg$minio.coverage" NAME="minio 覆盖结果" MODIFIED="1667465702864" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/minio1" />
<SUITE FILE_PATH="coverage/tuoheng_alg$OrderedDict.coverage" NAME="OrderedDict 覆盖结果" MODIFIED="1684897161191" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" />
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$SnakeGame.coverage" NAME="SnakeGame 覆盖结果" MODIFIED="1684825356565" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/游戏" />
<SUITE FILE_PATH="coverage/tuoheng_alg$wraps.coverage" NAME="wraps 覆盖结果" MODIFIED="1684913804419" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" />
<SUITE FILE_PATH="coverage/tuoheng_alg$CpuUtils.coverage" NAME="CpuUtils 覆盖结果" MODIFIED="1686972304076" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg12.coverage" NAME="ffmpeg12 覆盖结果" MODIFIED="1675391366890" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" />
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__2_.coverage" NAME="Test (2) 覆盖结果" MODIFIED="1681796501563" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/路径" />
<SUITE FILE_PATH="coverage/tuoheng_alg$test1.coverage" NAME="test1 覆盖结果" MODIFIED="1687661266628" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/内存优化/slots" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ossdemo.coverage" NAME="ossdemo 覆盖结果" MODIFIED="1681715255761" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" />
<SUITE FILE_PATH="coverage/tuoheng_alg$Counter.coverage" NAME="Counter 覆盖结果" MODIFIED="1684894898737" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" />
<SUITE FILE_PATH="coverage/tuoheng_alg$test__1_.coverage" NAME="test (1) 覆盖结果" MODIFIED="1687056062763" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/内存优化/slots" />
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" />
<SUITE FILE_PATH="coverage/tuoheng_alg$singledispatch.coverage" NAME="singledispatch 覆盖结果" MODIFIED="1684912905741" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" />
<SUITE FILE_PATH="coverage/tuoheng_alg___$test.coverage" NAME="test 覆盖结果" MODIFIED="1668577200259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/while" />
<SUITE FILE_PATH="coverage/tuoheng_alg$editImage.coverage" NAME="editImage 覆盖结果" MODIFIED="1678348350574" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/editimage" />
<SUITE FILE_PATH="coverage/tuoheng_alg$2.coverage" NAME="协程2 覆盖结果" MODIFIED="1668066168428" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng/algSch/test/协程/" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ImgBaiduSdk.coverage" NAME="ImgBaiduSdk 覆盖结果" MODIFIED="1678355024003" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo2.coverage" NAME="demo2 覆盖结果" MODIFIED="1684808407865" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ChainMap.coverage" NAME="ChainMap 覆盖结果" MODIFIED="1684905474944" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" />
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master 覆盖结果" MODIFIED="1686926216806" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/tuoheng_alg$IntelligentRecognitionProcess.coverage" NAME="IntelligentRecognitionProcess 覆盖结果" MODIFIED="1682651444560" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/concurrency" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo3.coverage" NAME="demo3 覆盖结果" MODIFIED="1684809071819" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" />
<SUITE FILE_PATH="coverage/tuoheng_alg$test.coverage" NAME="test 覆盖结果" MODIFIED="1686930120727" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/集合" />
<SUITE FILE_PATH="coverage/tuoheng_alg$Test.coverage" NAME="Test 覆盖结果" MODIFIED="1689663111360" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" />
<SUITE FILE_PATH="coverage/tuoheng_alg$csv_test.coverage" NAME="csv_test 覆盖结果" MODIFIED="1687000802518" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" />
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665738045603" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/DATA/chenyukun/algSch/test/" />
<SUITE FILE_PATH="coverage/tuoheng_alg$test2.coverage" NAME="test2 覆盖结果" MODIFIED="1669178077956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" />
<SUITE FILE_PATH="coverage/tuoheng_alg$csv.coverage" NAME="csv 覆盖结果" MODIFIED="1685331143094" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" />
<SUITE FILE_PATH="coverage/tuoheng_alg$aa.coverage" NAME="aa 覆盖结果" MODIFIED="1684461916527" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" />
<SUITE FILE_PATH="coverage/tuoheng_alg$cmp_to_key.coverage" NAME="cmp_to_key 覆盖结果" MODIFIED="1684910406140" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" />
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils__1_.coverage" NAME="KafkaUtils (1) Coverage Results" MODIFIED="1663464961001" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" />
<SUITE FILE_PATH="coverage/tuoheng_alg$__init____1_.coverage" NAME="__init__ (1) 覆盖结果" MODIFIED="1684918690445" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/偏函数" />
<SUITE FILE_PATH="coverage/tuoheng_alg$voddemo.coverage" NAME="voddemo 覆盖结果" MODIFIED="1681722102430" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/aliyun" />
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1668522825199" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" />
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1668437822632" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch/test/kafka" />
<SUITE FILE_PATH="coverage/tuoheng_alg$re.coverage" NAME="re 覆盖结果" MODIFIED="1684221962919" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/正则" />
<SUITE FILE_PATH="coverage/tuoheng_alg$deque.coverage" NAME="deque 覆盖结果" MODIFIED="1684896079231" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" />
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start__1_.coverage" NAME="producer_start (1) 覆盖结果" MODIFIED="1665832569996" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg___$ffmpeg11.coverage" NAME="ffmpeg11 覆盖结果" MODIFIED="1668410004435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" />
<SUITE FILE_PATH="coverage/tuoheng_alg$__init__.coverage" NAME="__init__ 覆盖结果" MODIFIED="1686535860174" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo.coverage" NAME="demo 覆盖结果" MODIFIED="1686927940237" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" />
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1670999187123" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" />
<SUITE FILE_PATH="coverage/tuoheng_alg$test__3_.coverage" NAME="test (3) 覆盖结果" MODIFIED="1686902851380" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/内存优化/slots" />
<SUITE FILE_PATH="coverage/tuoheng_alg$numpy_test.coverage" NAME="numpy_test 覆盖结果" MODIFIED="1684205019028" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/numpy" />
<SUITE FILE_PATH="coverage/tuoheng_alg$namedtuple.coverage" NAME="namedtuple 覆盖结果" MODIFIED="1684898422076" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" />
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="冒泡 覆盖结果" MODIFIED="1685368101589" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/算法" />
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$gputest.coverage" NAME="gputest 覆盖结果" MODIFIED="1681950938970" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/gpu" />
<SUITE FILE_PATH="coverage/tuoheng_alg___$3.coverage" NAME="协程3 覆盖结果" MODIFIED="1668147029048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" />
<SUITE FILE_PATH="coverage/tuoheng_alg$AliyunSdk.coverage" NAME="AliyunSdk 覆盖结果" MODIFIED="1683803902993" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="全局变量1 覆盖结果" MODIFIED="1685322476342" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/语法" />
<SUITE FILE_PATH="coverage/tuoheng_alg$csv_test__1_.coverage" NAME="csv_test (1) 覆盖结果" MODIFIED="1685331476413" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/读写" />
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$5.coverage" NAME="视频添加图片水印5 Coverage Results" MODIFIED="1661905982885" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$read.coverage" NAME="read Coverage Results" MODIFIED="1663640070956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$cv2test1__1_.coverage" NAME="cv2test1 覆盖结果" MODIFIED="1665820653649" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" />
<SUITE FILE_PATH="coverage/tuoheng_alg$TimeUtils.coverage" NAME="TimeUtils Coverage Results" MODIFIED="1661222768678" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" />
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start1.coverage" NAME="producer_start1 覆盖结果" MODIFIED="1671428635702" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo5.coverage" NAME="demo5 覆盖结果" MODIFIED="1684810002359" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/元类" />
<SUITE FILE_PATH="coverage/tuoheng_alg$demo__1_.coverage" NAME="demo (1) 覆盖结果" MODIFIED="1685086704735" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/语法/for" />
<SUITE FILE_PATH="coverage/tuoheng_alg___$producer_stop.coverage" NAME="producer_stop 覆盖结果" MODIFIED="1668522920533" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/thsw/chenyukun/algSch" />
<SUITE FILE_PATH="coverage/tuoheng_alg$Test__1_.coverage" NAME="Test (1) 覆盖结果" MODIFIED="1683865962957" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/线程" />
<SUITE FILE_PATH="coverage/tuoheng_alg$pa.coverage" NAME="pa 覆盖结果" MODIFIED="1684217734590" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/pachong" />
<SUITE FILE_PATH="coverage/tuoheng_alg$defaultdict.coverage" NAME="defaultdict 覆盖结果" MODIFIED="1684900122612" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/collections" />
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg13.coverage" NAME="ffmpeg13 覆盖结果" MODIFIED="1675394160900" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" />
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" />
<SUITE FILE_PATH="coverage/tuoheng_alg$test__2_.coverage" NAME="test (2) 覆盖结果" MODIFIED="1686824265048" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/str" />
</component>
</project>

+ 0
- 0
__init__.py


+ 0
- 45
common/Constant.py

@@ -1,45 +0,0 @@
# -*- coding: utf-8 -*-
# Configuration file name
APPLICATION_CONFIG = "dsp_application.json"
# Encoding
UTF_8 = "utf-8"

# File read mode
R = 'r'
ON_OR = "_on_or_"
ON_AI = "_on_ai_"
MP4 = ".mp4"
# Initial progress
init_progess = "0.0000"
# Progress at 100%
success_progess = "1.0000"

# Width limit for downscaling each pulled frame: frames wider than 1400 px are halved, narrower frames are left unchanged
width = 1400

COLOR = (
[0, 0, 255],
[255, 0, 0],
[211, 0, 148],
[0, 127, 0],
[0, 69, 255],
[0, 255, 0],
[255, 0, 255],
[0, 0, 127],
[127, 0, 255],
[255, 129, 0],
[139, 139, 0],
[255, 255, 0],
[127, 255, 0],
[0, 127, 255],
[0, 255, 127],
[255, 127, 255],
[8, 101, 139],
[171, 130, 255],
[139, 112, 74],
[205, 205, 180])

ONLINE = "online"
OFFLINE = "offline"
PHOTO = "photo"
RECORDING = "recording"
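
The width constant above encodes the downscaling rule spelled out in its comment: pulled frames wider than 1400 px are halved, narrower frames pass through unchanged. A minimal sketch of that rule (the helper name and the use of OpenCV are assumptions for illustration, not part of the deleted module):

import cv2  # assumed here only for the resize call

def shrink_frame(frame, limit=1400):
    # Halve frames wider than the limit; leave narrower frames unchanged.
    h, w = frame.shape[:2]
    if w > limit:
        return cv2.resize(frame, (w // 2, h // 2))
    return frame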

+ 0
- 256
common/RequestSchema.py

@@ -1,256 +0,0 @@
# -*- coding: utf-8 -*-

model = {
'type': 'list',
'required': True,
'nullable': False,
'minlength': 1,
'maxlength': 3,
'schema': {
'type': 'dict',
'required': True,
'schema': {
'code': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'dependencies': 'categories',
'regex': r'^[a-zA-Z0-9]{1,255}$'
},
'categories': {
'type': 'list',
'required': True,
'dependencies': 'code',
'minlength': 1,
'schema': {
'type': 'dict',
'required': True,
'schema': {
'id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,255}$'},
'config': {
'type': 'dict',
'required': False,
'dependencies': 'id',
}
}
}
}
}
}
}

# Online start parameter validation
ONLINE_START_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
},
'pull_url': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'maxlength': 255
},
'push_url': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 255
},
'results_base_dir': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{0,36}$'
},
'models': model
}
# Online stop parameter validation
ONLINE_STOP_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
}
}

# Offline start parameter validation
OFFLINE_START_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
},
'original_url': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 255
},
'push_url': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 255
},
'original_type': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 255
},
'results_base_dir': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'models': model
}

# Offline stop parameter validation
OFFLINE_STOP_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
}
}

# Image start parameter validation
IMAGE_START_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
},
'image_urls': {
'type': 'list',
'required': True,
'minlength': 1,
'schema': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 5000
}
},
'results_base_dir': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'models': model
}

# Recording start parameter validation
RECORDING_START_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
},
'pull_url': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 255
},
'push_url': {
'type': 'string',
'required': True,
'nullable': False,
'empty': False,
'maxlength': 255
}
}

# Recording stop parameter validation
RECORDING_STOP_SCHEMA = {
'request_id': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
'command': {
'type': 'string',
'required': True,
'empty': False,
'nullable': False,
'allowed': ['start', 'stop']
}
}
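
These dictionaries follow the Cerberus schema format (type, required, empty, nullable, regex, allowed, dependencies, nested schema). Assuming the service validated incoming task messages with a cerberus Validator, which this diff does not show, a minimal sketch:

from cerberus import Validator

from common.RequestSchema import ONLINE_STOP_SCHEMA  # module removed by this commit

def validate_online_stop(message):
    # Returns (is_valid, errors) for an online "stop" request.
    v = Validator(ONLINE_STOP_SCHEMA)
    return v.validate(message), v.errors

ok, errors = validate_online_stop({"request_id": "abc123", "command": "stop"})  # ok is True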

+ 0
- 404
common/YmlConstant.py

@@ -1,404 +0,0 @@
# -*- coding: utf-8 -*-
BASE_DIR = 'base_dir'
GPU_CODES = ['3090', '2080', '4090', 'A10']
GPU_NAME = 'gpu_name'
GPU_2080 = '2080'
GPU_2080_Ti = '2080Ti'
KAFKA_ACKS = "acks"
KAFKA_RETRIES = "retries"
KAFKA_LINGER_MS = "linger_ms"
KAFKA_RETRY_BACKOFF_MS = "retry_backoff_ms"
KAFKA_MAX_IN_FLIGHT_REQUESTS = "max_in_flight_requests_per_connection"
KAFKA_CLIENT_ID = "client_id"
KAFKA_GROUP_ID = "group_id"
KAFKA_AUTO_OFFSET_RESET = "auto_offset_reset"
KAFKA_ENABLE_AUTO_COMMIT = "enable_auto_commit"
KAFKA_MAX_POLL_RECORDS = "max_poll_records"
REQUEST_IDREQUEST_ID = "request_id"
FEEDBACK = "feedback"
RECORDING = "recording"
FBQUEUE = "fbQueue"
CONTEXT = "context"
MODEL = 'model'
MSG = "msg"
GPU_IDS = "gpu_ids"
ANALYSE_TYPE = "analyse_type"
COMMAND = "command"
START = "start"
STOP = "stop"
SERVICE = "service"
FRAME_SCORE = "frame_score"
PULL_URL = "pull_url"
PUSH_URL = "push_url"
ORIGINAL_URL = "original_url"
ORIGINAL_TYPE = "original_type"
IMAGE_URLS = "image_urls"
RESULTS_BASE_DIR = "results_base_dir"
MODELS = "models"
CODE = 'code'
CATEGORIES = "categories"
ID = 'id'
CONFIG = "config"
VIDEO = "video"
FILE_PATH = "file_path"
KAFKA = "kafka"
TOPIC = "topic"
DSP_ALG_ONLINE_TASKS_TOPIC = "dsp-alg-online-tasks-topic"
DSP_ALG_OFFLINE_TASKS_TOPIC = "dsp-alg-offline-tasks-topic"
DSP_ALG_IMAGE_TASKS_TOPI = "dsp-alg-image-tasks-topic"
DSP_RECORDING_TASKS_TOPI = "dsp-recording-task-topic"
DSP_ALG_RESULTS_TOPIC = "dsp-alg-results-topic"
DSP_RECORDING_RESULTS_TOPIC = "dsp-recording-result-topic"
DSP = "dsp"
ACTIVE = "active"
PRODUCER = "producer"
CONSUMER = "consumer"
BOOTSTRAP_SERVERS = "bootstrap_servers"
ALIYUN = "aliyun"
ACCESS_KEY = "access_key"
ACCESS_SECRET = "access_secret"
OSS = "oss"
ENDPOINT = "endpoint"
BUCKET = "bucket"
CONNECT_TIMEOUT = "connect_timeout"
VOD = "vod"
ECSREGIONID = "ecsRegionId"
CATEID = "CateId"
GPU = "gpu"
ORDER = "order"
LIMIT = "limit"
MAXLOAD = "maxLoad"
MAXMEMORY = "maxMemory"
INCLUDENAN = "includeNan"
EXCLUDEID = "excludeID"
EXCLUDEUUID = "excludeUUID"
BAIDU = "baidu"
VEHICLE = "vehicle"
APP_ID = "APP_ID"
API_KEY = "API_KEY"
SECRET_KEY = "SECRET_KEY"
PERSON = "person"
ORC = "orc"
LOG = 'log'
IS_VIDEO = "is_video"
IS_IMAGE = "is_image"
# Validation schema definition
SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
COMMAND: {
'type': 'string',
'required': True,
'allowed': [START, STOP]
},
PULL_URL: {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
PUSH_URL: {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
ORIGINAL_URL: {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
ORIGINAL_TYPE: {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
"logo_url": {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
IMAGE_URLS: {
'type': 'list',
'required': False,
'schema': {
'type': 'string',
'empty': False,
'maxlength': 5000
}
},
RESULTS_BASE_DIR: {
'type': 'string',
'required': False,
'nullable': True,
'regex': r'^[a-zA-Z0-9]{0,36}$'
},
MODELS: {
'type': 'list',
'required': False,
'nullable': True,
'schema': {
'type': 'dict',
'required': True,
'schema': {
CODE: {
'type': 'string',
'required': True,
'empty': False,
'dependencies': CATEGORIES,
'regex': r'^[a-zA-Z0-9]{1,255}$'
},
IS_VIDEO: {
'type': 'string',
'required': True,
'empty': False,
'dependencies': CODE,
'allowed': ["0", "1"]
},
IS_IMAGE: {
'type': 'string',
'required': True,
'empty': False,
'dependencies': CODE,
'allowed': ["0", "1"]
},
CATEGORIES: {
'type': 'list',
'required': True,
'dependencies': CODE,
'schema': {
'type': 'dict',
'required': True,
'schema': {
ID: {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{0,255}$'},
CONFIG: {
'type': 'dict',
'required': False,
'dependencies': ID,
}
}
}
}
}
}
}
}


def get_file_path(context):
return context[VIDEO][FILE_PATH]


def get_video_add_water(context):
return context[VIDEO]["video_add_water"]


def get_online_tasks_topic(context):
return context["kafka"]["topic"]["dsp-alg-online-tasks-topic"]


def get_offline_tasks_topic(context):
return context[KAFKA][TOPIC][DSP_ALG_OFFLINE_TASKS_TOPIC]


def get_image_tasks_topic(context):
return context[KAFKA][TOPIC][DSP_ALG_IMAGE_TASKS_TOPI]


def get_recording_tasks_topic(context):
return context[KAFKA][TOPIC][DSP_RECORDING_TASKS_TOPI]


def get_kafka_producer_config(context):
return context[KAFKA][context[DSP][ACTIVE]][PRODUCER]


def get_kafka_consumer_config(context):
return context[KAFKA][context[DSP][ACTIVE]][CONSUMER]


def get_kafka_bootstrap_servers(context):
return context[KAFKA][context[DSP][ACTIVE]][BOOTSTRAP_SERVERS]


def get_kafka_results_topic(context):
return context[KAFKA][TOPIC][DSP_ALG_RESULTS_TOPIC]


def get_kafka_recording_result_topic(context):
return context[KAFKA][TOPIC][DSP_RECORDING_RESULTS_TOPIC]


def get_aliyun_access_key(context):
return context[ALIYUN][ACCESS_KEY]


def get_aliyun_access_secret(context):
return context[ALIYUN][ACCESS_SECRET]


def get_aliyun_oss_endpoint(context):
return context[ALIYUN][OSS][ENDPOINT]


def get_aliyun_oss_bucket(context):
return context[ALIYUN][OSS][BUCKET]


def get_aliyun_oss_connect_timeout(context):
return context[ALIYUN][OSS][CONNECT_TIMEOUT]


def get_aliyun_vod_ecsRegionId(context):
return context[ALIYUN][VOD][ECSREGIONID]


def get_aliyun_vod_cateId(context):
return context[ALIYUN][VOD][context[DSP][ACTIVE]][CATEID]


def get_gpu_order(context):
return context[GPU][ORDER]


def get_gpu_limit(context):
return context[GPU][LIMIT]


def get_gpu_maxLoad(context):
return context[GPU][MAXLOAD]


def get_gpu_maxMemory(context):
return context[GPU][MAXMEMORY]


def get_gpu_includeNan(context):
return context[GPU][INCLUDENAN]


def get_gpu_excludeID(context):
return context[GPU][EXCLUDEID]


def get_gpu_excludeUUID(context):
return context[GPU][EXCLUDEUUID]


def get_baidu_vehicle_APP_ID(context):
return context[BAIDU][VEHICLE][APP_ID]


def get_baidu_vehicle_API_KEY(context):
return context[BAIDU][VEHICLE][API_KEY]


def get_baidu_vehicle_SECRET_KEY(context):
return context[BAIDU][VEHICLE][SECRET_KEY]


def get_baidu_person_APP_ID(context):
return context[BAIDU][PERSON][APP_ID]


def get_baidu_person_API_KEY(context):
return context[BAIDU][PERSON][API_KEY]


def get_baidu_person_SECRET_KEY(context):
return context[BAIDU][PERSON][SECRET_KEY]


def get_baidu_ocr_APP_ID(context):
return context[BAIDU][ORC][APP_ID]


def get_baidu_ocr_API_KEY(context):
return context[BAIDU][ORC][API_KEY]


def get_baidu_ocr_SECRET_KEY(context):
return context[BAIDU][ORC][SECRET_KEY]


def get_log_base_path(context):
return context[LOG]["base_path"]


def get_log_enable_file(context):
return context[LOG]["enable_file_log"]


def get_log_log_name(context):
return context[LOG]["log_name"]


def get_log_rotation(context):
return context[LOG]["rotation"]


def get_log_retention(context):
return context[LOG]["retention"]


def get_log_log_fmt(context):
return context[LOG]["log_fmt"]


def get_log_level(context):
return context[LOG]["level"]


def get_log_enqueue(context):
return context[LOG]["enqueue"]


def get_log_encoding(context):
return context[LOG]["encoding"]


def get_log_enable_stderr(context):
return context[LOG]["enable_stderr"]


CV2_PULL_STREAM_TIMEOUT = "cv2_pull_stream_timeout"
CV2_READ_STREAM_TIMEOUT = "cv2_read_stream_timeout"


def get_pull_stream_timeout(context):
return int(context[SERVICE][CV2_PULL_STREAM_TIMEOUT])


def get_read_stream_timeout(context):
return int(context[SERVICE][CV2_READ_STREAM_TIMEOUT])


def get_service_timeout(context):
return int(context[SERVICE]["timeout"])


FILTER = "filter"


def get_similarity(context):
return context[SERVICE][FILTER]["similarity"]


def get_picture_similarity(context):
return context[SERVICE][FILTER]["picture_similarity"]


def get_frame_step(context):
return int(context[SERVICE][FILTER]["frame_step"])
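
All of these getters simply index into a nested configuration dict. Assuming a context assembled from the dsp_*.json files deleted below (the dict here is a trimmed, illustrative subset), usage looks like:

from common import YmlConstant  # module removed by this commit

# Trimmed, illustrative context; the real one is loaded from the dsp_*.json config files.
context = {
    "dsp": {"active": "dev"},
    "kafka": {
        "topic": {"dsp-alg-results-topic": "dsp-alg-task-results"},
        "dev": {
            "bootstrap_servers": ["192.168.11.13:9092"],
            "producer": {"acks": -1, "retries": 3},
        },
    },
    "service": {"filter": {"frame_step": 160}},
}

YmlConstant.get_kafka_producer_config(context)  # -> {"acks": -1, "retries": 3}
YmlConstant.get_kafka_results_topic(context)    # -> "dsp-alg-task-results"
YmlConstant.get_frame_step(context)             # -> 160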


+ 0
- 0
common/__init__.py


BIN
common/__pycache__/Constant.cpython-310.pyc


BIN
common/__pycache__/Constant.cpython-38.pyc


BIN
common/__pycache__/YmlConstant.cpython-38.pyc


BIN
common/__pycache__/__init__.cpython-310.pyc


BIN
common/__pycache__/__init__.cpython-38.pyc


+ 0
- 30
concurrency/CommonThread.py

@@ -1,30 +0,0 @@
from threading import Thread
from loguru import logger


class Common(Thread):

__slots__ = [
'__context',
'__func',
'__param1',
'__param2',
'__result',
]

def __init__(self, context, func, param1, param2):
super(Common, self).__init__()
self.__context = context
self.__func = func
self.__param1 = param1
self.__param2 = param2
self.__result = None

def get_result(self):
self.join(60 * 60 * 12)
return self.__result

def run(self):
logger.info("开始执行线程!")
self.__result = self.__func(self.__param1, self.__param2)
logger.info("线程停止完成!")

+ 0
- 62
concurrency/FeedbackThread.py

@@ -1,62 +0,0 @@
# -*- coding: utf-8 -*-
import time
from threading import Thread
from traceback import format_exc

from loguru import logger

from util.KafkaUtils import CustomerKafkaProducer

'''
Feedback thread
'''


class FeedbackThread(Thread):
__slots__ = [
'__fbQueue',
'__context'
]

def __init__(self, fbQueue, context):
super().__init__()
self.__fbQueue = fbQueue
self.__context = context

'''
Blocking read of the next feedback message
'''

def getFeedback(self):
return self.__fbQueue.get()

def run(self):
logger.info("启动问题反馈线程")
kafkaProducer = CustomerKafkaProducer(self.__context)
dsp_alg_results_topic = self.__context["kafka"]["topic"]["dsp-alg-results-topic"]
dsp_recording_result_topic = self.__context["kafka"]["topic"]["dsp-recording-result-topic"]
while True:
logger.info("问题反馈发送消息循环")
feedback = None
recording = None
try:
fb = self.getFeedback()
if fb is not None and len(fb) > 0:
feedback = fb.get("feedback")
recording = fb.get("recording")
if feedback is not None and len(feedback) > 0:
kafkaProducer.sender(dsp_alg_results_topic, feedback["request_id"], feedback, 1)
feedback = None
if recording is not None and len(recording) > 0:
kafkaProducer.sender(dsp_recording_result_topic, recording["request_id"], recording, 1)
recording = None
else:
time.sleep(1)
except Exception:
if feedback and feedback.get("request_id"):
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), feedback.get("request_id"))
elif recording and recording.get("request_id"):
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), recording.get("request_id"))
else:
logger.error("问题反馈异常:{}", format_exc())
logger.info("问题反馈线程执行完成")

+ 0
- 202
concurrency/FileUploadThread.py

@@ -1,202 +0,0 @@
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Thread
from time import sleep
from traceback import format_exc

from loguru import logger
import cv2

from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.AliyunSdk import AliyunOssSdk
from util import TimeUtils, ImageUtils
from entity import FeedBack
from enums.AnalysisStatusEnum import AnalysisStatus
from util.PlotsUtils import draw_painting_joint


class FileUpload(Thread):
__slots__ = ('_fbQueue', '_context', '_imageQueue', '_analyse_type', '_msg', '_base_dir')

def __init__(self, fbQueue, context, msg, imageQueue, analyse_type, base_dir):
super().__init__()
self._fbQueue = fbQueue
self._context = context
self._imageQueue = imageQueue
self._analyse_type = analyse_type
self._msg = msg
self._base_dir = base_dir


'''
Image upload thread
'''


class ImageFileUpload(FileUpload):
__slots__ = ()

def run(self):
requestId = self._msg.get("request_id")
logger.info("启动图片上传线程, requestId:{}", requestId)
# Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(self._base_dir, requestId)
aliyunOssSdk.get_oss_bucket()
high_score_image = {}
similarity = self._context["service"]["filter"]["similarity"]
picture_similarity = bool(self._context["service"]["filter"]["picture_similarity"])
frame_step = int(self._context["service"]["filter"]["frame_step"])
image_queue = self._imageQueue
analyse_type = self._analyse_type
results_base_dir = self._msg.get("results_base_dir")
fb_queue = self._fbQueue
with ThreadPoolExecutor(max_workers=5) as t:
try:
while True:
try:
# Get a message from the queue
image_msg = image_queue.get(timeout=43200)
if image_msg is not None and len(image_msg) > 0:
image_dict = image_msg.get("image")
command = image_msg.get("command")
if command == "stop":
break
if image_dict is not None and len(image_dict) > 0:
image_result = handle_image(high_score_image, image_dict, picture_similarity,
similarity, frame_step, analyse_type)
del image_dict
if image_result:
# Encode the frame images as JPEG
task = []
or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame"))
or_image_name = build_image_name(str(image_result.get("current_frame")),
str(image_result.get("last_frame")),
image_result.get("mode_service"),
"OR", "O", results_base_dir, requestId)
or_future = t.submit(aliyunOssSdk.sync_upload_file, or_image_name,
or_image.tobytes())
task.append(or_future)
model_info_list = image_result.get("model_info")
msg_list = []
for model_info in model_info_list:
ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame"))
ai_image_name = build_image_name(str(image_result.get("current_frame")),
str(image_result.get("last_frame")),
image_result.get("mode_service"),
"AI", model_info.get("detectTargetCode"),
results_base_dir, requestId)
ai_future = t.submit(aliyunOssSdk.sync_upload_file, ai_image_name,
ai_image.tobytes())
task.append(ai_future)
msg_list.append(
{"feedback": FeedBack.message_feedback(requestId,
AnalysisStatus.RUNNING.value,
analyse_type, "", "",
image_result.get("progress"),
or_image_name,
ai_image_name,
model_info.get('modelCode'),
model_info.get('detectTargetCode'),
TimeUtils.now_date_to_str())})
for thread_result in as_completed(task):
thread_result.result()
for msg in msg_list:
sendResult(fb_queue, msg, requestId)
else:
sleep(1)
except Exception:
logger.error("图片上传异常:{}, requestId:{}", format_exc(), requestId)
finally:
high_score_image.clear()
logger.info("停止图片上传线程, requestId:{}", requestId)


def sendResult(fbQueue, result, requestId):
try:
fbQueue.put(result, timeout=10)
except Exception:
logger.error("添加反馈到队列超时异常:{}, requestId:{}", format_exc(), requestId)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])


def build_image_name(current_frame, last_frame, mode_type, image_type, target, results_base_dir, requestId):
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}" \
"-{target}-{requestId}_{image_type}.jpg"
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
image_name = image_format.format(
base_dir=results_base_dir,
time_now=time_now,
current_frame=current_frame,
last_frame=last_frame,
random_num=random_num,
mode_type=mode_type,
target=target,
requestId=requestId,
image_type=image_type)
return image_name


def handle_image(high_score_image, frame_all, picture_similarity, similarity, frame_step, analyse_type):
flag = True
if picture_similarity and len(high_score_image) > 0:
hash1 = ImageUtils.dHash(high_score_image.get("or_frame"))
hash2 = ImageUtils.dHash(frame_all[0][1])
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if len(high_score_image) > 0:
diff_frame_num = frame_all[0][2] - high_score_image.get("current_frame")
if diff_frame_num < frame_step:
flag = False
# if diff_frame_num >= frame_step:
# hash1 = ImageUtils.dHash(high_score_image.get("or_frame"))
# hash2 = ImageUtils.dHash(frame_all[0][1])
# dist = ImageUtils.Hamming_distance(hash1, hash2)
# similarity_1 = 1 - dist * 1.0 / 64
# if similarity_1 != 1:
# flag = True
det_result = frame_all[1]
model_info = []
if flag and det_result is not None and len(det_result) > 0:
'''
det_xywh:{
'code':{
1: [[detect_targets_code, box, score, label_array, color]]
}
}
Model code: modelCode
Detection target: detectTargetCode
'''
# Parse the detections grouped by model code
for modelCode in list(det_result.keys()):
# Detection targets under this model code
det_info = det_result.get(modelCode)
if det_info is not None and len(det_info) > 0:
for detectTargetCode in list(det_info.keys()):
target_list = det_info.get(detectTargetCode)
if target_list is not None and len(target_list) > 0:
# orFrame = loads(dumps(frame_all.get("frame")))
orFrame = frame_all[0][1].copy()
for target in target_list:
draw_painting_joint(target[1], orFrame, target[3], target[2], target[4], "leftTop")
model_info.append({
"modelCode": str(modelCode),
"detectTargetCode": str(detectTargetCode),
"frame": orFrame
})
if len(model_info) > 0:
high_score_image["or_frame"] = frame_all[0][1]
high_score_image["current_frame"] = frame_all[0][2]
image_result = {
"or_frame": frame_all[0][1],
"model_info": model_info,
"current_frame": frame_all[0][2],
"last_frame": frame_all[0][2] + frame_step,
"progress": "",
"mode_service": analyse_type,
}
return image_result
return None
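
handle_image drops a candidate frame when it is either too similar to the last kept frame (difference-hash similarity at or above the configured threshold) or fewer than frame_step frames away from it. A self-contained sketch of that similarity test, using a simplified dHash in place of the project's ImageUtils helpers (an approximation for illustration, not the deleted implementation):

import cv2
import numpy as np

def dhash(image, hash_size=8):
    # Difference hash: compare adjacent pixels of a downscaled grayscale image.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (hash_size + 1, hash_size))
    return (small[:, 1:] > small[:, :-1]).flatten()

def too_similar(prev_frame, cur_frame, similarity=0.65):
    # Same rule as handle_image: similarity = 1 - hamming_distance / 64
    dist = int(np.count_nonzero(dhash(prev_frame) != dhash(cur_frame)))
    return (1 - dist / 64) >= similarity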

+ 0
- 84
concurrency/HeartbeatThread.py

@@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-
from threading import Thread
import time
from traceback import format_exc

from loguru import logger

from common.Constant import init_progess
from common.YmlConstant import FEEDBACK
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus
from entity.FeedBack import message_feedback


class Heartbeat(Thread):
__slots__ = ('__fbQueue', '__hbQueue', '__request_id', '__analyse_type', '__progress')

def __init__(self, fbQueue, hbQueue, request_id, analyse_type):
super().__init__()
self.__fbQueue = fbQueue
self.__hbQueue = hbQueue
self.__request_id = request_id
self.__analyse_type = analyse_type
self.__progress = init_progess

def getHbQueue(self):
eBody = None
try:
eBody = self.__hbQueue.get(block=False)
except Exception as e:
pass
return eBody

# Push the execution result
def sendResult(self, result):
try:
self.__fbQueue.put(result, timeout=10)
except Exception:
logger.error("添加反馈到队列超时异常:{}, requestId:{}", format_exc(), self.__request_id)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])

def sendHbQueue(self, result):
try:
self.__hbQueue.put(result, timeout=10)
except Exception:
logger.error("添加心跳到队列超时异常:{}, requestId:{}", format_exc(), self.__request_id)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])

def sendhbMessage(self, analysisStatus):
self.sendResult({FEEDBACK: message_feedback(self.__request_id,
analysisStatus,
self.__analyse_type,
progress=self.__progress,
analyse_time=TimeUtils.now_date_to_str())})

def run(self):
try:
logger.info("开始启动心跳线程!requestId:{}", self.__request_id)
hb_init_num = 0
start_time = time.time()
while True:
if time.time() - start_time > 43200:
logger.info("心跳线程运行超时!!!!requestId:{}", self.__request_id)
break
time.sleep(3)
hb_msg = self.getHbQueue()
if hb_msg is not None and len(hb_msg) > 0:
command = hb_msg.get("command")
hb_value = hb_msg.get("hb_value")
if 'stop' == command:
logger.info("开始终止心跳线程, requestId:{}", self.__request_id)
break
if hb_value is not None:
self.__progress = hb_value
if hb_init_num % 30 == 0:
self.sendhbMessage(AnalysisStatus.RUNNING.value)
hb_init_num += 3
except Exception:
logger.error("心跳线程异常:{}, requestId:{}", format_exc(), self.__request_id)
logger.info("心跳线程停止完成!requestId:{}", self.__request_id)

+ 0
- 1520
concurrency/IntelligentRecognitionProcess.py
File diff suppressed because it is too large


+ 0
- 122
concurrency/PullStreamThread.py

@@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
import time
from queue import Queue
from threading import Thread

from loguru import logger

from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util import GPUtils
from util.Cv2Utils import Cv2Util


class PullStreamThread(Thread):
def __init__(self, msg, content, pullQueue, fbQueue):
super().__init__()
self.command = Queue()
self.msg = msg
self.content = content
self.pullQueue = pullQueue
self.fbQueue = fbQueue
self.recording_pull_stream_timeout = int(self.content["service"]["recording_pull_stream_timeout"])

def getCommand(self):
eBody = None
try:
eBody = self.command.get(block=False)
except Exception as e:
pass
return eBody

def sendCommand(self, result):
self.command.put(result)

def sendPullQueue(self, result):
self.pullQueue.put(result)


class RecordingPullStreamThread(PullStreamThread):

def run(self):
cv2tool = None
try:
logger.info("录屏任务, 开启拉流, requestId:{}", self.msg.get("request_id"))
gpu_ids = GPUtils.get_gpu_ids(self.content)
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"), content=self.content,
gpu_ids=gpu_ids, log=logger)
cv2_init_num = 1
init_pull_num = 1
start_time = time.time()
start_time_2 = time.time()
concurrent_frame = 1
cv2tool.get_recording_video_info()
while True:
body = self.getCommand()
if body is not None and len(body) > 0:
if 'stop' == body.get("command"):
logger.info("录屏任务, 拉流线程停止中, reuqestId:{}", self.msg.get("request_id"))
self.sendPullQueue({"status": "2"})
break
if self.pullQueue.full():
time.sleep(0.1)
continue
# Check whether the video info and the pull-stream object are available
if cv2tool.checkconfig():
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
pull_stream_init_timeout = time.time() - start_time
if pull_stream_init_timeout > self.recording_pull_stream_timeout:
logger.info("录屏拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout,
self.msg.get("request_id"))
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(0.5)
cv2tool.get_recording_video_info()
continue
start_time = time.time()
cv2_init_num = 1
frame = cv2tool.recording_read()
if frame is None:
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id"))
pull_stream_read_timeout = time.time() - start_time_2
if pull_stream_read_timeout > self.recording_pull_stream_timeout:
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
self.msg.get("request_id"))
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
if cv2tool.all_frames is not None and len(cv2tool.all_frames) > 0:
if concurrent_frame < cv2tool.all_frames - 100:
logger.info("流异常结束:requestId: {}", self.msg.get("request_id"))
self.sendPullQueue({"status": "3"})
break
logger.info("拉流线程结束, requestId: {}", self.msg.get("request_id"))
self.sendPullQueue({"status": "2"})
break
init_pull_num += 1
time.sleep(0.5)
cv2tool.recording_pull_p()
continue
init_pull_num = 1
start_time_2 = time.time()
self.sendPullQueue({"status": "4",
"frame": frame,
"cct_frame": cv2tool.current_frame,
"width": cv2tool.width,
"height": cv2tool.height,
"fps": cv2tool.fps,
"all_frame": cv2tool.all_frames})
concurrent_frame += 1
except ServiceException as s:
self.sendPullQueue({"status": "1", "error": {"code": s.code, "msg": s.msg}})
except Exception as e:
logger.exception("实时拉流异常: {}, requestId:{}", e, self.msg.get("request_id"))
self.sendPullQueue({"status": "1", "error": {"code": ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
"msg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1]}})
finally:
if cv2tool:
cv2tool.close()
logger.info("录屏拉流线程结束, requestId: {}", self.msg.get("request_id"))




+ 0
- 406
concurrency/PullVideoStreamProcess.py

@@ -1,406 +0,0 @@
# -*- coding: utf-8 -*-
import os
import time
from multiprocessing import Process, Queue
from os import getpid
from traceback import format_exc

import psutil
from loguru import logger

from util.LogUtils import init_log
from concurrency.FileUploadThread import ImageFileUpload
from concurrency.HeartbeatThread import Heartbeat
from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p
from util.TimeUtils import now_date_to_str


class PullVideoStreamProcess(Process):
__slots__ = ('_command', '_msg', '_context', '_pullQueue', '_fbQueue', '_hbQueue', '_imageQueue', '_analyse_type',
"_base_dir")

def __init__(self, msg, context, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type, base_dir):
super().__init__()
self._command = Queue()
self._msg = msg
self._context = context
self._pullQueue = pullQueue
self._fbQueue = fbQueue
self._hbQueue = hbQueue
self._imageQueue = imageQueue
self._analyse_type = analyse_type
self._base_dir = base_dir

def sendCommand(self, result, enable_ex=True):
try:
self._command.put(result, timeout=10)
except Exception:
logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), self._msg.get("request_id"))
if enable_ex:
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
def start_heartbeat(self, hb):
if hb is None:
hb = Heartbeat(self._fbQueue, self._hbQueue, self._msg.get("request_id"), self._analyse_type)
hb.setDaemon(True)
hb.start()
start_time = time.time()
retry_count = 0
while True:
if hb.is_alive():
return hb
retry_count += 1
if retry_count > 10:
logger.error("心跳线程异常重试失败!requestId:{}", self._msg.get("request_id"))
del hb
raise Exception("心跳线程启动失败")
if not hb.is_alive():
logger.warning("心跳线程异常等待中, requestId:{}", self._msg.get("request_id"))
if time.time() - start_time <= 3:
time.sleep(0.5)
continue
if time.time() - start_time > 3:
logger.warning("心跳线程异常重启中, requestId:{}", self._msg.get("request_id"))
hb = Heartbeat(self._fbQueue, self._hbQueue, self._msg.get("request_id"), self._analyse_type)
hb.setDaemon(True)
hb.start()
continue

def start_File_upload(self, imageFileUpload):
if imageFileUpload is None:
imageFileUpload = ImageFileUpload(self._fbQueue, self._context, self._msg, self._imageQueue,
self._analyse_type, self._base_dir)
imageFileUpload.setDaemon(True)
imageFileUpload.start()
start_time = time.time()
retry_count = 0
while True:
if imageFileUpload.is_alive():
return imageFileUpload
retry_count += 1
if retry_count > 10:
logger.error("图片上传线程异常重试失败!requestId:{}", self._msg.get("request_id"))
raise Exception("图片线程启动失败")
if not imageFileUpload.is_alive():
logger.warning("图片上传线程异常等待中, requestId:{}", self._msg.get("request_id"))
if time.time() - start_time <= 3:
time.sleep(0.5)
continue
if time.time() - start_time > 3:
logger.warning("图片上传线程异常重启中, requestId:{}", self._msg.get("request_id"))
imageFileUpload = ImageFileUpload(self._fbQueue, self._context, self._msg, self._imageQueue,
self._analyse_type, self._base_dir)
imageFileUpload.setDaemon(True)
imageFileUpload.start()
start_time = time.time()
continue


def putQueue(queue, result, requestId, enable_ex=True):
try:
queue.put(result, timeout=10)
except Exception:
logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), requestId)
if enable_ex:
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])


def getNoBlockQueue(queue):
eBody = None
try:
eBody = queue.get(block=False)
except Exception:
pass
return eBody

def check(start_time, service_timeout, requestId, imageFileUpload, hb):
create_task_time = time.time() - start_time
if create_task_time > service_timeout:
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, requestId)
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
# Check that the image upload thread is still running
if imageFileUpload is not None and not imageFileUpload.is_alive():
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", requestId)
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")
# Check that the heartbeat thread is still running
if hb is not None and not hb.is_alive():
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, requestId:{}", requestId)
raise Exception("未检测到心跳线程活动,心跳线程可能出现异常!")


class OnlinePullVideoStreamProcess(PullVideoStreamProcess):
__slots__ = ()

def run(self):
pull_p = None
imageFileUpload = None
hb = None
requestId = '1'
pull_queue = self._pullQueue
fb_queue = self._fbQueue
image_queue = self._imageQueue
hb_queue = self._hbQueue
try:
base_dir = self._base_dir
# Initialize the logging framework
init_log(base_dir)
requestId = self._msg.get("request_id")
pull_url = self._msg.get("pull_url")

logger.info("开启视频拉流进程, requestId:{}", requestId)

pull_stream_timeout = int(self._context["service"]["cv2_pull_stream_timeout"])
read_stream_timeout = int(self._context["service"]["cv2_read_stream_timeout"])
service_timeout = int(self._context["service"]["timeout"])
command_queue = self._command

# Video-related state
width = None
height = None
width_height_3 = None
all_frames = 0
w_2 = None
h_2 = None

# Start the image upload thread
imageFileUpload = self.start_File_upload(imageFileUpload)
# Start the heartbeat thread
hb = self.start_heartbeat(hb)
# Initialize the pull-stream state
cv2_init_num = 1
init_pull_num = 1
start_time = time.time()
pull_stream_start_time = time.time()
pull_stream_read_start_time = time.time()
kill_parent_process_timeout = time.time()
concurrent_frame = 1
stop_pull_stream_step = False
while True:
# Check for task timeout and that the heartbeat and image upload threads are still alive
check(start_time, service_timeout, requestId, imageFileUpload, hb)
# Fetch the next command, if any
command = getNoBlockQueue(command_queue)
if command is not None:
if 'stop_pull_stream' == command.get("command"):
putQueue(pull_queue, ("9",), requestId) # 9: stop pulling the stream
stop_pull_stream_step = True
clear_pull_p(pull_p, requestId)
continue
# Stop the image upload and heartbeat threads
if 'stop_image_hb' == command.get("command"):
putQueue(image_queue, {"command": "stop"}, requestId)
putQueue(hb_queue, {"command": "stop"}, requestId)
clear_pull_p(pull_p, requestId)
imageFileUpload.join(60 * 3)
hb.join(60 * 3)
logger.error("图片线程停止完成, requestId:{}", requestId)
break
if stop_pull_stream_step:
time.sleep(1)
continue
# Check whether the video info and the pull-stream process are available
check_vide_result = check_video_stream(width, height)
if check_vide_result:
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, requestId)
pull_stream_init_timeout = time.time() - pull_stream_start_time
if pull_stream_init_timeout > pull_stream_timeout:
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, requestId)
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(1)
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, requestId)
continue
pull_stream_start_time = time.time()
cv2_init_num = 1
frame, pull_p, width, height, width_height_3 = pull_read_video_stream(pull_p, pull_url, width,
height, width_height_3, w_2, h_2,
requestId)
if frame is None:
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, requestId)
pull_stream_read_timeout = time.time() - pull_stream_read_start_time
if pull_stream_read_timeout > read_stream_timeout:
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
requestId)
putQueue(pull_queue, ("3",), requestId) # 3 超时
stop_pull_stream_step = True
clear_pull_p(pull_p, requestId)
continue
clear_pull_p(pull_p, requestId)
init_pull_num += 1
continue
init_pull_num = 1
pull_stream_read_start_time = time.time()
if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), requestId)
# If the stream keeps producing frames but the queue stays full, the parent process has probably died; wait 60 s and exit
if time.time() - kill_parent_process_timeout > 60:
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", requestId)
putQueue(fb_queue, {"feedback": message_feedback(requestId,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_CPU_RESOURCES.value[0],
ExceptionType.NO_CPU_RESOURCES.value[1],
analyse_time=now_date_to_str())},
requestId)
break
# logger.info("当前视频帧队列处理满队列状态, requestId: {}", requestId)
if psutil.Process(getpid()).ppid() == 1:
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", requestId)
putQueue(fb_queue, {"feedback": message_feedback(requestId,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_CPU_RESOURCES.value[0],
ExceptionType.NO_CPU_RESOURCES.value[1],
analyse_time=now_date_to_str())},
requestId)
break
continue
kill_parent_process_timeout = time.time()
putQueue(pull_queue, ("4", frame, concurrent_frame, w_2, h_2, all_frames), requestId)
concurrent_frame += 1
except ServiceException as s:
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), requestId)
putQueue(pull_queue, ("1", s.code, s.msg), requestId, enable_ex=False)
except Exception:
logger.error("实时拉流异常: {}, requestId:{}", format_exc(), requestId)
putQueue(pull_queue, ("1", ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), requestId, enable_ex=False)
finally:
clear_pull_p(pull_p, requestId)
if imageFileUpload:
putQueue(image_queue, {"command": "stop"}, requestId, enable_ex=False)
imageFileUpload.join(60 * 3)
if hb:
putQueue(hb_queue, {"command": "stop"}, requestId, enable_ex=False)
hb.join(60 * 3)
logger.info("实时拉流线程结束, requestId: {}", requestId)


class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
__slots__ = ()

def run(self):
pull_p = None
imageFileUpload = None
hb = None
requestId = '1'
pull_queue = self._pullQueue
fb_queue = self._fbQueue
image_queue = self._imageQueue
hb_queue = self._hbQueue
try:
base_dir = self._base_dir
init_log(base_dir)
requestId = self._msg.get("request_id")
pull_url = self._msg.get("original_url")
logger.info("开启离线视频拉流进程, requestId:{}", requestId)

service_timeout = int(self._context["service"]["timeout"])
command_queue = self._command

# Start the image upload thread
imageFileUpload = self.start_File_upload(imageFileUpload)
# Start the heartbeat thread
hb = self.start_heartbeat(hb)
cv2_init_num = 1
start_time = time.time()
concurrent_frame = 1
stop_pull_stream_step = False
kill_parent_process_timeout = time.time()
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, requestId)
while True:
check(start_time, service_timeout, requestId, imageFileUpload, hb)
command = getNoBlockQueue(command_queue)
if command is not None and len(command) > 0:
if 'stop_pull_stream' == command.get("command"):
putQueue(pull_queue, ("9",), requestId) # 9 停止拉流
stop_pull_stream_step = True
clear_pull_p(pull_p, requestId)
continue
if 'stop_image_hb' == command.get("command"):
putQueue(image_queue, {"command": "stop"}, requestId)
putQueue(hb_queue, {"command": "stop"}, requestId)
clear_pull_p(pull_p, requestId)
imageFileUpload.join(60 * 3)
hb.join(60 * 3)
logger.error("图片线程停止完成, requestId:{}", requestId)
break
if stop_pull_stream_step:
time.sleep(1)
continue
if pull_queue.full():
logger.info("当前视频帧队列处理满队列状态, requestId: {}", requestId)
# If the stream keeps producing frames but the queue stays full, the parent process has probably died; wait 60 s and exit
if time.time() - kill_parent_process_timeout > 60:
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", requestId)
putQueue(fb_queue, {"feedback": message_feedback(requestId,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_CPU_RESOURCES.value[0],
ExceptionType.NO_CPU_RESOURCES.value[1],
analyse_time=now_date_to_str())},
requestId)
break
if psutil.Process(getpid()).ppid() == 1:
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", requestId)
putQueue(fb_queue, {"feedback": message_feedback(requestId,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_CPU_RESOURCES.value[0],
ExceptionType.NO_CPU_RESOURCES.value[1],
analyse_time=now_date_to_str())},
requestId)
break
continue
kill_parent_process_timeout = time.time()
check_vide_result = check_video_stream(width, height)
if check_vide_result:
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, requestId)
if cv2_init_num > 3:
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, requestId)
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(1)
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, requestId)
continue
frame, pull_p, width, height, width_height_3 = pull_read_video_stream(pull_p, pull_url, width,
height, width_height_3, w_2, h_2,
requestId)
if frame is None:
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, requestId)
# Allow a margin of 100 frames
if concurrent_frame < all_frames - 100:
logger.info("离线拉流异常结束:requestId: {}", requestId)
putQueue(pull_queue, ("3",), requestId)
stop_pull_stream_step = True
continue
logger.info("离线拉流线程结束, requestId: {}", requestId)
putQueue(pull_queue, ("2",), requestId)
stop_pull_stream_step = True
continue
putQueue(pull_queue, ("4", frame, concurrent_frame, w_2, h_2, all_frames), requestId)
concurrent_frame += 1
except ServiceException as s:
logger.error("离线任务拉流出现异常:{}, requestId:{}", s.msg, requestId)
putQueue(pull_queue, ("1", s.code, s.msg), requestId, enable_ex=False)
except Exception:
logger.error("离线拉流异常: {}, requestId:{}", format_exc(), requestId)
putQueue(pull_queue, ("1", ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), requestId, enable_ex=False)
finally:
clear_pull_p(pull_p, requestId)
if imageFileUpload:
putQueue(image_queue, {"command": "stop"}, requestId, enable_ex=False)
imageFileUpload.join(60 * 3)
if hb:
putQueue(hb_queue, {"command": "stop"}, requestId, enable_ex=False)
hb.join(60 * 3)
logger.info("离线拉流线程结束, requestId: {}", requestId)

+ 0
- 51
concurrency/RecordingHeartbeatThread.py

@@ -1,51 +0,0 @@
# -*- coding: utf-8 -*-
from threading import Thread
import time
from loguru import logger
from entity.FeedBack import recording_feedback
from enums.RecordingStatusEnum import RecordingStatus


class RecordingHeartbeat(Thread):
def __init__(self, fbQueue, hbQueue, request_id):
super().__init__()
self.fbQueue = fbQueue
self.hbQueue = hbQueue
self.request_id = request_id

def getHbQueue(self):
eBody = None
try:
eBody = self.hbQueue.get(block=False)
except Exception as e:
pass
return eBody

# Push the execution result
def sendResult(self, result):
self.fbQueue.put(result)

def sendHbQueue(self, result):
self.hbQueue.put(result)

def sendhbMessage(self, statusl):
self.sendResult({"recording": recording_feedback(self.request_id, statusl)})

def run(self):
logger.info("开始启动录屏心跳线程!requestId:{}", self.request_id)
hb_init_num = 0
while True:
try:
time.sleep(3)
hb_msg = self.getHbQueue()
if hb_msg is not None and len(hb_msg) > 0:
command = hb_msg.get("command")
if 'stop' == command:
logger.info("开始终止心跳线程, requestId:{}", self.request_id)
break
if hb_init_num % 30 == 0:
self.sendhbMessage(RecordingStatus.RECORDING_RUNNING.value[0])
hb_init_num += 3
except Exception as e:
logger.exception("心跳线程异常:{}, requestId:{}", e, self.request_id)
logger.info("心跳线程停止完成!requestId:{}", self.request_id)

+ 0
- 0
concurrency/__init__.py


BIN
concurrency/__pycache__/CommonThread.cpython-38.pyc


BIN
concurrency/__pycache__/FeedbackThread.cpython-38.pyc


BIN
concurrency/__pycache__/FileUploadThread.cpython-38.pyc


BIN
concurrency/__pycache__/HeartbeatThread.cpython-38.pyc


BIN
concurrency/__pycache__/IntelligentRecognitionProcess.cpython-38.pyc


BIN
concurrency/__pycache__/PullStreamThread.cpython-38.pyc


BIN
concurrency/__pycache__/PullVideoStreamProcess.cpython-38.pyc


BIN
concurrency/__pycache__/RecordingHeartbeatThread.cpython-38.pyc


BIN
concurrency/__pycache__/__init__.cpython-38.pyc


+ 0
- 22
config/dsp_aliyun.json

@@ -1,22 +0,0 @@
{
"access_key": "LTAI5tSJ62TLMUb4SZuf285A",
"access_secret": "MWYynm30filZ7x0HqSHlU3pdLVNeI7",
"oss": {
"endpoint": "http://oss-cn-shanghai.aliyuncs.com",
"bucket": "ta-tech-image",
"connect_timeout": 30
},
"vod": {
"host_address": "https://vod.play.t-aaron.com/",
"ecsRegionId": "cn-shanghai",
"dev": {
"CateId": 1000468341
},
"test": {
"CateId": 1000468338
},
"prod": {
"CateId": 1000468340
}
}
}

+ 0
- 116
config/dsp_application.json

@@ -1,116 +0,0 @@
{
"dsp": {
"active": "dev"
},
"kafka": {
"topic": {
"dsp-alg-online-tasks-topic": "dsp-alg-online-tasks",
"dsp-alg-offline-tasks-topic": "dsp-alg-offline-tasks",
"dsp-alg-image-tasks-topic": "dsp-alg-image-tasks",
"dsp-alg-results-topic": "dsp-alg-task-results",
"dsp-recording-task-topic": "dsp-recording-task",
"dsp-recording-result-topic": "dsp-recording-result"
},
"dev": {
"bootstrap_servers": ["192.168.11.13:9092"],
"dsp-alg-online-tasks": {
"partition": [0]
},
"dsp-alg-offline-tasks": {
"partition": [0]
},
"dsp-alg-task-results": {
"partition": [0]
},
"producer": {
"acks": -1,
"retries": 3,
"linger_ms": 50,
"retry_backoff_ms": 1000,
"max_in_flight_requests_per_connection": 5
},
"consumer": {
"client_id": "dsp_ai_server",
"group_id": "dsp-ai-dev",
"auto_offset_reset": "latest",
"enable_auto_commit": 0,
"max_poll_records": 1
}
},
"test": {
"bootstrap_servers": ["106.14.96.218:19092"],
"dsp-alg-online-tasks": {
"partition": [0]
},
"dsp-alg-offline-tasks": {
"partition": [0]
},
"dsp-alg-task-results": {
"partition": [0]
},
"producer": {
"acks": -1,
"retries": 3,
"linger_ms": 50,
"retry_backoff_ms": 1000,
"max_in_flight_requests_per_connection": 5
},
"consumer": {
"client_id": "dsp_ai_server",
"group_id": "dsp-ai-test",
"auto_offset_reset": "latest",
"enable_auto_commit": 0,
"max_poll_records": 1
}
},
"prod": {
"bootstrap_servers": ["101.132.127.1:19094"],
"dsp-alg-online-tasks": {
"partition": [0]
},
"dsp-alg-offline-tasks": {
"partition": [0]
},
"dsp-alg-task-results": {
"partition": [0]
},
"producer": {
"acks": -1,
"retries": 3,
"linger_ms": 50,
"retry_backoff_ms": 1000,
"max_in_flight_requests_per_connection": 5
},
"consumer": {
"client_id": "dsp_ai_server",
"group_id": "dsp-ai-prod",
"auto_offset_reset": "latest",
"enable_auto_commit": 0,
"max_poll_records": 1
}
}
},
"video": {
"file_path": "../dsp/video/",
"video_add_water": 0
},
"service": {
"frame_score": 0.4,
"filter": {
"picture_similarity": 1,
"similarity": 0.65,
"frame_step": 160
},
"timeout": 21600,
"cv2_pull_stream_timeout": 1000,
"cv2_read_stream_timeout": 1000,
"recording_pull_stream_timeout": 600
},
"model": {
"limit": 3
},
"task": {
"limit": 5
}
}


+ 0
- 17
config/dsp_baidu.json

@@ -1,17 +0,0 @@
{
"orc": {
"APP_ID": 28173504,
"API_KEY": "kqrFE7VuygIaFer7z6cRxzoi",
"SECRET_KEY": "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
},
"vehicle": {
"APP_ID": 31096670,
"API_KEY": "Dam3O4tgPRN3qh4OYE82dbg7",
"SECRET_KEY": "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
},
"person": {
"APP_ID": 31096755,
"API_KEY": "CiWrt4iyxOly36n3kR7utiAG",
"SECRET_KEY": "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"
}
}

+ 0
- 12
config/dsp_logger.json

@@ -1,12 +0,0 @@
{
"enable_file_log": 1,
"enable_stderr": 1,
"base_path": "../dsp/logs",
"log_name": "dsp.log",
"log_fmt": "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}",
"level": "INFO",
"rotation": "00:00",
"retention": "7 days",
"encoding": "utf8"
}


+ 0
- 24
dsp_master.py

@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
from os.path import dirname, realpath
from sys import argv

from loguru import logger
from torch import multiprocessing

from service.Dispatcher import DispatcherService
from util.LogUtils import init_log

'''
DSP main program entry point
'''
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
base_dir = dirname(realpath(__file__))
init_log(base_dir)
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙")
# Collect the command-line arguments (used to pick the runtime environment)
arg = argv
logger.info("脚本启动参数: {}", arg)
envs = ('dev', 'test', 'prod')
active = [env for env in envs if env in arg]
DispatcherService(base_dir, active)
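Usage example: launching the service with `python dsp_master.py test` puts 'test' into argv, so the comprehension yields ['test'] and DispatcherService overrides the active profile from dsp_application.json; with no recognized environment argument the list stays empty and the profile configured in the JSON file is kept.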

+ 0
- 30
entity/FeedBack.py View File

@@ -1,30 +0,0 @@
def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="",
modelCode="", detectTargetCode="", analyse_time="", analyse_results=""):
taskfb = {}
results = []
result_msg = {}
taskfb["request_id"] = requestId
taskfb["status"] = status
taskfb["type"] = type
taskfb["error_code"] = error_code
taskfb["error_msg"] = error_msg
taskfb["progress"] = progress
result_msg["original_url"] = original_url
result_msg["sign_url"] = sign_url
result_msg["analyse_results"] = analyse_results
result_msg["model_code"] = modelCode
result_msg["detect_targets_code"] = detectTargetCode
result_msg["analyse_time"] = analyse_time
results.append(result_msg)
taskfb["results"] = results
return taskfb


def recording_feedback(requestId, status, error_code="", error_msg="", recording_video_url=""):
rdfb = {}
rdfb["request_id"] = requestId
rdfb["status"] = status
rdfb["error_code"] = error_code
rdfb["error_msg"] = error_msg
rdfb["recording_video_url"] = recording_video_url
return rdfb
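For illustration, a failed-analysis feedback built with the helper above (all values made up); this is the dict the dispatcher later puts on the feedback queue under the "feedback" key.

    fb = message_feedback('req-123', 'failed', '2',
                          error_code='SP001', error_msg='AI分析超时!',
                          analyse_time='2023-01-01 00:00:00')
    # fb['request_id'] == 'req-123', fb['status'] == 'failed', fb['type'] == '2'
    # fb['results'] is a one-element list whose entry carries original_url, sign_url,
    # analyse_results, model_code, detect_targets_code and analyse_time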

+ 0
- 14
entity/PullStreamDto.py View File

@@ -1,14 +0,0 @@

class PullStreamDto:

__slots__ = ('msg', 'context', 'pullQueue', 'fbQueue', 'hbQueue', 'imageQueue', 'analyse_type')

def __init__(self, msg, context, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type):
self.msg = msg
self.context = context
self.pullQueue = pullQueue
self.fbQueue = fbQueue
self.hbQueue = hbQueue
self.imageQueue = imageQueue
self.analyse_type = analyse_type


+ 0
- 12
entity/TaskParam.py View File

@@ -1,12 +0,0 @@

class Param:

__slots__ = ('fbqueue', 'msg', 'analyse_type', 'base_dir', 'context', 'gpu_name')

def __init__(self, fbqueue, msg, analyse_type, base_dir, context, gpu_name):
self.fbqueue = fbqueue
self.msg = msg
self.analyse_type = analyse_type
self.base_dir = base_dir
self.context = context
self.gpu_name = gpu_name

+ 0
- 0
entity/__init__.py View File


BIN
entity/__pycache__/FeedBack.cpython-38.pyc View File


BIN
entity/__pycache__/__init__.cpython-38.pyc View File


+ 0
- 21
enums/AnalysisStatusEnum.py View File

@@ -1,21 +0,0 @@
from enum import Enum, unique


# Analysis status enumeration
@unique
class AnalysisStatus(Enum):
# Waiting
WAITING = "waiting"

# Analysing
RUNNING = "running"

# Analysis finished
SUCCESS = "success"

# Timed out
TIMEOUT = "timeout"

# Failed
FAILED = "failed"

+ 0
- 22
enums/AnalysisTypeEnum.py View File

@@ -1,22 +0,0 @@
from enum import Enum, unique


# Analysis type enumeration
@unique
class AnalysisType(Enum):
# Online (live stream)
ONLINE = "1"

# Offline (recorded video)
OFFLINE = "2"

# Image
IMAGE = "3"

# Screen recording
RECORDING = "9999"






+ 0
- 188
enums/BaiduSdkEnum.py View File

@@ -1,188 +0,0 @@
from enum import Enum, unique

'''
OCR official docs: https://ai.baidu.com/ai-doc/OCR/zkibizyhz
Vehicle detection official docs: https://ai.baidu.com/ai-doc/VEHICLE/rk3inf9tj
Field 1: error code
Field 2: error description (English)
Field 3: error description (Chinese)
Field 4: 0 - always reported as a generic internal error
1 - the message may be returned to the caller as-is
2 - an empty error message is returned
Field 5: number of retries allowed for this error
'''


# Baidu SDK error code enumeration
@unique
class BaiduSdkErrorEnum(Enum):

UNKNOWN_ERROR = (1, "Unknown error", "未知错误", 0, 0)

SERVICE_TEMPORARILY_UNAVAILABLE = (2, "Service temporarily unavailable", "服务暂不可用,请再次请求", 0, 3)

UNSUPPORTED_OPENAPI_METHOD = (3, "Unsupported openapi method", "调用的API不存在", 0, 0)

API_REQUEST_LIMIT_REACHED = (4, "Open api request limit reached", "请求量限制, 请稍后再试!", 1, 5)

NO_PERMISSION_TO_ACCESS_DATA = (6, "No permission to access data", "无权限访问该用户数据", 1, 0)

GET_SERVICE_TOKEN_FAILED = (13, "Get service token failed", "获取token失败", 0, 2)

IAM_CERTIFICATION_FAILED = (14, "IAM Certification failed", "IAM 鉴权失败", 0, 1)

APP_NOT_EXSITS_OR_CREATE_FAILED = (15, "app not exsits or create failed", "应用不存在或者创建失败", 0, 0)

API_DAILY_REQUEST_LIMIT_REACHED = (17, "Open api daily request limit reached", "每天请求量超限额!", 1, 2)

API_QPS_REQUEST_LIMIT_REACHED = (18, "Open api qps request limit reached", "QPS超限额!", 1, 10)

API_TOTAL_REQUEST_LIMIT_REACHED = (19, "Open api total request limit reached", "请求总量超限额!", 1, 2)

INVALID_TOKEN = (100, "Invalid parameter", "无效的access_token参数,token拉取失败", 0, 1)

ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID = (110, "Access token invalid or no longer valid", "access_token无效,token有效期为30天", 0, 1)

ACCESS_TOKEN_EXPIRED = (111, "Access token expired", "access token过期,token有效期为30天", 0, 1)

INTERNAL_ERROR = (282000, "internal error", "服务器内部错误", 0, 1)

INVALID_PARAM = (216100, "invalid param", "请求中包含非法参数!", 0, 1)

NOT_ENOUGH_PARAM = (216101, "not enough param", "缺少必须的参数!", 0, 0)

SERVICE_NOT_SUPPORT = (216102, "service not support", "请求了不支持的服务,请检查调用的url", 0, 0)

PARAM_TOO_LONG = (216103, "param too long", "请求中某些参数过长!", 1, 0)

APPID_NOT_EXIST = (216110, "appid not exist", "appid不存在", 0, 0)

EMPTY_IMAGE = (216200, "empty image", "图片为空!", 1, 0)

IMAGE_FORMAT_ERROR = (216201, "image format error", "上传的图片格式错误,现阶段我们支持的图片格式为:PNG、JPG、JPEG、BMP", 1, 0)

IMAGE_SIZE_ERROR = (216202, "image size error", "上传的图片大小错误,分辨率不高于4096*4096", 1, 0)

IMAGE_SIZE_BASE_ERROR = (216203, "image size error", "上传的图片编码有误", 1, 0)

RECOGNIZE_ERROR = (216630, "recognize error", "识别错误", 2, 2)

DETECT_ERROR = (216634, "detect error", "检测错误", 2, 2)

MISSING_PARAMETERS = (282003, "missing parameters: {参数名}", "请求参数缺失", 0, 0)

BATCH_ROCESSING_ERROR = (282005, "batch processing error", "处理批量任务时发生部分或全部错误", 0, 5)

BATCH_TASK_LIMIT_REACHED = (282006, "batch task limit reached", "批量任务处理数量超出限制,请将任务数量减少到10或10以下", 1, 5)

IMAGE_TRANSCODE_ERROR = (282100, "image transcode error", "图片压缩转码错误", 0, 1)

IMAGE_SPLIT_LIMIT_REACHED = (282101, "image split limit reached", "长图片切分数量超限!", 1, 1)

TARGET_DETECT_ERROR = (282102, "target detect error", "未检测到图片中识别目标!", 2, 1)

TARGET_RECOGNIZE_ERROR = (282103, "target recognize error", "图片目标识别错误!", 2, 1)

URLS_NOT_EXIT = (282110, "urls not exit", "URL参数不存在,请核对URL后再次提交!", 1, 0)

URL_FORMAT_ILLEGAL = (282111, "url format illegal", "URL格式非法!", 1, 0)

URL_DOWNLOAD_TIMEOUT = (282112, "url download timeout", "URL下载超时!", 1, 0)

URL_RESPONSE_INVALID = (282113, "url response invalid", "URL返回无效参数!", 1, 0)

URL_SIZE_ERROR = (282114, "url size error", "URL长度超过1024字节或为0!", 1, 0)

REQUEST_ID_NOT_EXIST = (282808, "request id: xxxxx not exist", "request id xxxxx 不存在", 0, 0)

RESULT_TYPE_ERROR = (282809, "result type error", "返回结果请求错误(不属于excel或json)", 0, 0)

IMAGE_RECOGNIZE_ERROR = (282810, "image recognize error", "图像识别错误", 2, 1)

INVALID_ARGUMENT = (283300, "Invalid argument", "入参格式有误,可检查下图片编码、代码格式是否有误", 1, 0)

INTERNAL_ERROR_2 = (336000, "Internal error", "服务器内部错误", 0, 0)

INVALID_ARGUMENT_2 = (336001, "Invalid Argument", "入参格式有误,比如缺少必要参数、图片编码错误等等,可检查下图片编码、代码格式是否有误", 0, 0)

SDK_IMAGE_SIZE_ERROR = ('SDK100', "image size error", "图片大小超限,最短边至少50px,最长边最大4096px ,建议长宽比3:1以内,图片请求格式支持:PNG、JPG、BMP", 1, 0)

SDK_IMAGE_LENGTH_ERROR = ('SDK101', "image length error", "图片边长不符合要求,最短边至少50px,最长边最大4096px ,建议长宽比3:1以内", 1, 0)

SDK_READ_IMAGE_FILE_ERROR = ('SDK102', "read image file error", "读取图片文件错误", 0, 1)

SDK_CONNECTION_OR_READ_DATA_TIME_OUT = ('SDK108', "connection or read data time out", "连接超时或读取数据超时,请检查本地网络设置、文件读取设置", 0, 3)

SDK_UNSUPPORTED_IMAGE_FORMAT = ('SDK109', "unsupported image format", "不支持的图片格式,当前支持以下几类图片:PNG、JPG、BMP", 1, 0)


BAIDUERRORDATA = {
BaiduSdkErrorEnum.UNKNOWN_ERROR.value[0]: BaiduSdkErrorEnum.UNKNOWN_ERROR,
BaiduSdkErrorEnum.SERVICE_TEMPORARILY_UNAVAILABLE.value[0]: BaiduSdkErrorEnum.SERVICE_TEMPORARILY_UNAVAILABLE,
BaiduSdkErrorEnum.UNSUPPORTED_OPENAPI_METHOD.value[0]: BaiduSdkErrorEnum.UNSUPPORTED_OPENAPI_METHOD,
BaiduSdkErrorEnum.API_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.NO_PERMISSION_TO_ACCESS_DATA.value[0]: BaiduSdkErrorEnum.NO_PERMISSION_TO_ACCESS_DATA,
BaiduSdkErrorEnum.GET_SERVICE_TOKEN_FAILED.value[0]: BaiduSdkErrorEnum.GET_SERVICE_TOKEN_FAILED,
BaiduSdkErrorEnum.IAM_CERTIFICATION_FAILED.value[0]: BaiduSdkErrorEnum.IAM_CERTIFICATION_FAILED,
BaiduSdkErrorEnum.APP_NOT_EXSITS_OR_CREATE_FAILED.value[0]: BaiduSdkErrorEnum.APP_NOT_EXSITS_OR_CREATE_FAILED,
BaiduSdkErrorEnum.API_DAILY_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_DAILY_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.API_QPS_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_QPS_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.API_TOTAL_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_TOTAL_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.INVALID_TOKEN.value[0]: BaiduSdkErrorEnum.INVALID_TOKEN,
BaiduSdkErrorEnum.ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID.value[0]: BaiduSdkErrorEnum.ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID,
BaiduSdkErrorEnum.ACCESS_TOKEN_EXPIRED.value[0]: BaiduSdkErrorEnum.ACCESS_TOKEN_EXPIRED,
BaiduSdkErrorEnum.INTERNAL_ERROR.value[0]: BaiduSdkErrorEnum.INTERNAL_ERROR,
BaiduSdkErrorEnum.INVALID_PARAM.value[0]: BaiduSdkErrorEnum.INVALID_PARAM,
BaiduSdkErrorEnum.NOT_ENOUGH_PARAM.value[0]: BaiduSdkErrorEnum.NOT_ENOUGH_PARAM,
BaiduSdkErrorEnum.SERVICE_NOT_SUPPORT.value[0]: BaiduSdkErrorEnum.SERVICE_NOT_SUPPORT,
BaiduSdkErrorEnum.PARAM_TOO_LONG.value[0]: BaiduSdkErrorEnum.PARAM_TOO_LONG,
BaiduSdkErrorEnum.APPID_NOT_EXIST.value[0]: BaiduSdkErrorEnum.APPID_NOT_EXIST,
BaiduSdkErrorEnum.EMPTY_IMAGE.value[0]: BaiduSdkErrorEnum.EMPTY_IMAGE,
BaiduSdkErrorEnum.IMAGE_FORMAT_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_FORMAT_ERROR,
BaiduSdkErrorEnum.IMAGE_SIZE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_SIZE_ERROR,
BaiduSdkErrorEnum.IMAGE_SIZE_BASE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_SIZE_BASE_ERROR,
BaiduSdkErrorEnum.RECOGNIZE_ERROR.value[0]: BaiduSdkErrorEnum.RECOGNIZE_ERROR,
BaiduSdkErrorEnum.DETECT_ERROR.value[0]: BaiduSdkErrorEnum.DETECT_ERROR,
BaiduSdkErrorEnum.MISSING_PARAMETERS.value[0]: BaiduSdkErrorEnum.MISSING_PARAMETERS,
BaiduSdkErrorEnum.BATCH_ROCESSING_ERROR.value[0]: BaiduSdkErrorEnum.BATCH_ROCESSING_ERROR,
BaiduSdkErrorEnum.BATCH_TASK_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.BATCH_TASK_LIMIT_REACHED,
BaiduSdkErrorEnum.IMAGE_TRANSCODE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_TRANSCODE_ERROR,
BaiduSdkErrorEnum.IMAGE_SPLIT_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.IMAGE_SPLIT_LIMIT_REACHED,
BaiduSdkErrorEnum.TARGET_DETECT_ERROR.value[0]: BaiduSdkErrorEnum.TARGET_DETECT_ERROR,
BaiduSdkErrorEnum.TARGET_RECOGNIZE_ERROR.value[0]: BaiduSdkErrorEnum.TARGET_RECOGNIZE_ERROR,
BaiduSdkErrorEnum.URL_SIZE_ERROR.value[0]: BaiduSdkErrorEnum.URL_SIZE_ERROR,
BaiduSdkErrorEnum.REQUEST_ID_NOT_EXIST.value[0]: BaiduSdkErrorEnum.REQUEST_ID_NOT_EXIST,
BaiduSdkErrorEnum.RESULT_TYPE_ERROR.value[0]: BaiduSdkErrorEnum.RESULT_TYPE_ERROR,
BaiduSdkErrorEnum.IMAGE_RECOGNIZE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_RECOGNIZE_ERROR,
BaiduSdkErrorEnum.INVALID_ARGUMENT.value[0]: BaiduSdkErrorEnum.INVALID_ARGUMENT,
BaiduSdkErrorEnum.INTERNAL_ERROR_2.value[0]: BaiduSdkErrorEnum.INTERNAL_ERROR_2,
BaiduSdkErrorEnum.INVALID_ARGUMENT_2.value[0]: BaiduSdkErrorEnum.INVALID_ARGUMENT_2,
BaiduSdkErrorEnum.SDK_IMAGE_SIZE_ERROR.value[0]: BaiduSdkErrorEnum.SDK_IMAGE_SIZE_ERROR,
BaiduSdkErrorEnum.SDK_IMAGE_LENGTH_ERROR.value[0]: BaiduSdkErrorEnum.SDK_IMAGE_LENGTH_ERROR,
BaiduSdkErrorEnum.SDK_READ_IMAGE_FILE_ERROR.value[0]: BaiduSdkErrorEnum.SDK_READ_IMAGE_FILE_ERROR,
BaiduSdkErrorEnum.SDK_CONNECTION_OR_READ_DATA_TIME_OUT.value[0]: BaiduSdkErrorEnum.SDK_CONNECTION_OR_READ_DATA_TIME_OUT,
BaiduSdkErrorEnum.SDK_UNSUPPORTED_IMAGE_FORMAT.value[0]: BaiduSdkErrorEnum.SDK_UNSUPPORTED_IMAGE_FORMAT,
BaiduSdkErrorEnum.URLS_NOT_EXIT.value[0]: BaiduSdkErrorEnum.URLS_NOT_EXIT,
BaiduSdkErrorEnum.URL_FORMAT_ILLEGAL.value[0]: BaiduSdkErrorEnum.URL_FORMAT_ILLEGAL,
BaiduSdkErrorEnum.URL_DOWNLOAD_TIMEOUT.value[0]: BaiduSdkErrorEnum.URL_DOWNLOAD_TIMEOUT,
BaiduSdkErrorEnum.URL_RESPONSE_INVALID.value[0]: BaiduSdkErrorEnum.URL_RESPONSE_INVALID
}

@unique
class VehicleEnum(Enum):
CAR = ("car", "小汽车", 0)
TRICYCLE = ("tricycle", "三轮车", 1)
MOTORBIKE = ("motorbike", "摩托车", 2)
CARPLATE = ("carplate", "车牌", 3)
TRUCK = ("truck", "卡车", 4)
BUS = ("bus", "巴士", 5)


VehicleEnumVALUE={
VehicleEnum.CAR.value[0]: VehicleEnum.CAR,
VehicleEnum.TRICYCLE.value[0]: VehicleEnum.TRICYCLE,
VehicleEnum.MOTORBIKE.value[0]: VehicleEnum.MOTORBIKE,
VehicleEnum.CARPLATE.value[0]: VehicleEnum.CARPLATE,
VehicleEnum.TRUCK.value[0]: VehicleEnum.TRUCK,
VehicleEnum.BUS.value[0]: VehicleEnum.BUS
}
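A minimal sketch of how the mapping and the output/retry fields documented above could drive error handling; the caller-side function is an illustration, not code from this repository.

    def handle_baidu_error(error_code):
        # returns (allowed retries, message to report) for a Baidu error code
        err = BAIDUERRORDATA.get(error_code)
        if err is None:
            return 0, '系统内部异常'           # unknown code: no retry, internal error
        _, _, zh_msg, output_mode, retries = err.value
        if output_mode == 1:
            return retries, zh_msg             # message may be exposed to the caller
        if output_mode == 2:
            return retries, ''                 # expose an empty message
        return retries, '系统内部异常'         # mask as an internal error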

+ 0
- 78
enums/ExceptionEnum.py View File

@@ -1,78 +0,0 @@
from enum import Enum, unique


# Service exception enumeration
@unique
class ExceptionType(Enum):

OR_VIDEO_ADDRESS_EXCEPTION = ("SP000", "未拉取到视频流, 请检查拉流地址是否有视频流!")

ANALYSE_TIMEOUT_EXCEPTION = ("SP001", "AI分析超时!")

PULLSTREAM_TIMEOUT_EXCEPTION = ("SP002", "原视频拉流超时!")

READSTREAM_TIMEOUT_EXCEPTION = ("SP003", "原视频读取视频流超时!")

GET_VIDEO_URL_EXCEPTION = ("SP004", "获取视频播放地址失败!")

GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("SP005", "获取原视频播放地址超时!")

PULL_STREAM_URL_EXCEPTION = ("SP006", "拉流地址不能为空!")

PUSH_STREAM_URL_EXCEPTION = ("SP007", "推流地址不能为空!")

PUSH_STREAM_TIME_EXCEPTION = ("SP008", "未生成本地视频地址!")

AI_MODEL_MATCH_EXCEPTION = ("SP009", "未匹配到对应的AI模型!")

ILLEGAL_PARAMETER_FORMAT = ("SP010", "非法参数格式!")

PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP011", "推流通道可能被占用, 请稍后再试!")

VIDEO_RESOLUTION_EXCEPTION = ("SP012", "不支持该分辨率类型的视频,请切换分辨率再试!")

READ_IAMGE_URL_EXCEPTION = ("SP013", "未能解析图片地址!")

DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED = ("SP014", "不支持该类型的检测目标!")

WRITE_STREAM_EXCEPTION = ("SP015", "写流异常!")

OR_VIDEO_DO_NOT_EXEIST_EXCEPTION = ("SP016", "原视频不存在!")

MODEL_LOADING_EXCEPTION = ("SP017", "模型加载异常!")

MODEL_ANALYSE_EXCEPTION = ("SP018", "算法模型分析异常!")

AI_MODEL_CONFIG_EXCEPTION = ("SP019", "模型配置不能为空!")

AI_MODEL_GET_CONFIG_EXCEPTION = ("SP020", "获取模型配置异常, 请检查模型配置是否正确!")

MODEL_GROUP_LIMIT_EXCEPTION = ("SP021", "模型组合个数超过限制!")

MODEL_NOT_SUPPORT_VIDEO_EXCEPTION = ("SP022", "%s不支持视频识别!")

MODEL_NOT_SUPPORT_IMAGE_EXCEPTION = ("SP023", "%s不支持图片识别!")

THE_DETECTION_TARGET_CANNOT_BE_EMPTY = ("SP024", "检测目标不能为空!")

URL_ADDRESS_ACCESS_FAILED = ("SP025", "URL地址访问失败, 请检测URL地址是否正确!")

UNIVERSAL_TEXT_RECOGNITION_FAILED = ("SP026", "识别失败!")

COORDINATE_ACQUISITION_FAILED = ("SP027", "飞行坐标识别异常!")

PUSH_STREAM_EXCEPTION = ("SP028", "推流异常!")

NOT_REQUESTID_TASK_EXCEPTION = ("SP993", "未查询到该任务,无法停止任务!")

GPU_EXCEPTION = ("SP994", "GPU出现异常!")

NO_RESOURCES = ("SP995", "服务器暂无资源可以使用,请稍后30秒后再试!")

NO_CPU_RESOURCES = ("SP996", "暂无CPU资源可以使用,请稍后再试!")

SERVICE_COMMON_EXCEPTION = ("SP997", "公共服务异常!")

NO_GPU_RESOURCES = ("SP998", "暂无GPU资源可以使用,请稍后再试!")

SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常!")

+ 0
- 77
enums/ModelTypeEnum.py View File

@@ -1,77 +0,0 @@
from enum import Enum, unique

'''
Field descriptions
1. sequence number
2. model code
3. model display name
4. name of the underlying model implementation
'''


@unique
class ModelType(Enum):
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river')

FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2')

TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2')

EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None)

PLATE_MODEL = ("5", "005", "车牌模型", None)

VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle')

PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian')

SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire')

ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer')

COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad')

SHIP_MODEL = ("11", "011", "船只模型", 'ship2')

BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None)

CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency')

RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2')

CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement')

DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning')

NOPARKING_MODEL = ("18", "018", "城市违章模型", 'noParking')

def checkCode(code):
for model in ModelType:
if model.value[1] == code:
return True
return False


'''
Field 1: detection target name
Field 2: detection target index
Field 3: callable that invokes the Baidu detection client
'''


@unique
class BaiduModelTarget(Enum):
VEHICLE_DETECTION = (
"车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id))

HUMAN_DETECTION = (
"人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id))

PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id))


BAIDU_MODEL_TARGET_CONFIG = {
BaiduModelTarget.VEHICLE_DETECTION.value[1]: BaiduModelTarget.VEHICLE_DETECTION,
BaiduModelTarget.HUMAN_DETECTION.value[1]: BaiduModelTarget.HUMAN_DETECTION,
BaiduModelTarget.PEOPLE_COUNTING.value[1]: BaiduModelTarget.PEOPLE_COUNTING
}
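A sketch of how the lambdas above dispatch a Baidu detection request; client0 / client1 stand for the vehicle and human-body clients and are assumptions here.

    def run_baidu_target(target_code, client0, client1, image_url, request_id):
        target = BAIDU_MODEL_TARGET_CONFIG.get(target_code)
        if target is None:
            raise ValueError('unsupported detect target: %s' % target_code)
        name, _, call = target.value
        return name, call(client0, client1, image_url, request_id)

    # e.g. run_baidu_target(0, vehicle_client, body_client, url, request_id)
    # ends up calling client0.vehicleDetectUrl(url, request_id) for 车辆检测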

+ 0
- 16
enums/RecordingStatusEnum.py View File

@@ -1,16 +0,0 @@
from enum import Enum, unique


# Recording status enumeration
@unique
class RecordingStatus(Enum):
RECORDING_WAITING = ("5", "待录制")

RECORDING_RUNNING = ("10", "录制中")

RECORDING_SUCCESS = ("15", "录制完成")

RECORDING_TIMEOUT = ("20", "录制超时")

RECORDING_FAILED = ("25", "录制失败")

+ 0
- 0
enums/__init__.py View File


BIN
enums/__pycache__/AnalysisStatusEnum.cpython-38.pyc View File


BIN
enums/__pycache__/AnalysisTypeEnum.cpython-38.pyc View File


BIN
enums/__pycache__/BaiduSdkEnum.cpython-310.pyc View File


BIN
enums/__pycache__/BaiduSdkEnum.cpython-38.pyc View File


BIN
enums/__pycache__/ExceptionEnum.cpython-310.pyc View File


BIN
enums/__pycache__/ExceptionEnum.cpython-38.pyc View File


BIN
enums/__pycache__/ModelTypeEnum.cpython-38.pyc View File


BIN
enums/__pycache__/RecordingStatusEnum.cpython-38.pyc View File


BIN
enums/__pycache__/__init__.cpython-310.pyc View File


BIN
enums/__pycache__/__init__.cpython-38.pyc View File


+ 0
- 22
exception/CustomerException.py View File

@@ -1,22 +0,0 @@
# -*- coding: utf-8 -*-
from loguru import logger


"""
自定义异常
"""


class ServiceException(Exception): # 继承异常类
def __init__(self, code, msg, desc=None):
self.code = code
if desc is None:
self.msg = msg
else:
self.msg = msg % desc

def __str__(self):
logger.error("异常编码:{}, 异常描述:{}", self.code, self.msg)




+ 0
- 0
exception/__init__.py View File


BIN
exception/__pycache__/CustomerException.cpython-310.pyc View File


BIN
exception/__pycache__/CustomerException.cpython-38.pyc View File


BIN
exception/__pycache__/__init__.cpython-310.pyc View File


BIN
exception/__pycache__/__init__.cpython-38.pyc View File


+ 0
- 0
font/__init__.py View File


BIN
font/simsun.ttc View File


BIN
image/logo.png View File

Width: 277  |  Height: 48  |  Size: 26KB

+ 0
- 425
service/Dispatcher.py View File

@@ -1,425 +0,0 @@
# -*- coding: utf-8 -*-
import time
from traceback import format_exc

from cerberus import Validator
from torch.cuda import is_available

from common.YmlConstant import SCHEMA
from concurrency.FeedbackThread import FeedbackThread
from entity.FeedBack import message_feedback, recording_feedback
from entity.TaskParam import Param
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.AnalysisTypeEnum import AnalysisType
from enums.ExceptionEnum import ExceptionType
from enums.RecordingStatusEnum import RecordingStatus
from exception.CustomerException import ServiceException
from util import TimeUtils
from loguru import logger
from multiprocessing import Queue
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, \
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess, ScreenRecordingProcess
from util import GPUtils
from util.CpuUtils import check_cpu, print_cpu_ex_status
from util.FileUtils import create_dir_not_exist
from util.GPUtils import get_first_gpu_name, print_gpu_ex_status
from util.KafkaUtils import CustomerKafkaConsumer
from util.RWUtils import getConfigs

'''
Dispatcher service
'''


class DispatcherService:
__slots__ = (
'__base_dir',
'__context',
'__feedbackThread',
'__listeningProcesses',
'__fbQueue',
'__topics',
'__analysisType',
'__gpu_name',
'__resource_status'
)

"""
初始化
"""

def __init__(self, base_dir, active):
if not is_available():
raise Exception("cuda不在活动状态, 请检测显卡驱动是否正常!!!!")
self.__context = getConfigs(base_dir, 'config/dsp_application.json')
create_dir_not_exist(base_dir, self.__context["video"]["file_path"])
self.__base_dir = base_dir
if len(active) > 0:
self.__context["dsp"]["active"] = active[0]

self.__resource_status = False
self.__feedbackThread = None # 初始化反馈线程对象

self.__listeningProcesses = {}
self.__fbQueue = Queue()

self.__topics = (
self.__context["kafka"]["topic"]["dsp-alg-online-tasks-topic"],
self.__context["kafka"]["topic"]["dsp-alg-offline-tasks-topic"],
self.__context["kafka"]["topic"]["dsp-alg-image-tasks-topic"],
self.__context["kafka"]["topic"]["dsp-recording-task-topic"]
)
self.__analysisType = {
self.__topics[0]: (AnalysisType.ONLINE.value, lambda x, y: self.online(x, y),
lambda x, y, z: self.identify_method(x, y, z)),
self.__topics[1]: (AnalysisType.OFFLINE.value, lambda x, y: self.offline(x, y),
lambda x, y, z: self.identify_method(x, y, z)),
self.__topics[2]: (AnalysisType.IMAGE.value, lambda x, y: self.image(x, y),
lambda x, y, z: self.identify_method(x, y, z)),
self.__topics[3]: (AnalysisType.RECORDING.value, lambda x, y: self.recording(x, y),
lambda x, y, z: self.recording_method(x, y, z))
}

gpu_name_array = get_first_gpu_name()
gpu_codes = ('3090', '2080', '4090', 'A10')
gpu_array = [g for g in gpu_codes if g in gpu_name_array]
self.__gpu_name = '2080Ti'
if len(gpu_array) > 0:
if gpu_array[0] != '2080':
self.__gpu_name = gpu_array[0]
else:
raise Exception("GPU资源不在提供的模型所支持的范围内!请先提供对应的GPU模型!")
logger.info("当前服务环境为: {}, 服务器GPU使用型号: {}", self.__context["dsp"]["active"], self.__gpu_name)
self.start_service()

# Service start-up entry point
def start_service(self):
# initialize the Kafka consumer
customerKafkaConsumer = CustomerKafkaConsumer(self.__context, topics=self.__topics)
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙")
# main message-processing loop
start_time = time.time()
# persistent_time = time.time()
# full_count = 0
while True:
try:
# check running task processes and drop the ones that have finished
self.check_process_task()
start_time = self.check_service_resource(start_time)
# if len(self.__listeningProcesses) > 0:
# now = time.time()
# requestIds = list(self.__listeningProcesses.keys())
# requestId = requestIds[-1]
# task_process = self.__listeningProcesses.get(requestId)
# end_time = now - task_process.start_proccess_time
# if end_time > 80 and task_process.pullQueue.full() and time.time() - persistent_time < 10:
# full_count += 1
# if full_count > 4:
# logger.error("服务器资源限制, 暂无资源可以使用! requestId:{}", requestId)
# task_process.sendEvent({"command": "stop_ex"})
# full_count = 0
# persistent_time = time.time()
# if end_time > 80 and task_process.pullQueue.full() and time.time() - persistent_time >= 10:
# full_count = 0
# persistent_time = time.time()
self.start_feedback_thread()
msg = customerKafkaConsumer.poll()
time.sleep(1)
if msg is not None and len(msg) > 0:
for k, v in msg.items():
for m in v:
message = m.value
customerKafkaConsumer.commit_offset(m)
requestId = self.getRequestId(message.get("request_id"))
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}",
m.topic, m.offset, m.partition, message, requestId)
topic_method = self.__analysisType.get(m.topic)
topic_method[2](m.topic, message, topic_method[0])

except Exception:
logger.exception("主线程异常:{}", format_exc())

'''
Handles the case where request_id is empty
'''

@staticmethod
def getRequestId(request_id):
if not request_id:
return '1'
return request_id

def identify_method(self, topic, message, analysisType):
"""
实时、离线、图片识别逻辑
1. topic topic
2. 请求消息体
3. 分析类型:实时、离线、图片
"""
try:
# validate the request parameters
check_result = self.check_msg(message)
if not check_result:
return
if not is_available():
raise ServiceException(ExceptionType.GPU_EXCEPTION.value[0],
ExceptionType.GPU_EXCEPTION.value[1])
self.__analysisType.get(topic)[1](message, analysisType)
except ServiceException as s:
logger.error("消息监听异常:{}, requestId: {}", s.msg,
self.getRequestId(message.get("request_id")))
if message.get("request_id"):
self.__fbQueue.put({
"feedback": message_feedback(message.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())}, timeout=10)
except Exception:
logger.error("消息监听异常:{}, requestId: {}", format_exc(),
self.getRequestId(message.get("request_id")))
if message.get("request_id"):
self.__fbQueue.put({
"feedback": message_feedback(message.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}, timeout=10)

def recording_method(self, topic, message, analysisType):
try:
# validate the request parameters
check_result = self.check_msg(message)
if not check_result:
return
self.__analysisType.get(topic)[1](message, analysisType)
except ServiceException as s:
logger.error("消息监听异常:{}, requestId: {}", s.msg,
self.getRequestId(message.get("request_id")))
if message.get("request_id"):
self.__fbQueue.put({
"recording": recording_feedback(message.get("request_id"),
RecordingStatus.RECORDING_FAILED.value[0],
error_code=s.code,
error_msg=s.msg)}, timeout=10)
except Exception:
logger.error("消息监听异常:{}, requestId: {}", format_exc(),
self.getRequestId(message.get("request_id")))
if message.get("request_id"):
self.__fbQueue.put({
"recording": recording_feedback(message.get("request_id"),
RecordingStatus.RECORDING_FAILED.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])}, timeout=10)

# Start the online (real-time) analysis process
def startOnlineProcess(self, msg, analysisType):
if self.__listeningProcesses.get(msg.get("request_id")):
logger.warning("实时重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name)
# create and start the online recognition process
coir = OnlineIntelligentRecognitionProcess(param)
coir.start()
# record the request-to-process mapping
self.__listeningProcesses[msg.get("request_id")] = coir

# Stop the online analysis process
def stopOnlineProcess(self, msg, analysisType):
ps = self.__listeningProcesses.get(msg.get("request_id"))
if ps is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id"))
putQueue(self.__fbQueue, {
"feedback": message_feedback(msg.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[0],
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}, msg.get("request_id"))
return
ps.sendEvent({"command": "stop"})

def check_service_resource(self, start_time, requestId=None):
if len(self.__listeningProcesses) > 0:
gpu_result = print_gpu_ex_status(requestId)
cpu_result = print_cpu_ex_status(self.__base_dir, requestId)
if gpu_result or cpu_result:
self.__resource_status = True
return time.time()
if not gpu_result and not cpu_result and time.time() - start_time > 30:
self.__resource_status = False
return time.time()
return start_time

def check_process_task(self):
for requestId in list(self.__listeningProcesses.keys()):
if not self.__listeningProcesses[requestId].is_alive():
del self.__listeningProcesses[requestId]

# Start the offline analysis process
def startOfflineProcess(self, msg, analysisType):
if self.__listeningProcesses.get(msg.get("request_id")):
logger.warning("离线重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name)
first = OfflineIntelligentRecognitionProcess(param)
first.start()
self.__listeningProcesses[msg.get("request_id")] = first

# Stop the offline analysis process
def stopOfflineProcess(self, msg, analysisType):
ps = self.__listeningProcesses.get(msg.get("request_id"))
if ps is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id"))
putQueue(self.__fbQueue, {
"feedback": message_feedback(msg.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[0],
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}, msg.get("request_id"))
return
ps.sendEvent({"command": "stop"})

# Start the image analysis process
def startImageProcess(self, msg, analysisType):
pp = self.__listeningProcesses.get(msg.get("request_id"))
if pp is not None:
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name)
# create and start the image recognition process
imaged = PhotosIntelligentRecognitionProcess(param)
imaged.start()
self.__listeningProcesses[msg.get("request_id")] = imaged

'''
Validate the Kafka message
'''

@staticmethod
def check_msg(msg):
try:
v = Validator(SCHEMA, allow_unknown=True)
result = v.validate(msg)
if not result:
logger.error("参数校验异常: {}", v.errors)
if msg.get("request_id"):
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0], v.errors)
return result
except ServiceException as s:
raise s
except Exception:
logger.error("参数校验异常: {}", format_exc())
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])

'''
Start the feedback thread used to send result messages
'''

def start_feedback_thread(self):
if self.__feedbackThread is None:
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__context)
self.__feedbackThread.setDaemon(True)
self.__feedbackThread.start()
start_time = time.time()
retry_count = 0
while True:
if self.__feedbackThread.is_alive():
break
retry_count += 1
if retry_count > 8:
self.__feedbackThread = None
logger.error("反馈线程异常重试失败!!!!!!")
break
if time.time() - start_time <= 3:
logger.error("反馈线程异常等待中")
time.sleep(1)
continue
logger.error("反馈线程异常重启中")
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__context)
self.__feedbackThread.setDaemon(True)
self.__feedbackThread.start()
start_time = time.time()
continue

'''
Online analysis logic
'''

def online(self, message, analysisType):
if "start" == message.get("command"):
if self.__resource_status or len(self.__listeningProcesses) >= int(self.__context["task"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])
self.startOnlineProcess(message, analysisType)
elif "stop" == message.get("command"):
self.stopOnlineProcess(message, analysisType)
else:
pass

def offline(self, message, analysisType):
if "start" == message.get("command"):
if self.__resource_status or len(self.__listeningProcesses) >= int(self.__context["task"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])
self.startOfflineProcess(message, analysisType)
elif "stop" == message.get("command"):
self.stopOfflineProcess(message, analysisType)
else:
pass

def image(self, message, analysisType):
if "start" == message.get("command"):
self.startImageProcess(message, analysisType)
else:
pass

def recording(self, message, analysisType):
if "start" == message.get("command"):
logger.info("开始录屏")
check_cpu(self.__base_dir, message.get("request_id"))
GPUtils.check_gpu_resource(message.get("request_id"))
self.startRecordingProcess(message, analysisType)
elif "stop" == message.get("command"):
self.stopRecordingProcess(message, analysisType)
else:
pass

# Start the screen-recording process
def startRecordingProcess(self, msg, analysisType):
if self.__listeningProcesses.get(msg.get("request_id")):
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
param = Param(self.__fbQueue, msg, analysisType, self.__base_dir, self.__context, self.__gpu_name)
srp = ScreenRecordingProcess(param)
srp.start()
self.__listeningProcesses[msg.get("request_id")] = srp

# Stop the screen-recording process
def stopRecordingProcess(self, msg, analysisType):
rdp = self.__listeningProcesses.get(msg.get("request_id"))
if rdp is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg.get("request_id"))
putQueue(self.__fbQueue, {
"recording": message_feedback(msg.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[0],
ExceptionType.NOT_REQUESTID_TASK_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}, msg.get("request_id"))
return
rdp.sendEvent({"command": "stop"})


def putQueue(queue, result, requestId, enable_ex=True):
try:
queue.put(result, timeout=10)
except Exception:
logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), requestId)
if enable_ex:
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
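check_msg above delegates to cerberus; a minimal sketch of that validation step with a toy schema (the real SCHEMA lives in common/YmlConstant.py and is not shown in this section):

    from cerberus import Validator

    toy_schema = {
        'request_id': {'type': 'string', 'required': True},
        'command': {'type': 'string', 'allowed': ['start', 'stop']},
    }
    v = Validator(toy_schema, allow_unknown=True)
    print(v.validate({'request_id': 'req-123', 'command': 'start', 'extra': 1}))  # True
    print(v.validate({'command': 'start'}), v.errors)  # False {'request_id': ['required field']}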

+ 0
- 0
service/__init__.py View File


BIN
service/__pycache__/Dispatcher.cpython-310.pyc View File


BIN
service/__pycache__/Dispatcher.cpython-38.pyc View File


BIN
service/__pycache__/__init__.cpython-310.pyc View File


BIN
service/__pycache__/__init__.cpython-38.pyc View File


+ 0
- 3
test/__init__.py View File

@@ -1,3 +0,0 @@

dd = {}
print(dd.get('name', 'aaa'))

BIN
test/__pycache__/__init__.cpython-38.pyc View File


+ 0
- 0
test/aliyun/__init__.py View File


BIN
test/aliyun/aaa.jpeg View File

Width: 1000  |  Height: 1000  |  Size: 189KB

+ 0
- 119
test/aliyun/ossdemo.py View File

@@ -1,119 +0,0 @@
# -*- coding: utf-8 -*-
import datetime

import cv2
import oss2
import time

from loguru import logger

'''
Image upload via OSS
1. Aliyun object storage OSS product page: https://help.aliyun.com/product/31815.html?spm=a2c4g.32006.0.0.8c546cf0BpkAQ2
2. Aliyun OSS SDK examples: https://help.aliyun.com/document_detail/32006.html?spm=a2c4g.32006.0.0.66874b78q1pwLa
3. Python SDK installation guide: https://help.aliyun.com/document_detail/85288.html?spm=a2c4g.32026.0.0.3f24417coCphWj
4. Install the SDK: pip install oss2
5. Install python-devel
The SDK uses the crcmod library to compute CRC checksums, and crcmod depends on Python.h. If that header is missing the SDK still installs, but crcmod's C extension fails to build, so uploads and downloads become very slow.
If the python-devel package is missing, install it first.
On Windows and Mac OS X the Python headers are installed together with Python, so nothing extra is needed.
On CentOS, RHEL and Fedora: sudo yum install python-devel
On Debian and Ubuntu: sudo apt-get install python-dev
6. Image domain: https://image.t-aaron.com/
'''


class AliyunOssSdk:

def __init__(self):
self.__client = None
self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9'
self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk'
self.__endpoint = 'http://oss-cn-shanghai.aliyuncs.com'
self.__bucket = 'ta-tech-image'

def get_oss_bucket(self):
if not self.__client:
auth = oss2.Auth(self.__access_key, self.__access_secret)
self.__client = oss2.Bucket(auth, self.__endpoint, self.__bucket, connect_timeout=30)

def upload_file(self, updatePath, fileByte):
logger.info("开始上传文件到oss!")
MAX_RETRIES = 3
retry_count = 0
while True:
try:
self.get_oss_bucket()
result = self.__client.put_object(updatePath, fileByte)
logger.info("上传文件到oss成功!")
return result
except Exception as e:
self.__client = None
retry_count += 1
time.sleep(1)
logger.info("上传文件到oss失败, 重试次数:{}", retry_count)
if retry_count > MAX_RETRIES:
logger.exception("上传文件到oss重试失败:{}", e)
raise e


YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
YMDHMSF = "%Y%m%d%H%M%S%f"

def generate_timestamp():
"""根据当前时间获取时间戳,返回整数"""
return int(time.time())

def now_date_to_str(fmt=None):
if fmt is None:
fmt = YY_MM_DD_HH_MM_SS
return datetime.datetime.now().strftime(fmt)

if __name__ == "__main__":
# initialise the OSS client
ossClient = AliyunOssSdk()
# read a local image
image_frame = cv2.imread('aaa.jpeg')
or_result, or_image = cv2.imencode(".jpg", image_frame)
# Image naming convention
# 1. base_dir: base folder name, supplied by Tuoheng (拓恒)
# 2. time_now: current time
# 3. current_frame: current frame number of the video
# 4. last_frame: when frames are skipped, the skip step; otherwise the same as current_frame
# 5. random_num: random timestamp string
# 6. mode_type: online for live-stream analysis, offline for analysis of a recorded video URL
# 7. requestId: request id, supplied by Tuoheng (拓恒)
# 8. image_type: OR for the original image, AI for the AI-annotated image
random_num = now_date_to_str(YMDHMSF)
time_now = now_date_to_str("%Y-%m-%d-%H-%M-%S")
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \
"-{requestId}_{image_type}.jpg"
image_name = image_format.format(
base_dir='PWL202304141639429276',
time_now=time_now,
current_frame='0',
last_frame='0',
random_num=random_num,
mode_type='offline',
requestId='111111111111111111',
image_type='OR')
result = ossClient.upload_file(image_name, or_image.tobytes())
# print('http status: {0}'.format(result.status))
# # Request ID: the unique identifier of this request; strongly recommended to add it to the log.
# print('request_id: {0}'.format(result.request_id))
# # ETag is specific to the put_object return value and identifies the object's content.
# print('ETag: {0}'.format(result.etag))
# # HTTP response headers.
# print('date: {0}'.format(result.headers['date']))
# print(result.__reduce__())
# After a successful upload, simply hand image_name over to Tuoheng (拓恒).
# To check whether the image was uploaded,
# the public URL can be built by prefixing the image domain:
image_url = 'https://image.t-aaron.com/' + image_name
print(image_url)
# Tuoheng only needs image_name


+ 0
- 128
test/aliyun/vod.py View File

@@ -1,128 +0,0 @@
# -*- coding: UTF-8 -*-
import json
import traceback
from aliyunsdkcore.client import AcsClient
from aliyunsdkvod.request.v20170321 import CreateUploadVideoRequest
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest
from vodsdk.AliyunVodUtils import *
from vodsdk.AliyunVodUploader import AliyunVodUploader
from vodsdk.UploadVideoRequest import UploadVideoRequest

# Fill in the AccessKey information
def init_vod_client(accessKeyId, accessKeySecret):
regionId = 'cn-shanghai'  # VOD service region
connectTimeout = 3  # connection timeout in seconds
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)
def create_upload_video(clt):
request = CreateUploadVideoRequest.CreateUploadVideoRequest()
request.set_Title('dddddd')
request.set_FileName('/home/thsw/chenyukun/video/111111.mp4')
request.set_Description('Video Description')
# //CoverURL示例:http://192.168.0.0/16/tps/TB1qnJ1PVXXXXXCXXXXXXXXXXXX-700-700.png
# request.set_CoverURL('<your Cover URL>')
# request.set_Tags('tag1,tag2')
# request.set_CateId(0)

# request.set_accept_format('JSON')
response = json.loads(clt.do_action_with_exception(request))
return response

try:
clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
uploadInfo = create_upload_video(clt)
print(json.dumps(uploadInfo, ensure_ascii=False, indent=4))

except Exception as e:
print(e)
print(traceback.format_exc())

# 刷新音视频凭证
# from aliyunsdkvod.request.v20170321 import RefreshUploadVideoRequest
# def refresh_upload_video(clt, videoId):
# request = RefreshUploadVideoRequest.RefreshUploadVideoRequest()
# request.set_VideoId(videoId)
# request.set_accept_format('JSON')
# return json.loads(clt.do_action_with_exception(request))
#
# try:
# clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# uploadInfo = refresh_upload_video(clt, "d6c419c33da245758f71e362b5ee8b56")
# print(json.dumps(uploadInfo, ensure_ascii=False, indent=4))
#
# except Exception as e:
# print(e)
# print(traceback.format_exc())
#
#
# # 获取播放地址
# def init_vod_client(accessKeyId, accessKeySecret):
# regionId = 'cn-shanghai' # 点播服务接入地域
# connectTimeout = 3 # 连接超时,单位为秒
# return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)
# def get_play_info(clt, videoId):
# request = GetPlayInfoRequest.GetPlayInfoRequest()
# request.set_accept_format('JSON')
# request.set_VideoId(videoId)
# request.set_AuthTimeout(3600*5)
# response = json.loads(clt.do_action_with_exception(request))
# return response
#
# try:
# clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# playInfo = get_play_info(clt, uploadInfo["VideoId"])
# print(json.dumps(playInfo, ensure_ascii=False, indent=4))
#
# except Exception as e:
# print(e)
# print(traceback.format_exc())
#
# # 获取视频播放凭证
# from aliyunsdkvod.request.v20170321 import GetVideoPlayAuthRequest
# def get_video_playauth(clt, videoId):
# request = GetVideoPlayAuthRequest.GetVideoPlayAuthRequest()
# request.set_accept_format('JSON')
# request.set_VideoId(videoId)
# request.set_AuthInfoTimeout(3000)
# response = json.loads(clt.do_action_with_exception(request))
# return response
#
# try:
# clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# playAuth = get_video_playauth(clt, uploadInfo["VideoId"])
# print(json.dumps(playAuth, ensure_ascii=False, indent=4))
#
# except Exception as e:
# print(e)
# print(traceback.format_exc())







# accessKeyId='LTAI5tSJ62TLMUb4SZuf285A'
# accessKeySecret='MWYynm30filZ7x0HqSHlU3pdLVNeI7'
# filePath="/home/thsw/chenyukun/video/111111.mp4"
# # 测试上传本地音视频
# def testUploadLocalVideo(accessKeyId, accessKeySecret, filePath, storageLocation=None):
# try:
# # 可以指定上传脚本部署的ECS区域。如果ECS区域和视频点播存储区域相同,则自动使用内网上传,上传更快且更省公网流量。
# # ecsRegionId ="cn-shanghai"
# # uploader = AliyunVodUploader(accessKeyId, accessKeySecret, ecsRegionId)
# # 不指定上传脚本部署的ECS区域。
# uploader = AliyunVodUploader(accessKeyId, accessKeySecret)
# uploadVideoRequest = UploadVideoRequest(filePath, 'aiOnLineVideo')
# # 可以设置视频封面,如果是本地或网络图片可使用UploadImageRequest上传图片到视频点播,获取到ImageURL
# #ImageURL示例:https://example.com/sample-****.jpg
# #uploadVideoRequest.setCoverURL('<your Image URL>')
# # 标签
# # uploadVideoRequest.setTags('taa')
# if storageLocation:
# uploadVideoRequest.setStorageLocation(storageLocation)
# videoId = uploader.uploadLocalVideo(uploadVideoRequest)
# print("videoId: %s" % (videoId))
#
# except AliyunVodException as e:
# print(e)
# testUploadLocalVideo(accessKeyId, accessKeySecret, filePath)

+ 0
- 164
test/aliyun/vodTest.py View File

@@ -1,164 +0,0 @@
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import sys

from typing import List

from alibabacloud_vod20170321.client import Client as vod20170321Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_darabonba_env.client import Client as EnvClient
from alibabacloud_vod20170321 import models as vod_20170321_models
from alibabacloud_tea_console.client import Client as ConsoleClient
from alibabacloud_tea_util.client import Client as UtilClient
from vodsdk.AliyunVodUtils import *
from vodsdk.AliyunVodUploader import AliyunVodUploader
from vodsdk.UploadVideoRequest import UploadVideoRequest

class Sample:
def __init__(self):
pass

@staticmethod
def initialization(
region_id: str,
) -> vod20170321Client:
config = open_api_models.Config()
# Your AccessKey ID
config.access_key_id = EnvClient.get_env('LTAI5tSJ62TLMUb4SZuf285A')
# Your AccessKey Secret
config.access_key_secret = EnvClient.get_env('MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# Your region ID
config.region_id = region_id
return vod20170321Client(config)

@staticmethod
def get_play_info_sample(
client: vod20170321Client,
video_id: str,
) -> vod_20170321_models.GetPlayInfoResponse:
request = vod_20170321_models.GetPlayInfoRequest()
# 视频ID。
request.video_id = video_id
response = client.get_play_info(request)
return response

@staticmethod
async def get_play_info_sample_async(
client: vod20170321Client,
video_id: str,
) -> vod_20170321_models.GetPlayInfoResponse:
request = vod_20170321_models.GetPlayInfoRequest()
# 视频ID。
request.video_id = video_id
response = await client.get_play_info_async(request)
return response

@staticmethod
def main(
args: List[str],
) -> None:
try:
region_id = args[0]
video_id = args[1]
client = Sample.initialization(region_id)
response_get_play_info = Sample.get_play_info_sample(client, video_id)
ConsoleClient.log(UtilClient.to_jsonstring(UtilClient.to_map(response_get_play_info)))
except Exception as error:
ConsoleClient.log(error.message)

@staticmethod
async def main_async(
args: List[str],
) -> None:
try:
region_id = args[0]
video_id = args[1]
client = Sample.initialization(region_id)
response_get_play_info = await Sample.get_play_info_sample_async(client, video_id)
ConsoleClient.log(UtilClient.to_jsonstring(UtilClient.to_map(response_get_play_info)))
except Exception as error:
ConsoleClient.log(error.message)

accessKeyId='LTAI5tSJ62TLMUb4SZuf285A'
accessKeySecret='MWYynm30filZ7x0HqSHlU3pdLVNeI7'
filePath="/home/thsw/chenyukun/video/111111.mp4"
# Test uploading a local audio/video file
def testUploadLocalVideo(accessKeyId, accessKeySecret, filePath, storageLocation=None):
try:
# The ECS region where the upload script runs can be specified. If it matches the VOD storage region, the internal network is used automatically, which is faster and saves public bandwidth.
# ecsRegionId ="cn-shanghai"
# uploader = AliyunVodUploader(accessKeyId, accessKeySecret, ecsRegionId)
# Without specifying the ECS region:
uploader = AliyunVodUploader(accessKeyId, accessKeySecret)
uploadVideoRequest = UploadVideoRequest(filePath, 'aiOnLineVideo')
# A video cover can be set; a local or remote image can be uploaded to VOD via UploadImageRequest to obtain an ImageURL
# ImageURL example: https://example.com/sample-****.jpg
#uploadVideoRequest.setCoverURL('<your Image URL>')
# tags
# uploadVideoRequest.setTags('taa')
if storageLocation:
uploadVideoRequest.setStorageLocation(storageLocation)
videoId = uploader.uploadLocalVideo(uploadVideoRequest)
print("videoId: %s" % (videoId))

except AliyunVodException as e:
print(e)
# testUploadLocalVideo(accessKeyId, accessKeySecret, filePath)
from alibabacloud_tea_util import models as util_models
import time
def get_video_url(video_id):
config = open_api_models.Config(access_key_id=accessKeyId, access_key_secret=accessKeySecret)
config.endpoint = f'vod.aliyuncs.com'
client = vod20170321Client(config)
get_play_info_request = vod_20170321_models.GetPlayInfoRequest(video_id=video_id)
runtime = util_models.RuntimeOptions()
start = time.time()
while True:
try:
# Print the API response yourself when running this code
response = client.get_play_info_with_options(get_play_info_request, runtime)
play_url = response.body.play_info_list.play_info[0].play_url
return play_url
except Exception as error:
print("bbbbbbbbbbbbbb")
print(error)
time.sleep(5)
end = time.time()
result = int(end - start)
if result > 1200:
print("aaaaaaaa")
raise error
import json
import traceback
from aliyunsdkcore.client import AcsClient
from aliyunsdkvod.request.v20170321 import CreateUploadVideoRequest
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest
from vodsdk.AliyunVodUtils import *
from vodsdk.AliyunVodUploader import AliyunVodUploader
from vodsdk.UploadVideoRequest import UploadVideoRequest
# Get the playback URL
def init_vod_client(accessKeyId, accessKeySecret):
regionId = 'cn-shanghai'  # VOD service region
connectTimeout = 3  # connection timeout in seconds
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)
def get_play_info(clt, videoId):
request = GetPlayInfoRequest.GetPlayInfoRequest()
request.set_accept_format('JSON')
request.set_VideoId(videoId)
request.set_AuthTimeout(3600*5)
response = json.loads(clt.do_action_with_exception(request))
return response


if __name__ == '__main__':
# testUploadLocalVideo(accessKeyId, accessKeySecret, "/home/thsw/chenyukun/video/百水河7.mp4")
# print(Sample.get_play_info_sample(Sample.initialization('cn-shanghai'), 'dfaf3d140f714d9889562bff10a6f69a'))
# print(get_video_url('3bb41d547bad44a7a9202017b8025838'))
try:
clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
playInfo = get_play_info(clt, "43e00a1a9d334c30b743d1cd6138207a")
print(playInfo["PlayInfoList"]["PlayInfo"][0]["PlayURL"])
print(json.dumps(playInfo, ensure_ascii=False, indent=4))

except Exception as e:
print("HTTP Status: 403" not in str(e))

+ 0
- 130
test/aliyun/voddemo.py View File

@@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-

import time

import json

from aliyunsdkcore.client import AcsClient
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest
from vodsdk.AliyunVodUtils import *
from vodsdk.AliyunVodUploader import AliyunVodUploader
from vodsdk.UploadVideoRequest import UploadVideoRequest

'''
Video upload via VOD
1. Aliyun VOD documentation: https://help.aliyun.com/product/29932.html?spm=5176.8413026.J_3895079540.5.1b4a1029mXvncc
2. Aliyun OSS SDK examples: https://help.aliyun.com/document_detail/64148.html?spm=a2c4g.64148.0.0.5ae54150jUecEU
4. Install the SDKs:
python -m pip install aliyun-python-sdk-core -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install aliyun-python-sdk-live -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install aliyun-python-sdk-core-v3 -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install aliyun-python-sdk-vod -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install alibabacloud_vod20170321 -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install oss2 -i https://pypi.tuna.tsinghua.edu.cn/simple
python -m pip install voduploadsdk -i https://pypi.tuna.tsinghua.edu.cn/simple
5. Video domain: https://vod.play.t-aaron.com/
'''


class AliyunVodSdk:

def __init__(self):
self.__client = None
self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9'
self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk'
self.__regionId = "cn-shanghai"
self.__cateId = '1000468340'

def init_vod_client(self):
return AcsClient(self.__access_key, self.__access_secret, self.__regionId, auto_retry=True, max_retry_time=3,
timeout=5)

'''
Get the playback URL for a videoId
'''

def get_play_info(self, videoId):
logger.info("开始获取视频地址,videoId:{}", videoId)
start = time.time()
while True:
try:
clt = self.init_vod_client()
request = GetPlayInfoRequest.GetPlayInfoRequest()
request.set_accept_format('JSON')
request.set_VideoId(videoId)
request.set_AuthTimeout(3600 * 5)
response = json.loads(clt.do_action_with_exception(request))
play_url = response["PlayInfoList"]["PlayInfo"][0]["PlayURL"]
logger.info("获取视频地址成功,视频地址: {}", play_url)
return play_url
except Exception as e:
logger.error("获取视频地址失败,5秒后重试, requestId: {}")
time.sleep(5)
current_time = time.time()
if "HTTP Status: 403" not in str(e):
logger.exception("获取视频地址失败: {}", e)
raise e
if "HTTP Status: 403" in str(e) and ("UploadFail" in str(e) or "TranscodeFail" in str(e)):
self.logger.exception("获取视频地址失败: {}", e)
raise e
diff_time = current_time - start
if diff_time > 60 * 60 * 2:
logger.exception("获取视频地址失败超时异常: {},超时时间:{}", e, diff_time)
raise e

def upload_local_video(self, filePath, file_title, storageLocation=None):
logger.info("开始执行vod视频上传, filePath: {}", filePath)
uploader = AliyunVodUploader(self.__access_key, self.__access_secret)
uploadVideoRequest = UploadVideoRequest(filePath, file_title)
uploadVideoRequest.setCateId(self.__cateId)
if storageLocation:
uploadVideoRequest.setStorageLocation(storageLocation)
MAX_RETRIES = 3
retry_count = 0
while True:
try:
result = uploader.uploadLocalVideo(uploadVideoRequest)
logger.info("vod视频上传成功, videoId:{}", result.get("VideoId"))
return result.get("VideoId")
except AliyunVodException as e:
retry_count += 1
time.sleep(3)
logger.error("vod视频上传失败,重试次数:{}", retry_count)
if retry_count >= MAX_RETRIES:
logger.exception("vod视频上传重试失败: {}", e)
raise e


YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
YMDHMSF = "%Y%m%d%H%M%S%f"


def generate_timestamp():
"""根据当前时间获取时间戳,返回整数"""
return int(time.time())


def now_date_to_str(fmt=None):
if fmt is None:
fmt = YY_MM_DD_HH_MM_SS
return datetime.datetime.now().strftime(fmt)


if __name__ == "__main__":
# Naming of the local original video
random_time = now_date_to_str(YMDHMSF)
# # For offline videos, replace _on_or_ with _off_or_
# orFilePath = "%s%s%s%s%s" % ('本地路径', random_time, "_on_or_", 'requestId', ".mp4")
# # Naming of the local AI-annotated video
# # For offline videos, replace _on_ai_ with _off_ai_
# aiFilePath = "%s%s%s%s%s" % ('本地路径', random_time, "_on_ai_", 'requestId', ".mp4")
# filePath = "%s%s%s%s%s" % ('D:\\shipin\\', random_time, "_on_ai_", '11111111', ".mp4")
filePath = 'D:\\shipin\\777.mp4'
codClinet = AliyunVodSdk()
result = codClinet.upload_local_video(filePath, 'aiOnLineVideo1')
print(result)
url = codClinet.get_play_info(result)
print(url)




+ 0
- 124
test/aliyun/vodtest1.py View File

@@ -1,124 +0,0 @@
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import sys

from typing import List
from Tea.core import TeaCore

from alibabacloud_vod20170321.client import Client as Vod20170321Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_darabonba_env.client import Client as EnvClient
from alibabacloud_vod20170321 import models as vod_20170321_models
from alibabacloud_tea_console.client import Client as ConsoleClient
from alibabacloud_tea_util.client import Client as UtilClient


class Sample:
"""
write your Darabonba code here...
"""
def __init__(self):
pass

@staticmethod
def init_vod_client(
access_key_id: str,
access_key_secret: str,
region_id: str,
) -> Vod20170321Client:
"""
使用AK&SK初始化账号Client
"""
config = open_api_models.Config()
config.access_key_id = access_key_id
config.access_key_secret = access_key_secret
config.region_id = region_id
return Vod20170321Client(config)

@staticmethod
def main(
args: List[str],
) -> None:
client = Sample.init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7', args[0])
# 1. Get the upload address and credential and create the video record
create_upload_video_request = vod_20170321_models.CreateUploadVideoRequest(
title=args[1],
file_name=args[2]
)
create_upload_video_response = client.create_upload_video(create_upload_video_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(create_upload_video_response)))
# media id
upload_video_id = create_upload_video_response.body.video_id
ConsoleClient.log(upload_video_id)
# If the video file is very large and the upload times out, the credential can be refreshed and the upload resumed
refresh_upload_video_request = vod_20170321_models.RefreshUploadVideoRequest(
video_id=upload_video_id
)
refresh_upload_video_reponse = client.refresh_upload_video(refresh_upload_video_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(refresh_upload_video_reponse)))
# # 2.oss视频文件上传,需要用户实现
# # 3.上传过程中,获取媒体上传详情
# get_upload_details_request = vod_20170321_models.GetUploadDetailsRequest(
# media_ids=upload_video_id
# )
# get_upload_details_reponse = client.get_upload_details(get_upload_details_request)
# ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(get_upload_details_reponse)))
# # 4.媒体上传完成之后,可以获取媒体播放信息进行播放
# # 4.1 通过播放凭证播放
# get_play_info_request = vod_20170321_models.GetPlayInfoRequest(
# video_id=upload_video_id
# )
# get_play_info_reponse = client.get_play_info(get_play_info_request)
# ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(get_play_info_reponse)))
# # 4.2 通过播放地址播放
# get_video_play_auth_request = vod_20170321_models.GetVideoPlayAuthRequest(
# video_id=upload_video_id
# )
# get_video_play_auth_reponse = client.get_video_play_auth(get_video_play_auth_request)
# ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(get_video_play_auth_reponse)))

@staticmethod
async def main_async(
args: List[str],
) -> None:
client = Sample.init_vod_client(EnvClient.get_env('ACCESS_KEY_ID'), EnvClient.get_env('ACCESS_KEY_SECRET'), args[0])
# 1.获取视频上传地址和凭证,并生成视频信息
create_upload_video_request = vod_20170321_models.CreateUploadVideoRequest(
title=args[1],
file_name=args[2]
)
create_upload_video_response = await client.create_upload_video_async(create_upload_video_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(create_upload_video_response)))
# 媒体id
upload_video_id = create_upload_video_response.body.video_id
ConsoleClient.log(upload_video_id)
# 如果视频文件过大,上传超时后可以刷新视频凭证,然后继续上传
refresh_upload_video_request = vod_20170321_models.RefreshUploadVideoRequest(
video_id=upload_video_id
)
refresh_upload_video_reponse = await client.refresh_upload_video_async(refresh_upload_video_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(refresh_upload_video_reponse)))
# 2.oss视频文件上传,需要用户实现
# 3.上传过程中,获取媒体上传详情
get_upload_details_request = vod_20170321_models.GetUploadDetailsRequest(
media_ids=upload_video_id
)
get_upload_details_reponse = await client.get_upload_details_async(get_upload_details_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(get_upload_details_reponse)))
# 4.媒体上传完成之后,可以获取媒体播放信息进行播放
# 4.1 通过播放凭证播放
get_play_info_request = vod_20170321_models.GetPlayInfoRequest(
video_id=upload_video_id
)
get_play_info_reponse = await client.get_play_info_async(get_play_info_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(get_play_info_reponse)))
# 4.2 通过播放地址播放
get_video_play_auth_request = vod_20170321_models.GetVideoPlayAuthRequest(
video_id=upload_video_id
)
get_video_play_auth_reponse = await client.get_video_play_auth_async(get_video_play_auth_request)
ConsoleClient.log(UtilClient.to_jsonstring(TeaCore.to_map(get_video_play_auth_reponse)))


if __name__ == '__main__':
Sample.main(['cn-shanghai', "/home/thsw/chenyukun/video/111111.mp4", "/home/thsw/chenyukun/video/111111.mp4"])

+ 0
- 29
test/aliyun/vodtest2.py View File

@@ -1,29 +0,0 @@
import json
import traceback
from aliyunsdkcore.client import AcsClient
from aliyunsdkvod.request.v20170321 import CreateUploadVideoRequest
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest
from vodsdk.AliyunVodUtils import *
from vodsdk.AliyunVodUploader import AliyunVodUploader
from vodsdk.UploadVideoRequest import UploadVideoRequest
# Get the playback URL
def init_vod_client(accessKeyId, accessKeySecret):
regionId = 'cn-shanghai'  # VOD service region
connectTimeout = 3  # connection timeout in seconds
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)
def get_play_info(clt, videoId):
request = GetPlayInfoRequest.GetPlayInfoRequest()
request.set_accept_format('JSON')
request.set_VideoId(videoId)
request.set_AuthTimeout(3600*5)
response = json.loads(clt.do_action_with_exception(request))
return response
try:
clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
playInfo = get_play_info(clt, "f2bd66de44f742a5bb7d603c295dc47f")
print(json.dumps(playInfo, ensure_ascii=False, indent=4))

except Exception as e:
print(str(e))
print("403" in str(e))
# print(traceback.format_exc())

+ 0  - 55   test/collections/ChainMap.py

@@ -1,55 +0,0 @@



"""
1、ChainMap是什么
ChainMap最基本的使用,可以用来合并两个或者更多个字典,当查询的时候,从前往后依次查询。
ChainMap:将多个字典视为一个,解锁Python超能力。
ChainMap是由Python标准库提供的一种数据结构,允许你将多个字典视为一个。换句话说:ChainMap是一个基于多dict的可更新的视图,它的行为就像一个普通的dict。
ChainMap类用于快速链接多个映射,以便将它们视为一个单元。它通常比创建新字典和多次调用update()快得多。
你以前可能从来没有听说过ChainMap,你可能会认为ChainMap的使用情况是非常特定的。坦率地说,你是对的。
我知道的用例包括:
通过多个字典搜索
提供链缺省值
经常计算字典子集的性能关键的应用程序
2、特性
1)找到一个就不找了:这个列表是按照第一次搜索到最后一次搜索的顺序组织的,搜索查询底层映射,直到一个键被找到。
2)更新原始映射:不同的是,写,更新和删除只操作第一个映射。
3)支持所有常用字典方法。
简而言之ChainMap:将多个字典视为一个,解锁Python超能力。
Python标准库中的集合模块包含许多为性能而设计的实用的数据结构。著名的包括命名元组或计数器。
今天,通过实例,我们来看看鲜为人知的ChainMap。通过浏览具体的示例,我希望给你一个提示,关于在更高级的Python工作中使用ChainMap将如何从中受益。
"""

from collections import ChainMap
baseline = {'music': 'bach', 'art': 'rembrandt'}
adjustments = {'art': 'van gogh', 'opera': 'carmen'}
test = ChainMap(adjustments, baseline)
print(test)
test1 = list(ChainMap(adjustments, baseline))
print(test1)
# duplicate keys across mappings are all kept; nothing is deduplicated
dcic1 = {'label1': '11', 'label2': '22'}
dcic2 = {'label2': '22', 'label3': '33'}
dcic3 = {'label4': '44', 'label5': '55'}
last = ChainMap(dcic1, dcic2, dcic3)
print(last)
print(last['label2'])
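# A quick illustration of property 2) above: writes only affect the first mapping in the chain.
last['label2'] = '99'
print(dcic1)  # {'label1': '11', 'label2': '99'}
print(dcic2)  # unchanged: {'label2': '22', 'label3': '33'}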

"""
new_child()方法
用法:new_child(m=None)
返回一个新的ChainMap类,包含了一个新映射(map),后面跟随当前实例的全部映射map。
如果m被指定,它就成为不同新的实例,就是在所有映射前加上 m,如果没有指定,就加上一个空字典,
这样的话一个 d.new_child() 调用等价于ChainMap({}, *d.maps) 。这个方法用于创建子上下文,不改变任何父映射的值。
"""
aa = last.new_child(m={'key_new': 888})
print(aa)

"""
parents属性
属性返回一个新的ChainMap包含所有的当前实例的映射,除了第一个。
这样可以在搜索的时候跳过第一个映射。使用的场景类似在 nested scopes 嵌套作用域中使用nonlocal关键词。
用例也可以类比内建函数super() 。一个d.parents 的引用等价于ChainMap(*d.maps[1:])。
"""
print(aa.parents)

+ 0  - 111   test/collections/Counter.py

@@ -1,111 +0,0 @@
import collections
import re
from collections import Counter

print(collections.__all__)
"""
['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict', 'ChainMap']
这个模块实现了特定目标的容器,以提供Python标准内建容器dict , list , set , 和tuple 的替代选择。
deque: 类似列表(list)的容器,实现了在两端快速添加(append)和弹出(pop)
defaultdict: 字典的子类,提供了一个工厂函数,为字典查询提供一个默认值
namedtuple(): 创建命名元组子类的工厂函数,生成可以使用名字来访问元素内容的tuple子类
UserDict: 封装了字典对象,简化了字典子类化
UserList: 封装了列表对象,简化了列表子类化
UserString: 封装了字符串对象,简化了字符串子类化(中文版翻译有误)
Counter: 字典的子类,提供了可哈希对象的计数功能
OrderedDict: 字典的子类,保存了他们被添加的顺序,有序字典
ChainMap: 类似字典(dict)的容器类,将多个映射集合到一个视图里面
"""

text = 'remove an existing key one level down remove an existing key one level down'
# \w matches word characters: a-z, A-Z, 0-9, underscore, and CJK characters
words = re.findall(r'\w+', text)
print(Counter(words).most_common(10))

# Count the words in a list
cnt = Counter()
for word in ['red', 'blue', 'red', 'green', 'blue', 'blue']:
cnt[word] += 1
print(cnt)

# # The loop above is a bit cumbersome; passing the list directly to Counter is simpler
L = ['red', 'blue', 'red', 'green', 'blue', 'blue']
print(Counter(L))

# Elements are counted from an iterable or initialized from another mapping (or Counter):
# counting characters in a string
print(Counter('gallahad'))

# counting from a dict of counts
print(Counter({'red': 4, 'blue': 2}))

# counting from keyword arguments
print(Counter(cats=4, dogs=8))

"""
1、elements()
描述:返回一个迭代器,其中每个元素将重复出现计数值所指定次。 元素会按首次出现的顺序返回。 如果一个元素的计数值小于1,elements() 将会忽略它。
语法:elements( )
参数:无
"""
c = Counter(a=4, b=2, c=0, d=-2)
print(c)
print(list(c.elements()))
print(sorted(c.elements()))
c = Counter(a=4, b=2, c=0, d=5)
print(list(c.elements()))

"""
2、most_common()
返回一个列表,其中包含n个最常见的元素及出现次数,按常见程度由高到低排序。
如果n被省略或为None,most_common() 将返回计数器中的所有元素,
计数值相等的元素按首次出现的顺序排序,经常用来计算top词频的词语。
"""
print(Counter('abracadabra').most_common(3))
print(Counter('abracadabra').most_common(5))

"""
3、subtract()
从迭代对象或映射对象减去元素。像dict.update() 但是是减去,而不是替换。输入和输出都可以是0或者负数。
"""
c = Counter(a=4, b=2, c=0, d=-2)
d = Counter(a=1, b=2, c=3, d=4)
c.subtract(d)
print(c)

# subtract one occurrence each of a, b, c and d
str0 = Counter('aabbccdde')
str0.subtract('abcd')
print(str0)

"""
4、字典方法
通常字典方法都可用于Counter对象,除了有两个方法工作方式与字典并不相同。
fromkeys(iterable)
这个类方法没有在Counter中实现。
update([iterable-or-mapping])
从迭代对象计数元素或者从另一个映射对象 (或计数器) 添加。 像 dict.update() 但是是加上,而不是替换。
另外,迭代对象应该是序列元素,而不是一个 (key, value) 对。
"""
c = Counter(a=4, b=2, c=0, d=-2)
print(sum(c.values()))
print(list(c))
print(set(c))
print(dict(c))
print(c.items())
print(+c) # unary plus drops zero and negative counts
c.clear()
print(c)
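# A quick illustration of update() from section 4 above: counts are added, not replaced.
c = Counter('aab')
c.update('abc')
print(c)  # Counter({'a': 3, 'b': 2, 'c': 1})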
"""
5、数学操作
这个功能非常强大,提供了几个数学操作,可以结合 Counter 对象,以生产 multisets (计数器中大于0的元素)。
加和减,结合计数器,通过加上或者减去元素的相应计数。交集和并集返回相应计数的最小或最大值。
每种操作都可以接受带符号的计数,但是输出会忽略掉结果为零或者小于零的计数。
"""
c = Counter(a=3, b=1)
d = Counter(a=1, b=2)
print(c+d)
print(c - d)
print(c & d)
print(c | d)

+ 0  - 35   test/collections/OrderedDict.py

@@ -1,35 +0,0 @@
from collections import OrderedDict


"""
1、popitem
语法:popitem(last=True)
功能:有序字典的 popitem() 方法移除并返回一个 (key, value) 键值对。
如果 last 值为真,则按 LIFO 后进先出的顺序返回键值对,否则就按 FIFO 先进先出的顺序返回键值对。
"""
d = OrderedDict.fromkeys('abcde')
print(d)
print(d.popitem())
# # with last=False, the first item is popped
print(d.popitem(last=False))
print(d.popitem(last=True))

"""
2、move_to_end
"""
d = OrderedDict.fromkeys('abcde')
d.move_to_end('b')
print(d)
d.move_to_end('b', last=False)
print(d)

"""
3、reversed()
相对于通常的映射方法,有序字典还另外提供了逆序迭代的支持,通过reversed()。
"""
d = OrderedDict.fromkeys('acbde')
print(d)
print(list(reversed(d)))

c = OrderedDict({'a': 1, 'c': 2, 'b': 3})
print(c)

+ 0  - 30   test/collections/__init__.py

@@ -1,30 +0,0 @@
# import collections
#
# print(collections.__all__)
# print(dir(collections))
#
# d = {}
# d.setdefault(2, []).append(23)
# d.setdefault(2, []).append(11)
# print(d)
# d.setdefault(2, []).append(23)
#
# # Define a curry-style function
# x = lambda y: [
# print(y),
# print("..."),
# x
# ][-1]
# print(x(1)(2))
#
# import heapq
# print(heapq.nlargest(1, [
# {'S': 5, 'H': 3},
# {'S': 7, 'H': 1},
# {'S': 0, 'H': 2}
# ], key=lambda x: x['S']))
#
# s = [1, [2, [3, [4, [5, 6], 7], 8], (9, 0)]]
# f = lambda x: [y for _x in x for y in f(_x)] if isinstance(x, (list, tuple)) else [x]
#
# print(f(s)) # [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]

+ 0  - 66   test/collections/defaultdict.py

@@ -1,66 +0,0 @@


"""
默认字典-defaultdict
在Python字典中收集数据通常是很有用的。
在字典中获取一个 key 有两种方法, 第一种 get , 第二种 通过 [] 获取.
使用dict时,如果引用的Key不存在,就会抛出KeyError。如果希望key不存在时,返回一个默认值,就可以用defaultdict。
当我使用普通的字典时,用法一般是dict={},添加元素的只需要dict[element] =value即,调用的时候也是如此,
dict[element] = xxx,但前提是element字典里,如果不在字典里就会报错
这时defaultdict就能排上用场了,defaultdict的作用是在于,当字典里的key不存在但被查找时,
返回的不是keyError而是一个默认值,这个默认值是什么呢,下面会说

1、基础介绍
defaultdict([default_factory[, ...]])
返回一个新的类似字典的对象。 defaultdict是内置dict类的子类。它重载了一个方法并添加了一个可写的实例变量。
其余的功能与dict类相同,此处不再重复说明。
本对象包含一个名为default_factory的属性,构造时,第一个参数用于为该属性提供初始值,默认为 None。
所有其他参数(包括关键字参数)都相当于传递给 dict 的构造函数。
defaultdict 对象除了支持标准 dict 的操作,还支持以下方法作为扩展:
__missing__(key)
如果 default_factory 属性为 None,则调用本方法会抛出 KeyError 异常,附带参数 key。
如果 default_factory 不为 None,则它会被(不带参数地)调用来为 key 提供一个默认值,
这个值和 key 作为一对键值对被插入到字典中,并作为本方法的返回值返回。
如果调用 default_factory 时抛出了异常,这个异常会原封不动地向外层传递。
在无法找到所需键值时,本方法会被 dict 中的 __getitem__() 方法调用。
无论本方法返回了值还是抛出了异常,都会被 __getitem__() 传递。
注意,__missing__() 不会 被 __getitem__() 以外的其他方法调用。
意味着 get() 会像正常的 dict 那样返回 None,而不是使用 default_factory。
"""
from collections import defaultdict

"""
2、示例介绍
使用 list 作为 default_factory,很轻松地将(键-值对组成的)序列转换为(键-列表组成的)字典
"""
s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
d = defaultdict(list)
for k, v in s:
d[k].append(v)
print(sorted(d.items()))

"""
当每个键第一次遇见时,它还没有在字典里面,所以自动创建该条目,即调用default_factory方法,
返回一个空的 list。 list.append() 操作添加值到这个新的列表里。当再次存取该键时,就正常操作,list.append()
添加另一个值到列表中。这个计数比它的等价方法dict.setdefault()要快速和简单:
"""
s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
d = {}
for k, v in s:
d.setdefault(k, []).append(v)
print(sorted(d.items()))
# Setting default_factory to int makes defaultdict useful for counting (like a bag or multiset in other languages):
s = 'mississippi'
d = defaultdict(int)
for k in s:
d[k] += 1
print(sorted(d.items()))

# Setting default_factory to set makes defaultdict useful for building a dictionary of sets:
s = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]
d = defaultdict(set)
for k, v in s:
d[k].add(v)
print(sorted(d.items()))
print(d['aaa'])
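# As noted in the __missing__ description above, get() does not call default_factory
# and does not insert the key.
print(d.get('bbb'))  # None
print('bbb' in d)    # False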


+ 0  - 148   test/collections/deque.py

@@ -1,148 +0,0 @@

"""
deque
双端队列,可以快速的从另外一侧追加和推出对象,deque是一个双向链表,
针对list连续的数据结构插入和删除进行优化。它提供了两端都可以操作的序列,
这表示在序列的前后你都可以执行添加或删除操作。双向队列(deque)对象支持以下方法:
"""
from collections import deque

"""
1、append()
添加 x 到右端。
"""
d = deque('ghi')
d.append('j')
print(d)

"""
2、appendleft()
添加 x 到左端。
"""
d.appendleft('f')
print(d)

"""
3、clear()
移除所有元素,使其长度为0.
"""
d = deque('ghi')
d.clear()
print(d)

"""
4、copy()
创建一份浅拷贝。
"""
d = deque('xiaoweuge')
y = d.copy()
print(y)

"""
5、count()
计算 deque 中元素等于 x 的个数。
"""
d = deque('xiaoweuge-shuai')
print(d.count('a'))

"""
6、extend()
扩展deque的右侧,通过添加iterable参数中的元素。
"""
a = deque('abc')
b = deque('cd')
a.extend(b)
print(a)

# difference from append: append adds the whole deque as a single element
a = deque('abc')
b = deque('cd')
a.append(b)
print(a)

"""
7、extendleft()
扩展deque的左侧,通过添加iterable参数中的元素。注意,左添加时,在结果中iterable参数中的顺序将被反过来添加。
"""
a = deque('abc')
b = deque('cd')
a.extendleft(b)
print(a)

"""
8、index()
返回 x 在 deque 中的位置(在索引 start 之后,索引 stop 之前)。 返回第一个匹配项,如果未找到则引发 ValueError。
"""
d = deque('xiaoweuge')
print(d.index('w'))

"""
9、insert()
在位置 i 插入 x 。
如果插入会导致一个限长 deque 超出长度 maxlen 的话,就引发一个 IndexError。
"""
a = deque('abc')
a.insert(1, 'X')
print(a)

"""
10、pop()
移去并且返回一个元素,deque 最右侧的那一个。 如果没有元素的话,就引发一个 IndexError。
"""
d = deque('abc')
print(d.pop())

"""
11、popleft()
移去并且返回一个元素,deque 最左侧的那一个。 如果没有元素的话,就引发 IndexError。
"""
d = deque('abc')
print(d.popleft())

"""
12、remove(value)
移除找到的第一个 value。 如果没有的话就引发 ValueError。
"""
a = deque('abca')
a.remove('a')
print(a)

"""
13、reverse()
将deque逆序排列。返回 None 。
"""
#逆序排列
d = deque('ghi') # 创建一个deque
print(list(reversed(d)))

"""
14、rotate(n=1)
向右循环移动 n 步。 如果 n 是负数,就向左循环。
如果deque不是空的,向右循环移动一步就等价于 d.appendleft(d.pop()) , 向左循环一步就等价于 d.append(d.popleft()) 。
"""
# rotate one step to the right
d = deque('ghijkl')
d.rotate(1)
print(d)

# rotate one step back to the left
d.rotate(-1)
print(d)

# a clearer example
x = deque('12345')
x.rotate()
print(x)

d = deque(['12',' av', 'cd'])
d.rotate(1)
print(d)
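# Checking the equivalence stated in 14: rotate(1) behaves like appendleft(pop()).
d1, d2 = deque('abcde'), deque('abcde')
d1.rotate(1)
d2.appendleft(d2.pop())
print(d1 == d2)  # True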

"""
15、maxlen
Deque的最大尺寸,如果没有限定的话就是 None 。
"""
d=deque(maxlen=10)
for i in range(20):
d.append(i)
print(d)

+ 0  - 83   test/collections/namedtuple.py

@@ -1,83 +0,0 @@


from collections import namedtuple

"""
可命名元组-namedtuple
生成可以使用名字来访问元素内容的tuple子类,命名元组赋予每个位置一个含义,提供可读性和自文档性。
它们可以用于任何普通元组,并添加了通过名字获取值的能力,通过索引值也是可以的。
1、参数介绍
namedtuple(typename,field_names,*,verbose=False, rename=False, module=None)
1)typename:该参数指定所创建的tuple子类的类名,相当于用户定义了一个新类。
2)field_names:该参数是一个字符串序列,如 ['x','y']。此外,field_names 也可直接使用单个字符串代表所有字段名,多个字段名用空格、逗号隔开,如 'x y' 或 'x,y'。任何有效的 Python 标识符都可作为字段名(不能以下画线开头)。有效的标识符可由字母、数字、下画线组成,但不能以数字、下面线开头,也不能是关键字(如 return、global、pass、raise 等)。
3)rename:如果将该参数设为 True,那么无效的字段名将会被自动替换为位置名。例如指定 ['abc','def','ghi','abc'],它将会被替换为 ['abc', '_1','ghi','_3'],这是因为 def 字段名是关键字,而 abc 字段名重复了。
4)verbose:如果该参数被设为 True,那么当该子类被创建后,该类定义就被立即打印出来。
5)module:如果设置了该参数,那么该类将位于该模块下,因此该自定义类的 __module__ 属性将被设为该参数值。
"""
# Define the named tuple class Point
Point = namedtuple('Point', ['x', 'y'])
# Instantiate a Point; positional and keyword arguments both work
p = Point(11, y=22)
# Elements are accessible by index, just like a regular tuple
print(p[0] + p[1])

# Tuple unpacking works by position
a, b = p
print(a, b)

# Elements can also be accessed by field name
print(p.x + p.y)
print(p)
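# A quick illustration of the rename parameter described above: invalid or duplicate
# field names are replaced with positional names.
Renamed = namedtuple('Renamed', ['abc', 'def', 'ghi', 'abc'], rename=True)
print(Renamed._fields)  # ('abc', '_1', 'ghi', '_3')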

"""
备注: 在Python中,带有前导下划线的方法通常被认为是“私有的”。
但是,namedtuple提供的其他方法(如._asdict()、._make()、._replace()等)是公开的。
除了继承元组的方法,命名元组还支持三个额外的方法和两个属性。为了防止字段名冲突,方法和属性以下划线开始。
"""
"""
_make(iterable)
类方法从存在的序列或迭代实例创建一个新实例。
"""
t = [14, 55]
print(Point._make(t))

"""
_asdict()
返回一个新的dict ,它将字段名称映射到它们对应的值:
"""
p = Point(x=11, y=22)
print(p._asdict())

"""
_replace(**kwargs)
返回一个新的命名元组实例,并将指定域替换为新的值
"""
p = Point(x=11, y=22)
p._replace(x=33)
print(p._replace(x=33))

"""
两个属性
_fields
字符串元组列出了字段名。用于提醒和从现有元组创建一个新的命名元组类型。
"""
print(p._fields)

Color = namedtuple('Color', 'red green blue')
Pixel = namedtuple('Pixel', Point._fields + Color._fields)
print(Pixel._fields)

"""
_field_defaults
字典将字段名称映射到默认值。
"""

Account = namedtuple('Account', ['type', 'balance'], defaults=[0])
print(Account._field_defaults)
print(Account('premium'))

"""
getattr()
要获取这个名字域的值,使用 getattr() 函数 :
"""
print(getattr(p, 'x'))

+ 0  - 0   test/color/__init__.py


+ 0  - 0   test/color/color_test.py


Some files were not shown because too many files changed in this diff
