<?xml version="1.0" encoding="UTF-8"?> | |||||
<project version="4"> | |||||
<component name="PublishConfigData" autoUpload="Always" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||||
<serverData> | |||||
<paths name="10.11"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping deploy="/home/thsw/tuo_heng/prod/algSch" local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="10.13"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping deploy="/home/chenyukun/algSch" local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="66"> | |||||
<serverdata> | |||||
<mappings> | |||||
            <mapping deploy="/home/thsw2/tuo_heng/prod/algSch" local="$PROJECT_DIR$" web="/" />
          </mappings>
</serverdata> | |||||
</paths> | |||||
<paths name="66:6000"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping deploy="/home/thsw/tuo_heng/prod/algSch" local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="chenyukun"> | |||||
<serverdata> | |||||
<mappings> | |||||
            <mapping deploy="/opt/ai/algSch" local="$PROJECT_DIR$" web="/" />
          </mappings>
</serverdata> | |||||
</paths> | |||||
<paths name="dell@192.168.10.12:22"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping deploy="/home/chenyukun/fk" local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="root@212.129.223.66:20653"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="thsw2@192.168.10.66:22"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="thsw2@212.129.223.66:6500"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping local="$PROJECT_DIR$" web="/" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
</serverData> | |||||
<option name="myAutoUpload" value="ALWAYS" /> | |||||
</component> | |||||
</project> |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Encoding">
    <file url="PROJECT" charset="UTF-8" />
  </component>
</project>
<component name="InspectionProjectProfileManager"> | |||||
<profile version="1.0"> | |||||
<option name="myName" value="Project Default" /> | |||||
<inspection_tool class="JavaDoc" enabled="true" level="WARNING" enabled_by_default="true"> | |||||
<option name="TOP_LEVEL_CLASS_OPTIONS"> | |||||
<value> | |||||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||||
<option name="REQUIRED_TAGS" value="" /> | |||||
</value> | |||||
</option> | |||||
<option name="INNER_CLASS_OPTIONS"> | |||||
<value> | |||||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||||
<option name="REQUIRED_TAGS" value="" /> | |||||
</value> | |||||
</option> | |||||
<option name="METHOD_OPTIONS"> | |||||
<value> | |||||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||||
<option name="REQUIRED_TAGS" value="@return@param@throws or @exception" /> | |||||
</value> | |||||
</option> | |||||
<option name="FIELD_OPTIONS"> | |||||
<value> | |||||
<option name="ACCESS_JAVADOC_REQUIRED_FOR" value="none" /> | |||||
<option name="REQUIRED_TAGS" value="" /> | |||||
</value> | |||||
</option> | |||||
<option name="IGNORE_DEPRECATED" value="false" /> | |||||
<option name="IGNORE_JAVADOC_PERIOD" value="true" /> | |||||
<option name="IGNORE_DUPLICATED_THROWS" value="false" /> | |||||
<option name="IGNORE_POINT_TO_ITSELF" value="false" /> | |||||
<option name="myAdditionalJavadocTags" value="date" /> | |||||
</inspection_tool> | |||||
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true"> | |||||
<option name="ignoredErrors"> | |||||
<list> | |||||
<option value="N806" /> | |||||
<option value="N803" /> | |||||
<option value="N802" /> | |||||
</list> | |||||
</option> | |||||
</inspection_tool> | |||||
</profile> | |||||
</component> |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" languageLevel="JDK_16" project-jdk-name="Python 3.10 (learn)" project-jdk-type="Python SDK" />
  <component name="SwUserDefinedSpecifications">
    <option name="specTypeByUrl">
      <map />
    </option>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/tuoheng_fk.iml" filepath="$PROJECT_DIR$/tuoheng_fk.iml" />
    </modules>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="RunConfigurationProducerService">
    <option name="ignoredProducers">
      <set>
        <option value="com.android.tools.idea.compose.preview.runconfiguration.ComposePreviewRunConfigurationProducer" />
      </set>
    </option>
  </component>
</project>
<changelist name="在进行签出之前于_2022_11_2_13_46_取消提交了变更_[Changes]" date="1667367981858" recycled="false" toDelete="true">
  <option name="PATH" value="$PROJECT_DIR$/.idea/shelf/在进行签出之前于_2022_11_2_13_46_取消提交了变更_[Changes]/shelved.patch" />
  <option name="DESCRIPTION" value="在进行签出之前于 2022/11/2 13:46 取消提交了变更 [Changes]" />
</changelist>
<changelist name="在进行签出之前于_2022_11_2_13_46_取消提交了变更_[Changes]1" date="1667368007590" recycled="true" deleted="true">
  <option name="PATH" value="$PROJECT_DIR$/.idea/shelf/在进行签出之前于_2022_11_2_13_46_取消提交了变更_[Changes]1/shelved.patch" />
  <option name="DESCRIPTION" value="在进行签出之前于 2022/11/2 13:46 取消提交了变更 [Changes]" />
</changelist>
Index: .idea/deployment.xml | |||||
IDEA additional info: | |||||
Subsystem: com.intellij.openapi.diff.impl.patch.BaseRevisionTextPatchEP | |||||
<+><?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<project version=\"4\">\r\n <component name=\"PublishConfigData\" autoUpload=\"Always\" serverName=\"66\" remoteFilesAllowedToDisappearOnAutoupload=\"false\">\r\n <serverData>\r\n <paths name=\"66\">\r\n <serverdata>\r\n <mappings>\r\n <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\r\n </mappings>\r\n </serverdata>\r\n </paths>\r\n <paths name=\"chenyukun\">\r\n <serverdata>\r\n <mappings>\r\n <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\r\n </mappings>\r\n </serverdata>\r\n </paths>\r\n <paths name=\"root@212.129.223.66:20653\">\r\n <serverdata>\r\n <mappings>\r\n <mapping local=\"$PROJECT_DIR$\" web=\"/\" />\r\n </mappings>\r\n </serverdata>\r\n </paths>\r\n </serverData>\r\n <option name=\"myAutoUpload\" value=\"ALWAYS\" />\r\n </component>\r\n</project> | |||||
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP | |||||
<+>UTF-8 | |||||
=================================================================== | |||||
diff --git a/.idea/deployment.xml b/.idea/deployment.xml | |||||
--- a/.idea/deployment.xml | |||||
+++ b/.idea/deployment.xml | |||||
@@ -1,7 +1,14 @@ | |||||
<?xml version="1.0" encoding="UTF-8"?> | |||||
<project version="4"> | |||||
- <component name="PublishConfigData" autoUpload="Always" serverName="66" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||||
+ <component name="PublishConfigData" autoUpload="Always" serverName="dell@192.168.10.12:22" remoteFilesAllowedToDisappearOnAutoupload="false"> | |||||
<serverData> | |||||
+ <paths name="10.13"> | |||||
+ <serverdata> | |||||
+ <mappings> | |||||
+ <mapping local="$PROJECT_DIR$" web="/" /> | |||||
+ </mappings> | |||||
+ </serverdata> | |||||
+ </paths> | |||||
<paths name="66"> | |||||
<serverdata> | |||||
<mappings> | |||||
@@ -13,10 +20,31 @@ | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping local="$PROJECT_DIR$" web="/" /> | |||||
+ </mappings> | |||||
+ </serverdata> | |||||
+ </paths> | |||||
+ <paths name="dell@192.168.10.12:22"> | |||||
+ <serverdata> | |||||
+ <mappings> | |||||
+ <mapping deploy="/tmp/pycharm_project_764" local="$PROJECT_DIR$" /> | |||||
</mappings> | |||||
</serverdata> | |||||
</paths> | |||||
<paths name="root@212.129.223.66:20653"> | |||||
+ <serverdata> | |||||
+ <mappings> | |||||
+ <mapping local="$PROJECT_DIR$" web="/" /> | |||||
+ </mappings> | |||||
+ </serverdata> | |||||
+ </paths> | |||||
+ <paths name="thsw2@192.168.10.66:22"> | |||||
+ <serverdata> | |||||
+ <mappings> | |||||
+ <mapping local="$PROJECT_DIR$" web="/" /> | |||||
+ </mappings> | |||||
+ </serverdata> | |||||
+ </paths> | |||||
+ <paths name="thsw2@212.129.223.66:6500"> | |||||
<serverdata> | |||||
<mappings> | |||||
<mapping local="$PROJECT_DIR$" web="/" /> |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="SshConfigs">
    <configs>
      <sshConfig authType="PASSWORD" host="192.168.10.66" id="aa89844a-f7c0-47b6-9359-30d13fa76380" port="22" nameFormat="DESCRIPTIVE" username="thsw2" />
      <sshConfig authType="PASSWORD" host="192.168.10.13" id="63f793e6-0de4-4efb-9c6b-ade628d8ac78" port="22" nameFormat="DESCRIPTIVE" username="dell" />
      <sshConfig authType="PASSWORD" host="192.168.10.11" id="1d2cef01-3eb7-4213-a06b-983ed1c84429" port="22" nameFormat="DESCRIPTIVE" username="thsw" />
      <sshConfig authType="PASSWORD" host="212.129.223.66" id="7fda1f9b-b71f-4916-a628-e0155820a3e1" port="6000" nameFormat="DESCRIPTIVE" username="thsw" />
    </configs>
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>
<?xml version="1.0" encoding="UTF-8"?> | |||||
<project version="4"> | |||||
<component name="WebServers"> | |||||
<option name="servers"> | |||||
<webServer id="630d5d4a-219c-4d57-bb0b-44534517b306" name="chenyukun"> | |||||
<fileTransfer accessType="SFTP" host="212.129.223.66" port="20653" sshConfigId="98ad0e43-4426-4250-8bdd-0cf2c0e03bc1" sshConfig="root@212.129.223.66:20653 password"> | |||||
<advancedOptions> | |||||
<advancedOptions dataProtectionLevel="Private" passiveMode="true" shareSSLContext="true" /> | |||||
</advancedOptions> | |||||
</fileTransfer> | |||||
</webServer> | |||||
<webServer id="cc246223-f324-4e86-9e18-4b309f3a6500" name="66"> | |||||
<fileTransfer accessType="SFTP" host="192.168.10.66" port="22" sshConfigId="aa89844a-f7c0-47b6-9359-30d13fa76380" sshConfig="thsw2@192.168.10.66:22 password"> | |||||
<advancedOptions> | |||||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||||
</advancedOptions> | |||||
</fileTransfer> | |||||
</webServer> | |||||
<webServer id="c4f24aea-3720-451d-a1b1-315444a6141b" name="10.13"> | |||||
<fileTransfer accessType="SFTP" host="192.168.10.13" port="22" sshConfigId="63f793e6-0de4-4efb-9c6b-ade628d8ac78" sshConfig="dell@192.168.10.13:22 password"> | |||||
<advancedOptions> | |||||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||||
</advancedOptions> | |||||
</fileTransfer> | |||||
</webServer> | |||||
<webServer id="0c410ac3-ea9f-4fbb-8161-9153dcbfcc9e" name="10.11"> | |||||
<fileTransfer accessType="SFTP" host="192.168.10.11" port="22" sshConfigId="1d2cef01-3eb7-4213-a06b-983ed1c84429" sshConfig="thsw@192.168.10.11:22 password"> | |||||
<advancedOptions> | |||||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||||
</advancedOptions> | |||||
</fileTransfer> | |||||
</webServer> | |||||
<webServer id="e24a8be2-8b21-436b-b8ba-c6460e9668ed" name="66:6000"> | |||||
<fileTransfer accessType="SFTP" host="212.129.223.66" port="6000" sshConfigId="7fda1f9b-b71f-4916-a628-e0155820a3e1" sshConfig="thsw@212.129.223.66:6000 password"> | |||||
<advancedOptions> | |||||
<advancedOptions dataProtectionLevel="Private" keepAliveTimeout="0" passiveMode="true" shareSSLContext="true" /> | |||||
</advancedOptions> | |||||
</fileTransfer> | |||||
</webServer> | |||||
</option> | |||||
</component> | |||||
</project> |
<?xml version="1.0" encoding="UTF-8"?> | |||||
<project version="4"> | |||||
<component name="AutoImportSettings"> | |||||
<option name="autoReloadType" value="SELECTIVE" /> | |||||
</component> | |||||
<component name="ChangeListManager"> | |||||
<list default="true" id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="Changes"> | |||||
<change beforePath="$PROJECT_DIR$/.idea/deployment.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/deployment.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/.idea/inspectionProfiles/Project_Default.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/inspectionProfiles/Project_Default.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/.idea/modules.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/modules.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/.idea/sshConfigs.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/sshConfigs.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/.idea/webServers.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/webServers.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/common/Constant.py" beforeDir="false" afterPath="$PROJECT_DIR$/common/Constant.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/concurrency/CommonProcess.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/concurrency/CommonThread.py" beforeDir="false" afterPath="$PROJECT_DIR$/concurrency/CommonThread.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/concurrency/FileUpdateThread.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/concurrency/HeartbeatThread.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/concurrency/IntelligentRecognitionProcess.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/concurrency/MessagePollingThread.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/config/ModelConfig.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/dsp_application.yml" beforeDir="false" afterPath="$PROJECT_DIR$/fk_application.yml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/dsp_master.py" beforeDir="false" afterPath="$PROJECT_DIR$/fk_master.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/entity/FeedBack.py" beforeDir="false" afterPath="$PROJECT_DIR$/entity/FeedBack.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/enums/AnalysisStatusEnum.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/enums/AnalysisTypeEnum.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/enums/ExceptionEnum.py" beforeDir="false" afterPath="$PROJECT_DIR$/enums/ExceptionEnum.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/enums/ModelTypeEnum.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/service/Dispatcher.py" beforeDir="false" afterPath="$PROJECT_DIR$/service/Dispatcher.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/Producer2.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/__init__.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/cv2test.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/cv2test1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/experimental.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/ffmpeg11/__init__.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/ffmpeg11/ffmpeg11.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/ffmpeg11/ffmpeg33.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/ffmpeg2.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/ffmpeg3.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/gputest.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/gputest1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/1.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/2.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI1.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI2.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI3.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI4.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI5.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI6.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI7.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/image/AI8.jpg" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/mysqltest.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/producer_start.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/producer_stop.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/read.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/same1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/same2.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/same3.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/test1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/torch_utils.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/vod.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/vodTest.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/vodtest1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/vodtest2.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/ORB算法.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/__init__.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/互信息.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/余弦相似度计算.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/视频添加图片水印1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/视频添加文字水印.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/视频添加文字水印1.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/视频添加文字水印2.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/test/水印/视频添加文字水印3.py" beforeDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/tuoheng_alg.iml" beforeDir="false" afterPath="$PROJECT_DIR$/tuoheng_fk.iml" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/util/ImageUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ImageUtils.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/util/KafkaUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/KafkaUtils.py" afterDir="false" /> | |||||
<change beforePath="$PROJECT_DIR$/util/ModelUtils.py" beforeDir="false" afterPath="$PROJECT_DIR$/util/ModelUtils.py" afterDir="false" /> | |||||
</list> | |||||
<option name="SHOW_DIALOG" value="false" /> | |||||
<option name="HIGHLIGHT_CONFLICTS" value="true" /> | |||||
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" /> | |||||
<option name="LAST_RESOLUTION" value="IGNORE" /> | |||||
</component> | |||||
<component name="FileTemplateManagerImpl"> | |||||
<option name="RECENT_TEMPLATES"> | |||||
<list> | |||||
<option value="Python Script" /> | |||||
</list> | |||||
</option> | |||||
</component> | |||||
<component name="Git.Settings"> | |||||
<excluded-from-favorite> | |||||
<branch-storage> | |||||
<map> | |||||
<entry type="LOCAL"> | |||||
<value> | |||||
<list> | |||||
<branch-info repo="$PROJECT_DIR$" source="master" /> | |||||
</list> | |||||
</value> | |||||
</entry> | |||||
<entry type="REMOTE"> | |||||
<value> | |||||
<list> | |||||
<branch-info repo="$PROJECT_DIR$" source="origin/master" /> | |||||
</list> | |||||
</value> | |||||
</entry> | |||||
</map> | |||||
</branch-storage> | |||||
</excluded-from-favorite> | |||||
<favorite-branches> | |||||
<branch-storage> | |||||
<map> | |||||
<entry type="LOCAL"> | |||||
<value> | |||||
<list> | |||||
<branch-info repo="$PROJECT_DIR$" source="develop" /> | |||||
</list> | |||||
</value> | |||||
</entry> | |||||
<entry type="REMOTE"> | |||||
<value> | |||||
<list> | |||||
<branch-info repo="$PROJECT_DIR$" source="origin/develop" /> | |||||
</list> | |||||
</value> | |||||
</entry> | |||||
</map> | |||||
</branch-storage> | |||||
</favorite-branches> | |||||
<option name="RECENT_BRANCH_BY_REPOSITORY"> | |||||
<map> | |||||
<entry key="$PROJECT_DIR$" value="develop" /> | |||||
</map> | |||||
</option> | |||||
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" /> | |||||
</component> | |||||
<component name="GitSEFilterConfiguration"> | |||||
<file-type-list> | |||||
<filtered-out-file-type name="LOCAL_BRANCH" /> | |||||
<filtered-out-file-type name="REMOTE_BRANCH" /> | |||||
<filtered-out-file-type name="TAG" /> | |||||
<filtered-out-file-type name="COMMIT_BY_MESSAGE" /> | |||||
</file-type-list> | |||||
</component> | |||||
<component name="GitToolBoxStore"> | |||||
<option name="recentBranches"> | |||||
<RecentBranches> | |||||
<option name="branchesForRepo"> | |||||
<list> | |||||
<RecentBranchesForRepo> | |||||
<option name="branches"> | |||||
<list> | |||||
<RecentBranch> | |||||
<option name="branchName" value="develop" /> | |||||
<option name="lastUsedInstant" value="1667368008" /> | |||||
</RecentBranch> | |||||
<RecentBranch> | |||||
<option name="branchName" value="release" /> | |||||
<option name="lastUsedInstant" value="1664245931" /> | |||||
</RecentBranch> | |||||
<RecentBranch> | |||||
<option name="branchName" value="master" /> | |||||
<option name="lastUsedInstant" value="1664245924" /> | |||||
</RecentBranch> | |||||
</list> | |||||
</option> | |||||
<option name="repositoryRootUrl" value="file://$PROJECT_DIR$" /> | |||||
</RecentBranchesForRepo> | |||||
</list> | |||||
</option> | |||||
</RecentBranches> | |||||
</option> | |||||
</component> | |||||
<component name="MavenImportPreferences"> | |||||
<option name="generalSettings"> | |||||
<MavenGeneralSettings> | |||||
<option name="mavenHome" value="C:/learn/maven/apache-maven-3.6.3-bin/apache-maven-3.6.3" /> | |||||
<option name="userSettingsFile" value="C:\learn\maven\apache-maven-3.6.3-bin\apache-maven-3.6.3\conf\settings.xml" /> | |||||
</MavenGeneralSettings> | |||||
</option> | |||||
<option name="importingSettings"> | |||||
<MavenImportingSettings> | |||||
<option name="jdkForImporter" value="11" /> | |||||
</MavenImportingSettings> | |||||
</option> | |||||
</component> | |||||
<component name="MavenRunner"> | |||||
<option name="jreName" value="11" /> | |||||
</component> | |||||
<component name="ProjectId" id="2DTRMTxJTz5BhFzI55HkZIMBcy5" /> | |||||
<component name="ProjectViewState"> | |||||
<option name="hideEmptyMiddlePackages" value="true" /> | |||||
<option name="showLibraryContents" value="true" /> | |||||
</component> | |||||
<component name="PropertiesComponent"> | |||||
<property name="RunOnceActivity.OpenProjectViewOnStart" value="true" /> | |||||
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" /> | |||||
<property name="SHARE_PROJECT_CONFIGURATION_FILES" value="true" /> | |||||
<property name="WebServerToolWindowFactoryState" value="true" /> | |||||
<property name="WebServerToolWindowPanel.toolwindow.highlight.mappings" value="true" /> | |||||
<property name="WebServerToolWindowPanel.toolwindow.highlight.symlinks" value="true" /> | |||||
<property name="WebServerToolWindowPanel.toolwindow.show.date" value="false" /> | |||||
<property name="WebServerToolWindowPanel.toolwindow.show.permissions" value="false" /> | |||||
<property name="WebServerToolWindowPanel.toolwindow.show.size" value="false" /> | |||||
<property name="last_opened_file_path" value="$PROJECT_DIR$/test/kafka" /> | |||||
<property name="node.js.detected.package.eslint" value="true" /> | |||||
<property name="node.js.detected.package.tslint" value="true" /> | |||||
<property name="node.js.selected.package.eslint" value="(autodetect)" /> | |||||
<property name="node.js.selected.package.tslint" value="(autodetect)" /> | |||||
<property name="project.structure.last.edited" value="项目" /> | |||||
<property name="project.structure.proportion" value="0.15429688" /> | |||||
<property name="project.structure.side.proportion" value="0.24046242" /> | |||||
<property name="settings.editor.selected.configurable" value="File.Encoding" /> | |||||
<property name="vue.rearranger.settings.migration" value="true" /> | |||||
</component> | |||||
<component name="RecentsManager"> | |||||
<key name="CopyFile.RECENT_KEYS"> | |||||
<recent name="D:\work\fangke\tuoheng_fk\test\kafka" /> | |||||
<recent name="D:\work\fangke\tuoheng_fk\util" /> | |||||
<recent name="D:\work\fangke\tuoheng_fk\concurrency" /> | |||||
<recent name="D:\work\alg_new\tuoheng_alg\util" /> | |||||
<recent name="D:\work\alg\tuoheng_alg\data" /> | |||||
</key> | |||||
<key name="MoveFile.RECENT_KEYS"> | |||||
<recent name="D:\work\alg\tuoheng_alg\image" /> | |||||
</key> | |||||
</component> | |||||
<component name="RunManager" selected="Python.producer_start"> | |||||
<configuration name="Dispatcher" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||||
<module name="tuoheng_fk" /> | |||||
<option name="INTERPRETER_OPTIONS" value="" /> | |||||
<option name="PARENT_ENVS" value="true" /> | |||||
<envs> | |||||
<env name="PYTHONUNBUFFERED" value="1" /> | |||||
</envs> | |||||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/service" /> | |||||
<option name="IS_MODULE_SDK" value="false" /> | |||||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/service/Dispatcher.py" /> | |||||
<option name="PARAMETERS" value="" /> | |||||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||||
<option name="EMULATE_TERMINAL" value="false" /> | |||||
<option name="MODULE_MODE" value="false" /> | |||||
<option name="REDIRECT_INPUT" value="false" /> | |||||
<option name="INPUT_FILE" value="" /> | |||||
<method v="2" /> | |||||
</configuration> | |||||
<configuration name="fk_master" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||||
<module name="tuoheng_fk" /> | |||||
<option name="INTERPRETER_OPTIONS" value="" /> | |||||
<option name="PARENT_ENVS" value="true" /> | |||||
<envs> | |||||
<env name="PYTHONUNBUFFERED" value="1" /> | |||||
</envs> | |||||
<option name="SDK_HOME" value="sftp://dell@192.168.10.12:22/home/dell/anaconda3/envs/prod/bin/python3.8" /> | |||||
<option name="WORKING_DIRECTORY" value="/home/chenyukun/fk" /> | |||||
<option name="IS_MODULE_SDK" value="false" /> | |||||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||||
<option name="SCRIPT_NAME" value="/home/chenyukun/fk/fk_master.py" /> | |||||
<option name="PARAMETERS" value="" /> | |||||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||||
<option name="EMULATE_TERMINAL" value="false" /> | |||||
<option name="MODULE_MODE" value="false" /> | |||||
<option name="REDIRECT_INPUT" value="false" /> | |||||
<option name="INPUT_FILE" value="" /> | |||||
<method v="2" /> | |||||
</configuration> | |||||
<configuration name="image" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||||
<module name="tuoheng_fk" /> | |||||
<option name="INTERPRETER_OPTIONS" value="" /> | |||||
<option name="PARENT_ENVS" value="true" /> | |||||
<envs> | |||||
<env name="PYTHONUNBUFFERED" value="1" /> | |||||
</envs> | |||||
<option name="SDK_HOME" value="" /> | |||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/协程" /> | |||||
<option name="IS_MODULE_SDK" value="true" /> | |||||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/协程/image.py" /> | |||||
<option name="PARAMETERS" value="" /> | |||||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||||
<option name="EMULATE_TERMINAL" value="false" /> | |||||
<option name="MODULE_MODE" value="false" /> | |||||
<option name="REDIRECT_INPUT" value="false" /> | |||||
<option name="INPUT_FILE" value="" /> | |||||
<method v="2" /> | |||||
</configuration> | |||||
<configuration name="minio" type="PythonConfigurationType" factoryName="Python" temporary="true"> | |||||
<module name="tuoheng_alg" /> | |||||
<option name="INTERPRETER_OPTIONS" value="" /> | |||||
<option name="PARENT_ENVS" value="true" /> | |||||
<envs> | |||||
<env name="PYTHONUNBUFFERED" value="1" /> | |||||
</envs> | |||||
<option name="SDK_HOME" value="$PROJECT_DIR$/../../../software/anaconda/envs/test/python.exe" /> | |||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/minio1" /> | |||||
<option name="IS_MODULE_SDK" value="false" /> | |||||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/minio1/minio_test.py" /> | |||||
<option name="PARAMETERS" value="" /> | |||||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||||
<option name="EMULATE_TERMINAL" value="false" /> | |||||
<option name="MODULE_MODE" value="false" /> | |||||
<option name="REDIRECT_INPUT" value="false" /> | |||||
<option name="INPUT_FILE" value="" /> | |||||
<method v="2" /> | |||||
</configuration> | |||||
<configuration name="mysqltest" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true"> | |||||
<module name="tuoheng_alg" /> | |||||
<option name="INTERPRETER_OPTIONS" value="" /> | |||||
<option name="PARENT_ENVS" value="true" /> | |||||
<envs> | |||||
<env name="PYTHONUNBUFFERED" value="1" /> | |||||
</envs> | |||||
<option name="SDK_HOME" value="" /> | |||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test" /> | |||||
<option name="IS_MODULE_SDK" value="true" /> | |||||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/mysqltest.py" /> | |||||
<option name="PARAMETERS" value="" /> | |||||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||||
<option name="EMULATE_TERMINAL" value="false" /> | |||||
<option name="MODULE_MODE" value="false" /> | |||||
<option name="REDIRECT_INPUT" value="false" /> | |||||
<option name="INPUT_FILE" value="" /> | |||||
<method v="2" /> | |||||
</configuration> | |||||
<configuration name="producer_start" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true"> | |||||
<module name="tuoheng_fk" /> | |||||
<option name="INTERPRETER_OPTIONS" value="" /> | |||||
<option name="PARENT_ENVS" value="true" /> | |||||
<envs> | |||||
<env name="PYTHONUNBUFFERED" value="1" /> | |||||
</envs> | |||||
<option name="SDK_HOME" value="" /> | |||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/test/kafka" /> | |||||
<option name="IS_MODULE_SDK" value="true" /> | |||||
<option name="ADD_CONTENT_ROOTS" value="true" /> | |||||
<option name="ADD_SOURCE_ROOTS" value="true" /> | |||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" /> | |||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test/kafka/producer_start.py" /> | |||||
<option name="PARAMETERS" value="" /> | |||||
<option name="SHOW_COMMAND_LINE" value="false" /> | |||||
<option name="EMULATE_TERMINAL" value="false" /> | |||||
<option name="MODULE_MODE" value="false" /> | |||||
<option name="REDIRECT_INPUT" value="false" /> | |||||
<option name="INPUT_FILE" value="" /> | |||||
<method v="2" /> | |||||
</configuration> | |||||
<list> | |||||
<item itemvalue="Python.mysqltest" /> | |||||
<item itemvalue="Python.minio" /> | |||||
<item itemvalue="Python.image" /> | |||||
<item itemvalue="Python.fk_master" /> | |||||
<item itemvalue="Python.producer_start" /> | |||||
<item itemvalue="Python.Dispatcher" /> | |||||
</list> | |||||
<recent_temporary> | |||||
<list> | |||||
<item itemvalue="Python.producer_start" /> | |||||
<item itemvalue="Python.fk_master" /> | |||||
<item itemvalue="Python.Dispatcher" /> | |||||
<item itemvalue="Python.image" /> | |||||
<item itemvalue="Python.minio" /> | |||||
</list> | |||||
</recent_temporary> | |||||
</component> | |||||
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" /> | |||||
<component name="SshConsoleOptionsProvider"> | |||||
<option name="myEncoding" value="UTF-8" /> | |||||
</component> | |||||
<component name="TaskManager"> | |||||
<task active="true" id="Default" summary="Default task"> | |||||
<changelist id="4f7dccd9-8f92-4a6e-90cc-33890d102263" name="Changes" comment="" /> | |||||
<created>1660721040418</created> | |||||
<option name="number" value="Default" /> | |||||
<option name="presentableId" value="Default" /> | |||||
<updated>1660721040418</updated> | |||||
<workItem from="1660721041939" duration="5378000" /> | |||||
<workItem from="1660742200263" duration="758000" /> | |||||
<workItem from="1660781586599" duration="12677000" /> | |||||
<workItem from="1660821003279" duration="3079000" /> | |||||
<workItem from="1660831418060" duration="2591000" /> | |||||
<workItem from="1660867353831" duration="14213000" /> | |||||
<workItem from="1661125394679" duration="2000000" /> | |||||
<workItem from="1661212127373" duration="12131000" /> | |||||
<workItem from="1661228338772" duration="10683000" /> | |||||
<workItem from="1661263812380" duration="582000" /> | |||||
<workItem from="1661298710414" duration="3633000" /> | |||||
<workItem from="1661385517494" duration="6862000" /> | |||||
<workItem from="1661474047536" duration="3841000" /> | |||||
<workItem from="1661506480813" duration="579000" /> | |||||
<workItem from="1661753711797" duration="4495000" /> | |||||
<workItem from="1661847814441" duration="5437000" /> | |||||
<workItem from="1661864932477" duration="11602000" /> | |||||
<workItem from="1661903556894" duration="23425000" /> | |||||
<workItem from="1661956938136" duration="695000" /> | |||||
<workItem from="1661989919031" duration="25723000" /> | |||||
<workItem from="1662039810210" duration="419000" /> | |||||
<workItem from="1662076586600" duration="25491000" /> | |||||
<workItem from="1662335184832" duration="5150000" /> | |||||
<workItem from="1662348891112" duration="7581000" /> | |||||
<workItem from="1662421409878" duration="15047000" /> | |||||
<workItem from="1663472604061" duration="19071000" /> | |||||
<workItem from="1663515200540" duration="648000" /> | |||||
<workItem from="1663545195142" duration="3595000" /> | |||||
<workItem from="1663631838059" duration="7995000" /> | |||||
<workItem from="1664253194092" duration="121000" /> | |||||
<workItem from="1664253821529" duration="612000" /> | |||||
<workItem from="1664268388633" duration="867000" /> | |||||
<workItem from="1667181028061" duration="209000" /> | |||||
<workItem from="1667433593544" duration="3987000" /> | |||||
<workItem from="1667458662892" duration="6075000" /> | |||||
<workItem from="1667465195109" duration="2193000" /> | |||||
<workItem from="1667520341261" duration="742000" /> | |||||
<workItem from="1667524924941" duration="63471000" /> | |||||
</task> | |||||
<servers /> | |||||
</component> | |||||
<component name="TypeScriptGeneratedFilesManager"> | |||||
<option name="version" value="3" /> | |||||
</component> | |||||
<component name="Vcs.Log.Tabs.Properties"> | |||||
<option name="TAB_STATES"> | |||||
<map> | |||||
<entry key="MAIN"> | |||||
<value> | |||||
<State /> | |||||
</value> | |||||
</entry> | |||||
</map> | |||||
</option> | |||||
<option name="oldMeFiltersMigrated" value="true" /> | |||||
</component> | |||||
<component name="XSLT-Support.FileAssociations.UIState"> | |||||
<expand /> | |||||
<select /> | |||||
</component> | |||||
<component name="com.intellij.coverage.CoverageDataManagerImpl"> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils__1_.coverage" NAME="KafkaUtils (1) Coverage Results" MODIFIED="1663464961001" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$mysqltest.coverage" NAME="mysqltest Coverage Results" MODIFIED="1660868712851" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc.coverage" NAME="asnyc Coverage Results" MODIFIED="1663459033435" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$minio.coverage" NAME="minio 覆盖结果" MODIFIED="1667465702864" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/minio1" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$.coverage" NAME="视频添加图片水印 Coverage Results" MODIFIED="1661873949526" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$1.coverage" NAME="视频添加文字水印1 Coverage Results" MODIFIED="1663381948693" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$asnyc__1_.coverage" NAME="asnyc (1) Coverage Results" MODIFIED="1663458917599" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$producer_start.coverage" NAME="producer_start Coverage Results" MODIFIED="1663466832843" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$dsp_master.coverage" NAME="dsp_master Coverage Results" MODIFIED="1663403978477" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$5.coverage" NAME="视频添加图片水印5 Coverage Results" MODIFIED="1661905982885" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$KafkaUtils.coverage" NAME="KafkaUtils Coverage Results" MODIFIED="1663465345491" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$read.coverage" NAME="read Coverage Results" MODIFIED="1663640070956" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$aa1.coverage" NAME="aa1 覆盖结果" MODIFIED="1667351136888" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/ffmpeg11" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$ffmpeg22.coverage" NAME="aa 覆盖结果" MODIFIED="1667350492259" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/opt/tuo_heng" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_fk$fk_master.coverage" NAME="fk_master 覆盖结果" MODIFIED="1667811327080" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="/home/chenyukun/fk" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$ImageUtils.coverage" NAME="ImageUtils Coverage Results" MODIFIED="1663499421253" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$2.coverage" NAME="视频添加图片水印2 Coverage Results" MODIFIED="1661875306428" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_fk$producer_start.coverage" NAME="producer_start 覆盖结果" MODIFIED="1667811329568" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/kafka" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$4.coverage" NAME="视频添加图片水印4 Coverage Results" MODIFIED="1661874731395" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_fk$image.coverage" NAME="image 覆盖结果" MODIFIED="1667547816366" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test/协程" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$TimeUtils.coverage" NAME="TimeUtils Coverage Results" MODIFIED="1661222768678" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/util" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_fk$Dispatcher.coverage" NAME="Dispatcher 覆盖结果" MODIFIED="1667810228478" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/service" /> | |||||
<SUITE FILE_PATH="coverage/tuoheng_alg$3.coverage" NAME="视频添加文字水印3 Coverage Results" MODIFIED="1661906152928" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/test" /> | |||||
</component> | |||||
</project> |
# Configuration file name (loaded by YmlUtils)
APPLICATION_CONFIG="fk_application.yml"
# Text encoding used when reading configuration files
UTF_8="utf-8"
# File open mode: read-only
R='r'
# Progress value meaning 100% complete.
# NOTE(review): name has a typo ("progess" -> "progress"); kept because other
# modules may import it by this name — confirm before renaming.
success_progess="1.0000"
from threading import Thread | |||||
from loguru import logger | |||||
class Common(Thread):
    """Worker thread that invokes ``func(args)`` once and keeps the result.

    Note: ``args`` is handed to ``func`` as a single positional value;
    it is NOT unpacked.
    """

    def __init__(self, func, args=()):
        super(Common, self).__init__()
        # Target callable, its (single) argument, and the slot run() fills.
        self.func, self.args, self.result = func, args, None

    def get_result(self):
        # Block until the worker finishes, capped at three hours, then hand
        # back whatever run() managed to store (None if it never completed).
        self.join(60 * 60 * 3)
        return self.result

    def run(self):
        logger.info("开始执行线程!")
        target, payload = self.func, self.args
        self.result = target(payload)
        logger.info("线程停止完成!")
# -*- coding: utf-8 -*- | |||||
import time | |||||
from threading import Thread | |||||
from loguru import logger | |||||
from util import KafkaUtils | |||||
''' | |||||
问题反馈线程 | |||||
''' | |||||
class FeedbackThread(Thread):
    """Publisher loop: drains fbQueue and sends every feedback payload to
    the fk-alg-results Kafka topic. Runs until the process exits."""
    def __init__(self, fbQueue, content):
        # fbQueue: queue of feedback dicts filled by DispatcherService.
        # content: parsed YAML configuration (kafka section is read in run()).
        super().__init__()
        self.fbQueue = fbQueue
        self.content = content
    def getFeedback(self):
        # Blocking read: returns the next feedback dict from the queue.
        return self.fbQueue.get()
    def run(self):
        logger.info("启动问题反馈线程")
        # Producer is created inside the thread so the Kafka connection
        # lives on this thread, not the parent's.
        kafkaProducer = KafkaUtils.CustomerKafkaProducer(self.content)
        while True:
            logger.info("问题反馈发送消息循环")
            # Pre-bind fb so the except handler can safely read fb.get("msgId")
            # even if getFeedback() itself raised.
            fb = {}
            try:
                fb = self.getFeedback()
                if fb is not None and len(fb) > 0:
                    # NOTE(review): presumably fb["msgId"] is the Kafka message
                    # key and the trailing 1 a retry/partition hint — confirm
                    # against KafkaUtils.CustomerKafkaProducer.sender.
                    kafkaProducer.sender(self.content["kafka"]["topic"]["fk-alg-results-topic"],
                                         fb["msgId"], fb, 1)
                else:
                    time.sleep(1)
            except Exception as e:
                logger.exception("问题反馈异常:{}, msgId:{}", e, fb.get("msgId"))
        # Unreachable: the while True above never breaks.
        logger.info("问题反馈进程执行完成")
def message_feedback(msgId, erroCode="", erroMsg="", registerRecognitionMsg=None):
    """Build the Kafka feedback payload for one task.

    msgId: task id echoed back to the caller.
    erroCode / erroMsg: status pair, typically ExceptionType .value[0]/[1].
    registerRecognitionMsg: list of per-registration results; defaults to a
        fresh list per call. (The original used a mutable ``[]`` default,
        which is shared across calls in Python — callers append to this list,
        so results leaked between messages.)
    Returns the payload dict; key spellings ("erroCode", "erroMsg") are part
    of the wire format and must not be changed.
    """
    if registerRecognitionMsg is None:
        registerRecognitionMsg = []
    return {"msgId": msgId, "erroCode": erroCode, "erroMsg": erroMsg, "registerRecognitionMsg": registerRecognitionMsg}
def message_register(registerId="", carUrl="", carCode="", flowManRecognitionlList=None):
    """Build one registration-recognition entry for the feedback payload.

    flowManRecognitionlList: per-image recognition results; defaults to a
        fresh list per call. (The original used a mutable ``[]`` default,
        shared across calls — ``distinguish`` appends to this list.)
    NOTE(review): the misspelled key "flowManRecognitionlList" is part of the
    wire format and is kept as-is.
    """
    if flowManRecognitionlList is None:
        flowManRecognitionlList = []
    return {
        "registerId": registerId,
        "carUrl": carUrl,
        "carCode": carCode,
        "flowManRecognitionlList": flowManRecognitionlList
    }
from enum import Enum, unique | |||||
# Status/exception-code enumeration for feedback messages.
# Each member's value is a (code, human-readable message) pair; consumers
# read them as .value[0] / .value[1].
@unique
class ExceptionType(Enum):
    SUCCESS = ("fk000", "成功!")
    ILLEGAL_PARAMETER_FORMAT = ("fk001", "非法参数格式!")
    UNIVERSAL_TEXT_RECOGNITION_FAILED = ("fk002", "通用文字识别异常!")
    ABNORMAL_LICENSE_PLATE_RECOGNITION = ("fk003", "车牌识别异常!")
    AI_RECOGNITION_FAILED = ("fk004", "AI识别异常!")
    # NOTE(review): "SP999" breaks the "fk" prefix pattern of the codes above —
    # confirm whether this was carried over from another service intentionally.
    SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常, 请联系工程师定位处理!")
# -*- coding: utf-8 -*- | |||||
from loguru import logger | |||||
""" | |||||
自定义异常 | |||||
""" | |||||
class ServiceException(Exception):  # business exception with code + message
    """Business exception carrying an error code and a human-readable message.

    code / msg typically come from ExceptionType .value[0] / .value[1];
    DispatcherService catches this type and forwards both fields in the
    feedback payload.
    """

    def __init__(self, code, msg):
        self.code = code
        self.msg = msg

    def __str__(self):
        # Keep the original logging side effect for visibility.
        logger.error("异常编码:{}, 异常描述:{}", self.code, self.msg)
        # BUG FIX: the original returned None here, so str(exc) — and any
        # logging/formatting of the exception — raised
        # "TypeError: __str__ returned non-string".
        return "异常编码:{}, 异常描述:{}".format(self.code, self.msg)
dsp: | |||||
active: dev | |||||
kafka: | |||||
topic: | |||||
fk-alg-tasks-topic: fk-alg-tasks | |||||
fk-alg-results-topic: fk-alg-results | |||||
local: | |||||
bootstrap_servers: ['192.168.10.11:9092'] | |||||
producer: | |||||
acks: -1 | |||||
retries: 3 | |||||
linger_ms: 50 | |||||
retry_backoff_ms: 1000 | |||||
max_in_flight_requests_per_connection: 5 | |||||
consumer: | |||||
client_id: fk_ai_server | |||||
group_id: fk-ai-local | |||||
auto_offset_reset: latest | |||||
enable_auto_commit: False | |||||
max_poll_records: 1 | |||||
dev: | |||||
bootstrap_servers: ['192.168.11.13:9092'] | |||||
producer: | |||||
acks: -1 | |||||
retries: 3 | |||||
linger_ms: 50 | |||||
retry_backoff_ms: 1000 | |||||
max_in_flight_requests_per_connection: 5 | |||||
consumer: | |||||
client_id: fk_ai_server | |||||
group_id: fk-ai-dev | |||||
auto_offset_reset: latest | |||||
enable_auto_commit: False | |||||
max_poll_records: 1 | |||||
test: | |||||
bootstrap_servers: ['192.168.11.242:9092'] | |||||
producer: | |||||
acks: -1 | |||||
retries: 3 | |||||
linger_ms: 50 | |||||
retry_backoff_ms: 1000 | |||||
max_in_flight_requests_per_connection: 5 | |||||
consumer: | |||||
client_id: fk_ai_server | |||||
group_id: fk-ai-test | |||||
auto_offset_reset: latest | |||||
enable_auto_commit: False | |||||
max_poll_records: 1 | |||||
prod: | |||||
bootstrap_servers: ['101.132.127.1:19092'] | |||||
producer: | |||||
acks: -1 | |||||
retries: 3 | |||||
linger_ms: 50 | |||||
retry_backoff_ms: 1000 | |||||
max_in_flight_requests_per_connection: 5 | |||||
consumer: | |||||
client_id: fk_ai_server | |||||
group_id: fk-ai-prod | |||||
auto_offset_reset: latest | |||||
enable_auto_commit: False | |||||
max_poll_records: 1 | |||||
gpu: | |||||
# 'first'- 按升序排列可用的 GPU 设备 ID(默认) | |||||
# 'last'- 按 id 降序排列可用的 GPU 设备 id | |||||
  # 'random'- 随机排序可用的 GPU 设备 ID
# 'load'- 按负载递增排序可用的 GPU 设备 ID | |||||
# 'memory'- 通过升序内存使用来排序可用的 GPU 设备 ID | |||||
order: 'memory' | |||||
# 获取可用gpu数量 | |||||
limit: 1 | |||||
# 最大负载 | |||||
maxLoad: 0.8 | |||||
# 最大内存 | |||||
maxMemory: 0.8 | |||||
includeNan: False | |||||
excludeID: [] | |||||
excludeUUID: [] | |||||
# 日志设置 | |||||
log: | |||||
# 是否开启文件输出 True:开启 False:关闭 | |||||
enable_file_log: False | |||||
# 是否开启控制台日志输出 True:开启 False:关闭 | |||||
enable_stderr: True | |||||
# 日志打印文件夹 | |||||
base_path: /home/DATA/fk/logs/ | |||||
# 日志文件名称 | |||||
log_name: fk.log | |||||
# 日志打印格式 | |||||
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}" | |||||
# 日志隔离级别 | |||||
level: INFO | |||||
# 日志每天0点创建新文件 | |||||
rotation: 00:00 | |||||
# 日志保存时间15天 | |||||
retention: 15 days | |||||
# 线程安全 | |||||
enqueue: True | |||||
# 编码格式 | |||||
encoding: utf8 | |||||
baiduocr: | |||||
APP_ID: 28173504 | |||||
API_KEY: kqrFE7VuygIaFer7z6cRxzoi | |||||
SECRET_KEY: yp7xBokyl4TItyGhay7skAN1cMwfvEXf | |||||
clientId: 4MHwsmjWR1E3POLPQlVxtWK3 | |||||
clientSecret: 86i865EgBOTG5OtWssAo1zOUc6A7DZAj | |||||
path: https://aip.baidubce.com/rest/2.0/ocr/v1/license_plate | |||||
#mysql: | |||||
# # 数据库信息 | |||||
# dev: | |||||
# host: 192.168.11.13 | |||||
# port: 3306 | |||||
# dbname: tuheng_dsp | |||||
# username: root | |||||
# password: idontcare | |||||
# test: | |||||
# host: 192.168.11.242 | |||||
# port: 3306 | |||||
# dbname: tuheng_dsp | |||||
# username: root | |||||
# password: idontcare | |||||
# prod: | |||||
# host: 172.16.1.22 | |||||
# port: 3306 | |||||
# dbname: tuheng_dsp | |||||
# username: root | |||||
# password: TH22#2022 | |||||
# db_charset: utf8 | |||||
# # mincached : 启动时开启的闲置连接数量(缺省值 0 开始时不创建连接) | |||||
# db_min_cached: 0 | |||||
# # maxcached : 连接池中允许的闲置的最多连接数量(缺省值 0 代表不闲置连接池大小) | |||||
# db_max_cached: 10 | |||||
# # maxshared : 共享连接数允许的最大数量(缺省值 0 代表所有连接都是专用的)如果达到了最大数量,被请求为共享的连接将会被共享使用 | |||||
# db_max_shared: 10 | |||||
# # maxconnecyions : 创建连接池的最大数量(缺省值 0 代表不限制) | |||||
# db_max_connecyions: 20 | |||||
#   # blocking : 连接池达到最大连接数且无空闲连接时是否阻塞等待(True 阻塞等待,False 直接报错)
# db_blocking: True | |||||
# # maxusage : 单个连接的最大允许复用次数(缺省值 0 或 False 代表不限制的复用).当达到最大数时,连接会自动重新连接(关闭和重新打开) | |||||
# db_max_usage: 0 | |||||
# # setsession : 一个可选的SQL命令列表用于准备每个会话,如["set datestyle to german", ...] | |||||
# db_set_session: None |
# -*- coding: utf-8 -*-
from service import Dispatcher
# import torch
'''
fk主程序入口
'''
# Entry point: construct the dispatcher service and enter its blocking
# Kafka consume loop (start_service does not return in normal operation).
if __name__ == '__main__':
    print("(♥◠‿◠)ノ゙ FK【算法调度服务】开始启动 ლ(´ڡ`ლ)゙")
    # torch.multiprocessing.set_start_method('spawn')
    Dispatcher.DispatcherService().start_service()
# -*- coding: utf-8 -*- | |||||
import time | |||||
from concurrent.futures import ThreadPoolExecutor, as_completed | |||||
import GPUtil | |||||
from concurrency.FeedbackThread import FeedbackThread | |||||
from entity.FeedBack import message_feedback, message_register | |||||
from enums.ExceptionEnum import ExceptionType | |||||
from exception.CustomerException import ServiceException | |||||
from util import YmlUtils, LogUtils, KafkaUtils | |||||
from loguru import logger | |||||
from multiprocessing import Queue | |||||
from util import GPUtils | |||||
from util.ImageUtils import url2Array | |||||
from util.ModelUtils import FKModel | |||||
from util.OcrBaiduSdk import OcrBaiduSdk | |||||
''' | |||||
分发服务 | |||||
''' | |||||
def distinguish(flowMan, fkmodel, gpuId, orc, msgId):
    """Recognize one registration entry: the car-plate photo plus every
    trip/health-code screenshot, fanned out over a small thread pool.

    Returns the aggregated register dict, or None as soon as any single
    recognition failed.
    """
    registerId = flowMan.get("registerId")
    carUrl = flowMan.get("carUrl")
    # The plate task goes first, followed by one 'code' task per screenshot.
    tasks = [('plate', carUrl, gpuId, fkmodel, orc, msgId)]
    tasks.extend(('code', url, gpuId, fkmodel, orc, msgId)
                 for url in flowMan.get("flowManUrlList"))
    register = message_register(registerId=registerId, carUrl=carUrl, carCode="", flowManRecognitionlList=[])
    with ThreadPoolExecutor(max_workers=3) as pool:
        for outcome in pool.map(ai_segmentation_recognition, tasks):
            if outcome is None:
                # One failed recognition invalidates the whole entry.
                return None
            if outcome.get("type") == '2':
                # Type '2' is the plate result: fill the car fields.
                register["carCode"] = outcome.get("carCode")
                register["carCodeScore"] = outcome.get("score")
            else:
                register["flowManRecognitionlList"].append(outcome)
    return register
# 2. Segment the image with the FK model, then send cropped regions to Baidu OCR.
def _ocr_text_field(crop, ocr, msgId, label):
    """Run Baidu universal text recognition on one cropped region.

    crop: (image, score) pair from the segmentation model, or None/empty
        when the region was not detected.
    ocr: OcrBaiduSdk instance; msgId: request id for logging;
    label: Chinese field name used in the error log.
    Returns (words_result, score); empty strings when nothing usable
    (the model's score is still returned when only the OCR came back empty,
    matching the original inline logic).
    """
    if crop is None or len(crop) == 0:
        return '', ''
    result = ocr.universal_text_recognition(crop[0], msgId)
    score = crop[1]
    if result is None or result.get("words_result") is None or len(result.get("words_result")) == 0:
        logger.error("{}识别为空: {}", label, result)
        return '', score
    return result.get("words_result"), score


def ai_segmentation_recognition(param):
    """Segment one image and OCR its fields.

    param: ('plate'|'code', imageUrl, gpuId, fkmodel, ocr, msgId).
    Returns a type-dependent result dict, or None on any failure
    (all exceptions are logged and swallowed).
    """
    try:
        image = url2Array(param[1])
        if param[0] == 'plate':
            dataBack = param[3].process(image, param[2], param[0])
            logger.info("算法分割结果: {}", dataBack)
            if dataBack is None or dataBack.get("type") is None:
                raise ServiceException(ExceptionType.AI_RECOGNITION_FAILED.value[0],
                                       ExceptionType.AI_RECOGNITION_FAILED.value[1])
            # dataBack example: {'type': 2, 'plateImage': (img, score), 'color': 'green'}
            if dataBack.get("plateImage") is None or len(dataBack.get("plateImage")) == 0:
                carCode = ''
                score = ''
            else:
                # Plate uses the dedicated license-plate API, not universal OCR.
                result = param[4].license_plate_recognition(dataBack.get("plateImage")[0], param[5])
                score = dataBack.get("plateImage")[1]
                if result is None or result.get("words_result") is None:
                    logger.error("车牌识别为空: {}", result)
                    carCode = ''
                else:
                    carCode = result.get("words_result").get("number")
            return {'type': str(dataBack.get("type")), 'carUrl': param[1], 'carCode': carCode, 'score': score}
        if param[0] == 'code':
            dataBack = param[3].process(image, param[2], param[0])
            logger.info("算法分割结果: {}", dataBack)
            if dataBack is None or dataBack.get("type") is None:
                raise ServiceException(ExceptionType.AI_RECOGNITION_FAILED.value[0],
                                       ExceptionType.AI_RECOGNITION_FAILED.value[1])
            if dataBack.get("type") == 0:
                # Trip code (type 0): phone number + visited cities.
                phoneNumberRecognition, phone_score = _ocr_text_field(
                    dataBack.get("phoneNumberImage"), param[4], param[5], "手机号")
                # BUG FIX: the original validated the city OCR result against
                # the *phone* words_result (and raised NameError when the phone
                # crop was absent); it now checks the city result itself.
                cityRecognition, city_score = _ocr_text_field(
                    dataBack.get("cityImage"), param[4], param[5], "城市")
                return {'type': str(dataBack.get("type")),
                        'imageUrl': param[1],
                        'phoneNumberRecognition': phoneNumberRecognition,
                        # NOTE(review): 'phone_sorce' (sic) is kept because
                        # downstream consumers may already read the misspelled
                        # key; the type-1 payload below uses 'phone_score'.
                        'phone_sorce': phone_score,
                        'cityRecognition': cityRecognition,
                        'city_score': city_score}
            elif dataBack.get("type") == 1:
                # Health code (type 1): name + phone number + nucleic-acid result.
                nameRecognition, name_score = _ocr_text_field(
                    dataBack.get("nameImage"), param[4], param[5], "名字")
                phoneNumberRecognition, phone_score = _ocr_text_field(
                    dataBack.get("phoneNumberImage"), param[4], param[5], "手机号")
                hsRecognition, hs_score = _ocr_text_field(
                    dataBack.get("hsImage"), param[4], param[5], "核酸")
                return {'type': str(dataBack.get("type")),
                        'imageUrl': param[1],
                        'color': dataBack.get("color"),
                        'nameRecognition': nameRecognition,
                        'name_score': name_score,
                        'phoneNumberRecognition': phoneNumberRecognition,
                        'phone_score': phone_score,
                        'hsRecognition': hsRecognition,
                        'hs_score': hs_score}
            else:
                raise ServiceException(ExceptionType.AI_RECOGNITION_FAILED.value[0],
                                       ExceptionType.AI_RECOGNITION_FAILED.value[1])
    except ServiceException as s:
        logger.exception("AI划图,百度云识别失败:{}, msgId: {}", s.msg, param[5])
        return None
    except Exception as e:
        logger.exception("AI划图,百度云识别失败: {}, msgId:{}", e, param[5])
        return None
class DispatcherService():
    """Algorithm dispatch service.

    Pulls recognition tasks from the fk-alg-tasks Kafka topic, waits for a
    free GPU, fans each registration out to ``distinguish`` on a thread
    pool, and enqueues the aggregated feedback for FeedbackThread to send.
    """

    def __init__(self):
        # Environment-specific configuration (kafka / gpu / log / baiduocr).
        self.content = YmlUtils.getConfigs()
        # Configure loguru sinks before anything logs.
        LogUtils.init_log(self.content)
        # Responses travel to the Kafka producer thread through this queue.
        self.fbQueue = Queue()

    def start_service(self):
        """Run the blocking consume loop; does not return in normal operation."""
        # The feedback sender must be alive before any response is queued.
        self.start_feedback_thread()
        topics = self.content["kafka"]["topic"]["fk-alg-tasks-topic"]
        customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.content, topics=topics)
        # NOTE(review): banner says "DSP" although this is the FK service —
        # kept as-is in case anything greps the logs for it.
        print("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙")
        orc = OcrBaiduSdk(self.content)
        # The segmentation model is loaded once and shared by all tasks.
        fkmodel = FKModel()
        while True:
            gpu_ids = GPUtils.get_gpu_ids(self.content)
            if gpu_ids is None or len(gpu_ids) == 0:
                # NOTE(review): this branch spins without sleeping; consider a
                # short delay if get_gpu_ids turns out to be cheap.
                logger.info("当前可用gpu数量: {}", gpu_ids)
                GPUtil.showUtilization()
                continue
            msg = customerKafkaConsumer.poll()
            if msg is None or len(msg) == 0:
                time.sleep(2)
                continue
            for k, v in msg.items():
                for m in v:
                    message = m.value
                    try:
                        # Block until a GPU frees up, then commit the offset so
                        # the broker will not redeliver this message.
                        gpu_ids = self.checkGPU(message.get("msgId"))
                        customerKafkaConsumer.commit_offset(m)
                        logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, msgId:{}",
                                    m.topic, m.offset, m.partition, message, message.get("msgId"))
                        ################## 消息驱动分析执行 ##################
                        if not self.check_fk_msg(message):
                            raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
                                                   ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
                        msgId = message.get("msgId")
                        registerMsg = message.get("registerMsg")
                        response = message_feedback(msgId,
                                                    erroCode=ExceptionType.SUCCESS.value[0],
                                                    erroMsg=ExceptionType.SUCCESS.value[1],
                                                    registerRecognitionMsg=[])
                        failed = False
                        obj_list = []
                        with ThreadPoolExecutor(max_workers=3) as t:
                            for flowMan in registerMsg:
                                obj_list.append(t.submit(distinguish, flowMan, fkmodel, gpu_ids[0], orc, msgId))
                            for future in as_completed(obj_list):
                                data = future.result()
                                if data is None:
                                    # BUG FIX: the original `return`ed a feedback
                                    # dict here, silently terminating the whole
                                    # service loop without ever sending the error.
                                    failed = True
                                else:
                                    response["registerRecognitionMsg"].append(data)
                        if failed:
                            self.fbQueue.put(message_feedback(msgId,
                                                              erroCode=ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                              erroMsg=ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
                                                              registerRecognitionMsg=[]))
                        else:
                            self.fbQueue.put(response)
                    except ServiceException as s:
                        logger.exception("消息监听异常:{}, msgId: {}", s.msg, message.get("msgId"))
                        self.fbQueue.put(message_feedback(message.get("msgId"), s.code, s.msg))
                    except Exception as e:
                        # BUG FIX: the original logged message.get("request_id"),
                        # a key fk task messages do not carry; use msgId instead.
                        logger.exception("消息监听异常:{}, msgId: {}", e, message.get("msgId"))
                        self.fbQueue.put(message_feedback(message.get("msgId"),
                                                          ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                                          ExceptionType.SERVICE_INNER_EXCEPTION.value[1]))

    def checkGPU(self, msgId):
        """Poll every 5 s until at least one GPU id is available; return the list."""
        while True:
            GPUtil.showUtilization()
            gpu_ids = GPUtils.get_gpu_ids(self.content)
            if gpu_ids is not None and len(gpu_ids) > 0:
                return gpu_ids
            # BUG FIX: the original formatted len(gpu_ids) unconditionally,
            # raising TypeError whenever get_gpu_ids returned None.
            logger.warning("暂无可用GPU资源,5秒后重试, 可用gpu数: {}, msgId: {}",
                           0 if gpu_ids is None else len(gpu_ids), msgId)
            time.sleep(5)

    def start_feedback_thread(self):
        """Start the Kafka feedback publisher (FeedbackThread.run loop)."""
        feedbackThread = FeedbackThread(self.fbQueue, self.content)
        feedbackThread.start()

    # 校验kafka消息
    def check_fk_msg(self, msg):
        """Minimal schema check: a task must carry both msgId and registerMsg."""
        return msg.get("msgId") is not None and msg.get("registerMsg") is not None
# import sys | |||||
# sys.path.extend(["..", "../util"]) | |||||
# from util.AliyunSdk import AliyunVodSdk | |||||
# from concurrency.CommonThread import Common | |||||
from kafka import KafkaProducer | |||||
import json | |||||
import threading | |||||
# topicName = 'dsp-alg-online-tasks' | |||||
# eBody = { | |||||
# "request_id": "d4c909912ac741ce81ccef03fd1b2ec45", | |||||
# "models": [ | |||||
# { | |||||
# "code": "001", | |||||
# "categories": [{ | |||||
# "id": "0", | |||||
# "config": {} | |||||
# }, | |||||
# { | |||||
# "id": "1", | |||||
# "config": {} | |||||
# }, | |||||
# { | |||||
# "id": "2", | |||||
# "config": {} | |||||
# }, | |||||
# { | |||||
# "id": "3", | |||||
# "config": {} | |||||
# }] | |||||
# }], | |||||
# "command": "start", | |||||
# "pull_url": "rtmp://live.play.t-aaron.com/live/THSAj_hd", | |||||
# "push_url": "rtmp://live.push.t-aaron.com/live/THSAk", | |||||
# "results_base_dir": "P20220802133841159" | |||||
# } | |||||
# def on_send_success(record_metadata,aaa, ad): | |||||
# print("kafka异步发送信息成功,topic:{}|partition:{}|offset:{}", record_metadata, aaa) | |||||
# | |||||
# def on_send_error(excp): | |||||
# print(excp) | |||||
# producer = KafkaProducer(bootstrap_servers=['101.132.127.1:19092'], | |||||
# value_serializer=lambda m: json.dumps(m).encode('utf-8')) | |||||
# future = producer.send(topicName, key=b'd4c909912ac741ce81ccef03fd1b2ec45', value=eBody).add_callback( | |||||
# on_send_success, "sdasd", "1111").add_errback(on_send_error) | |||||
# result = future.get(timeout=10) | |||||
# print(result) | |||||
# Manual integration script: publish one sample registration task to the
# 'fk-alg-tasks' kafka topic and wait for the broker acknowledgement.
topicName = 'fk-alg-tasks'
# Sample payload matching the consumer's expected schema (msgId + registerMsg list).
aa = {
    "msgId": "1312313213",
    "registerMsg": [
        {
            "registerId": '12345',
            "carUrl": "https://image.t-aaron.com/imagedir/w94owdobfm_1667283710143.png",
            "flowManUrlList": [
                "https://image.t-aaron.com/imagedir/kv1mx82nje_1667373524073.png",
                # "https://image.t-aaron.com/imagedir/qbvx3c5ym2f_1667368182561.png"
            ]
        }
    ]
}
# Values are JSON-serialized; the key is the raw msgId as bytes.
producer = KafkaProducer(bootstrap_servers=['192.168.11.13:9092'],
                         value_serializer=lambda m: json.dumps(m).encode('utf-8'))
future = producer.send(topicName, key=b'1312313213', value=aa)
# Block up to 10 seconds for the broker acknowledgement (raises on failure).
result = future.get(timeout=10)
print(result)
import asyncio | |||||
import time | |||||
# Demo: drive a single coroutine to completion with loop.run_until_complete.
async def task():
    print(f"{time.strftime('%H:%M:%S')} task 开始 ")
    # NOTE(review): time.sleep blocks the whole event loop for 2s;
    # `await asyncio.sleep(2)` would yield control instead — confirm intent
    # (this may be deliberate to demonstrate blocking).
    time.sleep(2)
    print(f"{time.strftime('%H:%M:%S')} task 结束")
# Calling the async function only creates a coroutine object; nothing runs yet.
coroutine = task()
print(f"{time.strftime('%H:%M:%S')} 产生协程对象 {coroutine},函数并未被调用")
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# since Python 3.10; asyncio.run(coroutine) is the modern equivalent.
loop = asyncio.get_event_loop()
print(f"{time.strftime('%H:%M:%S')} 开始调用协程任务")
start = time.time()
loop.run_until_complete(coroutine)
end = time.time()
print(f"{time.strftime('%H:%M:%S')} 结束调用协程任务, 耗时{end - start} 秒")
import asyncio | |||||
import time | |||||
# Demo: attach a done-callback to a Task and run it to completion.
async def _task():
    print(f"{time.strftime('%H:%M:%S')} task 开始 ")
    # NOTE(review): time.sleep blocks the event loop; asyncio.sleep would yield.
    time.sleep(2)
    print(f"{time.strftime('%H:%M:%S')} task 结束")
    return "运行结束"
def callback(task):
    # Runs after the task finishes; task.result() is the coroutine's return value.
    print(f"{time.strftime('%H:%M:%S')} 回调函数开始运行")
    print(f"状态:{task.result()}")
coroutine = _task()
print(f"{time.strftime('%H:%M:%S')} 产生协程对象 {coroutine},函数并未被调用")
# NOTE(review): ensure_future() with no running event loop relies on the
# deprecated implicit-loop behavior; on recent Python this warns or fails —
# creating the task inside the loop (or using asyncio.run) is preferred.
task = asyncio.ensure_future(coroutine)  # 返回task对象
task.add_done_callback(callback)  # 为task增加一个回调任务
loop = asyncio.get_event_loop()
print(f"{time.strftime('%H:%M:%S')} 开始调用协程任务")
start = time.time()
loop.run_until_complete(task)
end = time.time()
print(f"{time.strftime('%H:%M:%S')} 结束调用协程任务, 耗时{end - start} 秒")
import time | |||||
from io import BytesIO | |||||
import numpy as np | |||||
import cv2 | |||||
# from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img | |||||
import requests | |||||
from PIL import Image | |||||
import skimage.io as io | |||||
import matplotlib.pyplot as plt | |||||
import matplotlib.image as mpig | |||||
from requests.packages import urllib3 | |||||
''' | |||||
方式: 返回类型 | |||||
OpenCV np.ndarray | |||||
PIL PIL.JpegImagePlugin.JpegImageFile | |||||
keras.preprocessing.image PIL.JpegImagePlugin.JpegImageFile | |||||
Skimage.io np.ndarray | |||||
matplotlib.pyplot np.ndarray | |||||
matplotlib.image np.ndarray | |||||
''' | |||||
# Remote test image used by all of the image-loading approaches below.
imagePath="https://gimg2.baidu.com/image_search/src=http%3A%2F%2Flmg.jj20.com%2Fup%2Fallimg%2F1114%2F121420113514%2F201214113514-6-1200.jpg&refer=http%3A%2F%2Flmg.jj20.com&app=2002&size=f9999,10000&q=a80&n=0&g=0n&fmt=auto?sec=1670138586&t=bcf258851b92c61ca3ee2de96dd3df45"
''' | |||||
方式一:使用OpenCV | |||||
''' | |||||
# img1=cv2.imread(imagePath) | |||||
# print("img1:",img1.shape) | |||||
# print("img1:",type(img1)) | |||||
# print("-"*10) | |||||
''' | |||||
方式二:使用PIL | |||||
''' | |||||
# img2=Image.open(imagePath) | |||||
# print("img2:",img2) | |||||
# print("img2:",type(img2)) | |||||
# #转换成np.ndarray格式 | |||||
# img2=np.array(img2) | |||||
# print("img2:",img2.shape) | |||||
# print("img2:",type(img2)) | |||||
# print("-"*10) | |||||
# | |||||
# | |||||
# ''' | |||||
# 方式三:使用keras.preprocessing.image | |||||
# ''' | |||||
# img3=load_img(imagePath) | |||||
# print("img3:",img3) | |||||
# print("img3:",type(img3)) | |||||
# #转换成np.ndarray格式,使用np.array(),或者使用keras里的img_to_array() | |||||
# #使用np.array() | |||||
# #img3=np.array(img2) | |||||
# #使用keras里的img_to_array() | |||||
# img3=img_to_array(img3) | |||||
# print("img3:",img3.shape) | |||||
# print("img3:",type(img3)) | |||||
# print("-"*10) | |||||
# | |||||
# | |||||
''' | |||||
方式四:使用Skimage.io | |||||
''' | |||||
start = time.time()
# img4=io.imread(imagePath)
# BUG fix: `requests.packages.urllib3` exposes no urlopen() — the original line
# raised AttributeError. Fetch the bytes with the standard library instead.
import urllib.request
origin_file = urllib.request.urlopen(imagePath).read()
# img = Image.open(origin_file)
print(time.time() - start)
# | |||||
# | |||||
# ''' | |||||
# 方式五:使用matplotlib.pyplot | |||||
# ''' | |||||
# img5=plt.imread(imagePath) | |||||
# print("img5:",img5.shape) | |||||
# print("img5:",type(img5)) | |||||
# print("-"*10) | |||||
# | |||||
# | |||||
# ''' | |||||
# 方式六:使用matplotlib.image | |||||
# ''' | |||||
# img6=mpig.imread(imagePath) | |||||
# print("img6:",img6.shape) | |||||
# print("img6:",type(img6)) | |||||
# print("-"*10) | |||||
# Approach: fetch via requests, decode with PIL, then convert to an ndarray.
start = time.time()
raw_bytes = requests.get(imagePath).content
image = Image.open(BytesIO(raw_bytes))
img2 = np.array(image)
print(time.time() - start, type(image))
<?xml version="1.0" encoding="UTF-8"?>
<!-- IntelliJ/PyCharm module file: a single-content-root Python module that
     inherits the project SDK and compiler output settings. -->
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager" inherit-compiler-output="true">
    <exclude-output />
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
import oss2 | |||||
import time | |||||
from voduploadsdk.AliyunVodUtils import * | |||||
from exception.CustomerException import ServiceException | |||||
from enums.ExceptionEnum import ExceptionType | |||||
import json | |||||
from aliyunsdkcore.client import AcsClient | |||||
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest | |||||
from voduploadsdk.AliyunVodUtils import * | |||||
from voduploadsdk.AliyunVodUploader import AliyunVodUploader | |||||
from voduploadsdk.UploadVideoRequest import UploadVideoRequest | |||||
class AliyunOssSdk():
    """Thin wrapper around an aliyun OSS bucket for uploading byte payloads."""

    def __init__(self, content, logger, requestId):
        self.content = content      # parsed service configuration dict
        self.bucket = None          # lazily created oss2.Bucket
        self.logger = logger
        self.requestId = requestId

    def get_oss_bucket(self):
        """Lazily initialise the OSS bucket client from configuration."""
        if self.bucket is None:
            self.logger.info("初始化oss桶, requestId:{}", self.requestId)
            auth = oss2.Auth(self.content["aliyun"]["access_key"], self.content["aliyun"]["access_secret"])
            self.bucket = oss2.Bucket(auth, self.content["aliyun"]["oss"]["endpoint"],
                                      self.content["aliyun"]["oss"]["bucket"],
                                      connect_timeout=self.content["aliyun"]["oss"]["connect_timeout"])

    def upload_file(self, updatePath, fileByte):
        """Upload fileByte under key updatePath, retrying up to MAX_RETRIES times.

        Re-raises the last exception once the retry budget is exhausted.
        """
        self.logger.info("开始上传文件到oss, requestId:{}", self.requestId)
        self.get_oss_bucket()
        MAX_RETRIES = 3
        retry_count = 0
        while True:
            try:
                self.bucket.put_object(updatePath, fileByte)
                self.logger.info("上传文件到oss成功! requestId:{}", self.requestId)
                break
            except Exception as e:
                retry_count += 1
                self.logger.info("上传文件到oss失败, 重试次数:{}, requestId:{}", retry_count, self.requestId)
                # BUG fix: check exhaustion BEFORE sleeping, so the final failure
                # is raised immediately instead of after a pointless 1s delay.
                if retry_count > MAX_RETRIES:
                    self.logger.exception("上传文件到oss重试失败:{}, requestId:{}", e, self.requestId)
                    # bare raise preserves the original traceback
                    raise
                time.sleep(1)
class ThAliyunVodSdk():
    """Aliyun VOD helper: upload local video files and resolve playable URLs."""

    def __init__(self, content, logger, requestId):
        self.content = content      # parsed service configuration dict
        self.logger = logger
        self.requestId = requestId

    def init_vod_client(self, accessKeyId, accessKeySecret):
        """Build an AcsClient for the configured VOD region (auto-retry x3, 5s timeout)."""
        regionId = self.content["aliyun"]["vod"]["ecsRegionId"]
        return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=5)

    def get_play_info(self, videoId):
        """Poll VOD until a play URL for videoId is available; return that URL.

        Transient "HTTP Status: 403" errors (video not yet ready) are retried
        every 5 seconds for at most 2 hours. Any other error — or a 403 that
        carries UploadFail/TranscodeFail — raises ServiceException immediately.
        """
        self.logger.info("开始获取视频地址,videoId:{}, requestId:{}", videoId, self.requestId)
        start = time.time()
        while True:
            try:
                clt = self.init_vod_client(self.content["aliyun"]["access_key"],
                                           self.content["aliyun"]["access_secret"])
                request = GetPlayInfoRequest.GetPlayInfoRequest()
                request.set_accept_format('JSON')
                request.set_VideoId(videoId)
                request.set_AuthTimeout(3600 * 5)
                response = json.loads(clt.do_action_with_exception(request))
                play_url = response["PlayInfoList"]["PlayInfo"][0]["PlayURL"]
                self.logger.info("获取视频地址成功,视频地址: {}, requestId: {}", play_url, self.requestId)
                return play_url
            except Exception as e:
                # BUG fix: classify the failure BEFORE sleeping so fatal errors are
                # raised immediately (the original slept 5s first), and the 2h
                # timeout is measured without the sleep inflating it.
                current_time = time.time()
                if "HTTP Status: 403" not in str(e):
                    self.logger.exception("获取视频地址失败: {}, requestId: {}", e, self.requestId)
                    raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                           ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                if "HTTP Status: 403" in str(e) and ("UploadFail" in str(e) or "TranscodeFail" in str(e)):
                    self.logger.exception("获取视频地址失败: {}, requestId: {}", e, self.requestId)
                    raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
                                           ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
                diff_time = current_time - start
                if diff_time > 60 * 60 * 2:
                    self.logger.exception("获取视频地址失败超时异常: {},超时时间:{}, requestId: {}", e, diff_time, self.requestId)
                    raise ServiceException(ExceptionType.GET_VIDEO_URL_TIMEOUT_EXCEPTION.value[0],
                                           ExceptionType.GET_VIDEO_URL_TIMEOUT_EXCEPTION.value[1])
                self.logger.error("获取视频地址失败,5秒后重试, requestId: {}", self.requestId)
                time.sleep(5)

    def upload_local_video(self, filePath, file_title, storageLocation=None):
        """Upload a local video file to VOD and return the new VideoId.

        Retries AliyunVodException up to MAX_RETRIES times, then raises
        ServiceException(VIDEO_UPDATE_EXCEPTION).
        """
        self.logger.info("开始执行vod视频上传, filePath: {}, requestId: {}", filePath, self.requestId)
        uploader = AliyunVodUploader(self.content["aliyun"]["access_key"], self.content["aliyun"]["access_secret"])
        uploadVideoRequest = UploadVideoRequest(filePath, file_title)
        # A cover image can be set by uploading via UploadImageRequest first and
        # passing the resulting URL, e.g. https://example.com/sample-****.jpg
        # uploadVideoRequest.setCoverURL('<your Image URL>')
        # Tags:
        # uploadVideoRequest.setTags('tag1,tag2')
        if storageLocation:
            uploadVideoRequest.setStorageLocation(storageLocation)
        MAX_RETRIES = 3
        retry_count = 0
        while True:
            try:
                result = uploader.uploadLocalVideo(uploadVideoRequest)
                self.logger.info("vod视频上传成功, videoId:{}, requestId:{}", result.get("VideoId"), self.requestId)
                return result.get("VideoId")
            except AliyunVodException as e:
                retry_count += 1
                self.logger.error("vod视频上传失败,重试次数:{}, requestId:{}", retry_count, self.requestId)
                # BUG fix: give up immediately once the retry budget is spent
                # instead of sleeping 3s before the check.
                if retry_count >= MAX_RETRIES:
                    self.logger.exception("vod视频上传重试失败: {}, requestId:{}", e, self.requestId)
                    raise ServiceException(ExceptionType.VIDEO_UPDATE_EXCEPTION.value[0],
                                           ExceptionType.VIDEO_UPDATE_EXCEPTION.value[1])
                time.sleep(3)
def get_play_url(args):
    """Upload a local video and return its play URL.

    args is a 5-tuple: (filePath, fileTitle, content, logger, requestId).
    Returns None when the upload yields no VideoId.
    """
    vod_sdk = ThAliyunVodSdk(args[2], args[3], args[4])
    video_id = vod_sdk.upload_local_video(args[0], args[1])
    if video_id is None or len(video_id) == 0:
        return None
    return vod_sdk.get_play_info(video_id)
# -*- coding: utf-8 -*- | |||||
import time | |||||
import cv2 | |||||
import subprocess as sp | |||||
import ffmpeg | |||||
import numpy as np | |||||
from loguru import logger | |||||
from exception.CustomerException import ServiceException | |||||
from enums.ExceptionEnum import ExceptionType | |||||
class Cv2Util():
    """ffmpeg/cv2 video pipeline helper.

    Responsibilities: probe the pull stream for metadata, decode raw bgr24
    frames through an ffmpeg "pull" subprocess, re-encode frames to an rtmp
    push URL through a second ffmpeg subprocess, and persist original/AI
    copies to local mp4 files via cv2.VideoWriter.
    """

    def __init__(self, pullUrl, pushUrl=None, orFilePath=None, aiFilePath=None, requestId=None):
        self.pullUrl = pullUrl        # source stream / file address
        self.pushUrl = pushUrl        # rtmp target for the merged output
        self.orFilePath = orFilePath  # local mp4 path for the original footage
        self.aiFilePath = aiFilePath  # local mp4 path for the AI-annotated footage
        self.cap = None               # cv2.VideoCapture (legacy path, see commented build_cv2)
        self.p = None                 # push-side ffmpeg process (raw frames in on stdin)
        self.or_video_file = None     # cv2.VideoWriter for original frames
        self.ai_video_file = None     # cv2.VideoWriter for AI frames
        self.fps = None
        self.width = None
        self.height = None
        self.wh = None                # bytes per raw bgr24 frame: width * height * 3
        self.all_frames = None        # total frame count from ffprobe, when reported
        self.bit_rate = None          # source bitrate in kbit/s, when reported
        self.pull_p = None            # pull-side ffmpeg process (raw frames out on stdout)
        self.requestId = requestId
        self.p_push_retry_num = 0     # lifetime count of push-pipe rebuild attempts

    '''
    获取视频信息
    '''

    def get_video_info(self):
        """Probe self.pullUrl with ffprobe and cache width/height/fps/frame-count/bitrate."""
        try:
            if self.pullUrl is None:
                logger.error("拉流地址不能为空, requestId:{}", self.requestId)
                raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0],
                                       ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1])
            probe = ffmpeg.probe(self.pullUrl)
            # video size (unused)
            # format = probe['format']
            # size = int(format['size'])/1024/1024
            video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
            if video_stream is None:
                logger.error("根据拉流地址未获取到视频流, requestId:{}", self.requestId)
                return
            width = video_stream.get('width')
            height = video_stream.get('height')
            nb_frames = video_stream.get('nb_frames')
            fps = video_stream.get('r_frame_rate')
            # duration = video_stream.get('duration')
            bit_rate = video_stream.get('bit_rate')
            if width:
                self.width = int(width)
            if height:
                self.height = int(height)
            if width is not None and height is not None:
                # raw bgr24 frame size in bytes (3 bytes per pixel)
                self.wh = int(width * height * 3)
            if nb_frames:
                self.all_frames = int(nb_frames)
            if fps:
                # r_frame_rate is a fraction string such as "25/1"
                up, down = str(fps).split('/')
                self.fps = int(eval(up) / eval(down))
            # if duration:
            #     self.duration = float(video_stream['duration'])
            if bit_rate:
                self.bit_rate = int(bit_rate) / 1000
            logger.info("视频信息, width:{}|height:{}|fps:{}|all_frames:{}|bit_rate:{}, requestId:{}", self.width,
                        self.height, self.fps, self.all_frames, self.bit_rate, self.requestId)
        except ServiceException as s:
            logger.exception("获取视频信息异常: {}, requestId:{}", s.msg, self.requestId)
            raise s
        except Exception as e:
            # NOTE(review): non-ServiceException probe failures are logged and swallowed,
            # leaving the metadata fields at None — callers must tolerate that.
            logger.exception("获取视频信息异常:{}, requestId:{}", e, self.requestId)

    '''
    拉取视频
    '''

    def build_pull_p(self):
        """(Re)create the ffmpeg pull pipe decoding the stream to raw bgr24 on stdout."""
        try:
            # metadata must be known first; without width/height read() cannot size frames
            if self.width is None or self.height is None:
                return
            if self.pull_p:
                logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId)
                self.pull_p.stdout.close()
                self.pull_p.terminate()
                self.pull_p.wait()
            if self.pullUrl is None:
                logger.error("拉流地址不能为空, requestId:{}", self.requestId)
                raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0],
                                       ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1])
            # command = ['ffmpeg',
            #            # '-b:v', '3000k',
            #            '-i', self.pullUrl,
            #            '-f', 'rawvideo',
            #            '-vcodec', 'rawvideo',
            #            '-pix_fmt', 'bgr24',
            #            # '-s', "{}x{}".format(int(width), int(height)),
            #            '-an',
            #            '-']
            # h264_cuvid selects NVIDIA hardware decoding for the input
            aa = {'loglevel': 'error', 'c:v': 'h264_cuvid'}
            process = (
                ffmpeg
                .input(self.pullUrl, **aa)
                .output('pipe:', format='rawvideo', pix_fmt='bgr24', loglevel='error')
                .overwrite_output()
                .global_args('-an')
                .run_async(pipe_stdout=True)
            )
            # self.pull_p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
            self.pull_p = process
        except ServiceException as s:
            logger.exception("构建拉流管道异常: {}, requestId:{}", s, self.requestId)
            raise s
        except Exception as e:
            logger.exception("构建拉流管道异常:{}, requestId:{}", e, self.requestId)

    def checkconfig(self):
        """Return True when any of fps/width/height is still unknown (probe needed)."""
        if self.fps is None or self.width is None or self.height is None:
            return True
        return False

    def read(self):
        """Read one raw frame from the pull pipe.

        Returns the frame downscaled to (width/2, height/2) as an ndarray,
        or None when no data is available or an error occurs.
        """
        result = None
        try:
            # if self.pull_p is None:
            #     logger.error("拉流管道为空, requestId:{}", self.requestId)
            #     raise ServiceException(ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[0],
            #                            ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[1])
            in_bytes = self.pull_p.stdout.read(self.wh)
            if in_bytes is not None and len(in_bytes) > 0:
                result = (np.frombuffer(in_bytes, np.uint8).reshape([int(self.height), int(self.width), 3]))
                result = cv2.resize(result, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
        except ServiceException as s:
            logger.exception("读流异常: {}, requestId:{}", s, self.requestId)
        except Exception as e:
            logger.exception("读流异常:{}, requestId:{}", e, self.requestId)
        return result

    def close(self):
        """Shut down both ffmpeg pipes and release any open cv2 writers."""
        if self.pull_p:
            if self.pull_p.stdout:
                self.pull_p.stdout.close()
            self.pull_p.terminate()
            self.pull_p.wait()
            logger.info("关闭拉流管道完成, requestId:{}", self.requestId)
        if self.p:
            if self.p.stdin:
                self.p.stdin.close()
            self.p.terminate()
            self.p.wait()
            # self.p.communicate()
            # self.p.kill()
            logger.info("关闭管道完成, requestId:{}", self.requestId)
        if self.or_video_file:
            self.or_video_file.release()
            logger.info("关闭原视频写入流完成, requestId:{}", self.requestId)
        if self.ai_video_file:
            self.ai_video_file.release()
            logger.info("关闭AI视频写入流完成, requestId:{}", self.requestId)

    # legacy cv2-based builder, kept for reference
    # def build_cv2(self):
    #     try:
    #         if self.cap is not None:
    #             logger.info("重试, 关闭cap, requestId:{}", self.requestId)
    #             self.cap.release()
    #         if self.p is not None:
    #             logger.info("重试, 关闭管道, requestId:{}", self.requestId)
    #             self.p.stdin.close()
    #             self.p.terminate()
    #             self.p.wait()
    #         if self.pullUrl is None:
    #             logger.error("拉流地址不能为空, requestId:{}", self.requestId)
    #             raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0],
    #                                    ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1])
    #         if self.pushUrl is None:
    #             logger.error("推流地址不能为空, requestId:{}", self.requestId)
    #             raise ServiceException(ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[0],
    #                                    ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[1])
    #         self.cap = cv2.VideoCapture(self.pullUrl)
    #         if self.fps is None or self.fps == 0:
    #             self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))
    #         if self.width is None or self.width == 0:
    #             self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    #         if self.height is None or self.height == 0:
    #             self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    #         command = ['/usr/bin/ffmpeg',
    #                    # '-y', # overwrite output without asking
    #                    '-f', 'rawvideo',
    #                    '-vcodec', 'rawvideo',
    #                    '-pix_fmt', 'bgr24',
    #                    # '-s', "{}x{}".format(self.width * 2, self.height),
    #                    '-s', "{}x{}".format(int(self.width), int(self.height/2)),
    #                    # '-r', str(15),
    #                    '-i', '-',  # read input from stdin
    #                    '-g', '25',
    #                    '-b:v', '3000k',
    #                    '-tune', 'zerolatency',
    #                    '-c:v', 'libx264',
    #                    '-sc_threshold', '0',
    #                    '-pix_fmt', 'yuv420p',
    #                    '-an',
    #                    '-preset', 'ultrafast',  # encoding speed/quality preset: ultrafast,
    #                    # superfast, veryfast, faster, fast, medium, slow, slower, veryslow
    #                    '-f', 'flv',
    #                    self.pushUrl]
    #         logger.info("fps:{}|height:{}|width:{}|requestId:{}", self.fps, self.height, self.width, self.requestId)
    #         self.p = sp.Popen(command, stdin=sp.PIPE)
    #     except ServiceException as s:
    #         logger.exception("构建cv2异常: {}, requestId:{}", s, self.requestId)
    #         raise s
    #     except Exception as e:
    #         logger.exception("初始化cv2异常:{}, requestId:{}", e, self.requestId)

    # build the push pipe
    def build_p(self):
        """(Re)create the ffmpeg push pipe encoding raw stdin frames to self.pushUrl."""
        try:
            if self.p:
                logger.info("重试, 关闭管道, requestId:{}", self.requestId)
                if self.p.stdin:
                    self.p.stdin.close()
                self.p.terminate()
                self.p.wait()
                # self.p.communicate()
                # self.p.kill()
            if self.pushUrl is None:
                logger.error("推流地址不能为空, requestId:{}", self.requestId)
                raise ServiceException(ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[0],
                                       ExceptionType.PUSH_STREAM_URL_EXCEPTION.value[1])
            command = ['ffmpeg',
                       # '-loglevel', 'debug',
                       '-y',  # overwrite output without asking
                       '-f', 'rawvideo',
                       '-vcodec', 'rawvideo',
                       '-pix_fmt', 'bgr24',
                       '-thread_queue_size', '16',
                       # '-s', "{}x{}".format(self.width * 2, self.height),
                       '-s', "{}x{}".format(int(self.width), int(self.height / 2)),
                       '-r', str(self.fps),
                       '-i', '-',  # read input from stdin
                       '-g', str(self.fps),
                       # '-maxrate', '15000k',
                       # '-profile:v', 'high',
                       # '-b:v', '4000k',
                       # '-crf', '18',
                       '-rc:v', 'vbr',
                       '-cq:v', '30',
                       '-qmin', '30',
                       '-qmax', '30',
                       '-c:v', 'h264_nvenc',  # NVIDIA hardware encoder
                       # '-bufsize', '4000k',
                       # '-c:v', 'libx264',
                       # '-tune', 'zerolatency',
                       # '-sc_threshold', '0',
                       '-pix_fmt', 'yuv420p',
                       "-an",
                       # '-flvflags', 'no_duration_filesize',
                       '-preset', 'fast',  # encoding speed/quality preset: ultrafast,
                       # superfast, veryfast, faster, fast, medium, slow, slower, veryslow
                       '-f', 'flv',
                       self.pushUrl]
            # command = 'ffmpeg -loglevel debug -y -f rawvideo -vcodec rawvideo -pix_fmt bgr24' +\
            #           ' -s ' + "{}x{}".format(int(self.width), int(self.height/2))\
            #           + ' -i - ' + '-g ' + str(self.fps)+\
            #           ' -b:v 6000k -tune zerolatency -c:v libx264 -pix_fmt yuv420p -preset ultrafast'+\
            #           ' -f flv ' + self.pushUrl
            # kwargs = {'format': 'rawvideo',
            #           # 'vcodec': 'rawvideo',
            #           'pix_fmt': 'bgr24',
            #           's': '{}x{}'.format(int(self.width), int(self.height/2))}
            # out = {
            #     'r': str(self.fps),
            #     'g': str(self.fps),
            #     'b:v': '5500k',  # constant bitrate
            #     # 'maxrate': '15000k',
            #     # 'crf': '18',
            #     'bufsize': '5500k',
            #     'tune': 'zerolatency',
            #     'c:v': 'libx264',
            #     'sc_threshold': '0',
            #     'pix_fmt': 'yuv420p',
            #     # 'flvflags': 'no_duration_filesize',
            #     'preset': 'medium',  # encoding speed/quality preset: ultrafast,
            #     # superfast, veryfast, faster, fast, medium, slow, slower, veryslow
            #     'format': 'flv'}
            # pipe configuration
            # process2 = (
            #     ffmpeg
            #     .input('pipe:', **kwargs)
            #     .output(self.pushUrl, **out)
            #     .global_args('-y', '-an')
            #     .overwrite_output()
            #     .run_async(pipe_stdin=True)
            # )
            logger.info("fps:{}|height:{}|width:{}|requestId:{}", self.fps, self.height, self.width, self.requestId)
            self.p = sp.Popen(command, stdin=sp.PIPE, shell=False)
            # self.p = process2
        except ServiceException as s:
            logger.exception("构建p管道异常: {}, requestId:{}", s, self.requestId)
            raise s
        except Exception as e:
            logger.exception("初始化p管道异常:{}, requestId:{}", e, self.requestId)

    def push_stream(self, frame):
        """Write one frame to the push pipe, rebuilding the pipe on failure.

        Retries the rebuild+write up to 3 times (10s apart); more than 20
        lifetime rebuilds short-circuits straight to the failure path. Raises
        ServiceException(PUSH_STREAM_URL_E_EXCEPTION) when retries are exhausted.
        """
        if self.p is None:
            self.build_p()
        try:
            # NOTE(review): ndarray.tostring() is a deprecated alias of tobytes();
            # consider migrating — left unchanged here.
            self.p.stdin.write(frame.tostring())
        except Exception as ex:
            logger.exception("推流进管道异常:{}, requestId: {}", ex, self.requestId)
            current_retry_num = 0
            while True:
                try:
                    if self.p_push_retry_num > 20:
                        logger.info("推流失败重试次数过多, 请检查相关配置信息, 当前重试次数: {}, requestId: {}",
                                    self.p_push_retry_num, self.requestId)
                        # force the raise below by marking retries as exhausted
                        current_retry_num = 4
                        break
                    self.p_push_retry_num += 1
                    time.sleep(10)
                    self.build_p()
                    self.p.stdin.write(frame.tostring())
                    logger.info("构建p管道重试成功, 当前重试次数: {}, requestId: {}", current_retry_num,
                                self.requestId)
                    break
                except Exception as e:
                    current_retry_num += 1
                    logger.exception("构建p管道异常:{}, 开始重试, 当前重试次数:{}, requestId: {}", e,
                                     current_retry_num, self.requestId)
                    if current_retry_num > 3:
                        logger.exception("构建p管道重试失败:{}, requestId: {}", e, self.requestId)
                        break
            if current_retry_num > 3:
                raise ServiceException(ExceptionType.PUSH_STREAM_URL_E_EXCEPTION.value[0],
                                       ExceptionType.PUSH_STREAM_URL_E_EXCEPTION.value[1])

    def video_write(self, or_frame, ai_frame):
        """Append frames to the original/AI local files, retrying writes on failure.

        Raises ServiceException(SERVICE_INNER_EXCEPTION) after 3 failed retries.
        """
        try:
            self.build_write()
            if or_frame is not None and len(or_frame) > 0:
                self.or_video_file.write(or_frame)
            if ai_frame is not None and len(ai_frame) > 0:
                self.ai_video_file.write(ai_frame)
        except Exception as ex:
            ai_retry_num = 0
            while True:
                try:
                    if or_frame is not None and len(or_frame) > 0:
                        self.or_video_file.write(or_frame)
                    if ai_frame is not None and len(ai_frame) > 0:
                        self.ai_video_file.write(ai_frame)
                    logger.info("重新写入离线分析后视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num,
                                self.requestId)
                    break
                except Exception as e:
                    ai_retry_num += 1
                    logger.exception("重新写入离线分析后视频到本地:{}, 开始重试, 当前重试次数:{}, requestId: {}", e,
                                     ai_retry_num, self.requestId)
                    if ai_retry_num > 3:
                        logger.exception("重新写入离线分析后视频到本地,重试失败:{}, requestId: {}", e, self.requestId)
                        raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
                                               ExceptionType.SERVICE_INNER_EXCEPTION.value[1])

    def build_write(self):
        """Lazily create the cv2.VideoWriter objects for the configured file paths.

        Requires fps/width/height to be known; raises ServiceException otherwise.
        """
        try:
            if self.fps is None or self.width is None or self.height is None:
                raise ServiceException(ExceptionType.VIDEO_CONFIG_EXCEPTION.value[0],
                                       ExceptionType.VIDEO_CONFIG_EXCEPTION.value[1])
            if self.orFilePath is not None and self.or_video_file is None:
                # original footage is stored at half resolution (matches read())
                self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps,
                                                     (int(self.width / 2), int(self.height / 2)))
                if self.or_video_file is None:
                    raise ServiceException(ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[0],
                                           ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[1])
            if self.aiFilePath is not None and self.ai_video_file is None:
                # AI footage is the side-by-side merge: full width, half height
                self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps,
                                                     (int(self.width), int(self.height / 2)))
                if self.ai_video_file is None:
                    raise ServiceException(ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[0],
                                           ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[1])
        except ServiceException as s:
            logger.exception("构建文件写对象异常: {}, requestId:{}", s, self.requestId)
            raise s
        except Exception as e:
            logger.exception("构建文件写对象异常: {}, requestId:{}", e, self.requestId)
            raise e

    def video_merge(self, frame1, frame2):
        """Stack two frames horizontally (original | AI) into one image."""
        # frameLeft = cv2.resize(frame1, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
        # frameRight = cv2.resize(frame2, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
        # frame_merge = np.hstack((frameLeft, frameRight))
        frame_merge = np.hstack((frame1, frame2))
        return frame_merge

    def getP(self):
        """Return the push-pipe process; raise if it has not been built."""
        if self.p is None:
            logger.error("获取管道为空, requestId:{}", self.requestId)
            raise ServiceException(ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[0],
                                   ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[1])
        return self.p

    def getCap(self):
        """Return the cv2.VideoCapture; raise if it has not been built."""
        if self.cap is None:
            logger.error("获取cv2为空, requestId:{}", self.requestId)
            raise ServiceException(ExceptionType.CV2_IS_NULL_EXCEPTION.value[0],
                                   ExceptionType.CV2_IS_NULL_EXCEPTION.value[1])
        return self.cap

    def getOrVideoFile(self):
        """Return the original-footage writer; raise if it has not been created."""
        if self.or_video_file is None:
            logger.error("获取原视频写入对象为空, requestId:{}", self.requestId)
            raise ServiceException(ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[0],
                                   ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[1])
        return self.or_video_file

    def getAiVideoFile(self):
        """Return the AI-footage writer; raise if it has not been created."""
        if self.ai_video_file is None:
            logger.error("获取AI视频写入对象为空, requestId:{}", self.requestId)
            raise ServiceException(ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[0],
                                   ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[1])
        return self.ai_video_file
# -*- coding: utf-8 -*- | |||||
import os | |||||
from loguru import logger | |||||
''' | |||||
文件处理工具类 | |||||
''' | |||||
def create_dir_not_exist(path):
    """Create directory `path` (including parents) if it does not already exist."""
    logger.info("检查文件夹是否存在: {}", path)
    if not os.path.exists(path):
        logger.info("开始创建文件夹: {}", path)
        # BUG fix: exist_ok=True closes the race between the exists() check and
        # makedirs() — a concurrent creation no longer raises FileExistsError.
        os.makedirs(path, exist_ok=True)
        logger.info("文件夹创建完成 {}", path)
import GPUtil | |||||
# order- 确定返回可用 GPU 设备 ID 的顺序。order应指定为以下字符串之一: | |||||
# 'first'- 按升序排列可用的 GPU 设备 ID(默认) | |||||
# 'last'- 按 id 降序排列可用的 GPU 设备 id | |||||
# 'random'- 随机订购可用的 GPU 设备 ID | |||||
# 'load'- 按负载递增排序可用的 GPU 设备 ID | |||||
# 'memory'- 通过升序内存使用来排序可用的 GPU 设备 ID | |||||
# limit- 将返回的 GPU 设备 ID 数量限制为指定数量。必须是正整数。(默认 = 1) | |||||
# maxLoad- 被认为可用的 GPU 的最大当前相对负载。负载大于 的 GPUmaxLoad不会返回。(默认 = 0.5) | |||||
# maxMemory- 被视为可用的 GPU 的最大当前相对内存使用量。maxMemory不返回当前内存使用量大于的 GPU 。(默认 = 0.5) | |||||
# includeNan- 真/假标志,指示是否包括负载或内存使用为 NaN 的 GPU(指示无法检索使用情况)。(默认 = 假) | |||||
# excludeID- ID 列表,应从可用 GPU 列表中排除。见GPU类描述。(默认 = []) | |||||
# excludeUUIDexcludeID-除了它使用 UUID 之外,其他相同。(默认 = []) | |||||
# 输出 | |||||
# deviceIDs - 所有可用 GPU 设备 ID 的列表。如果当前负载和内存使用量分别小于maxLoad和maxMemory,则认为 GPU 可用。该列表是根据 排序的order。返回的设备 ID 的最大数量由 限制limit。 | |||||
def get_gpu_ids(content):
    """Return the IDs of GPUs considered available under the configured thresholds.

    Reads order/limit/maxLoad/maxMemory/includeNan/excludeID/excludeUUID from
    content["gpu"] and forwards them to GPUtil.getAvailable.
    """
    gpu_cfg = content["gpu"]
    return GPUtil.getAvailable(order=gpu_cfg["order"],
                               limit=int(gpu_cfg["limit"]),
                               maxLoad=float(gpu_cfg["maxLoad"]),
                               maxMemory=float(gpu_cfg["maxMemory"]),
                               includeNan=gpu_cfg["includeNan"],
                               excludeID=gpu_cfg["excludeID"],
                               excludeUUID=gpu_cfg["excludeUUID"])
def get_all_gpu_ids():
    """Return GPUtil's GPU objects for every device visible on this host."""
    return GPUtil.getGPUs()
import time | |||||
from io import BytesIO | |||||
import cv2 | |||||
import requests | |||||
from PIL import Image, ImageDraw, ImageFont | |||||
import numpy as np | |||||
''' | |||||
文字水印 | |||||
''' | |||||
class TextWaterMark():
    """Text watermarking helpers for PIL images or OpenCV ndarrays."""
    def __init__(self):
        # Named RGBA colors selectable via the `fontcolor` argument.
        self.color_dict = {
            # R G B
            # Color tables: https://tool.oschina.net/commons?type=3
            # and: http://tools.jb51.net/static/colorpicker/
            'white': (255, 255, 255, 255),
            'black': (0, 0, 0, 255),
            'gray': (205, 201, 201, 255),
            'red': (255, 0, 0, 255),
            'yellow': (255, 215, 0, 255),
            'blue': (0, 0, 170, 255),
            'purple': (205, 105, 201, 255),
            'green': (0, 205, 0, 255)
        }
        # Valid values for common_water()'s `position` argument.
        self.position_list = [1, 2, 3, 4]
    """
    Plain photo text watermark.
    params:
        image: image (PIL Image or OpenCV ndarray)
        text: watermark text
        position: watermark placement
            1: top-left
            2: top-right
            3: bottom-right
            4: bottom-left
        fontface: font file path
        fontsize: font size
        fontcolor: font color, one of
            [white, black, gray, red, yellow, blue, purple, green]
    """
    def common_water(self, image, text, position=1, fontface='../font/simsun.ttc', fontsize=20, fontcolor='black'):
        flag = False
        if isinstance(image, np.ndarray):  # OpenCV-style input? convert to PIL
            flag = True
            # NOTE(review): COLOR_BGRA2RGBA assumes a 4-channel BGRA array —
            # confirm callers never pass a 3-channel BGR frame here.
            image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA))
        if position not in self.position_list:
            position = 1  # fall back to top-left on an invalid position
        w, h = image.size[:2]
        keys = self.color_dict.keys()
        if fontcolor not in keys:
            fontcolor = 'black'  # unknown color name falls back to black
        color = self.color_dict[fontcolor]
        fnt = ImageFont.truetype(fontface, fontsize)
        im = image.convert('RGBA')
        # Draw the text on a fully transparent overlay, then alpha-composite.
        mask = Image.new('RGBA', im.size, (0, 0, 0, 0))
        d = ImageDraw.Draw(mask)
        size_w, size_h = d.textsize(text, font=fnt)
        # Anchor point (top-left of the text) at 10% margins from the edges.
        if position == 1:
            weizhi = (w * 0.1, h * 0.1)
        elif position == 2:
            weizhi = (w * 0.9 - size_w, h * 0.1)
        elif position == 3:
            weizhi = (w * 0.9 - size_w, h * 0.9 - size_h)
        else:
            weizhi = (w * 0.1, h * 0.9 - size_h)
        # `weizhi` is the top-left position of the drawn text
        d.text(weizhi, text, font=fnt, fill=color)
        out = Image.alpha_composite(im, mask)
        if flag:
            # Convert back for OpenCV callers. The BGRA<->RGBA swap is its own
            # inverse, so the same conversion code works both directions.
            out = cv2.cvtColor(np.asarray(out), cv2.COLOR_BGRA2RGBA)
        return out
    """
    Semi-transparent watermark tiled across the whole image, rotated -45°.
    params:
        image: image (PIL Image or OpenCV ndarray)
        text: watermark text
        fontsize: font size
    """
    def fill_water(self, image, text, fontsize):
        flag = False
        if isinstance(image, np.ndarray):  # OpenCV-style input? convert to PIL
            flag = True
            image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA))
        font = ImageFont.truetype('../font/simsun.ttc', fontsize)
        # Build a 3x3 white canvas and paste the image in the center tile, so
        # the rotated watermark still covers the corners after cropping.
        new_img = Image.new('RGBA', (image.size[0] * 3, image.size[1] * 3), (255, 255, 255, 255))
        new_img.paste(image, image.size)
        # Tile the text across the canvas on a transparent overlay.
        font_len = len(text)
        rgba_image = new_img.convert('RGBA')
        text_overlay = Image.new('RGBA', rgba_image.size, (0, 0, 0, 0))
        image_draw = ImageDraw.Draw(text_overlay)
        for i in range(0, rgba_image.size[0], font_len * 40 + 100):
            for j in range(0, rgba_image.size[1], 200):
                # print(f'i:{i}, j:{j}, text:{text}, font:{font}')
                # alpha=50 gives the semi-transparent look
                image_draw.text((i, j), text, font=font, fill=(0, 0, 0, 50))
        text_overlay = text_overlay.rotate(-45)
        image_with_text = Image.alpha_composite(rgba_image, text_overlay)
        # Crop back to the original image (the center tile of the 3x3 canvas).
        image_with_text = image_with_text.crop((image.size[0], image.size[1], image.size[0] * 2, image.size[1] * 2))
        if flag:
            image_with_text = cv2.cvtColor(np.asarray(image_with_text), cv2.COLOR_BGRA2RGBA)
        return image_with_text
class PictureWaterMark:
    """Logo (image) watermarking on OpenCV frames."""
    def __init__(self):
        # Default logo; flag -1 (IMREAD_UNCHANGED) preserves the alpha channel.
        self.logo = cv2.imread("./image/logo.png", -1)
    def common_water(self, image, logo):
        """Paste `logo` near the bottom-right corner of `image` via PIL.

        `image` is a 3-channel OpenCV frame; `logo` is expected to carry an
        alpha channel (used as the paste mask). Returns an ndarray.
        """
        width, height = image.shape[1], image.shape[0]
        mark_width, mark_height = logo.shape[1], logo.shape[0]
        # Scale the logo so its width becomes 20% of the frame width.
        rate = int(width * 0.2) / mark_width
        logo_new = cv2.resize(logo, None, fx=rate, fy=rate, interpolation=cv2.INTER_NEAREST)
        # Top-left paste position putting the logo at the 95% right/bottom mark.
        position = (int(width * 0.95 - logo_new.shape[1]), int(height * 0.95 - logo_new.shape[0]))
        b = Image.new('RGBA', (width, height), (0, 0, 0, 0))  # transparent canvas
        a = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        watermark = Image.fromarray(cv2.cvtColor(logo_new, cv2.COLOR_BGRA2RGBA))
        # Optional logo rotation (disabled)
        # watermark = watermark.rotate(45)
        b.paste(a, (0, 0))
        b.paste(watermark, position, mask=watermark)
        # NOTE(review): `b` is RGBA (4 channels) here while COLOR_BGR2RGB
        # expects 3 channels — verify this conversion against a real frame.
        return cv2.cvtColor(np.asarray(b), cv2.COLOR_BGR2RGB)
    def common_water_1(self, image, logo, alpha=1):
        """Alpha-blend `logo` onto `image` using pure OpenCV/numpy.

        :param alpha: extra opacity multiplier applied to the logo's alpha
        """
        h, w = image.shape[0], image.shape[1]
        # Scale the logo to 10% of the frame's longer side.
        if w >= h:
            rate = int(w * 0.1) / logo.shape[1]
        else:
            rate = int(h * 0.1) / logo.shape[0]
        mask = cv2.resize(logo, None, fx=rate, fy=rate, interpolation=cv2.INTER_NEAREST)
        mask_h, mask_w = mask.shape[0], mask.shape[1]
        mask_channels = cv2.split(mask)
        dst_channels = cv2.split(image)
        # b, g, r, a = cv2.split(mask)
        # Target region for the logo, as (row, col) upper-left / lower-right:
        # bottom edge at 95% of the height, right edge inset by 5% of height.
        ul_points = (int(h * 0.95) - mask_h, int(w - h * 0.05 - mask_w))
        dr_points = (int(h * 0.95), int(w - h * 0.05))
        # Per-channel alpha blend: dst = dst*(1-a) + logo*a, with `a` taken
        # from the logo's alpha channel (mask_channels[3]) scaled by `alpha`.
        for i in range(3):
            dst_channels[i][ul_points[0]: dr_points[0], ul_points[1]: dr_points[1]] = dst_channels[i][
                                                                                      ul_points[0]: dr_points[0],
                                                                                      ul_points[1]: dr_points[1]] * (
                                                                                              255.0 - mask_channels[3] * alpha) / 255
            dst_channels[i][ul_points[0]: dr_points[0], ul_points[1]: dr_points[1]] += np.array(
                mask_channels[i] * (mask_channels[3] * alpha / 255), dtype=np.uint8)
        dst_img = cv2.merge(dst_channels)
        return dst_img
# Difference-hash (dHash) perceptual fingerprint
def dHash(image):
    """Compute the 64-bit difference hash of an image.

    The image is resized to 9x8, converted to grayscale, and each pixel is
    compared to its right-hand neighbour: 1 when brighter, otherwise 0.

    :param image: BGR image as an ndarray
    :return: list of 64 ints (0/1)
    """
    # Resize to 9 columns x 8 rows so each row yields 8 comparisons.
    shrunk = cv2.resize(image, (9, 8), interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(shrunk, cv2.COLOR_BGR2GRAY)
    bits = []
    for row in range(8):
        bits.extend(1 if gray[row, col] > gray[row, col + 1] else 0
                    for col in range(8))
    return bits
# Hamming distance between two hashes
def Hamming_distance(hash1, hash2):
    """Count positions at which ``hash1`` and ``hash2`` differ.

    Iteration is driven by ``len(hash1)`` (matching the original contract:
    ``hash2`` must be at least as long as ``hash1``).
    """
    return sum(hash1[i] != hash2[i] for i in range(len(hash1)))
def url2Array(url):
    """Download an image from ``url`` and return it as an OpenCV BGR ndarray."""
    payload = requests.get(url).content
    rgb = np.array(Image.open(BytesIO(payload)))
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
if __name__ == '__main__':
    # Manual smoke test / micro-benchmark comparing the two logo-watermark
    # implementations (PIL-based vs pure OpenCV/numpy).
    # img = cv2.imread("../test/a.jpg", -1)
    # fontcolor = 'yellow'
    #
    # water = TextWaterMark()
    # text = "hello world"
    #
    # # fill_img = water.common_water(img, text, position=4, fontface='../font/庞门正道标题体2.0增强版.ttf', fontsize=20, fontcolor='black')
    # fill_img = water.fill_water(img, text, 20)
    # # must be saved as PNG to keep the alpha channel
    # cv2.imshow('result', fill_img)
    # cv2.waitKey(111111110)
    # print('finish')
    pic = PictureWaterMark()
    image = cv2.imread("a.jpg")
    logo = cv2.imread("../image/logo.png", -1)
    # print(image, logo)
    # Time the PIL-based implementation...
    start = time.time()
    frame = pic.common_water(image, logo)
    print(time.time() - start)
    # ...and the OpenCV/numpy implementation.
    start1 = time.time()
    frame1 = pic.common_water_1(image, logo)
    # cv2.imwrite("watermarked.jpg", frame1)
    print(time.time() - start1)
    # cap = cv2.VideoCapture("../data/111111.mp4")
    # logo = cv2.imread("../image/logo.png", -1)
    # while True:
    #     is_opened, frame = cap.read()
    #     frame = pic.common_water(frame, logo)
    #     cv2.imshow('frame', frame)
    #     cv2.waitKey(1)  # wait for any key
    # # release the capture
    # cap.release()
import time | |||||
from kafka import KafkaProducer, KafkaConsumer, TopicPartition, OffsetAndMetadata | |||||
from kafka.errors import kafka_errors | |||||
import json | |||||
from loguru import logger | |||||
# 生产者 | |||||
class CustomerKafkaProducer():
    """Kafka producer wrapper: lazy construction plus one retry on send failure."""
    def __init__(self, content):
        # Full application config; producer settings live under
        # content["kafka"][<active env>]["producer"].
        self.content = content
        self.configs = self.content["kafka"][self.content["dsp"]["active"]]["producer"]
        self.customerProducer = None
        self.get_producer()
    # Build the KafkaProducer if it does not exist yet (initial setup and
    # reconnect after a failed send, where it is reset to None).
    def get_producer(self):
        if self.customerProducer is None:
            logger.info("配置kafka生产者!")
            self.customerProducer = KafkaProducer(
                bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
                acks=self.configs["acks"],
                retries=self.configs["retries"],
                linger_ms=self.configs["linger_ms"],
                retry_backoff_ms=self.configs["retry_backoff_ms"],
                max_in_flight_requests_per_connection=self.configs[
                    "max_in_flight_requests_per_connection"],
                # keys and values are serialized as UTF-8 JSON
                key_serializer=lambda m: json.dumps(m).encode('utf-8'),
                value_serializer=lambda m: json.dumps(m).encode('utf-8'))
    # mode 1: asynchronous send (success/error callbacks); mode 2: synchronous
    # send (blocks up to 30s on the returned future).
    # customer_send_success(msgId, record_metadata): success callback
    # customer_send_error(msgId, excp): failure callback
    def sender(self, topic, key, message, mode, customer_send_success=None, customer_send_error=None):
        """Send `message` to `topic`; on failure, drop the producer, rebuild it
        and retry once; re-raise after the second failure."""
        retry_send_num = 1
        while True:
            try:
                self.get_producer()
                logger.info("kafka发送信息,topic:{}|key:{}|message:{}|mode:{}|msgId:{}", topic, key, message, mode,
                            message.get("msgId"))
                if mode == 1:
                    # Default to this class's logging callbacks when none given.
                    if not customer_send_success:
                        customer_send_success = CustomerKafkaProducer.on_send_success
                    if not customer_send_error:
                        customer_send_error = CustomerKafkaProducer.on_send_error
                    # The extra msgId argument is bound into the callback call.
                    self.customerProducer.send(topic=topic, key=key, value=message) \
                        .add_callback(customer_send_success, message.get("msgId")) \
                        .add_errback(customer_send_error, message.get("msgId"))
                if mode == 2:
                    try:
                        self.customerProducer.send(topic=topic, key=key, value=message).get(timeout=30)
                        logger.info("kafka同步发送信息成功, msgId:{}", message.get("msgId"))
                    except kafka_errors as e:
                        logger.exception("kafka同步发送消息异常: {}, msgId:{}", e, message.get("msgId"))
                        raise e
                break
            except Exception as e:
                logger.exception("kafka发送消息异常: {}, msgId:{}", e, message.get("msgId"))
                # Force a fresh producer on the next loop iteration.
                self.customerProducer = None
                retry_send_num += 1
                if retry_send_num > 2:
                    logger.exception("kafka发送消息重试失败: {}, msgId:{}", e, message.get("msgId"))
                    raise e
    def close_producer(self):
        # Flush buffered records before closing.
        self.customerProducer.flush()
        self.customerProducer.close()
        logger.info("kafka生产者关闭完成!")
    # NOTE(review): these two are used as plain functions (no self); they are
    # referenced via the class in sender(), which works but @staticmethod
    # would make the intent explicit.
    def on_send_success(msgId, record_metadata):
        logger.info("kafka异步发送信息成功,topic:{}|partition:{}|offset:{}|msgId:{}", record_metadata.topic,
                    record_metadata.partition, record_metadata.offset, msgId)
    def on_send_error(msgId, excp):
        logger.exception("kafka异步发送消息异常: {}, msgId:{}", excp, msgId)
# 消费者 (the original comment mislabelled this class as a producer)
class CustomerKafkaConsumer():
    """Kafka consumer wrapper: lazy subscription, poll with reconnect, and
    manual offset commits with retry."""
    def __init__(self, content, topics=()):
        logger.info("初始化消费者")
        # Consumer settings live under content["kafka"][<active env>]["consumer"].
        self.content = content
        self.configs = self.content["kafka"][self.content["dsp"]["active"]]["consumer"]
        self.customerConsumer = None
        self.topics = topics
        self.subscribe()
        logger.info("初始化消费者完成")
    def subscribe(self):
        """Create the KafkaConsumer if needed and subscribe it to self.topics.

        :raises Exception: when no topics were supplied
        """
        if self.customerConsumer is None:
            logger.info("获取消费者!")
            self.customerConsumer = KafkaConsumer(
                bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
                client_id=self.configs["client_id"],
                group_id=self.configs["group_id"],
                auto_offset_reset=self.configs["auto_offset_reset"],
                enable_auto_commit=self.configs["enable_auto_commit"],
                max_poll_records=self.configs["max_poll_records"],
                value_deserializer=lambda m: json.loads(m.decode('utf-8')))
            # Fix: the original log text wrongly said "生产者" (producer) here.
            logger.info("kafka消费者订阅topic:{}", self.topics)
            if self.topics is None or len(self.topics) == 0:
                logger.error("消费者订阅topic不能为空!")
                raise Exception("消费者订阅topic不能为空!")
            # Manual partition assignment (kept for reference):
            # customer_partition = []
            # for topic in self.topics:
            #     for p in self.content["kafka"][self.content["dsp"]["active"]][topic]["partition"]:
            #         customer_partition.append(TopicPartition(topic, p))
            # self.customerConsumer.assign(customer_partition)
            # Automatic assignment:
            self.customerConsumer.subscribe(self.topics)
            logger.info("kafka消费者订阅topic完成")
    def poll(self):
        """Poll once; on error drop the consumer so subscribe() rebuilds it.

        :return: the poll result dict, or None when polling failed
        """
        msg = None
        try:
            self.subscribe()
            msg = self.customerConsumer.poll()
        except Exception as e:
            self.customerConsumer = None
            logger.exception("消费者拉取消息异常: {}", e)
        return msg
    def commit_offset(self, message):
        """Commit offset message.offset + 1 for the message's partition,
        retrying up to 3 times with a rebuilt consumer between attempts."""
        retry_num = 1
        while True:
            try:
                self.subscribe()
                logger.info("消费者开始提交offset,topic:{}|offset:{}|partition:{}", message.topic, message.offset + 1,
                            message.partition)
                tp = TopicPartition(topic=message.topic, partition=message.partition)
                self.customerConsumer.commit(offsets={tp: (OffsetAndMetadata(message.offset + 1, None))})
                logger.info("消费者提交offset完成,topic:{}|offset:{}|partition:{}", message.topic, message.offset + 1,
                            message.partition)
                break
            except Exception as e:
                self.customerConsumer = None
                logger.exception("消费者提交offset异常: {}, 重试次数: {}", e, retry_num)
                time.sleep(1)
                retry_num += 1
                if retry_num > 3:
                    logger.exception("消费者提交offset重试失败: {}", e)
                    break
# if __name__=="__main__": | |||||
# try: | |||||
# 1/0 | |||||
# except Exception as e: | |||||
# logger.exception("aaaaa:{} {}", e, "11111") |
# -*- coding: utf-8 -*- | |||||
import sys | |||||
import os | |||||
from loguru import logger | |||||
# Initialise loguru logging from the configuration's `log` section.
def init_log(content):
    """Configure loguru sinks (rotating file and/or stderr) from content['log']."""
    log_cfg = content["log"]
    if not os.path.exists(log_cfg["base_path"]):
        os.makedirs(log_cfg["base_path"])
    # Drop every previously registered sink so repeated calls are idempotent.
    logger.remove(handler_id=None)
    # Optional rotating/retained file sink.
    if log_cfg["enable_file_log"]:
        logger.add(log_cfg["base_path"] + log_cfg["log_name"],
                   rotation=log_cfg["rotation"],
                   retention=log_cfg["retention"],
                   format=log_cfg["log_fmt"],
                   level=log_cfg["level"],
                   enqueue=log_cfg["enqueue"],
                   encoding=log_cfg["encoding"])
    # Optional console (stderr) sink.
    if log_cfg["enable_stderr"]:
        logger.add(sys.stderr,
                   format=log_cfg["log_fmt"],
                   level=log_cfg["level"],
                   enqueue=True)
# -*- coding: utf-8 -*- | |||||
import os | |||||
import sys | |||||
import torch | |||||
sys.path.extend(['..', '../healthCode']) | |||||
from utilsK.general import pre_process, post_process, get_return_data | |||||
class Model():
    """Loads the TorchScript detection models shared by subclasses (FKModel)."""
    def __init__(self):
        # Per-model settings: jit weight path, image-type tag and class count
        # (nc), plus the shared NMS thresholds.
        self.par = {'code': {'weights': '../healthCode/weights/health_yolov5s_v2.jit', 'img_type': 'code', 'nc': 9},
                    'plate': {'weights': '../healthCode/weights/plate_yolov5s.jit', 'img_type': 'plate', 'nc': 1},
                    'conf_thres': 0.4,
                    'iou_thres': 0.45
                    }
        ### load the TorchScript models
        # NOTE(review): the models are loaded with torch.jit.load's default
        # device mapping and are not explicitly moved to self.device — confirm
        # the saved jit modules already target cuda:0.
        self.device = torch.device('cuda:0')
        self.model = torch.jit.load(self.par['code']['weights'])
        self.model_plate = torch.jit.load(self.par['plate']['weights'])
# 防疫模型: health-code / trip-card / licence-plate detection on top of Model
class FKModel(Model):
    def process(self, im0, device, img_type):
        """Run detection on one image and post-process the result.

        :param im0: input image (as accepted by pre_process)
        :param device: torch device used by pre/post processing
        :param img_type: 'code' (health code) or 'plate' (licence plate);
            selects both the jit model and the class count for NMS
        :return: dict produced by get_return_data
        :raises ValueError: if img_type is neither 'code' nor 'plate'
        """
        # Pre-process the raw image into a network-ready tensor.
        img, padInfos = pre_process(im0, device)
        # Select the jit model matching the requested image type.
        if img_type == 'code':
            pred = self.model(img)
        elif img_type == 'plate':
            pred = self.model_plate(img)
        else:
            # Fix: the original left `pred` unbound here, producing a
            # confusing UnboundLocalError; fail fast with a clear message.
            # (Leftover debug print() calls were also removed.)
            raise ValueError("unsupported img_type: %r" % (img_type,))
        # Box decoding + NMS.
        boxes = post_process(pred, padInfos, device, conf_thres=self.par['conf_thres'],
                             iou_thres=self.par['iou_thres'],
                             nc=self.par[img_type]['nc'])
        return get_return_data(im0, boxes, modelType=img_type)
# for key in dataBack.keys(): | |||||
# if isinstance(dataBack[key], list): | |||||
# cv2.imwrite('jitimg/%s.jpg' % (key), dataBack[key][0]) ###返回值: dataBack | |||||
''' | |||||
#dataBack= {'type':1,'color':'green','nameImage':'','phoneNumberImage':'','cityImage':'','hsImage':'','plateImage':''} | |||||
type:int, 0—行程卡;1—苏康码;2-车牌 | |||||
nameImage: [姓名图像数组,score] | |||||
color: green, yellow, red | |||||
cityImage: [途径地图像数组,score] | |||||
phoneNumberImage: [手机号图像数组,score] | |||||
IdNumberImage: [身份证号数组,score] | |||||
hsImage:[核酸检测情况数组,score] | |||||
plateImage: [车牌图像数组,score]
如果没对应目标,返回“空值” | |||||
''' |
# -*- coding: UTF-8 -*- | |||||
import pymysql | |||||
from loguru import logger | |||||
from dbutils.pooled_db import PooledDB | |||||
""" | |||||
@功能:创建数据库连接池 | |||||
""" | |||||
class MyConnectionPool(object):
    """Wrapper around a DBUtils PooledDB MySQL connection pool."""
    # Lazily created PooledDB instance.
    __pool = None

    def __init__(self, content):
        # Keep the configuration so getconn() can later be called without
        # arguments (see getconn below).
        self.content = content
        # Fix: the original called self.__getConn (capital C), a method that
        # does not exist — name mangling made the lookup fail with
        # AttributeError on every instantiation.
        self.conn = self.__getconn(content)
        self.cursor = self.conn.cursor()

    # Create the pool on first use, then hand out one pooled connection.
    def __getconn(self, content):
        if self.__pool is None:
            self.__pool = PooledDB(
                creator=pymysql,
                mincached=int(content["mysql"]["db_min_cached"]),
                maxcached=int(content["mysql"]["db_max_cached"]),
                maxshared=int(content["mysql"]["db_max_shared"]),
                maxconnections=int(content["mysql"]["db_max_connecyions"]),
                blocking=content["mysql"]["db_blocking"],
                maxusage=content["mysql"]["db_max_usage"],
                setsession=content["mysql"]["db_set_session"],
                host=content["mysql"][content["dsp"]["active"]]["host"],
                port=content["mysql"][content["dsp"]["active"]]["port"],
                user=content["mysql"][content["dsp"]["active"]]["username"],
                passwd=content["mysql"][content["dsp"]["active"]]["password"],
                db=content["mysql"][content["dsp"]["active"]]["dbname"],
                use_unicode=False,
                charset=content["mysql"]["db_charset"]
            )
        return self.__pool.connection()

    # Close the cursor and give this wrapper's connection back to the pool.
    def close(self):
        self.cursor.close()
        self.conn.close()

    # Fetch a fresh (cursor, connection) pair from the pool.
    def getconn(self, content=None):
        """`content` now defaults to the configuration supplied at
        construction time, so callers (e.g. MySqLHelper) may simply call
        pool.getconn() with no arguments — the original required `content`
        while its only caller passed none."""
        conn = self.__getconn(self.content if content is None else content)
        cursor = conn.cursor()
        return cursor, conn
# Factory helper: build a MyConnectionPool wrapper for the given configuration.
def get_my_connection(content):
    return MyConnectionPool(content)
''' | |||||
执行语句查询有结果返回结果没有返回0;增/删/改返回变更数据条数,没有返回0 | |||||
''' | |||||
class MySqLHelper(object):
    """Query helpers over the pooled MySQL connection.

    查询有结果返回结果,没有返回0;增/删/改返回变更数据条数,没有返回0。
    """
    def __init__(self, content):
        logger.info("开始加载数据库连接池!")
        self.db = get_my_connection(content)
        logger.info("加载数据库连接池完成!")

    def __new__(cls, *args, **kwargs):
        # Process-wide singleton.
        if not hasattr(cls, 'inst'):
            # Fix: the original forwarded *args/**kwargs to object.__new__,
            # which raises TypeError on Python 3 ("object.__new__() takes
            # exactly one argument"); only the class may be passed.
            cls.inst = super(MySqLHelper, cls).__new__(cls)
        return cls.inst

    # 封装执行命令
    def execute(self, sql, param=None, autoclose=False):
        """Run one statement and commit.

        :param sql: SQL string; %s placeholders are filled from `param`
        :param param: optional parameter tuple/list for the placeholders
        :param autoclose: close cursor/connection before returning
        :return: (cursor, conn, count) — count is the affected row count
        """
        cursor, conn = self.db.getconn()  # borrow a connection from the pool
        count = 0
        try:
            if param:
                count = cursor.execute(sql, param)
            else:
                count = cursor.execute(sql)
            conn.commit()
            if autoclose:
                self.close(cursor, conn)
        except Exception as e:
            # Fix: the original silently swallowed every DB error (`pass`);
            # log it and roll back the failed transaction instead.
            logger.error("执行sql异常:")
            logger.exception(e)
            conn.rollback()
        return cursor, conn, count

    # 释放连接
    def close(self, cursor, conn):
        """Close the cursor and return the connection to the pool."""
        logger.info("开始释放数据库连接!")
        cursor.close()
        conn.close()
        logger.info("释放数据库连接完成!")

    # 查询所有
    def selectall(self, sql, param=None):
        """Fetch all rows; returns the affected-row count on fetch error."""
        cursor, conn, count = self.execute(sql, param)
        try:
            return cursor.fetchall()
        except Exception as e:
            logger.error("查询所有数据异常:")
            logger.exception(e)
            return count
        finally:
            # Fix: the original only released the connection on the error
            # path, leaking one pooled connection per successful query.
            self.close(cursor, conn)

    # 查询单条
    def selectone(self, sql, param=None):
        """Fetch one row; returns the affected-row count on fetch error."""
        cursor, conn, count = self.execute(sql, param)
        try:
            return cursor.fetchone()
        except Exception as e:
            logger.error("查询单条数据异常:")
            logger.exception(e)
            return count
        finally:
            self.close(cursor, conn)

    # 增加
    def insertone(self, sql, param):
        """Insert one row; returns the number of inserted rows."""
        cursor, conn, count = self.execute(sql, param)
        try:
            conn.commit()
            return count
        except Exception as e:
            logger.error("新增数据异常:")
            logger.exception(e)
            conn.rollback()
            return count
        finally:
            self.close(cursor, conn)

    # 增加多行
    def insertmany(self, sql, param):
        """Insert many rows in one executemany call.

        :param param: sequence of parameter tuples, e.g. [(), ()] or ((), ())
        :return: number of affected rows (0 on error)
        """
        # Fix: getconn() yields (cursor, conn); the original unpacked three
        # values and crashed with ValueError before any SQL ran, and `count`
        # was never actually computed.
        cursor, conn = self.db.getconn()
        count = 0
        try:
            count = cursor.executemany(sql, param)
            conn.commit()
        except Exception as e:
            logger.error("增加多条数据异常:")
            logger.exception(e)
            conn.rollback()
        finally:
            self.close(cursor, conn)
        return count

    # 删除
    def delete(self, sql, param=None):
        """Delete rows; returns the number of deleted rows.

        execute() already commits and handles/logs failures, so no extra
        rollback is needed here.
        """
        cursor, conn, count = self.execute(sql, param)
        self.close(cursor, conn)
        return count

    # 更新
    def update(self, sql, param=None):
        """Update rows; returns the number of updated rows."""
        cursor, conn, count = self.execute(sql, param)
        conn.commit()
        self.close(cursor, conn)
        return count
import time | |||||
import cv2 | |||||
from aip import AipOcr | |||||
from loguru import logger | |||||
from enums.ExceptionEnum import ExceptionType | |||||
from exception.CustomerException import ServiceException | |||||
class OcrBaiduSdk:
    """Wrapper around Baidu's AipOcr SDK with simple retry-and-rebuild logic."""
    def __init__(self, content):
        # Application config; Baidu credentials live under content["baiduocr"].
        self.content = content
        self.init_client()
    def init_client(self):
        # (Re)create the SDK client; also called to recover after a failure.
        self.client = AipOcr(str(self.content["baiduocr"]["APP_ID"]), self.content["baiduocr"]["API_KEY"],
                             self.content["baiduocr"]["SECRET_KEY"])
    '''
    Sample response of basicGeneral:
    {
    "log_id": 2471272194,
    "words_result_num": 2,
    "words_result":
        [
            {"words": " TSINGTAO"},
            {"words": "青島睥酒"}
        ]
    }
    '''
    def universal_text_recognition(self, image, msgId):
        """Run Baidu general OCR on `image`.

        The image is JPEG-encoded via cv2.imencode before upload. Makes up to
        3 attempts, rebuilding the client between attempts, then raises
        ServiceException.

        :param image: image as an ndarray accepted by cv2.imencode
        :param msgId: correlation id used in log messages
        :return: the SDK's response dict (see sample above)
        :raises ServiceException: after 3 failed attempts
        """
        reply_num = 1
        while True:
            try:
                or_result, or_image = cv2.imencode(".jpg", image)
                res_image = self.client.basicGeneral(or_image.tobytes())
                return res_image
            except Exception as e:
                logger.exception("通用文字识别失败: {}, 当前重试次数:{}, msgId: {}", e, reply_num, msgId)
                time.sleep(1)
                reply_num += 1
                self.init_client()
                if reply_num > 3:
                    logger.exception("通用文字识别失败: {}, msgId: {}", e, msgId)
                    raise ServiceException(ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[0],
                                           ExceptionType.UNIVERSAL_TEXT_RECOGNITION_FAILED.value[1])
    '''
    Sample response of licensePlate:
    {
        "log_id": 3583925545,
        "words_result": {
            "color": "blue",
            "number": "苏HS7766"
        }
    }
    '''
    def license_plate_recognition(self, image, msgId):
        """Run Baidu licence-plate recognition on `image`.

        Same retry behaviour as universal_text_recognition: up to 3 attempts,
        then ServiceException.

        :param image: image as an ndarray accepted by cv2.imencode
        :param msgId: correlation id used in log messages
        :return: the SDK's response dict (see sample above)
        :raises ServiceException: after 3 failed attempts
        """
        reply_num = 1
        while True:
            try:
                or_result, or_image = cv2.imencode(".jpg", image)
                res_image = self.client.licensePlate(or_image.tobytes())
                return res_image
            except Exception as e:
                logger.exception("车牌识别失败: {}, 当前重试次数:{}, msgId: {}", e, reply_num, msgId)
                time.sleep(1)
                reply_num += 1
                self.init_client()
                if reply_num > 3:
                    logger.exception("车牌识别失败: {}, msgId: {}", e, msgId)
                    raise ServiceException(ExceptionType.ABNORMAL_LICENSE_PLATE_RECOGNITION.value[0],
                                           ExceptionType.ABNORMAL_LICENSE_PLATE_RECOGNITION.value[1])
import time | |||||
import datetime | |||||
# strftime pattern, e.g. "2024-03-01 09:30:00"
YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
# strftime pattern: compact timestamp with microseconds, e.g. "20240301093000123456"
YMDHMSF = "%Y%m%d%H%M%S%f"
def generate_timestamp():
    """Return the current Unix time, truncated to an integer."""
    now = time.time()
    return int(now)
def now_date_to_str(fmt=None):
    """Format the current local time.

    :param fmt: strftime pattern; None selects YY_MM_DD_HH_MM_SS
    :return: formatted time string
    """
    pattern = YY_MM_DD_HH_MM_SS if fmt is None else fmt
    return datetime.datetime.now().strftime(pattern)
if __name__=="__main__":
    # Ad-hoc manual check: print the current compact timestamp.
    print(now_date_to_str(YMDHMSF))
import os | |||||
import yaml | |||||
from common import Constant | |||||
# 从配置文件读取所有配置信息
def getConfigs():
    """Load the YAML application configuration.

    :return: the parsed configuration (dict-like, per the YAML content)
    :raises Exception: when the file is missing or parses to an empty value
    """
    print("开始读取配置文件,获取配置消息:", Constant.APPLICATION_CONFIG)
    applicationConfigPath = os.path.abspath(Constant.APPLICATION_CONFIG)
    if not os.path.exists(applicationConfigPath):
        raise Exception("未找到配置文件:{}".format(Constant.APPLICATION_CONFIG))
    with open(applicationConfigPath, Constant.R, encoding=Constant.UTF_8) as f:
        # safe_load refuses arbitrary-object YAML tags; a config file only
        # needs plain mappings/scalars, so this is strictly safer than
        # yaml.load(..., FullLoader) on a file an operator can edit.
        content = yaml.safe_load(f)
    if not content:
        raise Exception("配置项不能为空:{}".format(Constant.APPLICATION_CONFIG))
    print("读取配置文件完成!")
    return content