
Upload

tags/V2.8.3^2^2
chenyukun 1 year ago
parent · commit e8140672b8
100 changed files with 8221 additions and 0 deletions
  1. +40 -0  .gitignore
  2. +0 -0  __init__.py
  3. BIN  __pycache__/dsp_master.cpython-38.pyc
  4. +443 -0  common/Constant.py
  5. +10 -0  common/YmlConstant.py
  6. +0 -0  common/__init__.py
  7. BIN  common/__pycache__/Constant.cpython-310.pyc
  8. BIN  common/__pycache__/Constant.cpython-38.pyc
  9. BIN  common/__pycache__/__init__.cpython-310.pyc
  10. BIN  common/__pycache__/__init__.cpython-38.pyc
  11. +23 -0  concurrency/CommonThread.py
  12. +67 -0  concurrency/FeedbackThread.py
  13. +280 -0  concurrency/FileUploadThread.py
  14. +57 -0  concurrency/HeartbeatThread.py
  15. +1348 -0  concurrency/IntelligentRecognitionProcess.py
  16. +1097 -0  concurrency/IntelligentRecognitionProcess2.py
  17. +153 -0  concurrency/Pull2PushStreamProcess.py
  18. +55 -0  concurrency/Pull2PushStreamThread.py
  19. +182 -0  concurrency/PullStreamThread.py
  20. +329 -0  concurrency/PullVideoStreamProcess.py
  21. +348 -0  concurrency/PullVideoStreamProcess2.py
  22. +181 -0  concurrency/PushStreamThread.py
  23. +201 -0  concurrency/PushStreamThread2.py
  24. +401 -0  concurrency/PushVideoStreamProcess.py
  25. +412 -0  concurrency/PushVideoStreamProcess2.py
  26. +50 -0  concurrency/RecordingHeartbeatThread.py
  27. +0 -0  concurrency/__init__.py
  28. BIN  concurrency/__pycache__/CommonThread.cpython-38.pyc
  29. BIN  concurrency/__pycache__/FeedbackThread.cpython-38.pyc
  30. BIN  concurrency/__pycache__/FileUploadThread.cpython-38.pyc
  31. BIN  concurrency/__pycache__/HeartbeatThread.cpython-38.pyc
  32. BIN  concurrency/__pycache__/IntelligentRecognitionProcess.cpython-38.pyc
  33. BIN  concurrency/__pycache__/PullStreamThread.cpython-38.pyc
  34. BIN  concurrency/__pycache__/PullVideoStreamProcess.cpython-38.pyc
  35. BIN  concurrency/__pycache__/PushStreamThread.cpython-38.pyc
  36. BIN  concurrency/__pycache__/PushStreamThread2.cpython-38.pyc
  37. BIN  concurrency/__pycache__/RecordingHeartbeatThread.cpython-38.pyc
  38. BIN  concurrency/__pycache__/__init__.cpython-38.pyc
  39. +10 -0  config/aliyun/dsp_dev_aliyun.yml
  40. +10 -0  config/aliyun/dsp_prod_aliyun.yml
  41. +11 -0  config/aliyun/dsp_test_aliyun.yml
  42. +12 -0  config/baidu/dsp_dev_baidu.yml
  43. +12 -0  config/baidu/dsp_prod_baidu.yml
  44. +12 -0  config/baidu/dsp_test_baidu.yml
  45. +25 -0  config/kafka/dsp_dev_kafka.yml
  46. +22 -0  config/kafka/dsp_prod_kafka.yml
  47. +24 -0  config/kafka/dsp_test_kafka.yml
  48. +10 -0  config/logger/dsp_dev_logger.yml
  49. +10 -0  config/logger/dsp_prod_logger.yml
  50. +10 -0  config/logger/dsp_test_logger.yml
  51. +30 -0  config/service/dsp_dev_service.yml
  52. +30 -0  config/service/dsp_prod_service.yml
  53. +30 -0  config/service/dsp_test_service.yml
  54. +27 -0  dsp_master.py
  55. +53 -0  entity/FeedBack.py
  56. +14 -0  entity/PullStreamDto.py
  57. +0 -0  entity/__init__.py
  58. BIN  entity/__pycache__/FeedBack.cpython-38.pyc
  59. BIN  entity/__pycache__/__init__.cpython-38.pyc
  60. +21 -0  enums/AnalysisStatusEnum.py
  61. +25 -0  enums/AnalysisTypeEnum.py
  62. +188 -0  enums/BaiduSdkEnum.py
  63. +86 -0  enums/ExceptionEnum.py
  64. +481 -0  enums/ModelTypeEnum.py
  65. +676 -0  enums/ModelTypeEnum2.py
  66. +18 -0  enums/RecordingStatusEnum.py
  67. +33 -0  enums/StatusEnum.py
  68. +0 -0  enums/__init__.py
  69. BIN  enums/__pycache__/AnalysisStatusEnum.cpython-38.pyc
  70. BIN  enums/__pycache__/AnalysisTypeEnum.cpython-38.pyc
  71. BIN  enums/__pycache__/BaiduSdkEnum.cpython-310.pyc
  72. BIN  enums/__pycache__/BaiduSdkEnum.cpython-38.pyc
  73. BIN  enums/__pycache__/ExceptionEnum.cpython-310.pyc
  74. BIN  enums/__pycache__/ExceptionEnum.cpython-38.pyc
  75. BIN  enums/__pycache__/ModelTypeEnum.cpython-38.pyc
  76. BIN  enums/__pycache__/RecordingStatusEnum.cpython-38.pyc
  77. BIN  enums/__pycache__/__init__.cpython-310.pyc
  78. BIN  enums/__pycache__/__init__.cpython-38.pyc
  79. +22 -0  exception/CustomerException.py
  80. +0 -0  exception/__init__.py
  81. BIN  exception/__pycache__/CustomerException.cpython-310.pyc
  82. BIN  exception/__pycache__/CustomerException.cpython-38.pyc
  83. BIN  exception/__pycache__/__init__.cpython-310.pyc
  84. BIN  exception/__pycache__/__init__.cpython-38.pyc
  85. +0 -0  font/__init__.py
  86. BIN  font/simsun.ttc
  87. BIN  image/logo.png
  88. +392 -0  service/Dispatcher.py
  89. +0 -0  service/__init__.py
  90. BIN  service/__pycache__/Dispatcher.cpython-310.pyc
  91. BIN  service/__pycache__/Dispatcher.cpython-38.pyc
  92. BIN  service/__pycache__/__init__.cpython-310.pyc
  93. BIN  service/__pycache__/__init__.cpython-38.pyc
  94. +3 -0  test/__init__.py
  95. BIN  test/__pycache__/__init__.cpython-38.pyc
  96. +0 -0  test/aliyun/__init__.py
  97. BIN  test/aliyun/aaa.jpeg
  98. +119 -0  test/aliyun/ossdemo.py
  99. +128 -0  test/aliyun/vod.py
  100. +0 -0  test/aliyun/vodTest.py

+ 40 - 0   .gitignore

@@ -0,0 +1,40 @@
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/

### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache

### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr

### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/

### VS Code ###
.vscode/

/.idea
/.vscode
/.svn
tuoheng-ui
target/
HELP.md

+ 0 - 0   __init__.py


BIN   __pycache__/dsp_master.cpython-38.pyc


+ 443 - 0   common/Constant.py

@@ -0,0 +1,443 @@
# -*- coding: utf-8 -*-
# Encoding format
UTF_8 = "utf-8"

# File read mode
R = 'r'
ON_OR = "_on_or_"
ON_AI = "_on_ai_"
MP4 = ".mp4"
# Initial progress
init_progess = "0.0000"
# Progress at 100%
success_progess = "1.0000"

# Width threshold for downscaling pulled frames: frames wider than 1400 px are halved, narrower frames are left unchanged
width = 1400

COLOR = (
[0, 0, 255],
[255, 0, 0],
[211, 0, 148],
[0, 127, 0],
[0, 69, 255],
[0, 255, 0],
[255, 0, 255],
[0, 0, 127],
[127, 0, 255],
[255, 129, 0],
[139, 139, 0],
[255, 255, 0],
[127, 255, 0],
[0, 127, 255],
[0, 255, 127],
[255, 127, 255],
[8, 101, 139],
[171, 130, 255],
[139, 112, 74],
[205, 205, 180])

ONLINE = "online"
OFFLINE = "offline"
PHOTO = "photo"
RECORDING = "recording"

ONLINE_START_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["start"]
},
"pull_url": {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 255
},
"push_url": {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 255
},
"logo_url": {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
"models": {
'type': 'list',
'required': True,
'nullable': False,
'minlength': 1,
'maxlength': 3,
'schema': {
'type': 'dict',
'required': True,
'schema': {
"code": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "categories",
'regex': r'^[a-zA-Z0-9]{1,255}$'
},
"is_video": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "code",
'allowed': ["0", "1"]
},
"is_image": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "code",
'allowed': ["0", "1"]
},
"categories": {
'type': 'list',
'required': True,
'dependencies': "code",
'schema': {
'type': 'dict',
'required': True,
'schema': {
"id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{0,255}$'},
"config": {
'type': 'dict',
'required': False,
'dependencies': "id",
}
}
}
}
}
}
}
}

ONLINE_STOP_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["stop"]
}
}

OFFLINE_START_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["start"]
},
"push_url": {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 255
},
"pull_url": {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 255
},
"logo_url": {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
"models": {
'type': 'list',
'required': True,
'maxlength': 3,
'minlength': 1,
'schema': {
'type': 'dict',
'required': True,
'schema': {
"code": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "categories",
'regex': r'^[a-zA-Z0-9]{1,255}$'
},
"is_video": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "code",
'allowed': ["0", "1"]
},
"is_image": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "code",
'allowed': ["0", "1"]
},
"categories": {
'type': 'list',
'required': True,
'dependencies': "code",
'schema': {
'type': 'dict',
'required': True,
'schema': {
"id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{0,255}$'},
"config": {
'type': 'dict',
'required': False,
'dependencies': "id",
}
}
}
}
}
}
}
}

OFFLINE_STOP_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["stop"]
}
}

IMAGE_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["start"]
},
"logo_url": {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
},
"image_urls": {
'type': 'list',
'required': True,
'minlength': 1,
'schema': {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 5000
}
},
"models": {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'required': True,
'schema': {
"code": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "categories",
'regex': r'^[a-zA-Z0-9]{1,255}$'
},
"is_video": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "code",
'allowed': ["0", "1"]
},
"is_image": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "code",
'allowed': ["0", "1"]
},
"categories": {
'type': 'list',
'required': True,
'dependencies': "code",
'schema': {
'type': 'dict',
'required': True,
'schema': {
"id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{0,255}$'},
"config": {
'type': 'dict',
'required': False,
'dependencies': "id",
}
}
}
}
}
}
}
}

RECORDING_START_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["start"]
},
"pull_url": {
'type': 'string',
'required': True,
'empty': False,
'maxlength': 255
},
"push_url": {
'type': 'string',
'required': False,
'empty': True,
'maxlength': 255
},
"logo_url": {
'type': 'string',
'required': False,
'nullable': True,
'maxlength': 255
}
}

RECORDING_STOP_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["stop"]
}
}

PULL2PUSH_START_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["start"]
},
"video_urls": {
'type': 'list',
'required': True,
'nullable': False,
'schema': {
'type': 'dict',
'required': True,
'schema': {
"id": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "pull_url",
'regex': r'^[a-zA-Z0-9]{1,255}$'
},
"pull_url": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "push_url",
'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$'
},
"push_url": {
'type': 'string',
'required': True,
'empty': False,
'dependencies': "id",
'regex': r'^(https|http|rtsp|rtmp|artc|webrtc|ws)://\w.+$'
}
}
}
}
}
PULL2PUSH_STOP_SCHEMA = {
"request_id": {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,36}$'
},
"command": {
'type': 'string',
'required': True,
'allowed': ["start", "stop"]
},
"video_ids": {
'type': 'list',
'required': False,
'nullable': True,
'schema': {
'type': 'string',
'required': True,
'empty': False,
'regex': r'^[a-zA-Z0-9]{1,255}$'
}
}
}
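
The schema dictionaries above use the Cerberus validation vocabulary ('type', 'required', 'regex', 'allowed', nested 'schema'), so a minimal validation sketch (assuming the cerberus package is the intended validator; the message values are placeholders) would look like this:

from cerberus import Validator
from common.Constant import ONLINE_START_SCHEMA

# Placeholder "start" command; all field values are illustrative only.
start_msg = {
    "request_id": "abc123",
    "command": "start",
    "pull_url": "rtsp://example/live",
    "push_url": "rtmp://example/push",
    "models": [{"code": "001", "is_video": "1", "is_image": "0",
                "categories": [{"id": "0"}]}],
}
validator = Validator(ONLINE_START_SCHEMA)
if not validator.validate(start_msg):
    print(validator.errors)  # e.g. {'pull_url': ['required field']} when a field is missing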

+ 10 - 0   common/YmlConstant.py

@@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-

# Service config path
service_yml_path = 'config/service/dsp_%s_service.yml'
# Kafka config path
kafka_yml_path = 'config/kafka/dsp_%s_kafka.yml'
# Aliyun config path
aliyun_yml_path = "config/aliyun/dsp_%s_aliyun.yml"
# Baidu config path
baidu_yml_path = 'config/baidu/dsp_%s_baidu.yml'
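
A small usage sketch for the paths above: the '%s' placeholder is filled with the runtime environment name, matching the dev/test/prod files under config/ (the env value below is a placeholder):

from common import YmlConstant

env = "dev"  # assumed to be one of 'dev', 'test', 'prod'
service_yml = YmlConstant.service_yml_path % env  # 'config/service/dsp_dev_service.yml'
kafka_yml = YmlConstant.kafka_yml_path % env      # 'config/kafka/dsp_dev_kafka.yml'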

+ 0 - 0   common/__init__.py


BIN   common/__pycache__/Constant.cpython-310.pyc


BIN   common/__pycache__/Constant.cpython-38.pyc


BIN   common/__pycache__/__init__.cpython-310.pyc


BIN   common/__pycache__/__init__.cpython-38.pyc


+ 23 - 0   concurrency/CommonThread.py

@@ -0,0 +1,23 @@
from threading import Thread
from loguru import logger


class Common(Thread):

__slots__ = ('__func', '__param1', '__param2', '__result')

def __init__(self, func, param1, param2):
super(Common, self).__init__()
self.__func = func
self.__param1 = param1
self.__param2 = param2
self.__result = None

def get_result(self):
self.join()
return self.__result

def run(self):
logger.info("开始执行线程!")
self.__result = self.__func(self.__param1, self.__param2)
logger.info("线程停止完成!")

+ 67 - 0   concurrency/FeedbackThread.py

@@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
import time
from threading import Thread
from traceback import format_exc

from loguru import logger

from util.KafkaUtils import CustomerKafkaProducer

'''
Issue feedback thread: forwards analysis, recording, and push-stream results to Kafka.
'''


class FeedbackThread(Thread):
__slots__ = ('__fbQueue', '__kafka_config')

def __init__(self, fbQueue, kafka_config):
super().__init__()
self.__fbQueue = fbQueue
self.__kafka_config = kafka_config

'''
Blocking fetch of a feedback message
'''

def getFeedback(self):
return self.__fbQueue.get()

def run(self):
logger.info("启动问题反馈线程")
kafkaProducer = CustomerKafkaProducer(self.__kafka_config)
dsp_alg_results_topic = self.__kafka_config["topic"]["dsp-alg-results-topic"]
dsp_recording_result_topic = self.__kafka_config["topic"]["dsp-recording-result-topic"]
dsp_push_stream_result_topic = self.__kafka_config["topic"]["dsp-push-stream-result-topic"]
while True:
logger.info("问题反馈发送消息循环")
feedback = None
recording = None
pull_stream = None
try:
fb = self.getFeedback()
if fb is not None and len(fb) > 0:
feedback = fb.get("feedback")
recording = fb.get("recording")
pull_stream = fb.get("pull_stream")
if feedback is not None and len(feedback) > 0:
kafkaProducer.sender(dsp_alg_results_topic, feedback["request_id"], feedback, 1)
feedback = None
if recording is not None and len(recording) > 0:
kafkaProducer.sender(dsp_recording_result_topic, recording["request_id"], recording, 1)
recording = None
if pull_stream is not None and len(pull_stream) > 0:
kafkaProducer.sender(dsp_push_stream_result_topic, pull_stream["request_id"], pull_stream, 1)
pull_stream = None
else:
time.sleep(1)
except Exception:
if feedback and feedback.get("request_id"):
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), feedback.get("request_id"))
elif recording and recording.get("request_id"):
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), recording.get("request_id"))
elif pull_stream and pull_stream.get("request_id"):
logger.error("问题反馈异常:{}, requestId:{}", format_exc(), pull_stream.get("request_id"))
else:
logger.error("问题反馈异常:{}", format_exc())
logger.info("问题反馈线程执行完成")

+ 280 - 0   concurrency/FileUploadThread.py

@@ -0,0 +1,280 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from threading import Thread
from time import sleep, time
from traceback import format_exc

from loguru import logger
import cv2

from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.AliyunSdk import AliyunOssSdk
from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue, get_no_block_queue, clear_queue


class FileUpload(Thread):
__slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')

def __init__(self, *args):
super().__init__()
self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args


class ImageFileUpload(FileUpload):
__slots__ = ()

@staticmethod
def handle_image(frame_msg, frame_step):
det_xywh, frame, current_frame, all_frames, font_config = frame_msg
'''
det_xywh:{
'code':{
1: [[detect_targets_code, box, score, label_array, color]]
}
}
Model code: modeCode
Detection target code: detectTargetCode
'''
model_info = []
# Parse the detection data by model code
for code, det_list in det_xywh.items():
if len(det_list) > 0:
for cls, target_list in det_list.items():
if len(target_list) > 0:
aFrame = frame.copy()
for target in target_list:
# detect_targets_code, box, score, label_array, color
draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config)
model_info.append({"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame})
if len(model_info) > 0:
image_result = {
"or_frame": frame,
"model_info": model_info,
"current_frame": current_frame,
"last_frame": current_frame + frame_step
}
return image_result
return None

def run(self):
msg, context = self._msg, self._context
service = context["service"]
base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
logger.info("启动图片上传线程, requestId: {}", request_id)
image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
service_timeout = int(service["timeout"])
frame_step = int(service["filter"]["frame_step"]) + 120
try:
with ThreadPoolExecutor(max_workers=2) as t:
# Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
start_time = time()
while True:
try:
if time() - start_time > service_timeout:
logger.error("图片上线线程运行超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
# Fetch a message from the queue
image_msg = get_no_block_queue(image_queue)
if image_msg is not None:
if image_msg[0] == 2:
if 'stop' == image_msg[1]:
logger.info("开始停止图片上传线程, requestId:{}", request_id)
break
if image_msg[0] == 1:
image_result = self.handle_image(image_msg[1], frame_step)
if image_result is not None:
task = []
or_image = cv2.imencode(".jpg", image_result["or_frame"])[1]
or_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
analyse_type,
"OR", "0", "0", request_id)
or_future = t.submit(aliyunOssSdk.put_object, or_image_name, or_image.tobytes())
task.append(or_future)
model_info_list = image_result["model_info"]
msg_list = []
for model_info in model_info_list:
ai_image = cv2.imencode(".jpg", model_info["aFrame"])[1]
ai_image_name = build_image_name(image_result["current_frame"],
image_result["last_frame"],
analyse_type,
"AI",
model_info["modelCode"],
model_info["detectTargetCode"],
request_id)
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
ai_image.tobytes())
task.append(ai_future)
msg_list.append(message_feedback(request_id,
AnalysisStatus.RUNNING.value,
analyse_type, "", "", "",
or_image_name,
ai_image_name,
model_info['modelCode'],
model_info['detectTargetCode']))
for tk in task:
tk.result()
for msg in msg_list:
put_queue(fb_queue, msg, timeout=2, is_ex=False)
del task, msg_list
else:
sleep(1)
del image_msg
except Exception:
logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
finally:
logger.info("停止图片上传线程0, requestId:{}", request_id)
clear_queue(image_queue)
logger.info("停止图片上传线程1, requestId:{}", request_id)


def build_image_name(*args):
"""
{requestId}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}" \
"-{modeCode}-{target}_{image_type}.jpg
"""
current_frame, last_frame, mode_type, image_type, modeCode, target, request_id = args
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
return "%s/%s_frame-%s-%s_type_%s-%s-%s-%s_%s.jpg" % (request_id, time_now, current_frame, last_frame,
random_num, mode_type, modeCode, target, image_type)


class ImageTypeImageFileUpload(Thread):
__slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg', 'ex')

def __init__(self, *args):
super().__init__()
self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
self.ex = None

@staticmethod
def handle_image(det_xywh, copy_frame, font_config):
"""
det_xywh:{
'code':{
1: [[detect_targets_code, box, score, label_array, color]]
}
}
Model code: modeCode
Detection target code: detectTargetCode
"""
model_info = []
# Parse the detection data by model code
for code in list(det_xywh.keys()):
# Detection target entries under this model code
det_info = det_xywh[code]
if len(det_info) > 0:
for cls in list(det_info.keys()):
target_list = det_info.get(cls)
if len(target_list) > 0:
aiFrame = copy_frame.copy()
for target in target_list:
draw_painting_joint(target[1], aiFrame, target[3], target[2], target[4], font_config)
model_info.append({
"modelCode": str(code),
"detectTargetCode": str(cls),
"frame": aiFrame
})
if len(model_info) > 0:
image_result = {
"or_frame": copy_frame,
"model_info": model_info,
"current_frame": 0,
"last_frame": 0
}
return image_result
return None

def run(self):
context, msg = self._context, self._msg
base_dir, env, request_id = context["base_dir"], context["env"], msg["request_id"]
logger.info("启动图片识别图片上传线程, requestId: {}", request_id)
image_queue, fb_queue, analyse_type = self._image_queue, self._fb_queue, self._analyse_type
service_timeout = int(context["service"]["timeout"])
with ThreadPoolExecutor(max_workers=2) as t:
try:
# Initialize the OSS client
aliyunOssSdk = AliyunOssSdk(base_dir, env, request_id)
start_time = time()
while True:
try:
if time() - start_time > service_timeout:
logger.error("拉流进程运行超时, requestId: {}", request_id)
break
# Fetch a message from the queue
image_msg = image_queue.get()
if image_msg is not None:
if image_msg[0] == 2:
if 'stop' == image_msg[1]:
logger.info("开始停止图片上传线程, requestId:{}", request_id)
break
if image_msg[0] == 1:
task, msg_list = [], []
det_xywh, image_url, copy_frame, font_config, result = image_msg[1]
if det_xywh is None:
ai_image_name = build_image_name(0, 0, analyse_type, "AI", result.get("modelCode"),
result.get("type"), request_id)
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name, copy_frame)
task.append(ai_future)
msg_list.append(message_feedback(request_id,
AnalysisStatus.RUNNING.value,
analyse_type, "", "", "",
image_url,
ai_image_name,
result.get("modelCode"),
result.get("type"),
analyse_results=result))
else:
image_result = self.handle_image(det_xywh, copy_frame, font_config)
if image_result:
# Encode the original frame as JPEG
if image_url is None:
or_result, or_image = cv2.imencode(".jpg", image_result.get("or_frame"))
image_url = build_image_name(image_result.get("current_frame"),
image_result.get("last_frame"),
analyse_type,
"OR", "0", "O", request_id)
or_future = t.submit(aliyunOssSdk.put_object, image_url,
or_image.tobytes())
task.append(or_future)
model_info_list = image_result.get("model_info")
for model_info in model_info_list:
ai_result, ai_image = cv2.imencode(".jpg", model_info.get("frame"))
ai_image_name = build_image_name(image_result.get("current_frame"),
image_result.get("last_frame"),
analyse_type,
"AI",
model_info.get("modelCode"),
model_info.get("detectTargetCode"),
request_id)
ai_future = t.submit(aliyunOssSdk.put_object, ai_image_name,
ai_image.tobytes())
task.append(ai_future)
msg_list.append(message_feedback(request_id,
AnalysisStatus.RUNNING.value,
analyse_type, "", "", "",
image_url,
ai_image_name,
model_info.get('modelCode'),
model_info.get('detectTargetCode'),
analyse_results=result))
for thread_result in task:
thread_result.result()
for msg in msg_list:
put_queue(fb_queue, msg, timeout=2, is_ex=False)
else:
sleep(1)
except Exception as e:
logger.error("图片上传异常:{}, requestId:{}", format_exc(), request_id)
self.ex = e
finally:
clear_queue(image_queue)
logger.info("停止图片识别图片上传线程, requestId:{}", request_id)

+ 57 - 0   concurrency/HeartbeatThread.py

@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
from threading import Thread
from time import sleep, time
from traceback import format_exc

from loguru import logger

from common.Constant import init_progess
from enums.AnalysisStatusEnum import AnalysisStatus
from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.QueUtil import get_no_block_queue, put_queue, clear_queue


class Heartbeat(Thread):
__slots__ = ('__fb_queue', '__hb_queue', '__request_id', '__analyse_type', "_context")

def __init__(self, *args):
super().__init__()
self.__fb_queue, self.__hb_queue, self.__request_id, self.__analyse_type, self._context = args

def run(self):
request_id, hb_queue, progress = self.__request_id, self.__hb_queue, init_progess
analyse_type, fb_queue = self.__analyse_type, self.__fb_queue
service_timeout = int(self._context["service"]["timeout"]) + 120
try:
logger.info("开始启动心跳线程!requestId:{}", request_id)
start_time = time()
hb_init_num = 0
while True:
if time() - start_time > service_timeout:
logger.error("心跳运行超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
sleep(3)
hb_msg = get_no_block_queue(hb_queue)
if hb_msg is not None:
command = hb_msg.get("command")
hb_value = hb_msg.get("hb_value")
if 'stop' == command:
logger.info("开始终止心跳线程, requestId:{}", request_id)
break
if hb_value is not None:
progress = hb_value
if hb_init_num % 30 == 0:
hb_init_num = 0
put_queue(fb_queue, message_feedback(request_id, AnalysisStatus.RUNNING.value, analyse_type,
progress=progress), timeout=3, is_ex=True)

hb_init_num += 3
del hb_msg
except Exception:
logger.error("心跳线程异常:{}, requestId:{}", format_exc(), request_id)
finally:
clear_queue(hb_queue)
logger.info("心跳线程停止完成!requestId:{}", request_id)

+ 1348 - 0   concurrency/IntelligentRecognitionProcess.py
File diff suppressed because it is too large


+ 1097 - 0   concurrency/IntelligentRecognitionProcess2.py
File diff suppressed because it is too large


+ 153 - 0   concurrency/Pull2PushStreamProcess.py

@@ -0,0 +1,153 @@
# -*- coding: utf-8 -*-
import time
from traceback import format_exc

from multiprocessing import Process, Queue

from loguru import logger

from concurrency.Pull2PushStreamThread import PushSteamThread
from enums.StatusEnum import PushStreamStatus, ExecuteStatus
from util.LogUtils import init_log

from enums.ExceptionEnum import ExceptionType
from entity.FeedBack import pull_stream_feedback
from exception.CustomerException import ServiceException
from util.QueUtil import get_no_block_queue, put_queue


class PushStreamProcess(Process):
__slots__ = ('_fb_queue', 'event_queue', '_context', '_msg', '_analysisType')

def __init__(self, *args):
super().__init__()
self._fb_queue, self._context, self._msg, self._analysisType = args
self.event_queue = Queue()

def sendEvent(self, eBody):
try:
self.event_queue.put(eBody, timeout=2)
except Exception:
logger.error("添加事件到队列超时异常:{}, requestId:{}", format_exc(), self._msg["request_id"])
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])

def run(self):
msg, context = self._msg, self._context
requestId, videoUrls = msg["request_id"], msg["video_urls"]
base_dir, env = context['base_dir'], context['env']
fb_queue = self._fb_queue
task, videoStatus = {}, {}
ex = None
try:
init_log(base_dir, env)
if videoUrls is None or len(videoUrls) == 0:
raise ServiceException(ExceptionType.PUSH_STREAM_URL_IS_NULL.value[0],
ExceptionType.PUSH_STREAM_URL_IS_NULL.value[1])
if len(videoUrls) > 5:
logger.error("推流数量超过限制, 当前推流数量: {}, requestId:{}", len(videoUrls), requestId)
raise ServiceException(ExceptionType.PULL_STREAM_NUM_LIMIT_EXCEPTION.value[0],
ExceptionType.PULL_STREAM_NUM_LIMIT_EXCEPTION.value[1])
videoInfo = [{"id": url["id"], "status": PushStreamStatus.WAITING.value[0]} for url in videoUrls if
url.get("id")]
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.WAITING.value[0], "", "", videoInfo))
for videoUrl in videoUrls:
pushThread = PushSteamThread(videoUrl["pull_url"], videoUrl["push_url"], requestId, videoUrl["id"])
pushThread.start()
task[videoUrl["id"]] = pushThread
enable_time = time.time()
for video in videoInfo:
videoStatus[video.get("id")] = video.get("status")
count = 0
while True:
# Timeout for the whole push-stream task
if time.time() - enable_time > 43200:
logger.error("任务执行超时, requestId:{}", requestId)
for t in list(task.keys()):
if task[t].is_alive():
task[t].status = False
task[t].pushStreamUtil.close_push_stream_sp()
task[t].join(120)
videoStatus[t] = PushStreamStatus.TIMEOUT.value[0]
videoInfo_timeout = [{"id": k, "status": v} for k, v in videoStatus.items()]
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1],
videoInfo_timeout))
break
# Handle stop commands
event_result = get_no_block_queue(self.event_queue)
if event_result is not None:
command = event_result.get("command")
videoIds = event_result.get("videoIds")
if "stop" == command:
# If videoIds is empty, stop all tasks
if videoIds is None or len(videoIds) == 0:
logger.info("停止所有执行的推流任务, requestId:{}", requestId)
for t in list(task.keys()):
if task[t].is_alive():
task[t].status = False
task[t].pushStreamUtil.close_push_stream_sp()
task[t].join(120)
videoStatus[t] = PushStreamStatus.SUCCESS.value[0]
videoInfo_sucess = [{"id": k, "status": v} for k, v in videoStatus.items()]
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.SUCCESS.value[0], "", "",
videoInfo_sucess))
break
else:
logger.info("停止指定的推流任务, requestId:{}", requestId)
alive_thread = 0
for t in list(task.keys()):
if task[t].is_alive():
if t in videoIds:
task[t].status = False
task[t].pushStreamUtil.close_push_stream_sp()
task[t].join(120)
videoStatus[t] = PushStreamStatus.SUCCESS.value[0]
else:
alive_thread += 1
if alive_thread == 0:
videoInfo_sucess = [{"id": k, "status": v} for k, v in videoStatus.items()]
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.SUCCESS.value[0], "",
"", videoInfo_sucess))
break
for t in list(task.keys()):
if task[t].status and not task[t].is_alive():
videoStatus[t] = PushStreamStatus.FAILED.value[0]
logger.error("检测到推流线程异常停止!videoId:{}, requestId:{}", t, requestId)
if task[t].ex:
raise task[t].ex
raise Exception("检测到推流线程异常停止!")
if task[t].is_alive():
videoStatus[t] = task[t].excute_status
if count % 10 == 0:
videoInfo_hb = [{"id": k, "status": v} for k, v in videoStatus.items()]
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.RUNNING.value[0], "", "",
videoInfo_hb))
count = 0
count += 1
time.sleep(1)
except ServiceException as s:
ex = s.code, s.msg
logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, requestId)
except Exception:
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
logger.error("服务异常: {}, requestId: {},", format_exc(), requestId)
finally:
if ex:
errorCode, errorMsg = ex
for t in list(task.keys()):
if task[t].is_alive():
task[t].status = False
task[t].pushStreamUtil.close_push_stream_sp()
task[t].join(120)
videoStatus[t] = PushStreamStatus.FAILED.value[0]
videoInfo_ex = [{"id": k, "status": v} for k, v in videoStatus.items()]
put_queue(fb_queue, pull_stream_feedback(requestId, ExecuteStatus.FAILED.value[0], errorCode, errorMsg,
videoInfo_ex))
for t in list(task.keys()):
if task[t].is_alive():
task[t].status = False
task[t].pushStreamUtil.close_push_stream_sp()
task[t].join(120)
logger.info("推流任务完成, requestId: {}", requestId)

+ 55 - 0   concurrency/Pull2PushStreamThread.py

@@ -0,0 +1,55 @@
# -*- coding: utf-8 -*-
from threading import Thread
import time
from traceback import format_exc

from loguru import logger

from enums.StatusEnum import PushStreamStatus
from exception.CustomerException import ServiceException
from util.PushStreamUtils import PushStreamUtil


class PushSteamThread(Thread):
__slots__ = ("pushStreamUtil", "requestId", "videoId", "status", "ex")

def __init__(self, pullUrl, pushUrl, requestId, videoId):
super().__init__()
self.pushStreamUtil = PushStreamUtil(pullUrl, pushUrl, requestId)
self.requestId = requestId
self.videoId = videoId
self.status = True
self.excute_status = PushStreamStatus.WAITING.value[0]
self.ex = None

def run(self):
logger.info("开始启动推流线程, 视频id: {}, requestId:{}", self.videoId, self.requestId)
while True:
try:
self.pushStreamUtil.start_push_stream()
self.excute_status = PushStreamStatus.RUNNING.value[0]
out, err = self.pushStreamUtil.push_stream_sp.communicate()
# Stream dropped unexpectedly
if self.status:
logger.warning("推流异常,请检测拉流地址和推流地址是否正常!")
if self.pushStreamUtil.push_stream_sp.returncode != 0:
logger.error("推流异常:{}, 视频id: {}, requestId:{}", err.decode(), self.videoId,
self.requestId)
self.excute_status = PushStreamStatus.RETRYING.value[0]
self.pushStreamUtil.close_push_stream_sp()
time.sleep(5)
# Stream stopped manually
if not self.status:
self.pushStreamUtil.close_push_stream_sp()
break
except ServiceException as s:
logger.error("异常: {}, 视频id: {}, requestId:{}", s.msg, self.videoId, self.requestId)
self.pushStreamUtil.close_push_stream_sp()
self.ex = s
break
except Exception as e:
logger.error("异常:{}, 视频id: {}, requestId:{}", format_exc(), self.videoId, self.requestId)
self.pushStreamUtil.close_push_stream_sp()
self.ex = e
break
logger.info("结束推流线程, 视频id: {}, requestId:{}", self.videoId, self.requestId)

+ 182 - 0   concurrency/PullStreamThread.py

@@ -0,0 +1,182 @@
# -*- coding: utf-8 -*-
from queue import Queue
from threading import Thread
from time import time, sleep
from traceback import format_exc

from loguru import logger

from enums.ExceptionEnum import ExceptionType
from enums.RecordingStatusEnum import RecordingStatus
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, clear_pull_p, build_video_info2, pull_read_video_stream2
from util.QueUtil import put_queue, get_no_block_queue, clear_queue, put_queue_result


class PullStreamThread(Thread):
__slots__ = ('_command', '_pull_queue', '_hb_queue', '_fb_queue', '_msg', '_context')

def __init__(self, *args):
super().__init__()
self._msg, self._context, self._pull_queue, self._hb_queue, self._fb_queue, self._frame_num = args
self._command = Queue()

def sendEvent(self, result):
put_queue(self._command, result, timeout=10, is_ex=False)


class RecordingPullStreamThread(PullStreamThread):

def run(self):
msg, context, frame_num = self._msg, self._context, self._frame_num
request_id, pull_url = msg["request_id"], msg['pull_url']
service = context["service"]
pull_stream_timeout = int(service["recording_pull_stream_timeout"])
read_stream_timeout = int(service["cv2_read_stream_timeout"])
service_timeout = int(service["timeout"])
command_queue, pull_queue, fb_queue, hb_queue = self._command, self._pull_queue, self._fb_queue, self._hb_queue
width, height, width_height_3, all_frames, w, h = None, None, None, 0, None, None
read_start_time, pull_p, ex = None, None, None
frame_list, frame_index_list = [], []
stop_ex = True
pull_stream_start_time = time()
try:
logger.info("录屏拉流线程开始启动, requestId: {}", request_id)
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
start_time = time()
while True:
# Check whether the task has timed out
if time() - start_time > service_timeout:
logger.error("录屏拉流超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
# Check for a stop command
event = get_no_block_queue(command_queue)
if event is not None:
# A stop command means no more frames will be processed; exit directly
if 'stop' == event.get("command"):
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
logger.info("录屏拉流线程开始停止, requestId: {}", request_id)
break
# The main process failed; stop this child thread
if 'stop_ex' == event.get("command"):
logger.info("录屏异常拉开始停止拉流线程, requestId: {}", request_id)
stop_ex = False
break
# Offline (HTTP) pull
if pull_url.startswith('http'):
if check_video_stream(width, height):
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
# For an offline address, if the stream still cannot be pulled after 3 retries, close the pull pipe and report failure
if cv2_init_num > 3:
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
cv2_init_num += 1
width, height, width_height_3, all_frames, w, h = build_video_info2(pull_url, request_id)
if width is not None:
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=2)
else:
if cv2_init_num < 2:
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=2)
continue
# For offline video, if the queue is full, wait 1 second and retry
if pull_queue.full():
logger.info("pull拉流队列满了: {}, requestId: {}", pull_queue.qsize(), request_id)
sleep(1)
continue
# Live (real-time) pull
else:
if check_video_stream(width, height):
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
pull_stream_init_timeout = time() - pull_stream_start_time
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
if pull_stream_init_timeout > pull_stream_timeout:
logger.error("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id)
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
width, height, width_height_3, all_frames, w, h = build_video_info2(pull_url, request_id)
if width is not None:
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RUNNING.value[0]}, timeout=1)
else:
if cv2_init_num < 3:
put_queue(hb_queue, {"status": RecordingStatus.RECORDING_RETRYING.value[0]}, timeout=1)
sleep(1)
continue
pull_stream_start_time = time()
cv2_init_num = 1
frame, pull_p, width, height = pull_read_video_stream2(pull_p, pull_url, width, height,
width_height_3, w, h, request_id)
if frame is None:
if pull_url.startswith('http'):
clear_pull_p(pull_p, request_id)
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id)
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
if concurrent_frame < all_frames - 100:
logger.info("离线拉流异常结束:requestId: {}", request_id)
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
logger.info("离线拉流线程结束, requestId: {}", request_id)
break
else:
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id)
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
if read_start_time is None:
read_start_time = time()
pull_stream_read_timeout = time() - read_start_time
if pull_stream_read_timeout > read_stream_timeout:
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
request_id)
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
init_pull_num += 1
continue
init_pull_num = 1
read_start_time = None
if pull_queue.full():
sleep(1)
logger.info("pull拉流队列满了:{}, requestId: {}", pull_queue.qsize(), request_id)
continue
frame_list.append(frame)
frame_index_list.append(concurrent_frame)
if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
concurrent_frame += 1
del frame
except ServiceException as s:
ex = s.code, s.msg
except Exception:
logger.exception("实时拉流异常: {}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
clear_pull_p(pull_p, request_id)
if stop_ex:
if ex:
error_code, error_msg = ex
result = put_queue_result(pull_queue, (1, error_code, error_msg), timeout=3)
else:
result = put_queue_result(pull_queue, (2,), timeout=3)
if result:
# 3-minute timeout
cr_time = time()
while time() - cr_time < 180:
event = get_no_block_queue(command_queue)
if event is not None:
# A stop command means no more frames will be processed; exit directly
if 'stop' == event.get("command"):
logger.info("录屏拉流线程开始停止, requestId: {}", request_id)
break
sleep(1)
clear_queue(command_queue)
clear_queue(pull_queue)
clear_queue(hb_queue)
del frame_list, frame_index_list
logger.info("录屏拉流线程结束, requestId: {}", request_id)

+ 329 - 0   concurrency/PullVideoStreamProcess.py

@@ -0,0 +1,329 @@
# -*- coding: utf-8 -*-
import os
from multiprocessing import Process, Queue
from os import getpid
from time import time, sleep
from traceback import format_exc

import psutil
from loguru import logger

from util.LogUtils import init_log
from concurrency.FileUploadThread import ImageFileUpload
from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p
from util.QueUtil import get_no_block_queue, put_queue, clear_queue, put_queue_result


class PullVideoStreamProcess(Process):
__slots__ = ("_command_queue", "_msg", "_context", "_fb_queue", "_pull_queue", "_image_queue", "_analyse_type",
"_frame_num")

def __init__(self, *args):
super().__init__()
# Created internally
self._command_queue = Queue()

# Passed-in arguments
self._msg, self._context, self._fb_queue, self._pull_queue, self._image_queue, self._analyse_type, \
self._frame_num = args

def sendCommand(self, result):
put_queue(self._command_queue, result, timeout=2, is_ex=True)

@staticmethod
def start_File_upload(fb_queue, context, msg, image_queue, analyse_type):
image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
image_thread.setDaemon(True)
image_thread.start()
return image_thread

@staticmethod
def check(start_time, service_timeout, request_id, image_thread):
if time() - start_time > service_timeout:
logger.error("拉流进程运行超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
# Check that the image upload thread is still running
if image_thread and not image_thread.is_alive():
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", request_id)
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")


class OnlinePullVideoStreamProcess(PullVideoStreamProcess):
__slots__ = ()

def run(self):
# Bind to locals up front to avoid repeated attribute lookups in the loop
context, msg, analyse_type, frame_num = self._context, self._msg, self._analyse_type, self._frame_num
base_dir, env, service = context['base_dir'], context['env'], context["service"]
request_id, pull_url = msg["request_id"], msg["pull_url"]
pull_stream_timeout, read_stream_timeout, service_timeout = int(service["cv2_pull_stream_timeout"]), \
int(service["cv2_read_stream_timeout"]), int(service["timeout"]) + 120
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
self._fb_queue
image_thread, ex = None, None
width, height, width_height_3, all_frames, w_2, h_2, pull_p = None, None, None, 0, None, None, None
frame_list, frame_index_list = [], []
ex_status = True
try:
# Initialize logging
init_log(base_dir, env)
logger.info("开启启动实时视频拉流进程, requestId:{}", request_id)
# Start the image upload thread
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
start_time, pull_stream_start_time, read_start_time, full_timeout = time(), None, None, None
while True:
# Check for task timeout and that the image upload thread is healthy
self.check(start_time, service_timeout, request_id, image_thread)
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
break
if 'stop_ex' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
ex_status = False
break
# Check whether the video info or the pull handle exists
if check_video_stream(width, height):
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
if pull_stream_start_time is None:
pull_stream_start_time = time()
pull_stream_init_timeout = time() - pull_stream_start_time
if pull_stream_init_timeout > pull_stream_timeout:
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id)
# On timeout, send the error to the main process; if the queue is full, raise
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
if width is None:
sleep(1)
continue
pull_stream_start_time, cv2_init_num = None, 1
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
w_2, h_2, request_id)
if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
if full_timeout is None:
full_timeout = time()
if time() - full_timeout > 180:
logger.error("拉流队列阻塞超时, 请检查父进程是否正常!requestId: {}", request_id)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
if psutil.Process(getpid()).ppid() == 1:
clear_pull_p(pull_p, request_id)
ex_status = False
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
image_thread.join(120)
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
put_queue(self._fb_queue, message_feedback(request_id, AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1]), timeout=2)
break
continue
full_timeout = None
if frame is None:
clear_pull_p(pull_p, request_id)
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id)
if read_start_time is None:
read_start_time = time()
pull_stream_read_timeout = time() - read_start_time
if pull_stream_read_timeout > read_stream_timeout:
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
request_id)
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
init_pull_num += 1
continue
init_pull_num, read_start_time = 1, None
frame_list.append(frame)
frame_index_list.append(concurrent_frame)
if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
frame_list, frame_index_list = [], []
concurrent_frame += 1
del frame
except ServiceException as s:
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
ex = s.code, s.msg
except Exception:
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", format_exc(), pull_queue.qsize(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
clear_pull_p(pull_p, request_id)
del frame_list, frame_index_list
if ex_status:
if ex:
code, msg = ex
r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
else:
r = put_queue_result(pull_queue, (2,), timeout=10)
if r:
c_time = time()
while time() - c_time < 60:
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
break
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
logger.info("实时拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)


class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
__slots__ = ()

def run(self):
msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type
request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"]
ex, service_timeout, full_timeout = None, int(context["service"]["timeout"]) + 120, None
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
self._fb_queue
image_thread, pull_p = None, None
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
frame_list, frame_index_list = [], []
ex_status = True
try:
# Initialize logging
init_log(base_dir, env)
logger.info("开启离线视频拉流进程, requestId:{}", request_id)

# Start the image upload thread
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)

# Initialize pull-stream state
cv2_init_num, concurrent_frame = 0, 1
start_time = time()
while True:
# Check for task timeout and that the image upload thread is healthy
self.check(start_time, service_timeout, request_id, image_thread)
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止离线拉流进程, requestId:{}", request_id)
break
if 'stop_ex' == command_msg.get("command"):
logger.info("开始停止离线拉流进程, requestId:{}", request_id)
ex_status = False
break
# Check whether the video info or the pull handle exists
if check_video_stream(width, height):
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
if cv2_init_num > 3:
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
cv2_init_num += 1
sleep(1)
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
continue
if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
if full_timeout is None:
full_timeout = time()
if time() - full_timeout > 180:
logger.error("pull队列阻塞超时,请检测父进程是否正常!requestId: {}", request_id)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
if psutil.Process(getpid()).ppid() == 1:
clear_pull_p(pull_p, request_id)
ex_status = False
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
put_queue(image_queue, (2, "stop"), timeout=1)
image_thread.join(120)
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
put_queue(self._fb_queue, message_feedback(request_id,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1]), timeout=2)
break
continue
full_timeout = None
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height,
width_height_3, w_2, h_2, request_id)
if frame is None:
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id)
clear_pull_p(pull_p, request_id)
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
# Allow a margin of 100 frames
if concurrent_frame < all_frames - 100:
logger.info("离线拉流异常结束:requestId: {}", request_id)
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
logger.info("离线拉流线程结束, requestId: {}", request_id)
break
frame_list.append(frame)
frame_index_list.append(concurrent_frame)
if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
frame_list, frame_index_list = [], []
concurrent_frame += 1
del frame
except ServiceException as s:
logger.error("离线拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
ex = s.code, s.msg
except Exception:
logger.error("离线拉流异常: {}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
clear_pull_p(pull_p, request_id)
del frame_list, frame_index_list
if ex_status:
if ex:
code, msg = ex
r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
else:
r = put_queue_result(pull_queue, (2,), timeout=10)
if r:
c_time = time()
while time() - c_time < 180:
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
break
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
logger.info("离线拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)
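
The psutil.Process(getpid()).ppid() == 1 test above is an orphan check: on Linux a child whose parent has died is normally re-parented to PID 1, so the pull process shuts itself down when the dispatcher disappears. A standalone equivalent:

import os

import psutil

def parent_has_died():
    # True once this process has been re-parented to init/systemd (PID 1)
    return psutil.Process(os.getpid()).ppid() == 1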

+ 348 - 0   concurrency/PullVideoStreamProcess2.py

@@ -0,0 +1,348 @@
# -*- coding: utf-8 -*-
import os
from multiprocessing import Process, Queue
from os import getpid
from time import time, sleep
from traceback import format_exc

import psutil
from loguru import logger

from util.LogUtils import init_log
from concurrency.FileUploadThread import ImageFileUpload
from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.Cv2Utils import check_video_stream, build_video_info, pull_read_video_stream, clear_pull_p
from util.QueUtil import get_no_block_queue, put_queue, clear_queue, put_queue_result


class PullVideoStreamProcess2(Process):
__slots__ = ("_command_queue", "_msg", "_context", "_fb_queue", "_pull_queue", "_image_queue", "_analyse_type",
"_frame_num")

def __init__(self, *args):
super().__init__()
# Created internally
self._command_queue = Queue()

# Passed-in arguments
self._msg, self._context, self._fb_queue, self._pull_queue, self._image_queue, self._analyse_type, \
self._frame_num = args

def sendCommand(self, result):
try:
self._command_queue.put(result, timeout=10)
except Exception:
logger.error("添加队列超时异常:{}, requestId:{}", format_exc(), self._msg.get("request_id"))
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])

@staticmethod
def start_File_upload(*args):
fb_queue, context, msg, image_queue, analyse_type = args
image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
image_thread.setDaemon(True)
image_thread.start()
return image_thread

@staticmethod
def check(start_time, service_timeout, request_id, image_thread):
if time() - start_time > service_timeout:
logger.error("分析超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
# Check that the image upload thread is still running
if image_thread is not None and not image_thread.is_alive():
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, requestId:{}", request_id)
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")


class OnlinePullVideoStreamProcess2(PullVideoStreamProcess2):
__slots__ = ()

def run(self):
# Bind to locals up front to avoid repeated attribute lookups in the loop
context, msg, analyse_type = self._context, self._msg, self._analyse_type
request_id, base_dir, env = msg["request_id"], context['base_dir'], context['env']
pull_url, frame_num = msg["pull_url"], self._frame_num
pull_stream_timeout = int(context["service"]["cv2_pull_stream_timeout"])
read_stream_timeout = int(context["service"]["cv2_read_stream_timeout"])
service_timeout = int(context["service"]["timeout"])
command_queue, pull_queue, image_queue = self._command_queue, self._pull_queue, self._image_queue
fb_queue = self._fb_queue
image_thread, pull_p = None, None
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
frame_list, frame_index_list = [], []
ex = None
ex_status = True
full_timeout = None
try:
# 初始化日志
init_log(base_dir, env)
logger.info("开启实时视频拉流进程, requestId:{}", request_id)

# 开启图片上传线程
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)

# 初始化拉流工具类
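# cv2_init_num counts video-info rebuild attempts, init_pull_num counts empty-frame read
# retries, concurrent_frame is the running frame index (names inferred from their usage below)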
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
start_time, pull_start_time, read_start_time = time(), None, None
while True:
# 检测任务执行是否超时、图片上传线程是否正常
self.check(start_time, service_timeout, request_id, image_thread)
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
break
if 'stop_ex' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
ex_status = False
break
# 检测视频信息是否存在或拉流对象是否存在
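# check_video_stream(width, height) is assumed to return True while the stream info is still
# unknown, which triggers the (re)initialisation below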
if check_video_stream(width, height):
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
if pull_start_time is None:
pull_start_time = time()
pull_stream_init_timeout = time() - pull_start_time
if pull_stream_init_timeout > pull_stream_timeout:
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout, request_id)
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
if width is None:
sleep(1)
continue
pull_start_time, cv2_init_num = None, 1
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
w_2, h_2, request_id)
if frame is None:
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, request_id)
if read_start_time is None:
read_start_time = time()
pull_stream_read_timeout = time() - read_start_time
if pull_stream_read_timeout > read_stream_timeout:
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
request_id)
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
init_pull_num += 1
continue
init_pull_num, read_start_time = 1, None
if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
if full_timeout is None:
full_timeout = time()
if time() - full_timeout > 180:
logger.error("拉流队列阻塞异常, requestId: {}", request_id)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
if psutil.Process(getpid()).ppid() == 1:
clear_pull_p(pull_p, request_id)
ex_status = False
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
image_thread.join(120)
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
put_queue(self._fb_queue, message_feedback(request_id,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1]))
break
continue
full_timeout = None
frame_list.append(frame)
frame_index_list.append(concurrent_frame)
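# flush a batch to the pull queue once frame_num frames have accumulated (message type 4)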
if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
frame_list, frame_index_list = [], []
concurrent_frame += 1
del frame
except ServiceException as s:
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
ex = s.code, s.msg
except Exception:
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", format_exc(), pull_queue.qsize(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
clear_pull_p(pull_p, request_id)
del frame_list, frame_index_list
if ex_status:
if ex:
code, msg = ex
r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
else:
r = put_queue_result(pull_queue, (2,), timeout=10)
if r:
c_time = time()
while time() - c_time < 180:
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止实时拉流进程, requestId:{}", request_id)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
break
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
logger.info("实时拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)


class OfflinePullVideoStreamProcess2(PullVideoStreamProcess2):
__slots__ = ()

def run(self):
msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type
request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"]
ex, service_timeout = None, int(context["service"]["timeout"])
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
self._fb_queue
image_thread, pull_p = None, None
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
frame_list, frame_index_list = [], []
ex_status = True
full_timeout = None
try:
# 初始化日志
init_log(base_dir, env)
logger.info("开启离线视频拉流进程, requestId:{}", request_id)

# 开启图片上传线程
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)

# 初始化拉流工具类
cv2_init_num = 0
concurrent_frame = 1
start_time = time()
while True:
# 检测任务执行是否超时、图片上传线程是否正常
self.check(start_time, service_timeout, request_id, image_thread)
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止离线拉流进程, requestId:{}", request_id)
break
if 'stop_ex' == command_msg.get("command"):
logger.info("开始停止离线拉流进程, requestId:{}", request_id)
ex_status = False
break
# 检测视频信息是否存在或拉流对象是否存在
if check_video_stream(width, height):
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1)
frame_list, frame_index_list = [], []
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, request_id)
if cv2_init_num > 3:
clear_pull_p(pull_p, request_id)
logger.info("离线拉流重试失败, 重试次数: {}, requestId: {}", cv2_init_num, request_id)
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
cv2_init_num += 1
sleep(1)
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
continue
if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
if full_timeout is None:
full_timeout = time()
if time() - full_timeout > 300:
logger.error("拉流队列阻塞超时, 请检查父进程是否正常!requestId: {}", request_id)
raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
if psutil.Process(getpid()).ppid() == 1:
clear_pull_p(pull_p, request_id)
ex_status = False
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
image_thread.join(120)
logger.info("检测到父进程异常停止, 请检测服务器资源是否负载过高, requestId: {}", request_id)
put_queue(self._fb_queue, message_feedback(request_id,
AnalysisStatus.FAILED.value,
self._analyse_type,
ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1]))
break
continue
full_timeout = None
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
w_2, h_2, request_id)
if frame is None:
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", all_frames, concurrent_frame, request_id)
clear_pull_p(pull_p, request_id)
if len(frame_list) > 0:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=2, is_ex=False)
frame_list, frame_index_list = [], []
# 允许100帧的误差
if concurrent_frame < all_frames - 100:
logger.info("离线拉流异常结束:requestId: {}", request_id)
raise ServiceException(ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
logger.info("离线拉流线程结束, requestId: {}", request_id)
break
frame_list.append(frame)
frame_index_list.append(concurrent_frame)
if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
frame_list, frame_index_list = [], []
concurrent_frame += 1
del frame
except ServiceException as s:
logger.error("实时拉流异常: {}, 队列大小:{}, requestId:{}", s.msg, pull_queue.qsize(), request_id)
ex = s.code, s.msg
except Exception:
logger.error("实时拉流异常: {}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
clear_pull_p(pull_p, request_id)
del frame_list, frame_index_list
if ex_status:
if ex:
code, msg = ex
r = put_queue_result(pull_queue, (1, code, msg), timeout=10)
else:
r = put_queue_result(pull_queue, (2,), timeout=10)
if r:
c_time = time()
while time() - c_time < 180:
command_msg = get_no_block_queue(command_queue)
if command_msg is not None:
if 'stop' == command_msg.get("command"):
logger.info("开始停止离线拉流进程, requestId:{}", request_id)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
break
for q in [command_queue, pull_queue, image_queue]:
clear_queue(q)
if image_thread and image_thread.is_alive():
put_queue(image_queue, (2, "stop"), timeout=1)
logger.info("停止图片上传线程, requestId:{}", request_id)
image_thread.join(120)
logger.info("停止图片上传线程结束, requestId:{}", request_id)
logger.info("离线拉流线程结束, 图片队列: {}, 拉流队列: {}, 图片进程的状态: {} requestId: {}",
image_queue.qsize(), pull_queue.qsize(), image_thread.is_alive(), request_id)

+ 181
- 0
concurrency/PushStreamThread.py Vedi File

@@ -0,0 +1,181 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
from os.path import join
from threading import Thread
from traceback import format_exc

import cv2
import numpy as np
from loguru import logger
from util.Cv2Utils import write_or_video, write_ai_video, push_video_stream, close_all_p, video_conjuncing
from util.ImageUtils import url2Array, add_water_pic
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue


class OnPushStreamThread(Thread):
__slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')

def __init__(self, *args):
super().__init__()
# 传参
self._msg, self._push_queue, self._image_queue, self._context = args
# 自带参数
self.ex = None
self._logo = None
if self._context["video"]["video_add_water"]:
self._logo = self._msg.get("logo_url")
if self._logo:
self._logo = url2Array(self._logo, enable_ex=False)
if self._logo is None:  # url2Array returns an ndarray on success, so test for None explicitly
self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)

def run(self):
request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
orFilePath, aiFilePath, logo = self._context.get("orFilePath"), self._context.get("aiFilePath"), self._logo
or_video_file, ai_video_file, push_p = None, None, None
push_url = self._msg.get("push_url")
try:
logger.info("开始启动推流线程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=2) as t:
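# each [0, 0] pair below is assumed to hold [last failure time, retry count] for the
# push / original-video write / AI-video write retry strategies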
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
while True:
push_parm = push_queue.get()
if push_parm is not None:
# push_parm formats:
#   (1, (frame, current_frame, all_frames, ques_list)) - one frame plus its detection results
#   (2, command)                                        - control command, e.g. 'stop'
if push_parm[0] == 1: # 视频帧操作
frame, current_frame, all_frames, ques_list = push_parm[1]
copy_frame = frame.copy()
det_xywh = {}
if len(ques_list) > 0:
for qs in ques_list:
det_xywh.setdefault(qs[1], {})  # keep entries already collected for this model code
detect_targets_code = int(qs[0][0])
score = qs[0][-1]
label_array = qs[3][detect_targets_code]
color = qs[4][detect_targets_code]
if not isinstance(qs[0][1], (list, tuple, np.ndarray)):
xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4])
box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
else:
box = qs[0][1]
draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop")
cd = det_xywh[qs[1]].get(detect_targets_code)
if cd is None:
det_xywh[qs[1]][detect_targets_code] = [
[detect_targets_code, box, score, label_array, color]]
else:
det_xywh[qs[1]][detect_targets_code].append(
[detect_targets_code, box, score, label_array, color])
if logo is not None:  # logo is an ndarray when set; a bare truth test on it raises ValueError
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
frame_merge = video_conjuncing(frame, copy_frame)
# 写原视频到本地
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
or_write_status, request_id)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
ai_write_status, request_id)
if len(det_xywh) > 0:
put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames)))
push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
ai_video_file = write_ai_video_result.result()
or_video_file = write_or_video_result.result()
if push_parm[0] == 2:
if 'stop' == push_parm[1]:
logger.info("停止推流线程, requestId: {}", request_id)
close_all_p(push_p, or_video_file, ai_video_file, request_id)
or_video_file, ai_video_file, push_p = None, None, None
break
except Exception as e:
logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
self.ex = e
finally:
close_all_p(push_p, or_video_file, ai_video_file, request_id)
logger.info("推流线程停止完成!requestId:{}", request_id)


class OffPushStreamThread(Thread):
__slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')

def __init__(self, *args):
super().__init__()
# 传参
self._msg, self._push_queue, self._image_queue, self._context = args
# 自带参数
self.ex = None
self._logo = None
if self._context["video"]["video_add_water"]:
self._logo = self._msg.get("logo_url")
if self._logo:
self._logo = url2Array(self._logo, enable_ex=False)
if self._logo is None:
self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)

def run(self):
request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
aiFilePath, logo = self._context.get("aiFilePath"), self._logo
ai_video_file, push_p = None, None
push_url = self._msg.get("push_url")
try:
logger.info("开始启动推流线程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=1) as t:
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
while True:
push_parm = push_queue.get()
if push_parm is not None:
# push_parm formats:
#   (1, (frame, current_frame, all_frames, ques_list)) - one frame plus its detection results
#   (2, command)                                        - control command, e.g. 'stop'
if push_parm[0] == 1: # 视频帧操作
frame, current_frame, all_frames, ques_list = push_parm[1]
copy_frame = frame.copy()
det_xywh = {}
if len(ques_list) > 0:
for qs in ques_list:
det_xywh.setdefault(qs[1], {})  # keep entries already collected for this model code
detect_targets_code = int(qs[0][0])
score = qs[0][-1]
label_array = qs[3][detect_targets_code]
color = qs[4][detect_targets_code]
if not isinstance(qs[0][1], (list, tuple, np.ndarray)):
xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4])
box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
else:
box = qs[0][1]
draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop")
cd = det_xywh[qs[1]].get(detect_targets_code)
if cd is None:
det_xywh[qs[1]][detect_targets_code] = [
[detect_targets_code, box, score, label_array, color]]
else:
det_xywh[qs[1]][detect_targets_code].append(
[detect_targets_code, box, score, label_array, color])
if logo is not None:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
frame_merge = video_conjuncing(frame, copy_frame)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
ai_write_status, request_id)
if len(det_xywh) > 0:
put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames)))
push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
ai_video_file = write_ai_video_result.result()
if push_parm[0] == 2:
if 'stop' == push_parm[1]:
logger.info("停止推流线程, requestId: {}", request_id)
close_all_p(push_p, None, ai_video_file, request_id)
ai_video_file, push_p = None, None
break
except Exception as e:
logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
self.ex = e
finally:
close_all_p(push_p, None, ai_video_file, request_id)
logger.info("推流线程停止完成!requestId:{}", request_id)

+ 201
- 0
concurrency/PushStreamThread2.py Vedi File

@@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
from os.path import join
from threading import Thread
from traceback import format_exc

import cv2
import numpy as np
from loguru import logger
from util.Cv2Utils import write_or_video, write_ai_video, push_video_stream, close_all_p, video_conjuncing
from util.ImageUtils import url2Array, add_water_pic
from util.PlotsUtils import draw_painting_joint
from util.QueUtil import put_queue


class OnPushStreamThread2(Thread):
__slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')

def __init__(self, *args):
super().__init__()
# 传参
self._msg, self._push_queue, self._image_queue, self._context = args
# 自带参数
self.ex = None
self._logo = None
if self._context["video"]["video_add_water"]:
self._logo = self._msg.get("logo_url")
if self._logo:
self._logo = url2Array(self._logo, enable_ex=False)
if self._logo is None:
self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)

def run(self):
request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
orFilePath, aiFilePath, logo = self._context.get("orFilePath"), self._context.get("aiFilePath"), self._logo
or_video_file, ai_video_file, push_p = None, None, None
push_url = self._msg.get("push_url")
try:
logger.info("开始启动推流线程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=2) as t:
with ThreadPoolExecutor(max_workers=5) as tt:
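# t handles the video write futures below, tt draws detection boxes concurrently (up to 5 at a time)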
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
while True:
push_r = push_queue.get()
if push_r is not None:
# push_r formats:
#   (1, (frame_list, frame_index_list, all_frames), (allowedList, rainbows, label_arrays, font_config), [(code, retResults), ...])
#   (2, command) - control command, e.g. 'stop'
if push_r[0] == 1: # 视频帧操作
frame_list, frame_index_list, all_frames = push_r[1]
allowedList, rainbows, label_arrays, font_config = push_r[2]
for i, frame in enumerate(frame_list):
copy_frame = frame.copy()
det_xywh = {}
# 每帧可能存在多模型,多模型问题处理
thread_p = []
for det in push_r[3]:
code, retResults = det
det_xywh[code] = {}
# 如果识别到了检测目标
if len(retResults[i]) > 0:
for qs in retResults[i]:
detect_targets_code = int(qs[6])
if detect_targets_code not in allowedList:
logger.warning("当前检测目标不在检测目标中: {}, requestId: {}", detect_targets_code, request_id)
continue
score = qs[5]
label_array = label_arrays[detect_targets_code]
color = rainbows[detect_targets_code]
if not isinstance(qs[1], (list, tuple, np.ndarray)):
xc, yc, x2, y2 = int(qs[1]), int(qs[2]), int(qs[3]), int(qs[4])
box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
else:
box = qs[1]
# box, img, label_array, score=0.5, color=None, config=None
dp = tt.submit(draw_painting_joint, box, copy_frame, label_array, score,
color, font_config)
thread_p.append(dp)
cd = det_xywh[code].get(detect_targets_code)
if cd is None:
det_xywh[code][detect_targets_code] = [
[detect_targets_code, box, score, label_array, color]]
else:
det_xywh[code][detect_targets_code].append(
[detect_targets_code, box, score, label_array, color])
if logo is not None:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
if len(thread_p) > 0:
completed_results = wait(thread_p, timeout=60, return_when=ALL_COMPLETED)
completed_futures = completed_results.done
for r in completed_futures:
if r.exception():
raise r.exception()
frame_merge = video_conjuncing(frame, copy_frame)
# 写原视频到本地
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
or_write_status, request_id)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
ai_write_status, request_id)
if len(det_xywh) > 0:
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
font_config)))
push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
ai_video_file = write_ai_video_result.result()
or_video_file = write_or_video_result.result()
if push_r[0] == 2:
if 'stop' == push_r[1]:
logger.info("停止推流线程, requestId: {}", request_id)
close_all_p(push_p, or_video_file, ai_video_file, request_id)
or_video_file, ai_video_file, push_p = None, None, None
break
except Exception as e:
logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
self.ex = e
finally:
close_all_p(push_p, or_video_file, ai_video_file, request_id)
logger.info("推流线程停止完成!requestId:{}", request_id)


# class OffPushStreamThread(Thread):
# __slots__ = ('_msg', '_push_queue', '_context', 'ex', '_logo', '_image_queue')
#
# def __init__(self, *args):
# super().__init__()
# # 传参
# self._msg, self._push_queue, self._image_queue, self._context = args
# # 自带参数
# self.ex = None
# self._logo = None
# if self._context["video"]["video_add_water"]:
# self._logo = self._msg.get("logo_url")
# if self._logo:
# self._logo = url2Array(self._logo, enable_ex=False)
# if not self._logo:
# self._logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
#
# def run(self):
# request_id, push_queue, image_queue = self._msg.get("request_id"), self._push_queue, self._image_queue
# aiFilePath, logo = self._context.get("aiFilePath"), self._logo
# ai_video_file, push_p = None, None
# push_url = self._msg.get("push_url")
# try:
# logger.info("开始启动推流线程!requestId:{}", request_id)
# with ThreadPoolExecutor(max_workers=1) as t:
# p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
# while True:
# push_parm = push_queue.get()
# if push_parm is not None:
# # [(1, 原视频帧, 分析视频帧)]
# # # [视频帧、当前帧数、 总帧数、 [(问题数组、code、allowedList、label_arraylist、rainbows)]]
# # res = (1, (pull_frame[1], pull_frame[2], pull_frame[3], []))
# # [(2, 操作指令)]
# if push_parm[0] == 1: # 视频帧操作
# frame, current_frame, all_frames, ques_list = push_parm[1]
# copy_frame = frame.copy()
# det_xywh = {}
# if len(ques_list) > 0:
# for qs in ques_list:
# det_xywh[qs[1]] = {}
# detect_targets_code = int(qs[0][0])
# score = qs[0][-1]
# label_array = qs[3][detect_targets_code]
# color = qs[4][detect_targets_code]
# if not isinstance(qs[0][1], (list, tuple, np.ndarray)):
# xc, yc, x2, y2 = int(qs[0][1]), int(qs[0][2]), int(qs[0][3]), int(qs[0][4])
# box = [(xc, yc), (x2, yc), (x2, y2), (xc, y2)]
# else:
# box = qs[0][1]
# draw_painting_joint(box, copy_frame, label_array, score, color, "leftTop")
# cd = det_xywh[qs[1]].get(detect_targets_code)
# if cd is None:
# det_xywh[qs[1]][detect_targets_code] = [
# [detect_targets_code, box, score, label_array, color]]
# else:
# det_xywh[qs[1]][detect_targets_code].append(
# [detect_targets_code, box, score, label_array, color])
# if logo:
# frame = add_water_pic(frame, logo, request_id)
# copy_frame = add_water_pic(copy_frame, logo, request_id)
# frame_merge = video_conjuncing(frame, copy_frame)
# # 写识别视频到本地
# write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath, ai_video_file,
# ai_write_status, request_id)
# if len(det_xywh) > 0:
# put_queue(image_queue, (1, (det_xywh, frame, current_frame, all_frames)))
# push_p = push_video_stream(frame_merge, push_p, push_url, p_push_status, request_id)
# ai_video_file = write_ai_video_result.result()
# if push_parm[0] == 2:
# if 'stop' == push_parm[1]:
# logger.info("停止推流线程, requestId: {}", request_id)
# close_all_p(push_p, None, ai_video_file, request_id)
# ai_video_file, push_p = None, None
# break
# except Exception as e:
# logger.error("推流线程异常:{}, requestId:{}", format_exc(), request_id)
# self.ex = e
# finally:
# close_all_p(push_p, None, ai_video_file, request_id)
# logger.info("推流线程停止完成!requestId:{}", request_id)

+ 401
- 0
concurrency/PushVideoStreamProcess.py Vedi File

@@ -0,0 +1,401 @@
# -*- coding: utf-8 -*-

from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from os import getpid

from os.path import join
from time import time, sleep
from traceback import format_exc

import cv2
import psutil

from loguru import logger

from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util import ImageUtils
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p
from util.ImageUtils import url2Array, add_water_pic
from util.LogUtils import init_log

from util.PlotsUtils import draw_painting_joint, xywh2xyxy2

from util.QueUtil import get_no_block_queue, put_queue, clear_queue


class PushStreamProcess(Process):
__slots__ = ("_msg", "_push_queue", "_image_queue", '_push_ex_queue', '_hb_queue', "_context")

def __init__(self, *args):
super().__init__()
# 传参
self._msg, self._push_queue, self._image_queue, self._push_ex_queue, self._hb_queue, self._context = args

def build_logo_url(self):
logo = None
if self._context["video"]["video_add_water"]:
logo = self._msg.get("logo_url")
if logo:
logo = url2Array(logo, enable_ex=False)
if logo is None:
logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
self._context["logo"] = logo

@staticmethod
def handle_image(det_xywh, det, frame_score, copy_frame, draw_config, code_list):
code, det_result = det
# 每个单独模型处理
# 模型编号、100帧的所有问题, 检测目标、颜色、文字图片
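# det_xywh[code][cls] collects [cls, box, score, label_array, color] entries and
# code_list[code][cls] keeps a per-class hit count used by the deduplication check later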
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
draw_painting_joint(box, copy_frame, label_array, score, color, font_config)
if det_xywh.get(code) is None:
det_xywh[code], code_list[code] = {}, {}
cd = det_xywh[code].get(cls)
if cd is None:
code_list[code][cls] = 1
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
code_list[code][cls] += 1
det_xywh[code][cls].append([cls, box, score, label_array, color])


class OnPushStreamProcess(PushStreamProcess):
__slots__ = ()

def run(self):
self.build_logo_url()
msg, context = self._msg, self._context
base_dir, env, orFilePath, aiFilePath, logo, service_timeout, frame_score = context["base_dir"], \
context['env'], context["orFilePath"], context["aiFilePath"], context["logo"], \
int(context["service"]["timeout"]) + 120, context["service"]["filter"]["frame_score"]
request_id, push_url = msg["request_id"], msg["push_url"]
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \
self._hb_queue
or_video_file, ai_video_file, push_p, ex = None, None, None, None
ex_status = True
high_score_image = {}
# 相似度, 默认值0.65
similarity = context["service"]["filter"]["similarity"]
# 图片相似度开关
picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
frame_step = int(context["service"]["filter"]["frame_step"])
try:
init_log(base_dir, env)
logger.info("开始实时启动推流进程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=2) as t:
# 定义三种推流、写原视频流、写ai视频流策略
# 第一个参数时间, 第二个参数重试次数
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
start_time = time()
while True:
# 检测推流执行超时时间, 1.防止任务运行超时 2.主进程挂了,子进程运行超时
if time() - start_time > service_timeout:
logger.error("推流超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
# 系统由于各种问题可能会杀死内存使用多的进程, 自己杀掉自己
if psutil.Process(getpid()).ppid() == 1:
logger.info("推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
ex_status = False
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
break
# 获取推流的视频帧
push_r = get_no_block_queue(push_queue)
if push_r is not None:
if push_r[0] == 1:
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
for i, frame in enumerate(frame_list):
# 复制帧用来画图
copy_frame = frame.copy()
det_xywh, code_list, thread_p = {}, {}, []
for det in push_objs[i]:
rr = t.submit(self.handle_image, det_xywh, det, frame_score, copy_frame,
draw_config, code_list)
thread_p.append(rr)
if logo is not None:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
if len(thread_p) > 0:
for r in thread_p:
r.result()
frame_merge = video_conjuncing(frame, copy_frame)
# 写原视频到本地
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
or_write_status, request_id)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
ai_video_file, ai_write_status, request_id)
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status, request_id)
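# image upload filter: a frame goes to image_queue only if it is far enough from the last
# uploaded frame (frame_step), or the detected model/class sets differ or grow, and the
# dHash similarity check below does not flag it as a near-duplicate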
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
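# dHash is assumed to be a 64-bit hash, so similarity = 1 - hamming_distance / 64
# (e.g. 8 differing bits gives a similarity of 0.875)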
if picture_similarity and len(high_score_image) > 0:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])), timeout=2)
push_p = push_stream_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
or_video_file = write_or_video_result.result(timeout=5)
# 接收停止指令
if push_r[0] == 2:
if 'stop' == push_r[1]:
logger.info("停止推流进程, requestId: {}", request_id)
break
if 'stop_ex' == push_r[1]:
ex_status = False
logger.info("停止推流进程, requestId: {}", request_id)
break
del push_r
else:
sleep(1)
except ServiceException as s:
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
ex = s.code, s.msg
except Exception:
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
# 关闭推流管, 原视频写对象, 分析视频写对象
close_all_p(push_p, or_video_file, ai_video_file, request_id)
if ex:
code, msg = ex
put_queue(push_ex_queue, (1, code, msg), timeout=2)
else:
if ex_status:
# 关闭推流的时候, 等待1分钟图片队列处理完,如果1分钟内没有处理完, 清空图片队列, 丢弃没有上传的图片
c_time = time()
while time() - c_time < 60:
if image_queue.qsize() == 0 or image_queue.empty():
break
sleep(2)
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
logger.info("推流进程停止完成!图片队列大小: {}, requestId:{}", image_queue.qsize(), request_id)


class OffPushStreamProcess(PushStreamProcess):
__slots__ = ()

def run(self):
self.build_logo_url()
msg, context = self._msg, self._context
request_id = msg["request_id"]
base_dir, env = context["base_dir"], context['env']
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \
self._hb_queue
aiFilePath, logo = context["aiFilePath"], context["logo"]
ai_video_file, push_p, push_url = None, None, msg["push_url"]
service_timeout = int(context["service"]["timeout"]) + 120
frame_score = context["service"]["filter"]["frame_score"]
ex = None
ex_status = True
high_score_image = {}
# 相似度, 默认值0.65
similarity = context["service"]["filter"]["similarity"]
# 图片相似度开关
picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
frame_step = int(context["service"]["filter"]["frame_step"])
try:
init_log(base_dir, env)
logger.info("开始启动离线推流进程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=2) as t:
# 定义三种推流、写原视频流、写ai视频流策略
# 第一个参数时间, 第二个参数重试次数
p_push_status, ai_write_status = [0, 0], [0, 0]
start_time = time()
while True:
# 检测推流执行超时时间
if time() - start_time > service_timeout:
logger.error("离线推流超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
# 系统由于各种问题可能会杀死内存使用多的进程, 自己杀掉自己
if psutil.Process(getpid()).ppid() == 1:
logger.info("离线推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
ex_status = False
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
break
# 获取推流的视频帧
push_r = get_no_block_queue(push_queue)
if push_r is not None:
# [(1, ...] 视频帧操作
# [(2, 操作指令)] 指令操作
if push_r[0] == 1:
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
# 处理每一帧图片
for i, frame in enumerate(frame_list):
if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames:
task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
put_queue(hb_queue, {"hb_value": task_process}, timeout=2)
# 复制帧用来画图
copy_frame = frame.copy()
# 所有问题记录字典
det_xywh = {}
code_list = {}
# 每帧可能存在多模型,多模型问题处理
thread_p = []
for det in push_objs[i]:
rr = t.submit(self.handle_image, det_xywh, det, frame_score, copy_frame,
draw_config, code_list)
thread_p.append(rr)
if logo is not None:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
if len(thread_p) > 0:
for r in thread_p:
r.result()
frame_merge = video_conjuncing(frame, copy_frame)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
ai_video_file,
ai_write_status, request_id)
push_stream_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status, request_id)
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])), timeout=2)
push_p = push_stream_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
# 接收停止指令
if push_r[0] == 2:
if 'stop' == push_r[1]:
logger.info("停止推流进程, requestId: {}", request_id)
break
if 'stop_ex' == push_r[1]:
logger.info("停止推流进程, requestId: {}", request_id)
ex_status = False
break
del push_r
else:
sleep(1)
except ServiceException as s:
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
ex = s.code, s.msg
except Exception:
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
# 关闭推流管, 分析视频写对象
close_all_p(push_p, None, ai_video_file, request_id)
if ex:
code, msg = ex
put_queue(push_ex_queue, (1, code, msg), timeout=2)
else:
if ex_status:
# 关闭推流的时候, 等待1分钟图片队列处理完,如果1分钟内没有处理完, 清空图片队列, 丢弃没有上传的图片
c_time = time()
while time() - c_time < 60:
if image_queue.qsize() == 0 or image_queue.empty():
break
sleep(2)
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
logger.info("推流进程停止完成!requestId:{}", request_id)

+ 412
- 0
concurrency/PushVideoStreamProcess2.py Vedi File

@@ -0,0 +1,412 @@
# -*- coding: utf-8 -*-

from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from os import getpid

from os.path import join
from time import time, sleep
from traceback import format_exc

import cv2
import psutil

from loguru import logger

from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util import ImageUtils
from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push_video_stream, close_all_p
from util.ImageUtils import url2Array, add_water_pic
from util.LogUtils import init_log

from util.PlotsUtils import draw_painting_joint, xywh2xyxy2

from util.QueUtil import get_no_block_queue, put_queue, clear_queue


class PushStreamProcess2(Process):
__slots__ = ("_msg", "_push_queue", "_image_queue", '_push_ex_queue', '_hb_queue', "_context")

def __init__(self, *args):
super().__init__()
# 传参
self._msg, self._push_queue, self._image_queue, self._push_ex_queue, self._hb_queue, self._context = args

def build_logo_url(self):
logo = None
if self._context["video"]["video_add_water"]:
logo = self._msg.get("logo_url")
if logo:
logo = url2Array(logo, enable_ex=False)
if logo is None:
logo = cv2.imread(join(self._context['base_dir'], "image/logo.png"), -1)
self._context["logo"] = logo

@staticmethod
def handle_image(det_xywh, det, frame_score, copy_frame, draw_config, code_list):
code, det_result = det
# 每个单独模型处理
# 模型编号、100帧的所有问题, 检测目标、颜色、文字图片
if len(det_result) > 0:
font_config, allowedList = draw_config["font_config"], draw_config[code]["allowedList"]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
for qs in det_result:
box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls]
# box, img, label_array, score=0.5, color=None, config=None
draw_painting_joint(box, copy_frame, label_array, score, color, font_config)
if det_xywh.get(code) is None:
det_xywh[code], code_list[code] = {}, {}
cd = det_xywh[code].get(cls)
if cd is None:
code_list[code][cls] = 1
det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else:
code_list[code][cls] += 1
det_xywh[code][cls].append([cls, box, score, label_array, color])


class OnPushStreamProcess2(PushStreamProcess2):
__slots__ = ()

def run(self):
msg, context = self._msg, self._context
self.build_logo_url()
request_id = msg["request_id"]
base_dir, env = context["base_dir"], context['env']
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \
self._hb_queue
orFilePath, aiFilePath, logo = context["orFilePath"], context["aiFilePath"], context["logo"]
or_video_file, ai_video_file, push_p, push_url = None, None, None, msg["push_url"]
service_timeout = int(context["service"]["timeout"]) + 120
frame_score = context["service"]["filter"]["frame_score"]
ex = None
ex_status = True
high_score_image = {}
# 相似度, 默认值0.65
similarity = context["service"]["filter"]["similarity"]
# 图片相似度开关
picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
frame_step = int(context["service"]["filter"]["frame_step"])
try:
init_log(base_dir, env)
logger.info("开始启动推流进程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=3) as t:
# 定义三种推流、写原视频流、写ai视频流策略
# 第一个参数时间, 第二个参数重试次数
p_push_status, or_write_status, ai_write_status = [0, 0], [0, 0], [0, 0]
start_time = time()
while True:
# 检测推流执行超时时间
if time() - start_time > service_timeout:
logger.error("推流超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1])
# 系统由于各种问题可能会杀死内存使用多的进程, 自己杀掉自己
if psutil.Process(getpid()).ppid() == 1:
ex_status = False
logger.info("推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
break
# 获取推流的视频帧
push_r = get_no_block_queue(push_queue)
if push_r is not None:
# [(1, ...] 视频帧操作
# [(2, 操作指令)] 指令操作
if push_r[0] == 1:
# 如果是多模型push_objs数组可能包含[模型1识别数组, 模型2识别数组, 模型3识别数组]
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
# 处理每一帧图片
for i, frame in enumerate(frame_list):
# 复制帧用来画图
copy_frame = frame.copy()
# 所有问题记录字典
det_xywh, code_list, thread_p = {}, {}, []
# [模型1识别数组, 模型2识别数组, 模型3识别数组]
for s_det_list in push_objs:
code, det_list = s_det_list
rr = t.submit(self.handle_image, det_xywh, (code, det_list[i]), frame_score,
copy_frame,
draw_config, code_list)
thread_p.append(rr)
if logo is not None:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
if len(thread_p) > 0:
for r in thread_p:
r.result()
frame_merge = video_conjuncing(frame, copy_frame)
# 写原视频到本地
write_or_video_result = t.submit(write_or_video, frame, orFilePath, or_video_file,
or_write_status, request_id)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
ai_video_file, ai_write_status, request_id)
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])))
push_p = push_p_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
or_video_file = write_or_video_result.result(timeout=5)
# 接收停止指令
if push_r[0] == 2:
if 'stop' == push_r[1]:
logger.info("停止推流线程, requestId: {}", request_id)
break
if 'stop_ex' == push_r[1]:
logger.info("停止推流线程, requestId: {}", request_id)
ex_status = False
break
del push_r
else:
sleep(1)
except ServiceException as s:
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
ex = s.code, s.msg
except Exception:
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
# 关闭推流管, 原视频写对象, 分析视频写对象
close_all_p(push_p, or_video_file, ai_video_file, request_id)
if ex:
code, msg = ex
put_queue(push_ex_queue, (1, code, msg), timeout=2)
else:
if ex_status:
# 关闭推流的时候, 等待1分钟图片队列处理完,如果1分钟内没有处理完, 清空图片队列, 丢弃没有上传的图片
c_time = time()
while time() - c_time < 60:
if image_queue.qsize() == 0 or image_queue.empty():
break
sleep(2)
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
logger.info("推流进程停止完成!requestId:{}", request_id)


class OffPushStreamProcess2(PushStreamProcess2):
__slots__ = ()

def run(self):
self.build_logo_url()
msg, context = self._msg, self._context
request_id = msg["request_id"]
base_dir, env = context["base_dir"], context['env']
push_queue, image_queue, push_ex_queue, hb_queue = self._push_queue, self._image_queue, self._push_ex_queue, \
self._hb_queue
aiFilePath, logo = context["aiFilePath"], context["logo"]
ai_video_file, push_p, push_url = None, None, msg["push_url"]
service_timeout = int(context["service"]["timeout"]) + 120
frame_score = context["service"]["filter"]["frame_score"]
ex = None
ex_status = True
high_score_image = {}
# 相似度, 默认值0.65
similarity = context["service"]["filter"]["similarity"]
# 图片相似度开关
picture_similarity = bool(context["service"]["filter"]["picture_similarity"])
frame_step = int(context["service"]["filter"]["frame_step"])
try:
init_log(base_dir, env)
logger.info("开始启动离线推流进程!requestId:{}", request_id)
with ThreadPoolExecutor(max_workers=2) as t:
# 定义三种推流、写原视频流、写ai视频流策略
# 第一个参数时间, 第二个参数重试次数
p_push_status, ai_write_status = [0, 0], [0, 0]
start_time = time()
while True:
# 检测推流执行超时时间
if time() - start_time > service_timeout:
logger.error("离线推流超时, requestId: {}", request_id)
raise ServiceException(ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PUSH_STREAM_TIMEOUT_EXCEPTION.value[1])
# 系统由于各种问题可能会杀死内存使用多的进程, 自己杀掉自己
if psutil.Process(getpid()).ppid() == 1:
ex_status = False
logger.info("离线推流进程检测到父进程异常停止, 自动停止推流进程, requestId: {}", request_id)
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
break
# 获取推流的视频帧
push_r = get_no_block_queue(push_queue)
if push_r is not None:
# [(1, ...] 视频帧操作
# [(2, 操作指令)] 指令操作
if push_r[0] == 1:
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
# 处理每一帧图片
for i, frame in enumerate(frame_list):
if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames:
task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
put_queue(hb_queue, {"hb_value": task_process}, timeout=2)
# 复制帧用来画图
copy_frame = frame.copy()
# 所有问题记录字典
det_xywh, code_list, thread_p = {}, {}, []
for s_det_list in push_objs:
code, det_list = s_det_list
rr = t.submit(self.handle_image, det_xywh, (code, det_list[i]), frame_score,
copy_frame, draw_config, code_list)
thread_p.append(rr)
if logo is not None:
frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id)
if len(thread_p) > 0:
for r in thread_p:
r.result()
frame_merge = video_conjuncing(frame, copy_frame)
# 写识别视频到本地
write_ai_video_result = t.submit(write_ai_video, frame_merge, aiFilePath,
ai_video_file,
ai_write_status, request_id)
push_p_result = t.submit(push_video_stream, frame_merge, push_p, push_url,
p_push_status,
request_id)
if len(det_xywh) > 0:
flag = True
if len(high_score_image) > 0:
# 检查当前帧和上一帧的间距是多少, 如果小于指定间距, 不处理
diff_frame_num = frame_index_list[i] - high_score_image["current_frame"]
if diff_frame_num < frame_step:
flag = False
det_codes = set(det_xywh.keys())
cache_codes = set(high_score_image["code"].keys())
# 如果是一样的模型
if det_codes == cache_codes:
for code in cache_codes:
det_clss = set(det_xywh[code].keys())
cache_clss = set(high_score_image["code"][code].keys())
# 如果检测目标的数量大于缓存的检测目标数量
if det_clss > cache_clss:
flag = True
break
elif det_clss.isdisjoint(cache_clss):
flag = True
break
# 如果检测目标的数量相等,判断检测目标识别的数量谁比较多
elif det_clss == cache_clss:
for cls in cache_clss:
# 如果检测目标的识别的数量大于缓存中的数量
if len(det_xywh[code][cls]) > \
high_score_image["code"][code][cls]:
flag = True
break
if flag:
break
# 如果现在的检测结果模型的结果多余上一次问题的模型数量,判断为不同问题, 需要上传图片
elif det_codes > cache_codes:
flag = True
# 如果检测的模型不一样
elif det_codes.isdisjoint(cache_codes):
flag = True
else:
high_score_image = {}
# 检查图片和上一张问题图片相似度是多少, 相似度高不处理
if picture_similarity and len(high_score_image) > 0:
hash1 = ImageUtils.dHash(high_score_image["or_frame"])
hash2 = ImageUtils.dHash(frame)
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity_1 = 1 - dist * 1.0 / 64
if similarity_1 >= similarity:
flag = False
if flag:
high_score_image["or_frame"] = frame
high_score_image["current_frame"] = frame_index_list[i]
high_score_image["code"] = code_list
put_queue(image_queue, (1, (det_xywh, frame, frame_index_list[i], all_frames,
draw_config["font_config"])))
push_p = push_p_result.result(timeout=5)
ai_video_file = write_ai_video_result.result(timeout=5)
# 接收停止指令
if push_r[0] == 2:
if 'stop' == push_r[1]:
logger.info("停止推流线程, requestId: {}", request_id)
break
if 'stop_ex' == push_r[1]:
logger.info("停止推流线程, requestId: {}", request_id)
ex_status = False
break
del push_r
else:
sleep(1)
except ServiceException as s:
logger.error("推流进程异常:{}, requestId:{}", s.msg, request_id)
ex = s.code, s.msg
except Exception:
logger.error("推流进程异常:{}, requestId:{}", format_exc(), request_id)
ex = ExceptionType.SERVICE_INNER_EXCEPTION.value[0], ExceptionType.SERVICE_INNER_EXCEPTION.value[1]
finally:
# 关闭推流管, 分析视频写对象
close_all_p(push_p, None, ai_video_file, request_id)
if ex:
code, msg = ex
put_queue(push_ex_queue, (1, code, msg), timeout=2)
else:
if ex_status:
# 关闭推流的时候, 等待1分钟图片队列处理完,如果1分钟内没有处理完, 清空图片队列, 丢弃没有上传的图片
c_time = time()
while time() - c_time < 60:
if image_queue.qsize() == 0 or image_queue.empty():
break
sleep(2)
for q in [push_queue, image_queue, push_ex_queue, hb_queue]:
clear_queue(q)
logger.info("推流进程停止完成!requestId:{}", request_id)

+ 50
- 0
concurrency/RecordingHeartbeatThread.py Vedi File

@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
from threading import Thread
import time
from traceback import format_exc

from loguru import logger

from entity.FeedBack import recording_feedback
from enums.RecordingStatusEnum import RecordingStatus
from util.QueUtil import get_no_block_queue, put_queue, clear_queue


class RecordingHeartbeat(Thread):
__slots__ = ('_fb_queue', '_hb_queue', '_request_id')

def __init__(self, fb_queue, hb_queue, request_id):
super().__init__()
self._fb_queue = fb_queue
self._hb_queue = hb_queue
self._request_id = request_id

def run(self):
request_id = self._request_id
hb_queue, fb_queue = self._hb_queue, self._fb_queue
logger.info("开始启动录屏心跳线程!requestId:{}", request_id)
hb_init_num, progress = 0, '0.0000'
status = RecordingStatus.RECORDING_WAITING.value[0]
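# feedback is sent roughly every 30 seconds: the loop sleeps 3s per pass and hb_init_num
# advances by 3, so hb_init_num % 30 == 0 fires on every 10th pass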
try:
while True:
time.sleep(3)
hb_msg = get_no_block_queue(hb_queue)
if hb_msg is not None and len(hb_msg) > 0:
command_que = hb_msg.get("command")
progress_que = hb_msg.get("progress")
status_que = hb_msg.get("status")
if progress_que is not None:
progress = progress_que
if status_que is not None:
status = status_que
if 'stop' == command_que:
logger.info("开始终止心跳线程, requestId:{}", request_id)
break
if hb_init_num % 30 == 0:
put_queue(fb_queue, recording_feedback(request_id, status, progress=progress), timeout=5, is_ex=True)
hb_init_num += 3
except Exception:
logger.error("心跳线程异常:{}, requestId:{}", format_exc(), request_id)
finally:
clear_queue(hb_queue)
logger.info("心跳线程停止完成!requestId:{}", request_id)

+ 0
- 0
concurrency/__init__.py Vedi File


BIN
concurrency/__pycache__/CommonThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/FeedbackThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/FileUploadThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/HeartbeatThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/IntelligentRecognitionProcess.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/PullStreamThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/PullVideoStreamProcess.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/PushStreamThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/PushStreamThread2.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/RecordingHeartbeatThread.cpython-38.pyc Vedi File


BIN
concurrency/__pycache__/__init__.cpython-38.pyc Vedi File


+ 10
- 0
config/aliyun/dsp_dev_aliyun.yml Vedi File

@@ -0,0 +1,10 @@
access_key: "LTAI5tSJ62TLMUb4SZuf285A"
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468341

+ 10
- 0
config/aliyun/dsp_prod_aliyun.yml Vedi File

@@ -0,0 +1,10 @@
access_key: "LTAI5tSJ62TLMUb4SZuf285A"
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468340

+ 11
- 0
config/aliyun/dsp_test_aliyun.yml Vedi File

@@ -0,0 +1,11 @@
access_key: "LTAI5tSJ62TLMUb4SZuf285A"
access_secret: "MWYynm30filZ7x0HqSHlU3pdLVNeI7"
oss:
endpoint: "http://oss-cn-shanghai.aliyuncs.com"
bucket: "ta-tech-image"
connect_timeout: 30
vod:
host_address: "https://vod.play.t-aaron.com/"
ecsRegionId: "cn-shanghai"
cateId: 1000468338


+ 12
- 0
config/baidu/dsp_dev_baidu.yml Vedi File

@@ -0,0 +1,12 @@
orc:
APP_ID: 28173504
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi"
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
vehicle:
APP_ID: 31096670
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7"
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
person:
APP_ID: 31096755
API_KEY: "CiWrt4iyxOly36n3kR7utiAG"
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"

+ 12
- 0
config/baidu/dsp_prod_baidu.yml Vedi File

@@ -0,0 +1,12 @@
orc:
APP_ID: 28173504
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi"
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
vehicle:
APP_ID: 31096670
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7"
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
person:
APP_ID: 31096755
API_KEY: "CiWrt4iyxOly36n3kR7utiAG"
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"

+ 12
- 0
config/baidu/dsp_test_baidu.yml Vedi File

@@ -0,0 +1,12 @@
orc:
APP_ID: 28173504
API_KEY: "kqrFE7VuygIaFer7z6cRxzoi"
SECRET_KEY: "yp7xBokyl4TItyGhay7skAN1cMwfvEXf"
vehicle:
APP_ID: 31096670
API_KEY: "Dam3O4tgPRN3qh4OYE82dbg7"
SECRET_KEY: "1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa"
person:
APP_ID: 31096755
API_KEY: "CiWrt4iyxOly36n3kR7utiAG"
SECRET_KEY: "K7y6V3XTGdyXvgtCNCwTGUEooxxDuX9v"

+ 25
- 0
config/kafka/dsp_dev_kafka.yml Vedi File

@@ -0,0 +1,25 @@
bootstrap_servers: ["192.168.11.13:9092"]
topic:
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks"
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks"
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks"
dsp-alg-results-topic: "dsp-alg-task-results"
dsp-recording-task-topic: "dsp-recording-task"
dsp-recording-result-topic: "dsp-recording-result"
dsp-push-stream-task-topic: "dsp-push-stream-task"
dsp-push-stream-result-topic: "dsp-push-stream-result"
producer:
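# acks: -1 is equivalent to acks=all: the leader waits for all in-sync replicas before acknowledging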
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: "dsp_ai_server"
group_id: "dsp-ai-dev"
auto_offset_reset: "latest"
enable_auto_commit: false
max_poll_records: 1




+ 22
- 0
config/kafka/dsp_prod_kafka.yml Vedi File

@@ -0,0 +1,22 @@
bootstrap_servers: ["101.132.127.1:19094"]
topic:
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks"
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks"
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks"
dsp-alg-results-topic: "dsp-alg-task-results"
dsp-recording-task-topic: "dsp-recording-task"
dsp-recording-result-topic: "dsp-recording-result"
dsp-push-stream-task-topic: "dsp-push-stream-task"
dsp-push-stream-result-topic: "dsp-push-stream-result"
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: "dsp_ai_server"
group_id: "dsp-ai-prod"
auto_offset_reset: "latest"
enable_auto_commit: false
max_poll_records: 1

+ 24
- 0
config/kafka/dsp_test_kafka.yml Vedi File

@@ -0,0 +1,24 @@
bootstrap_servers: ["106.14.96.218:19092"]
topic:
dsp-alg-online-tasks-topic: "dsp-alg-online-tasks"
dsp-alg-offline-tasks-topic: "dsp-alg-offline-tasks"
dsp-alg-image-tasks-topic: "dsp-alg-image-tasks"
dsp-alg-results-topic: "dsp-alg-task-results"
dsp-recording-task-topic: "dsp-recording-task"
dsp-recording-result-topic: "dsp-recording-result"
dsp-push-stream-task-topic: "dsp-push-stream-task"
dsp-push-stream-result-topic: "dsp-push-stream-result"
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: "dsp_ai_server"
group_id: "dsp-ai-test"
auto_offset_reset: "latest"
enable_auto_commit: false
max_poll_records: 1
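
The dev, test and prod Kafka files expose identical topics and consumer settings; only the brokers and the group id differ. A minimal sketch (assuming the kafka-python client; the service's own wrapper is util/KafkaUtils.CustomerKafkaConsumer, which is not part of this excerpt) of a consumer built from one of these files:

import yaml
from kafka import KafkaConsumer

with open("config/kafka/dsp_dev_kafka.yml", encoding="utf-8") as f:
    kafka_cfg = yaml.safe_load(f)

consumer = KafkaConsumer(
    *kafka_cfg["topic"].values(),
    bootstrap_servers=kafka_cfg["bootstrap_servers"],
    client_id=kafka_cfg["consumer"]["client_id"],
    group_id=kafka_cfg["consumer"]["group_id"],
    auto_offset_reset=kafka_cfg["consumer"]["auto_offset_reset"],
    enable_auto_commit=kafka_cfg["consumer"]["enable_auto_commit"],
    max_poll_records=kafka_cfg["consumer"]["max_poll_records"])

for record in consumer:
    print(record.topic, record.value)   # one task message at a time (max_poll_records: 1)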



+ 10
- 0
config/logger/dsp_dev_logger.yml Vedi File

@@ -0,0 +1,10 @@
enable_file_log: true
enable_stderr: true
base_path: "../dsp/logs"
log_name: "dsp.log"
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}"
level: "INFO"
rotation: "00:00"
retention: "1 days"
encoding: "utf8"


+ 10
- 0
config/logger/dsp_prod_logger.yml Vedi File

@@ -0,0 +1,10 @@
enable_file_log: true
enable_stderr: false
base_path: "../dsp/logs"
log_name: "dsp.log"
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}"
level: "INFO"
rotation: "00:00"
retention: "7 days"
encoding: "utf8"


+ 10
- 0
config/logger/dsp_test_logger.yml Vedi File

@@ -0,0 +1,10 @@
enable_file_log: true
enable_stderr: false
base_path: "../dsp/logs"
log_name: "dsp.log"
log_fmt: "{time:YYYY-MM-DD HH:mm:ss.SSS} [{level}][{process.name}-{process.id}-{thread.name}-{thread.id}][{line}] {module}-{function} - {message}"
level: "INFO"
rotation: "00:00"
retention: "3 days"
encoding: "utf8"
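
These logger files map directly onto loguru sink options. A minimal sketch (assuming loguru; the project's real setup lives in util/LogUtils.init_log, not shown here) of wiring one of them into a rotating file sink:

from os.path import join

import yaml
from loguru import logger

with open("config/logger/dsp_dev_logger.yml", encoding="utf-8") as f:
    log_cfg = yaml.safe_load(f)

if log_cfg["enable_file_log"]:
    logger.add(join(log_cfg["base_path"], log_cfg["log_name"]),
               format=log_cfg["log_fmt"],
               level=log_cfg["level"],
               rotation=log_cfg["rotation"],     # rotate at midnight
               retention=log_cfg["retention"],   # keep N days of logs
               encoding=log_cfg["encoding"],
               enqueue=True)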


+ 30
- 0
config/service/dsp_dev_service.yml Vedi File

@@ -0,0 +1,30 @@
video:
# Local path where videos are saved
file_path: "../dsp/video/"
# Whether to add a watermark
video_add_water: false
service:
filter:
# Minimum frame score required before an image is returned
frame_score: 0.4
# Image similarity filtering
picture_similarity: true
similarity: 1
frame_step: 160
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
recording_pull_stream_timeout: 600
model:
# Which recognition mode to use
# 1 normal mode
# 2 model tracking
model_type: 1
limit: 3
task:
# Task limit: 5
limit: 5
image:
limit: 20



+ 30
- 0
config/service/dsp_prod_service.yml Vedi File

@@ -0,0 +1,30 @@
video:
# Local path where videos are saved
file_path: "../dsp/video/"
# Whether to add a watermark
video_add_water: false
service:

filter:
# Minimum frame score required before an image is returned
frame_score: 0.4
# Image similarity filtering
picture_similarity: true
similarity: 1
frame_step: 160
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
recording_pull_stream_timeout: 600
model:
# Which recognition mode to use
# 1 normal mode
# 2 model tracking
model_type: 2
limit: 3
task:
# Task limit: 5
limit: 5
image:
limit: 20


+ 30
- 0
config/service/dsp_test_service.yml Vedi File

@@ -0,0 +1,30 @@
video:
# Local path where videos are saved
file_path: "../dsp/video/"
# Whether to add a watermark
video_add_water: false
service:

filter:
# Minimum frame score required before an image is returned
frame_score: 0.4
# Image similarity filtering
picture_similarity: true
similarity: 1
frame_step: 160
timeout: 21600
cv2_pull_stream_timeout: 1000
cv2_read_stream_timeout: 1000
recording_pull_stream_timeout: 600
model:
# Which recognition mode to use
# 1 normal mode
# 2 model tracking
model_type: 2
limit: 3
task:
# Task limit: 5
limit: 5
image:
limit: 20
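
The filter block decides which detection frames are reported back. The helper below is purely illustrative: the function name and the exact gating semantics of frame_score, picture_similarity and frame_step are assumptions made for the example, not the service's actual logic.

import yaml

with open("config/service/dsp_dev_service.yml", encoding="utf-8") as f:
    service_cfg = yaml.safe_load(f)

flt = service_cfg["service"]["filter"]

def should_report(score, frame_index, last_reported_index):
    # Keep only frames whose score clears the threshold, and thin out
    # near-duplicate frames when similarity filtering is enabled.
    if score < flt["frame_score"]:
        return False
    if flt["picture_similarity"] and frame_index - last_reported_index < flt["frame_step"]:
        return False
    return True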


+ 27
- 0
dsp_master.py Vedi File

@@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
from os.path import dirname, realpath
from sys import argv

from loguru import logger
from torch import multiprocessing

from service.Dispatcher import DispatcherService
from util.LogUtils import init_log


'''
DSP main program entry point
'''
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
base_dir = dirname(realpath(__file__))
arg = argv
print("脚本启动参数: ", arg)
envs = ('dev', 'test', 'prod')
env = envs[0]
active = [env for env in envs if env in arg]
if len(active) != 0:
env = active[0]
init_log(base_dir, env)
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】开始启动 ლ(´ڡ`ლ)゙")
DispatcherService(base_dir, env)
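
The entry point picks the environment from the launch arguments and falls back to dev when none of dev/test/prod is present; an illustrative invocation for the test environment:

python dsp_master.py test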

+ 53
- 0
entity/FeedBack.py Vedi File

@@ -0,0 +1,53 @@
from json import dumps

from util.TimeUtils import now_date_to_str


def message_feedback(requestId, status, analyse_type, error_code="", error_msg="", progress="", original_url="",
sign_url="", modelCode="", detectTargetCode="", analyse_results="", video_url="", ai_video_url=""):
if len(analyse_results) > 0:
analyse_results = dumps(analyse_results)
taskbar = {
"request_id": requestId,
"status": status,
"type": analyse_type,
"video_url": video_url,
"ai_video_url": ai_video_url,
"error_code": error_code,
"error_msg": error_msg,
"progress": progress,
"results": [
{
"original_url": original_url,
"sign_url": sign_url,
"analyse_results": analyse_results,
"model_code": modelCode,
"detect_targets_code": detectTargetCode,
"analyse_time": now_date_to_str()
}
]
}
return {"feedback": taskbar}


def recording_feedback(requestId, status, error_code="", error_msg="", progress="", video_url=""):
rdf = {
"request_id": requestId,
"status": status,
"error_code": error_code,
"error_msg": error_msg,
"progress": progress,
"video_url": video_url
}
return {"recording": rdf}


def pull_stream_feedback(requestId, status, error_code="", error_msg="", videoInfo=[]):
return {"pull_stream": {
"request_id": requestId,
"video_info_list": videoInfo,
"push_stream_status": status,
"error_code": error_code,
"error_msg": error_msg,
"current_time": now_date_to_str()
}}
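
An illustrative use of these builders (the request id, URLs and result payload below are made up) is to push a feedback message onto the dispatcher's feedback queue, which is presumably drained by the FeedbackThread:

from multiprocessing import Queue

from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.AnalysisTypeEnum import AnalysisType

fb_queue = Queue()
fb = message_feedback("req-123", AnalysisStatus.RUNNING.value, AnalysisType.OFFLINE.value,
                      progress="0.5000",
                      original_url="https://example.com/or.jpg",
                      sign_url="https://example.com/ai.jpg",
                      modelCode="001", detectTargetCode="0",
                      analyse_results=[{"label": "floating_object", "score": 0.92}])
fb_queue.put(fb)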

+ 14
- 0
entity/PullStreamDto.py Vedi File

@@ -0,0 +1,14 @@

class PullStreamDto:

__slots__ = ('msg', 'context', 'pullQueue', 'fbQueue', 'hbQueue', 'imageQueue', 'analyse_type')

def __init__(self, msg, context, pullQueue, fbQueue, hbQueue, imageQueue, analyse_type):
self.msg = msg
self.context = context
self.pullQueue = pullQueue
self.fbQueue = fbQueue
self.hbQueue = hbQueue
self.imageQueue = imageQueue
self.analyse_type = analyse_type
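
An illustrative construction of the DTO that the pull-stream workers pass around (all queue and message values here are placeholders):

from multiprocessing import Queue

from entity.PullStreamDto import PullStreamDto

dto = PullStreamDto(msg={"request_id": "req-123"}, context={}, pullQueue=Queue(),
                    fbQueue=Queue(), hbQueue=Queue(), imageQueue=Queue(), analyse_type="2")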


+ 0
- 0
entity/__init__.py Vedi File


BIN
entity/__pycache__/FeedBack.cpython-38.pyc Vedi File


BIN
entity/__pycache__/__init__.cpython-38.pyc Vedi File


+ 21
- 0
enums/AnalysisStatusEnum.py Vedi File

@@ -0,0 +1,21 @@
from enum import Enum, unique


# Analysis status enum
@unique
class AnalysisStatus(Enum):
# Waiting
WAITING = "waiting"

# Analyzing
RUNNING = "running"

# Analysis finished
SUCCESS = "success"

# Timed out
TIMEOUT = "timeout"

# Failed
FAILED = "failed"

+ 25
- 0
enums/AnalysisTypeEnum.py Vedi File

@@ -0,0 +1,25 @@
from enum import Enum, unique


# Analysis type enum
@unique
class AnalysisType(Enum):
# Online
ONLINE = "1"

# Offline
OFFLINE = "2"

# Image
IMAGE = "3"

# Screen recording
RECORDING = "9999"

# Pull-to-push streaming
PULLTOPUSH = "10000"






+ 188
- 0
enums/BaiduSdkEnum.py Vedi File

@@ -0,0 +1,188 @@
from enum import Enum, unique

'''
OCR official docs: https://ai.baidu.com/ai-doc/OCR/zkibizyhz
Vehicle analysis official docs: https://ai.baidu.com/ai-doc/VEHICLE/rk3inf9tj
Field 1: error code
Field 2: English error description
Field 3: Chinese error description
Field 4: 0 - report the error as a generic internal error
1 - the error message may be exposed as-is
2 - return an empty error message
Field 5: number of retries allowed for this error
'''


# Error code enum
@unique
class BaiduSdkErrorEnum(Enum):

UNKNOWN_ERROR = (1, "Unknown error", "未知错误", 0, 0)

SERVICE_TEMPORARILY_UNAVAILABLE = (2, "Service temporarily unavailable", "服务暂不可用,请再次请求", 0, 3)

UNSUPPORTED_OPENAPI_METHOD = (3, "Unsupported openapi method", "调用的API不存在", 0, 0)

API_REQUEST_LIMIT_REACHED = (4, "Open api request limit reached", "请求量限制, 请稍后再试!", 1, 5)

NO_PERMISSION_TO_ACCESS_DATA = (6, "No permission to access data", "无权限访问该用户数据", 1, 0)

GET_SERVICE_TOKEN_FAILED = (13, "Get service token failed", "获取token失败", 0, 2)

IAM_CERTIFICATION_FAILED = (14, "IAM Certification failed", "IAM 鉴权失败", 0, 1)

APP_NOT_EXSITS_OR_CREATE_FAILED = (15, "app not exsits or create failed", "应用不存在或者创建失败", 0, 0)

API_DAILY_REQUEST_LIMIT_REACHED = (17, "Open api daily request limit reached", "每天请求量超限额!", 1, 2)

API_QPS_REQUEST_LIMIT_REACHED = (18, "Open api qps request limit reached", "QPS超限额!", 1, 10)

API_TOTAL_REQUEST_LIMIT_REACHED = (19, "Open api total request limit reached", "请求总量超限额!", 1, 2)

INVALID_TOKEN = (100, "Invalid parameter", "无效的access_token参数,token拉取失败", 0, 1)

ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID = (110, "Access token invalid or no longer valid", "access_token无效,token有效期为30天", 0, 1)

ACCESS_TOKEN_EXPIRED = (111, "Access token expired", "access token过期,token有效期为30天", 0, 1)

INTERNAL_ERROR = (282000, "internal error", "服务器内部错误", 0, 1)

INVALID_PARAM = (216100, "invalid param", "请求中包含非法参数!", 0, 1)

NOT_ENOUGH_PARAM = (216101, "not enough param", "缺少必须的参数!", 0, 0)

SERVICE_NOT_SUPPORT = (216102, "service not support", "请求了不支持的服务,请检查调用的url", 0, 0)

PARAM_TOO_LONG = (216103, "param too long", "请求中某些参数过长!", 1, 0)

APPID_NOT_EXIST = (216110, "appid not exist", "appid不存在", 0, 0)

EMPTY_IMAGE = (216200, "empty image", "图片为空!", 1, 0)

IMAGE_FORMAT_ERROR = (216201, "image format error", "上传的图片格式错误,现阶段我们支持的图片格式为:PNG、JPG、JPEG、BMP", 1, 0)

IMAGE_SIZE_ERROR = (216202, "image size error", "上传的图片大小错误,分辨率不高于4096*4096", 1, 0)

IMAGE_SIZE_BASE_ERROR = (216203, "image size error", "上传的图片编码有误", 1, 0)

RECOGNIZE_ERROR = (216630, "recognize error", "识别错误", 2, 2)

DETECT_ERROR = (216634, "detect error", "检测错误", 2, 2)

MISSING_PARAMETERS = (282003, "missing parameters: {参数名}", "请求参数缺失", 0, 0)

BATCH_ROCESSING_ERROR = (282005, "batch processing error", "处理批量任务时发生部分或全部错误", 0, 5)

BATCH_TASK_LIMIT_REACHED = (282006, "batch task limit reached", "批量任务处理数量超出限制,请将任务数量减少到10或10以下", 1, 5)

IMAGE_TRANSCODE_ERROR = (282100, "image transcode error", "图片压缩转码错误", 0, 1)

IMAGE_SPLIT_LIMIT_REACHED = (282101, "image split limit reached", "长图片切分数量超限!", 1, 1)

TARGET_DETECT_ERROR = (282102, "target detect error", "未检测到图片中识别目标!", 2, 1)

TARGET_RECOGNIZE_ERROR = (282103, "target recognize error", "图片目标识别错误!", 2, 1)

URLS_NOT_EXIT = (282110, "urls not exit", "URL参数不存在,请核对URL后再次提交!", 1, 0)

URL_FORMAT_ILLEGAL = (282111, "url format illegal", "URL格式非法!", 1, 0)

URL_DOWNLOAD_TIMEOUT = (282112, "url download timeout", "URL下载超时!", 1, 0)

URL_RESPONSE_INVALID = (282113, "url response invalid", "URL返回无效参数!", 1, 0)

URL_SIZE_ERROR = (282114, "url size error", "URL长度超过1024字节或为0!", 1, 0)

REQUEST_ID_NOT_EXIST = (282808, "request id: xxxxx not exist", "request id xxxxx 不存在", 0, 0)

RESULT_TYPE_ERROR = (282809, "result type error", "返回结果请求错误(不属于excel或json)", 0, 0)

IMAGE_RECOGNIZE_ERROR = (282810, "image recognize error", "图像识别错误", 2, 1)

INVALID_ARGUMENT = (283300, "Invalid argument", "入参格式有误,可检查下图片编码、代码格式是否有误", 1, 0)

INTERNAL_ERROR_2 = (336000, "Internal error", "服务器内部错误", 0, 0)

INVALID_ARGUMENT_2 = (336001, "Invalid Argument", "入参格式有误,比如缺少必要参数、图片编码错误等等,可检查下图片编码、代码格式是否有误", 0, 0)

SDK_IMAGE_SIZE_ERROR = ('SDK100', "image size error", "图片大小超限,最短边至少50px,最长边最大4096px ,建议长宽比3:1以内,图片请求格式支持:PNG、JPG、BMP", 1, 0)

SDK_IMAGE_LENGTH_ERROR = ('SDK101', "image length error", "图片边长不符合要求,最短边至少50px,最长边最大4096px ,建议长宽比3:1以内", 1, 0)

SDK_READ_IMAGE_FILE_ERROR = ('SDK102', "read image file error", "读取图片文件错误", 0, 1)

SDK_CONNECTION_OR_READ_DATA_TIME_OUT = ('SDK108', "connection or read data time out", "连接超时或读取数据超时,请检查本地网络设置、文件读取设置", 0, 3)

SDK_UNSUPPORTED_IMAGE_FORMAT = ('SDK109', "unsupported image format", "不支持的图片格式,当前支持以下几类图片:PNG、JPG、BMP", 1, 0)


BAIDUERRORDATA = {
BaiduSdkErrorEnum.UNKNOWN_ERROR.value[0]: BaiduSdkErrorEnum.UNKNOWN_ERROR,
BaiduSdkErrorEnum.SERVICE_TEMPORARILY_UNAVAILABLE.value[0]: BaiduSdkErrorEnum.SERVICE_TEMPORARILY_UNAVAILABLE,
BaiduSdkErrorEnum.UNSUPPORTED_OPENAPI_METHOD.value[0]: BaiduSdkErrorEnum.UNSUPPORTED_OPENAPI_METHOD,
BaiduSdkErrorEnum.API_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.NO_PERMISSION_TO_ACCESS_DATA.value[0]: BaiduSdkErrorEnum.NO_PERMISSION_TO_ACCESS_DATA,
BaiduSdkErrorEnum.GET_SERVICE_TOKEN_FAILED.value[0]: BaiduSdkErrorEnum.GET_SERVICE_TOKEN_FAILED,
BaiduSdkErrorEnum.IAM_CERTIFICATION_FAILED.value[0]: BaiduSdkErrorEnum.IAM_CERTIFICATION_FAILED,
BaiduSdkErrorEnum.APP_NOT_EXSITS_OR_CREATE_FAILED.value[0]: BaiduSdkErrorEnum.APP_NOT_EXSITS_OR_CREATE_FAILED,
BaiduSdkErrorEnum.API_DAILY_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_DAILY_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.API_QPS_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_QPS_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.API_TOTAL_REQUEST_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.API_TOTAL_REQUEST_LIMIT_REACHED,
BaiduSdkErrorEnum.INVALID_TOKEN.value[0]: BaiduSdkErrorEnum.INVALID_TOKEN,
BaiduSdkErrorEnum.ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID.value[0]: BaiduSdkErrorEnum.ACCESS_TOKEN_INVALID_OR_NO_LONGER_VALID,
BaiduSdkErrorEnum.ACCESS_TOKEN_EXPIRED.value[0]: BaiduSdkErrorEnum.ACCESS_TOKEN_EXPIRED,
BaiduSdkErrorEnum.INTERNAL_ERROR.value[0]: BaiduSdkErrorEnum.INTERNAL_ERROR,
BaiduSdkErrorEnum.INVALID_PARAM.value[0]: BaiduSdkErrorEnum.INVALID_PARAM,
BaiduSdkErrorEnum.NOT_ENOUGH_PARAM.value[0]: BaiduSdkErrorEnum.NOT_ENOUGH_PARAM,
BaiduSdkErrorEnum.SERVICE_NOT_SUPPORT.value[0]: BaiduSdkErrorEnum.SERVICE_NOT_SUPPORT,
BaiduSdkErrorEnum.PARAM_TOO_LONG.value[0]: BaiduSdkErrorEnum.PARAM_TOO_LONG,
BaiduSdkErrorEnum.APPID_NOT_EXIST.value[0]: BaiduSdkErrorEnum.APPID_NOT_EXIST,
BaiduSdkErrorEnum.EMPTY_IMAGE.value[0]: BaiduSdkErrorEnum.EMPTY_IMAGE,
BaiduSdkErrorEnum.IMAGE_FORMAT_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_FORMAT_ERROR,
BaiduSdkErrorEnum.IMAGE_SIZE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_SIZE_ERROR,
BaiduSdkErrorEnum.IMAGE_SIZE_BASE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_SIZE_BASE_ERROR,
BaiduSdkErrorEnum.RECOGNIZE_ERROR.value[0]: BaiduSdkErrorEnum.RECOGNIZE_ERROR,
BaiduSdkErrorEnum.DETECT_ERROR.value[0]: BaiduSdkErrorEnum.DETECT_ERROR,
BaiduSdkErrorEnum.MISSING_PARAMETERS.value[0]: BaiduSdkErrorEnum.MISSING_PARAMETERS,
BaiduSdkErrorEnum.BATCH_ROCESSING_ERROR.value[0]: BaiduSdkErrorEnum.BATCH_ROCESSING_ERROR,
BaiduSdkErrorEnum.BATCH_TASK_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.BATCH_TASK_LIMIT_REACHED,
BaiduSdkErrorEnum.IMAGE_TRANSCODE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_TRANSCODE_ERROR,
BaiduSdkErrorEnum.IMAGE_SPLIT_LIMIT_REACHED.value[0]: BaiduSdkErrorEnum.IMAGE_SPLIT_LIMIT_REACHED,
BaiduSdkErrorEnum.TARGET_DETECT_ERROR.value[0]: BaiduSdkErrorEnum.TARGET_DETECT_ERROR,
BaiduSdkErrorEnum.TARGET_RECOGNIZE_ERROR.value[0]: BaiduSdkErrorEnum.TARGET_RECOGNIZE_ERROR,
BaiduSdkErrorEnum.URL_SIZE_ERROR.value[0]: BaiduSdkErrorEnum.URL_SIZE_ERROR,
BaiduSdkErrorEnum.REQUEST_ID_NOT_EXIST.value[0]: BaiduSdkErrorEnum.REQUEST_ID_NOT_EXIST,
BaiduSdkErrorEnum.RESULT_TYPE_ERROR.value[0]: BaiduSdkErrorEnum.RESULT_TYPE_ERROR,
BaiduSdkErrorEnum.IMAGE_RECOGNIZE_ERROR.value[0]: BaiduSdkErrorEnum.IMAGE_RECOGNIZE_ERROR,
BaiduSdkErrorEnum.INVALID_ARGUMENT.value[0]: BaiduSdkErrorEnum.INVALID_ARGUMENT,
BaiduSdkErrorEnum.INTERNAL_ERROR_2.value[0]: BaiduSdkErrorEnum.INTERNAL_ERROR_2,
BaiduSdkErrorEnum.INVALID_ARGUMENT_2.value[0]: BaiduSdkErrorEnum.INVALID_ARGUMENT_2,
BaiduSdkErrorEnum.SDK_IMAGE_SIZE_ERROR.value[0]: BaiduSdkErrorEnum.SDK_IMAGE_SIZE_ERROR,
BaiduSdkErrorEnum.SDK_IMAGE_LENGTH_ERROR.value[0]: BaiduSdkErrorEnum.SDK_IMAGE_LENGTH_ERROR,
BaiduSdkErrorEnum.SDK_READ_IMAGE_FILE_ERROR.value[0]: BaiduSdkErrorEnum.SDK_READ_IMAGE_FILE_ERROR,
BaiduSdkErrorEnum.SDK_CONNECTION_OR_READ_DATA_TIME_OUT.value[0]: BaiduSdkErrorEnum.SDK_CONNECTION_OR_READ_DATA_TIME_OUT,
BaiduSdkErrorEnum.SDK_UNSUPPORTED_IMAGE_FORMAT.value[0]: BaiduSdkErrorEnum.SDK_UNSUPPORTED_IMAGE_FORMAT,
BaiduSdkErrorEnum.URLS_NOT_EXIT.value[0]: BaiduSdkErrorEnum.URLS_NOT_EXIT,
BaiduSdkErrorEnum.URL_FORMAT_ILLEGAL.value[0]: BaiduSdkErrorEnum.URL_FORMAT_ILLEGAL,
BaiduSdkErrorEnum.URL_DOWNLOAD_TIMEOUT.value[0]: BaiduSdkErrorEnum.URL_DOWNLOAD_TIMEOUT,
BaiduSdkErrorEnum.URL_RESPONSE_INVALID.value[0]: BaiduSdkErrorEnum.URL_RESPONSE_INVALID
}

@unique
class VehicleEnum(Enum):
CAR = ("car", "小汽车", 0)
TRICYCLE = ("tricycle", "三轮车", 1)
MOTORBIKE = ("motorbike", "摩托车", 2)
CARPLATE = ("carplate", "车牌", 3)
TRUCK = ("truck", "卡车", 4)
BUS = ("bus", "巴士", 5)


VehicleEnumVALUE={
VehicleEnum.CAR.value[0]: VehicleEnum.CAR,
VehicleEnum.TRICYCLE.value[0]: VehicleEnum.TRICYCLE,
VehicleEnum.MOTORBIKE.value[0]: VehicleEnum.MOTORBIKE,
VehicleEnum.CARPLATE.value[0]: VehicleEnum.CARPLATE,
VehicleEnum.TRUCK.value[0]: VehicleEnum.TRUCK,
VehicleEnum.BUS.value[0]: VehicleEnum.BUS
}
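
An illustrative lookup against the table above: resolve a Baidu error code to its enum entry and read the configured retry budget.

from enums.BaiduSdkEnum import BAIDUERRORDATA

err = BAIDUERRORDATA.get(18)  # API_QPS_REQUEST_LIMIT_REACHED
if err is not None:
    code, en_msg, cn_msg, expose_mode, retries = err.value
    print(code, en_msg, retries)  # 18 'Open api qps request limit reached' 10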

+ 86
- 0
enums/ExceptionEnum.py Vedi File

@@ -0,0 +1,86 @@
from enum import Enum, unique


# Exception enum
@unique
class ExceptionType(Enum):

OR_VIDEO_ADDRESS_EXCEPTION = ("SP000", "未拉取到视频流, 请检查拉流地址是否有视频流!")

ANALYSE_TIMEOUT_EXCEPTION = ("SP001", "AI分析超时!")

PULLSTREAM_TIMEOUT_EXCEPTION = ("SP002", "原视频拉流超时!")

READSTREAM_TIMEOUT_EXCEPTION = ("SP003", "原视频读取视频流超时!")

GET_VIDEO_URL_EXCEPTION = ("SP004", "获取视频播放地址失败!")

GET_VIDEO_URL_TIMEOUT_EXCEPTION = ("SP005", "获取原视频播放地址超时!")

PULL_STREAM_URL_EXCEPTION = ("SP006", "拉流地址不能为空!")

PUSH_STREAM_URL_EXCEPTION = ("SP007", "推流地址不能为空!")

PUSH_STREAM_TIME_EXCEPTION = ("SP008", "未生成本地视频地址!")

AI_MODEL_MATCH_EXCEPTION = ("SP009", "未匹配到对应的AI模型!")

ILLEGAL_PARAMETER_FORMAT = ("SP010", "非法参数格式!")

PUSH_STREAMING_CHANNEL_IS_OCCUPIED = ("SP011", "推流通道可能被占用, 请稍后再试!")

VIDEO_RESOLUTION_EXCEPTION = ("SP012", "不支持该分辨率类型的视频,请切换分辨率再试!")

READ_IAMGE_URL_EXCEPTION = ("SP013", "未能解析图片地址!")

DETECTION_TARGET_TYPES_ARE_NOT_SUPPORTED = ("SP014", "不支持该类型的检测目标!")

WRITE_STREAM_EXCEPTION = ("SP015", "写流异常!")

OR_VIDEO_DO_NOT_EXEIST_EXCEPTION = ("SP016", "原视频不存在!")

MODEL_LOADING_EXCEPTION = ("SP017", "模型加载异常!")

MODEL_ANALYSE_EXCEPTION = ("SP018", "算法模型分析异常!")

AI_MODEL_CONFIG_EXCEPTION = ("SP019", "模型配置不能为空!")

AI_MODEL_GET_CONFIG_EXCEPTION = ("SP020", "获取模型配置异常, 请检查模型配置是否正确!")

MODEL_GROUP_LIMIT_EXCEPTION = ("SP021", "模型组合个数超过限制!")

MODEL_NOT_SUPPORT_VIDEO_EXCEPTION = ("SP022", "%s不支持视频识别!")

MODEL_NOT_SUPPORT_IMAGE_EXCEPTION = ("SP023", "%s不支持图片识别!")

THE_DETECTION_TARGET_CANNOT_BE_EMPTY = ("SP024", "检测目标不能为空!")

URL_ADDRESS_ACCESS_FAILED = ("SP025", "URL地址访问失败, 请检测URL地址是否正确!")

UNIVERSAL_TEXT_RECOGNITION_FAILED = ("SP026", "识别失败!")

COORDINATE_ACQUISITION_FAILED = ("SP027", "飞行坐标识别异常!")

PUSH_STREAM_EXCEPTION = ("SP028", "推流异常!")

MODEL_DUPLICATE_EXCEPTION = ("SP029", "存在重复模型配置!")

DETECTION_TARGET_NOT_SUPPORT = ("SP031", "存在不支持的检测目标!")

TASK_EXCUTE_TIMEOUT = ("SP032", "任务执行超时!")

PUSH_STREAM_URL_IS_NULL = ("SP033", "拉流、推流地址不能为空!")

PULL_STREAM_NUM_LIMIT_EXCEPTION = ("SP034", "转推流数量超过限制!")

NOT_REQUESTID_TASK_EXCEPTION = ("SP993", "未查询到该任务,无法停止任务!")

NO_RESOURCES = ("SP995", "服务器暂无资源可以使用,请稍后30秒后再试!")

NO_CPU_RESOURCES = ("SP996", "暂无CPU资源可以使用,请稍后再试!")

SERVICE_COMMON_EXCEPTION = ("SP997", "公共服务异常!")

NO_GPU_RESOURCES = ("SP998", "暂无GPU资源可以使用,请稍后再试!")

SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常!")
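
These codes pair with the ServiceException defined later in this commit; an illustrative guard (the function is an example, not project code):

from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException

def require_pull_url(pull_url):
    # Raise the project's standard exception when the pull URL is missing.
    if not pull_url:
        raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0],
                               ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1])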

+ 481
- 0
enums/ModelTypeEnum.py Vedi File

@@ -0,0 +1,481 @@
import sys
from enum import Enum, unique

from common.Constant import COLOR

sys.path.extend(['..', '../AIlib2'])
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess
from segutils.trafficUtils import tracfficAccidentMixFunction
from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess

'''
Tuple fields
1. sequence number
2. model code
3. model name
4. selected model (weights directory) name
5. model configuration
6. model reference configuration [Detweights file, Segweights file, reference count]
'''


@unique
class ModelType(Enum):
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: {
'device': device,
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"],
'seg_nclass': 2,
'trtFlag_seg': True,
'trtFlag_det': True,
'segRegionCnt': 1,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [5, 6, 7],
'riverIou': 0.1
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/river/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../AIlib2/weights/river/stdc_360X640_%s_fp16.engine' % gpuName
})

FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾"],
'trtFlag_det': True,
'trtFlag_seg': False,
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/forest2/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 2,
'segRegionCnt': 0,
'slopeIndex': [],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: {
'device': str(device),
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 3,
'segRegionCnt': 2,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': tracfficAccidentMixFunction,
'pars': {
'modelSize': (640, 360),
'RoadArea': 16000,
'roadVehicleAngle': 15,
'speedRoadVehicleAngleMax': 75,
'roundness': 1.0,
'cls': 9,
'vehicleFactor': 0.1,
'confThres': 0.25,
'roadIou': 0.6,
'radius': 50,
'vehicleFlag': False,
'distanceFlag': False
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName,
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName
})

EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, None)

PLATE_MODEL = ("5", "005", "车牌模型", None, None)

VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["车辆"],
'seg_nclass': 2,
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/vehicle/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["行人"],
'seg_nclass': 2,
'segRegionCnt': 0,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine" % gpuName,
'slopeIndex': [],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["烟雾", "火焰"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/smogfire/yolov5_%s_fp16.engine" % gpuName,
'slopeIndex': [],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["钓鱼", "游泳"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["违法种植"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

SHIP_MODEL = ("11", "011", "船只模型", 'ship2', lambda device, gpuName: {
'model_size': (608, 608),
'K': 100,
'conf_thresh': 0.18,
'device': 'cuda:%s' % device,
'down_ratio': 4,
'num_classes': 15,
'weights': '../AIlib2/weights/ship2/obb_608X608_%s_fp16.engine' % gpuName,
'dataset': 'dota',
'half': False,
'mean': (0.5, 0.5, 0.5),
'std': (1, 1, 1),
'heads': {'hm': None, 'wh': 10, 'reg': 2, 'cls_theta': 1},
'decoder': None,
'test_flag': True,
"rainbows": COLOR,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'drawBox': False,
'label_array': None,
'labelnames': ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"),
})

BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, None)

CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["人"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None
})

RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2', lambda device, gpuName: {
'device': device,
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
"蓝藻"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 2,
'segRegionCnt': 1,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [1, 3, 4, 7],
'riverIou': 0.1
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.3,
"ovlap_thres_crossCategory": 0.65,
"classes": 5,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/river2/yolov5_%s_fp16.engine" % gpuName,
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName
})

CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["车辆", "垃圾", "商贩"],
'seg_nclass': 2,  # number of segmentation classes, default 2
'segRegionCnt': 0,
'slopeIndex': [],
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../AIlib2/weights/cityMangement/yolov5_%s_fp16.engine" % gpuName,
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"ovlap_thres_crossCategory": 0.6,
"classes": 2,
"rainbows": COLOR
},
'Segweights': None
})

DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
'device': device,
'labelnames': ["人头", "人", "船只"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 2,
'segRegionCnt': 2,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': mixDrowing_water_postprocess,
'pars': {
'modelSize': (640, 360)
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
# "../AIlib2/weights/conf/%s/yolov5.pt" % modeType.value[3]
'Detweights': "../AIlib2/weights/drowning/yolov5_%s_fp16.engine" % gpuName,
# '../AIlib2/weights/conf/%s/stdc_360X640.pth' % modeType.value[3]
'Segweights': '../AIlib2/weights/drowning/stdc_360X640_%s_fp16.engine' % gpuName
})

NOPARKING_MODEL = (
"18", "018", "城市违章模型", 'noParking', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "违停"],
'trtFlag_seg': True,
'trtFlag_det': True,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,  # segmentation model preprocessing parameters
'mixFunction': {
'function': mixNoParking_road_postprocess,
'pars': {
'modelSize': (640, 360),
'roundness': 0.3,
'cls': 9,
'laneArea': 10,
'laneAngleCha': 5,
'RoadArea': 16000
}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/noParking/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../AIlib2/weights/noParking/stdc_360X640_%s_fp16.engine' % gpuName
})

ILLPARKING_MODEL = ("19", "019", "车辆违停模型", 'illParking', lambda device, gpuName: {
'device': device,
'labelnames': ["车", "T角点", "L角点", "违停"],
'trtFlag_seg': False,
'trtFlag_det': True,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'mixFunction': {
'function': illParking_postprocess,
'pars': {}
}
},
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'Detweights': "../AIlib2/weights/illParking/yolov5_%s_fp16.engine" % gpuName,
'Segweights': None
})

@staticmethod
def checkCode(code):
for model in ModelType:
if model.value[1] == code:
return True
return False


'''
Field 1: detection target name
Field 2: detection target index
Field 3: lambda invoking the Baidu detection client
'''


@unique
class BaiduModelTarget(Enum):
VEHICLE_DETECTION = (
"车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id))

HUMAN_DETECTION = (
"人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id))

PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id))


BAIDU_MODEL_TARGET_CONFIG = {
BaiduModelTarget.VEHICLE_DETECTION.value[1]: BaiduModelTarget.VEHICLE_DETECTION,
BaiduModelTarget.HUMAN_DETECTION.value[1]: BaiduModelTarget.HUMAN_DETECTION,
BaiduModelTarget.PEOPLE_COUNTING.value[1]: BaiduModelTarget.PEOPLE_COUNTING
}

EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}

# Model analysis modes
@unique
class ModelMethodTypeEnum(Enum):
# Mode 1: normal recognition
NORMAL = 1

# Mode 2: tracking-based recognition
TRACE = 2
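
Each entry's fifth element is a lazy factory: calling it with a device id and GPU name yields the full model configuration. An illustrative resolution of the river model (device "0" and gpuName "2080Ti" are example values, and the AIlib2 weights must be present for the config to be usable):

from enums.ModelTypeEnum import ModelType

model = ModelType.WATER_SURFACE_MODEL
assert ModelType.checkCode(model.value[1])   # "001" is a known model code
config = model.value[4]("0", "2080Ti")       # device, gpuName
print(config["Detweights"])                  # ../AIlib2/weights/river/yolov5_2080Ti_fp16.engine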

+ 676
- 0
enums/ModelTypeEnum2.py Vedi File

@@ -0,0 +1,676 @@
import sys
from enum import Enum, unique

from common.Constant import COLOR

sys.path.extend(['..', '../AIlib2'])
from segutils.segmodel import SegModel
from utilsK.queRiver import riverDetSegMixProcess
from segutils.trafficUtils import tracfficAccidentMixFunction
from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess

'''
Tuple fields
1. sequence number
2. model code
3. model name
4. selected model (weights directory) name
'''


@unique
class ModelType2(Enum):
WATER_SURFACE_MODEL = ("1", "001", "河道模型", 'river', lambda device, gpuName: {
'device': device,
'labelnames': ["排口", "水生植被", "其它", "漂浮物", "污染排口", "菜地", "违建", "岸坡垃圾"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/river/yolov5_%s_fp16.engine" % gpuName,
'detModelpara': [],
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225), 'numpy': False,
'RGB_convert_first': True,  # segmentation model preprocessing parameters
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [5, 6, 7],
'riverIou': 0.1
}
}
},
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'Segweights': '../AIlib2/weights/river/stdc_360X640_%s_fp16.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 80,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

FOREST_FARM_MODEL = ("2", "002", "森林模型", 'forest2', lambda device, gpuName: {
'device': device,
'labelnames': ["林斑", "病死树", "行人", "火焰", "烟雾"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/forest2/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 80,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

TRAFFIC_FARM_MODEL = ("3", "003", "交通模型", 'highWay2', lambda device, gpuName: {
'device': device,
'labelnames': ["行人", "车辆", "纵向裂缝", "横向裂缝", "修补", "网状裂纹", "坑槽", "块状裂纹", "积水", "事故"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/highWay2/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 3,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': tracfficAccidentMixFunction,
'pars': {
'RoadArea': 16000,
'modelSize': (640, 360),
'vehicleArea': 10,
'roadVehicleAngle': 15,
'speedRoadVehicleAngleMax': 75,
'roundness': 1.0,
'cls': 9,
'vehicleFactor': 0.1,
'confThres': 0.25,
'roadIou': 0.6,
'radius': 50,
'vehicleFlag': False,
'distanceFlag': False
}
}
},
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'Segweights': '../AIlib2/weights/highWay2/stdc_360X640_%s_fp16.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'txtFontSize': 20,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'waterLineColor': (0, 255, 255),
'segLineShow': False,
'waterLineWidth': 2
}
})

EPIDEMIC_PREVENTION_MODEL = ("4", "004", "防疫模型", None, None)

PLATE_MODEL = ("5", "005", "车牌模型", None, None)

VEHICLE_MODEL = ("6", "006", "车辆模型", 'vehicle', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/vehicle/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'waterLineColor': (0, 255, 255),
'segLineShow': False,
'waterLineWidth': 3
}
})

PEDESTRIAN_MODEL = ("7", "007", "行人模型", 'pedestrian', lambda device, gpuName: {
'device': device,
'labelnames': ["行人"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/pedestrian/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

SMOGFIRE_MODEL = ("8", "008", "烟火模型", 'smogfire', lambda device, gpuName: {
'device': device,
'labelnames': ["烟雾", "火焰"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/smogfire/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: {
'device': device,
'labelnames': ["钓鱼", "游泳"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/AnglerSwimmer/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
},
})

COUNTRYROAD_MODEL = ("10", "010", "乡村模型", 'countryRoad', lambda device, gpuName: {
'device': device,
'labelnames': ["违法种植"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/countryRoad/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

SHIP_MODEL = ("11", "011", "船只模型", 'ship2', lambda device, gpuName: {
'obbModelPar': {
'labelnames': ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"],
'model_size': (608, 608),
'K': 100,
'conf_thresh': 0.3,
'down_ratio': 4,
'num_classes': 15,
'dataset': 'dota',
'heads': {
'hm': None,
'wh': 10,
'reg': 2,
'cls_theta': 1
},
'mean': (0.5, 0.5, 0.5),
'std': (1, 1, 1),
'half': False,
'test_flag': True,
'decoder': None,
'weights': '../AIlib2/weights/ship2/obb_608X608_%s_fp16.engine' % gpuName
},
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'device': "cuda:%s" % device,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'drawBox': False,
'drawPar': {
"rainbows": COLOR,
'digitWordFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'wordSize': 40,
'fontSize': 1.0,
'label_location': 'leftTop'
}
},
'labelnames': ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "船只"]
})

BAIDU_MODEL = ("12", "012", "百度AI图片识别模型", None, None)

CHANNEL_EMERGENCY_MODEL = ("13", "013", "航道模型", 'channelEmergency', lambda device, gpuName: {
'device': device,
'labelnames': ["人"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/channelEmergency/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

RIVER2_MODEL = ("15", "015", "河道检测模型", 'river2', lambda device, gpuName: {
'device': device,
'labelnames': ["漂浮物", "岸坡垃圾", "排口", "违建", "菜地", "水生植物", "河湖人员", "钓鱼人员", "船只",
"蓝藻"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/river2/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225), 'numpy': False,
'RGB_convert_first': True,  # segmentation model preprocessing parameters
'mixFunction': {
'function': riverDetSegMixProcess,
'pars': {
'slopeIndex': [1, 3, 4, 7],
'riverIou': 0.1
}
}
},
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'Segweights': '../AIlib2/weights/river2/stdc_360X640_%s_fp16.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.3,
"ovlap_thres_crossCategory": 0.65,
"classes": 5,
"rainbows": COLOR
},
'txtFontSize': 80,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

CITY_MANGEMENT_MODEL = ("16", "016", "城管模型", 'cityMangement', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "垃圾", "商贩"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/cityMangement/yolov5_%s_fp16.engine" % gpuName,
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'seg_nclass': 2,
'segRegionCnt': 0,
'segPar': None,
'Segweights': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"ovlap_thres_crossCategory": 0.6,
"classes": 2,
"rainbows": COLOR
},
'txtFontSize': 40,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'segLineShow': False,
'waterLineColor': (0, 255, 255),
'waterLineWidth': 3
}
})

DROWING_MODEL = ("17", "017", "人员落水模型", 'drowning', lambda device, gpuName: {
'device': device,
'labelnames': ["人头", "人", "船只"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/drowning/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': mixDrowing_water_postprocess,
'pars': {
'modelSize': (640, 360),
}
}
},
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'Segweights': '../AIlib2/weights/drowning/stdc_360X640_%s_fp16.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'txtFontSize': 20,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'waterLineColor': (0, 255, 255),
'segLineShow': False,
'waterLineWidth': 2
}
})

NOPARKING_MODEL = (
"18", "018", "城市违章模型", 'noParking', lambda device, gpuName: {
'device': device,
'labelnames': ["车辆", "违停"],
'half': True,
'trtFlag_det': True,
'Detweights': "../AIlib2/weights/noParking/yolov5_%s_fp16.engine" % gpuName,
'seg_nclass': 4,
'segRegionCnt': 2,
'segPar': {
'trtFlag_seg': True,
'modelSize': (640, 360),
'mean': (0.485, 0.456, 0.406),
'std': (0.229, 0.224, 0.225),
'predResize': True,
'numpy': False,
'RGB_convert_first': True,
'mixFunction': {
'function': mixNoParking_road_postprocess,
'pars': {'modelSize': (640, 360), 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5,
'RoadArea': 16000}
}
},
'trackPar': {
'sort_max_age': 2,  # Maximum number of frames a target may go missing before its track is dropped; after that it is treated as a new target.
'sort_min_hits': 3,  # Number of consecutive appearances required before a detection is treated as a real target.
'sort_iou_thresh': 0.2,  # Minimum detection confidence / IOU threshold for matching.
'det_cnt': 5,  # Run detection and tracking every N frames; default 10.
'windowsize': 25,  # Trajectory smoothing window length; must be odd (default 29). A target's per-frame positions are smoothed over this window to form its trajectory.
'patchCnt': 100,  # Number of frames fed in per batch; should not be fewer than 100.
},
'Segweights': '../AIlib2/weights/noParking/stdc_360X640_%s_fp16.engine' % gpuName,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.25,
"classes": 9,
"rainbows": COLOR
},
'txtFontSize': 20,
'digitFont': {
'line_thickness': 2,
'boxLine_thickness': 1,
'fontSize': 1.0,
'waterLineColor': (0, 255, 255),
'segLineShow': False,
'waterLineWidth': 2
}
}
)

@staticmethod
def checkCode(code):
for model in ModelType2:
if model.value[1] == code:
return True
return False


'''
Field 1: detection target name
Field 2: detection target index
Field 3: lambda invoking the Baidu detection client
'''


@unique
class BaiduModelTarget2(Enum):
VEHICLE_DETECTION = (
"车辆检测", 0, lambda client0, client1, url, request_id: client0.vehicleDetectUrl(url, request_id))

HUMAN_DETECTION = (
"人体检测与属性识别", 1, lambda client0, client1, url, request_id: client1.bodyAttr(url, request_id))

PEOPLE_COUNTING = ("人流量统计", 2, lambda client0, client1, url, request_id: client1.bodyNum(url, request_id))


BAIDU_MODEL_TARGET_CONFIG2 = {
BaiduModelTarget2.VEHICLE_DETECTION.value[1]: BaiduModelTarget2.VEHICLE_DETECTION,
BaiduModelTarget2.HUMAN_DETECTION.value[1]: BaiduModelTarget2.HUMAN_DETECTION,
BaiduModelTarget2.PEOPLE_COUNTING.value[1]: BaiduModelTarget2.PEOPLE_COUNTING
}

EPIDEMIC_PREVENTION_CONFIG = {1: "行程码", 2: "健康码"}

# Model analysis modes
@unique
class ModelMethodTypeEnum2(Enum):
# Mode 1: normal recognition
NORMAL = 1

# Mode 2: tracking-based recognition
TRACE = 2
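
ModelType2 mirrors ModelType but adds the trackPar block consumed by the tracking pipeline (model_type 2 in the service config). Illustrative access, with the same example device/GPU values as above:

from enums.ModelTypeEnum2 import ModelType2

cfg = ModelType2.FOREST_FARM_MODEL.value[4]("0", "2080Ti")
print(cfg["trackPar"]["det_cnt"])   # 5 -> run detection/tracking every 5 frames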

+ 18
- 0
enums/RecordingStatusEnum.py Vedi File

@@ -0,0 +1,18 @@
from enum import Enum, unique


# Screen recording status enum
@unique
class RecordingStatus(Enum):
RECORDING_WAITING = ("5", "待录制")

RECORDING_RETRYING = ("10", "重试中")

RECORDING_RUNNING = ("15", "录制中")

RECORDING_SUCCESS = ("20", "录制完成")

RECORDING_TIMEOUT = ("25", "录制超时")

RECORDING_FAILED = ("30", "录制失败")

+ 33
- 0
enums/StatusEnum.py Vedi File

@@ -0,0 +1,33 @@
from enum import Enum, unique


@unique
class PushStreamStatus(Enum):
WAITING = (5, "待推流")

RETRYING = (10, "重试中")

RUNNING = (15, "推流中")

STOPPING = (20, "停止中")

SUCCESS = (25, "完成")

TIMEOUT = (30, "超时")

FAILED = (35, "失败")


@unique
class ExecuteStatus(Enum):
WAITING = (5, "待执行")

RUNNING = (10, "执行中")

STOPPING = (15, "停止中")

SUCCESS = (20, "执行完成")

TIMEOUT = (25, "超时")

FAILED = (30, "失败")
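
These statuses feed the pull_stream_feedback builder defined earlier in this commit; an illustrative per-URL status report (the ids are made up):

from entity.FeedBack import pull_stream_feedback
from enums.StatusEnum import ExecuteStatus, PushStreamStatus

video_info = [{"id": "cam-01", "status": PushStreamStatus.RUNNING.value[0]}]
fb = pull_stream_feedback("req-123", ExecuteStatus.RUNNING.value[0], videoInfo=video_info)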

+ 0
- 0
enums/__init__.py Vedi File


BIN
enums/__pycache__/AnalysisStatusEnum.cpython-38.pyc Vedi File


BIN
enums/__pycache__/AnalysisTypeEnum.cpython-38.pyc Vedi File


BIN
enums/__pycache__/BaiduSdkEnum.cpython-310.pyc Vedi File


BIN
enums/__pycache__/BaiduSdkEnum.cpython-38.pyc Vedi File


BIN
enums/__pycache__/ExceptionEnum.cpython-310.pyc Vedi File


BIN
enums/__pycache__/ExceptionEnum.cpython-38.pyc Vedi File


BIN
enums/__pycache__/ModelTypeEnum.cpython-38.pyc Vedi File


BIN
enums/__pycache__/RecordingStatusEnum.cpython-38.pyc Vedi File


BIN
enums/__pycache__/__init__.cpython-310.pyc Vedi File


BIN
enums/__pycache__/__init__.cpython-38.pyc Vedi File


+ 22
- 0
exception/CustomerException.py Vedi File

@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
from loguru import logger


"""
自定义异常
"""


class ServiceException(Exception): # 继承异常类
def __init__(self, code, msg, desc=None):
self.code = code
if desc is None:
self.msg = msg
else:
self.msg = msg % desc

def __str__(self):
logger.error("异常编码:{}, 异常描述:{}", self.code, self.msg)
# __str__ must return a string; return the code and message so str(exception) works.
return "code: {}, msg: {}".format(self.code, self.msg)




+ 0
- 0
exception/__init__.py Vedi File


BIN
exception/__pycache__/CustomerException.cpython-310.pyc Vedi File


BIN
exception/__pycache__/CustomerException.cpython-38.pyc Vedi File


BIN
exception/__pycache__/__init__.cpython-310.pyc Vedi File


BIN
exception/__pycache__/__init__.cpython-38.pyc Vedi File


+ 0
- 0
font/__init__.py Vedi File


BIN
font/simsun.ttc Vedi File


BIN
image/logo.png Vedi File

Width: 277  |  Height: 48  |  Size: 26KB

+ 392
- 0
service/Dispatcher.py Vedi File

@@ -0,0 +1,392 @@
# -*- coding: utf-8 -*-
import time
from os.path import join
from traceback import format_exc

from cerberus import Validator

from common.Constant import ONLINE_START_SCHEMA, ONLINE_STOP_SCHEMA, OFFLINE_START_SCHEMA, OFFLINE_STOP_SCHEMA, \
IMAGE_SCHEMA, RECORDING_START_SCHEMA, RECORDING_STOP_SCHEMA, PULL2PUSH_START_SCHEMA, PULL2PUSH_STOP_SCHEMA
from common.YmlConstant import service_yml_path, kafka_yml_path
from concurrency.FeedbackThread import FeedbackThread
from concurrency.IntelligentRecognitionProcess2 import OnlineIntelligentRecognitionProcess2, \
OfflineIntelligentRecognitionProcess2, PhotosIntelligentRecognitionProcess2
from concurrency.Pull2PushStreamProcess import PushStreamProcess
from entity.FeedBack import message_feedback, recording_feedback, pull_stream_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.AnalysisTypeEnum import AnalysisType
from enums.ExceptionEnum import ExceptionType
from enums.ModelTypeEnum import ModelMethodTypeEnum, ModelType
from enums.RecordingStatusEnum import RecordingStatus
from enums.StatusEnum import PushStreamStatus, ExecuteStatus
from exception.CustomerException import ServiceException
from loguru import logger
from multiprocessing import Queue
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, \
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess, ScreenRecordingProcess
from util.CpuUtils import print_cpu_ex_status
from util.FileUtils import create_dir_not_exist
from util.GPUtils import get_first_gpu_name, print_gpu_ex_status, check_cude_is_available
from util.KafkaUtils import CustomerKafkaConsumer
from util.QueUtil import put_queue
from util.RWUtils import getConfigs

'''
Dispatch service
'''


class DispatcherService:
__slots__ = ('__context', '__feedbackThread', '__listeningProcesses', '__fbQueue', '__topics', '__task_type',
'__kafka_config', '__recordingProcesses', '__pull2PushProcesses')

def __init__(self, base_dir, env):
# Check that CUDA is available
check_cude_is_available()
# Load the global context configuration
self.__context = getConfigs(join(base_dir, service_yml_path % env))
# Create the directory where task videos are saved
create_dir_not_exist(join(base_dir, self.__context["video"]["file_path"]))
# Store the base directory and environment in the context
self.__context["base_dir"], self.__context["env"] = base_dir, env

# Feedback thread and its queue
self.__feedbackThread, self.__fbQueue = None, Queue()
# Processes for online, offline and image tasks
self.__listeningProcesses = {}
# Processes for screen recording tasks
self.__recordingProcesses = {}
# Processes for pull-to-push streaming tasks
self.__pull2PushProcesses = {}
self.__kafka_config = getConfigs(join(base_dir, kafka_yml_path % env))
self.__topics = (
self.__kafka_config["topic"]["dsp-alg-online-tasks-topic"], # 实时监听topic
self.__kafka_config["topic"]["dsp-alg-offline-tasks-topic"], # 离线监听topic
self.__kafka_config["topic"]["dsp-alg-image-tasks-topic"], # 图片监听topic
self.__kafka_config["topic"]["dsp-recording-task-topic"], # 录屏监听topic
self.__kafka_config["topic"]["dsp-push-stream-task-topic"] # 推流监听topic
)
# 对应topic的各个lambda表达式
self.__task_type = {
self.__topics[0]: (AnalysisType.ONLINE.value, lambda x, y: self.online(x, y),
lambda x, y, z: self.identify_method(x, y, z)),
self.__topics[1]: (AnalysisType.OFFLINE.value, lambda x, y: self.offline(x, y),
lambda x, y, z: self.identify_method(x, y, z)),
self.__topics[2]: (AnalysisType.IMAGE.value, lambda x, y: self.image(x, y),
lambda x, y, z: self.identify_method(x, y, z)),
self.__topics[3]: (AnalysisType.RECORDING.value, lambda x, y: self.recording(x, y),
lambda x, y, z: self.recording_method(x, y, z)),
self.__topics[4]: (AnalysisType.PULLTOPUSH.value, lambda x, y: self.pullStream(x, y),
lambda x, y, z: self.push_stream_method(x, y, z))
}
gpu_name_array = get_first_gpu_name()
gpu_array = [g for g in ('3090', '2080', '4090', 'A10') if g in gpu_name_array]
gpu_name = '2080Ti'
if len(gpu_array) > 0:
if gpu_array[0] != '2080':
gpu_name = gpu_array[0]
else:
raise Exception("GPU资源不在提供的模型所支持的范围内!请先提供对应的GPU模型!")
logger.info("当前服务环境为: {}, 服务器GPU使用型号: {}", env, gpu_name)
self.__context["gpu_name"] = gpu_name
self.start_service()

# Start the service loop
def start_service(self):
# Initialize the Kafka consumer
customerKafkaConsumer = CustomerKafkaConsumer(self.__kafka_config, topics=self.__topics)
logger.info("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙")
while True:
try:
# Check task processes and drop the ones that have finished
self.check_process_task()
# Start the feedback thread
self.start_feedback_thread()
msg = customerKafkaConsumer.poll()
if msg is not None and len(msg) > 0:
for k, v in msg.items():
for m in v:
message = m.value
requestId = message.get("request_id")
if requestId is None:
logger.error("请求参数格式错误, 请检查请求体格式是否正确!")
continue
customerKafkaConsumer.commit_offset(m, requestId)
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}",
m.topic, m.offset, m.partition, message, requestId)
topic_method = self.__task_type[m.topic]
topic_method[2](topic_method[1], message, topic_method[0])
else:
print_gpu_ex_status()
print_cpu_ex_status(self.__context["base_dir"])
time.sleep(1)
except Exception:
logger.error("主线程异常:{}", format_exc())

def identify_method(self, handle_method, message, analysisType):
try:
check_cude_is_available()
handle_method(message, analysisType)
except ServiceException as s:
logger.error("消息监听异常:{}, requestId: {}", s.msg, message["request_id"])
put_queue(self.__fbQueue, message_feedback(message["request_id"], AnalysisStatus.FAILED.value, analysisType,
s.code, s.msg), timeout=1)
except Exception:
logger.error("消息监听异常:{}, requestId: {}", format_exc(), message["request_id"])
put_queue(self.__fbQueue, message_feedback(message["request_id"], AnalysisStatus.FAILED.value, analysisType,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), timeout=1)
finally:
del message

def push_stream_method(self, handle_method, message, analysisType):
try:
check_cude_is_available()
handle_method(message, analysisType)
except ServiceException as s:
logger.error("消息监听异常:{}, requestId: {}", s.msg, message['request_id'])
videoInfo = [{"id": url.get("id"), "status": PushStreamStatus.FAILED.value[0]} for url in
message.get("video_urls", []) if url.get("id") is not None]
put_queue(self.__fbQueue, pull_stream_feedback(message['request_id'], ExecuteStatus.FAILED.value[0],
s.code, s.msg, videoInfo), timeout=1)
except Exception:
logger.error("消息监听异常:{}, requestId: {}", format_exc(), message['request_id'])
videoInfo = [{"id": url.get("id"), "status": PushStreamStatus.FAILED.value[0]} for url in
message.get("video_urls", []) if url.get("id") is not None]
put_queue(self.__fbQueue, pull_stream_feedback(message.get("request_id"), ExecuteStatus.FAILED.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1], videoInfo),
timeout=1)
finally:
del message

def recording_method(self, handle_method, message, analysisType):
try:
check_cude_is_available()
handle_method(message, analysisType)
except ServiceException as s:
logger.error("消息监听异常:{}, requestId: {}", s.msg, message["request_id"])
put_queue(self.__fbQueue,
recording_feedback(message["request_id"], RecordingStatus.RECORDING_FAILED.value[0],
error_code=s.code, error_msg=s.msg), timeout=1)
except Exception:
logger.error("消息监听异常:{}, requestId: {}", format_exc(), message["request_id"])
put_queue(self.__fbQueue,
recording_feedback(message["request_id"], RecordingStatus.RECORDING_FAILED.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1]), timeout=1)
finally:
del message
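put_queue() is a project helper not shown in this diff; the wrappers above rely on it to enqueue feedback without blocking the dispatcher for longer than the given timeout. A minimal sketch under that assumption (it swallows queue.Full instead of crashing the caller):

    from queue import Queue, Full

    def put_queue_sketch(q, item, timeout=None):
        # Assumed behaviour: try to enqueue within `timeout` seconds and
        # drop the item if the queue stays full.
        try:
            q.put(item, timeout=timeout)
        except Full:
            pass

    fb_queue = Queue(maxsize=100)
    put_queue_sketch(fb_queue, {"request_id": "demo-1", "status": "FAILED"}, timeout=1)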

# Start a real-time (online) analysis process
def startOnlineProcess(self, msg, analysisType):
if self.__listeningProcesses.get(msg["request_id"]):
logger.warning("实时重复任务,请稍后再试!requestId:{}", msg["request_id"])
return
model_type = self.__context["service"]["model"]["model_type"]
codes = [model.get("code") for model in msg["models"] if model.get("code")]
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes:
coir = OnlineIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context)
else:
coir = OnlineIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context)
coir.start()
self.__listeningProcesses[msg["request_id"]] = coir

# Stop a real-time (online) analysis process
def stopOnlineProcess(self, msg):
ps = self.__listeningProcesses.get(msg["request_id"])
if ps is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"])
return
ps.sendEvent({"command": "stop"})

@staticmethod
def check_process(listeningProcess):
for requestId in list(listeningProcess.keys()):
if not listeningProcess[requestId].is_alive():
del listeningProcess[requestId]

def check_process_task(self):
self.check_process(self.__listeningProcesses)
self.check_process(self.__recordingProcesses)
self.check_process(self.__pull2PushProcesses)

# Start an offline analysis process
def startOfflineProcess(self, msg, analysisType):
if self.__listeningProcesses.get(msg["request_id"]):
logger.warning("离线重复任务,请稍后再试!requestId:{}", msg["request_id"])
return
model_type = self.__context["service"]["model"]["model_type"]
codes = [model.get("code") for model in msg["models"] if model.get("code")]
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes:
first = OfflineIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context)
else:
first = OfflineIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context)
first.start()
self.__listeningProcesses[msg["request_id"]] = first

# Stop an offline analysis process
def stopOfflineProcess(self, msg):
ps = self.__listeningProcesses.get(msg["request_id"])
if ps is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"])
return
ps.sendEvent({"command": "stop"})

# Start an image analysis process
def startImageProcess(self, msg, analysisType):
pp = self.__listeningProcesses.get(msg["request_id"])
if pp is not None:
logger.warning("重复任务,请稍后再试!requestId:{}", msg["request_id"])
return
model_type = self.__context["service"]["model"]["model_type"]
codes = [model.get("code") for model in msg["models"] if model.get("code")]
if ModelMethodTypeEnum.NORMAL.value == model_type or ModelType.ILLPARKING_MODEL.value[1] in codes:
imaged = PhotosIntelligentRecognitionProcess(self.__fbQueue, msg, analysisType, self.__context)
else:
imaged = PhotosIntelligentRecognitionProcess2(self.__fbQueue, msg, analysisType, self.__context)
# Create the recognition process and start it
imaged.start()
self.__listeningProcesses[msg["request_id"]] = imaged

'''
Validate a Kafka message against the given schema
'''

@staticmethod
def check_msg(msg, schema):
try:
v = Validator(schema, allow_unknown=True)
result = v.validate(msg)
if not result:
logger.error("参数校验异常: {}, requestId: {}", v.errors, msg["request_id"])
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
except ServiceException as s:
raise s
except Exception:
logger.error("参数校验异常: {}, requestId: {}", format_exc(), msg["request_id"])
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
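ONLINE_START_SCHEMA and the related schemas are defined in another module; a hypothetical schema of the same shape shows how the Cerberus Validator call above behaves:

    from cerberus import Validator

    # Hypothetical schema; the real ONLINE_START_SCHEMA is defined elsewhere in the project.
    DEMO_START_SCHEMA = {
        "request_id": {"type": "string", "required": True, "empty": False},
        "command": {"type": "string", "allowed": ["start", "stop"], "required": True},
        "models": {"type": "list", "required": True},
    }

    v = Validator(DEMO_START_SCHEMA, allow_unknown=True)
    ok = v.validate({"request_id": "demo-1", "command": "start", "models": [{"code": "001"}]})
    print(ok, v.errors)  # True {} when the message matches the schema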

'''
Start the feedback thread used to send result messages back to Kafka
'''

def start_feedback_thread(self):
if self.__feedbackThread is None:
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__kafka_config)
self.__feedbackThread.setDaemon(True)
self.__feedbackThread.start()
time.sleep(1)
if self.__feedbackThread and not self.__feedbackThread.is_alive():
logger.error("反馈线程异常停止, 开始重新启动反馈线程!!!!!")
self.__feedbackThread = FeedbackThread(self.__fbQueue, self.__kafka_config)
self.__feedbackThread.setDaemon(True)
self.__feedbackThread.start()
time.sleep(1)
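FeedbackThread (concurrency/FeedbackThread.py) is assumed to drain the feedback queue and publish each message to a Kafka feedback topic; a minimal sketch of such a daemon loop with kafka-python, where the topic name and constructor arguments are placeholders:

    import json
    from queue import Queue, Empty
    from threading import Thread
    from kafka import KafkaProducer

    class FeedbackThreadSketch(Thread):
        def __init__(self, fb_queue, bootstrap_servers):
            super().__init__(daemon=True)
            self.fb_queue = fb_queue
            self.producer = KafkaProducer(
                bootstrap_servers=bootstrap_servers,
                value_serializer=lambda m: json.dumps(m).encode("utf-8"))

        def run(self):
            while True:
                try:
                    feedback = self.fb_queue.get(timeout=1)
                except Empty:
                    continue
                # "dsp-feedback-topic" is a placeholder; the real topic comes from the kafka config.
                self.producer.send("dsp-feedback-topic", feedback)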

'''
Online analysis dispatch logic
'''

def online(self, message, analysisType):
if "start" == message.get("command"):
self.check_msg(message, ONLINE_START_SCHEMA)
if len(self.__listeningProcesses) >= int(self.__context['service']["task"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])
self.startOnlineProcess(message, analysisType)
elif "stop" == message.get("command"):
self.check_msg(message, ONLINE_STOP_SCHEMA)
self.stopOnlineProcess(message)
else:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])

def offline(self, message, analysisType):
if "start" == message.get("command"):
self.check_msg(message, OFFLINE_START_SCHEMA)
if len(self.__listeningProcesses) >= int(self.__context['service']["task"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])
self.startOfflineProcess(message, analysisType)
elif "stop" == message.get("command"):
self.check_msg(message, OFFLINE_STOP_SCHEMA)
self.stopOfflineProcess(message)
else:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])

def image(self, message, analysisType):
if "start" == message.get("command"):
self.check_msg(message, IMAGE_SCHEMA)
if len(self.__listeningProcesses) >= int(self.__context['service']["task"]["image"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])
self.startImageProcess(message, analysisType)
else:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])

def recording(self, message, analysisType):
if "start" == message.get("command"):
self.check_msg(message, RECORDING_START_SCHEMA)
if len(self.__recordingProcesses) >= int(self.__context['service']["task"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])
self.startRecordingProcess(message, analysisType)
elif "stop" == message.get("command"):
self.check_msg(message, RECORDING_STOP_SCHEMA)
self.stopRecordingProcess(message)
else:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])

# Start a screen-recording process
def startRecordingProcess(self, msg, analysisType):
# The duplicate-task check must use the recording-process table, not the analysis-process table
if self.__recordingProcesses.get(msg["request_id"]):
logger.warning("重复任务,请稍后再试!requestId:{}", msg["request_id"])
return
srp = ScreenRecordingProcess(self.__fbQueue, self.__context, msg, analysisType)
srp.start()
self.__recordingProcesses[msg["request_id"]] = srp

# Stop a screen-recording process
def stopRecordingProcess(self, msg):
rdp = self.__recordingProcesses.get(msg["request_id"])
if rdp is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"])
return
rdp.sendEvent({"command": "stop"})

def pullStream(self, message, analysisType):
if "start" == message.get("command"):
self.check_msg(message, PULL2PUSH_START_SCHEMA)
if len(self.__pull2PushProcesses) >= int(self.__context['service']["task"]["limit"]):
raise ServiceException(ExceptionType.NO_RESOURCES.value[0],
ExceptionType.NO_RESOURCES.value[1])

self.startPushStreamProcess(message, analysisType)
elif "stop" == message.get("command"):
self.check_msg(message, PULL2PUSH_STOP_SCHEMA)
self.stopPushStreamProcess(message)
else:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])

def startPushStreamProcess(self, msg, analysisType):
if self.__pull2PushProcesses.get(msg["request_id"]):
logger.warning("重复任务,请稍后再试!requestId:{}", msg["request_id"])
return
srp = PushStreamProcess(self.__fbQueue, self.__context, msg, analysisType)
srp.start()
self.__pull2PushProcesses[msg["request_id"]] = srp

# Stop a pull-to-push streaming process
def stopPushStreamProcess(self, msg):
srp = self.__pull2PushProcesses.get(msg["request_id"])
if srp is None:
logger.warning("未查询到该任务,无法停止任务!requestId:{}", msg["request_id"])
return
srp.sendEvent({"command": "stop", "videoIds": msg.get("video_ids", [])})
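sendEvent() delivers control messages such as {"command": "stop"} to a running worker; assuming events travel over a multiprocessing queue that the worker polls between iterations, the pattern looks roughly like:

    from multiprocessing import Process, Queue
    from queue import Empty
    import time

    class WorkerProcessSketch(Process):
        def __init__(self):
            super().__init__()
            self.event_queue = Queue()

        def sendEvent(self, event):
            self.event_queue.put(event)

        def run(self):
            while True:
                try:
                    event = self.event_queue.get_nowait()
                    if event.get("command") == "stop":
                        break  # a pull-to-push worker would also honour event.get("videoIds")
                except Empty:
                    pass
                time.sleep(0.1)  # one iteration of the real pull/push work would go here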

+ 0 - 0   service/__init__.py

BIN       service/__pycache__/Dispatcher.cpython-310.pyc

BIN       service/__pycache__/Dispatcher.cpython-38.pyc

BIN       service/__pycache__/__init__.cpython-310.pyc

BIN       service/__pycache__/__init__.cpython-38.pyc


+ 3 - 0   test/__init__.py

@@ -0,0 +1,3 @@

dd = {}
print(dd.get('name', 'aaa'))

BIN       test/__pycache__/__init__.cpython-38.pyc


+ 0 - 0   test/aliyun/__init__.py

BIN       test/aliyun/aaa.jpeg  (Width: 1000 | Height: 1000 | Size: 189KB)

+ 119 - 0   test/aliyun/ossdemo.py

@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
import datetime

import cv2
import oss2
import time

from loguru import logger

'''
Image upload via Alibaba Cloud OSS
1. OSS product page: https://help.aliyun.com/product/31815.html?spm=a2c4g.32006.0.0.8c546cf0BpkAQ2
2. OSS SDK examples: https://help.aliyun.com/document_detail/32006.html?spm=a2c4g.32006.0.0.66874b78q1pwLa
3. Python SDK installation guide: https://help.aliyun.com/document_detail/85288.html?spm=a2c4g.32026.0.0.3f24417coCphWj
4. Install the SDK: pip install oss2
5. Install python-devel
   The SDK uses the crcmod library to compute CRC checksums, and crcmod depends on Python.h. If that header is missing,
   installing the SDK still succeeds, but crcmod's C extension fails to build, so uploads and downloads become very slow.
   If the python-devel package is not present, install it first.
   On Windows and Mac OS X the Python headers are installed together with Python, so python-devel is not needed.
   On CentOS, RHEL and Fedora: sudo yum install python-devel
   On Debian and Ubuntu: sudo apt-get install python-dev
6. Image domain: https://image.t-aaron.com/
'''


class AliyunOssSdk:

def __init__(self):
self.__client = None
self.__access_key = 'LTAI5tMiefafZ6br4zmrQWv9'
self.__access_secret = 'JgzQjSCkwZ7lefZO6egOArw38YH1Tk'
self.__endpoint = 'http://oss-cn-shanghai.aliyuncs.com'
self.__bucket = 'ta-tech-image'

def get_oss_bucket(self):
if not self.__client:
auth = oss2.Auth(self.__access_key, self.__access_secret)
self.__client = oss2.Bucket(auth, self.__endpoint, self.__bucket, connect_timeout=30)

def upload_file(self, updatePath, fileByte):
logger.info("开始上传文件到oss!")
MAX_RETRIES = 3
retry_count = 0
while True:
try:
self.get_oss_bucket()
result = self.__client.put_object(updatePath, fileByte)
logger.info("上传文件到oss成功!")
return result
except Exception as e:
self.__client = None
retry_count += 1
time.sleep(1)
logger.info("上传文件到oss失败, 重试次数:{}", retry_count)
if retry_count > MAX_RETRIES:
logger.exception("上传文件到oss重试失败:{}", e)
raise e


YY_MM_DD_HH_MM_SS = "%Y-%m-%d %H:%M:%S"
YMDHMSF = "%Y%m%d%H%M%S%f"

def generate_timestamp():
"""根据当前时间获取时间戳,返回整数"""
return int(time.time())

def now_date_to_str(fmt=None):
if fmt is None:
fmt = YY_MM_DD_HH_MM_SS
return datetime.datetime.now().strftime(fmt)

if __name__ == "__main__":
# Initialize the OSS client
ossClient = AliyunOssSdk()
# Read a local image
image_frame = cv2.imread('aaa.jpeg')
or_result, or_image = cv2.imencode(".jpg", image_frame)
# Image naming convention
# 1. base_dir: base folder name, supplied by Tuoheng as a parameter
# 2. time_now: the current time
# 3. current_frame: the current frame number of the video
# 4. last_frame: when frames are skipped, the skip step; otherwise the same as current_frame
# 5. random_num: a random timestamp string
# 6. mode_type: "online" for live-stream analysis, "offline" for analysis of a recorded video URL
# 7. requestId: the request id, supplied by Tuoheng as a parameter
# 8. image_type: "OR" for the original frame, "AI" for the frame after AI recognition
random_num = now_date_to_str(YMDHMSF)
time_now = now_date_to_str("%Y-%m-%d-%H-%M-%S")
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type_{random_num}-{mode_type}-{base_dir}" \
"-{requestId}_{image_type}.jpg"
image_name = image_format.format(
base_dir='PWL202304141639429276',
time_now=time_now,
current_frame='0',
last_frame='0',
random_num=random_num,
mode_type='offline',
requestId='111111111111111111',
image_type='OR')
result = ossClient.upload_file(image_name, or_image.tobytes())
# print('http status: {0}'.format(result.status))
# # 请求ID。请求ID是本次请求的唯一标识,强烈建议在程序日志中添加此参数。
# print('request_id: {0}'.format(result.request_id))
# # ETag是put_object方法返回值特有的属性,用于标识一个Object的内容。
# print('ETag: {0}'.format(result.etag))
# # HTTP响应头部。
# print('date: {0}'.format(result.headers['date']))
# print(result.__reduce__())
# For image uploads, once the upload succeeds, simply hand image_name over to Tuoheng
# To check whether the image was uploaded successfully,
# the URL can be built by prefixing the image domain
image_url = 'https://image.t-aaron.com/' + image_name
print(image_url)
# Tuoheng only needs image_name
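As a side note, an upload can also be verified without the public image domain by asking oss2 whether the object exists and signing a temporary URL; the credentials and object key below are placeholders:

    import oss2

    auth = oss2.Auth('<access_key_id>', '<access_key_secret>')
    bucket = oss2.Bucket(auth, 'http://oss-cn-shanghai.aliyuncs.com', 'ta-tech-image')
    key = 'PWL202304141639429276/example_OR.jpg'  # placeholder object key
    if bucket.object_exists(key):
        # Signed GET URL valid for 60 seconds.
        print(bucket.sign_url('GET', key, 60))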


+ 128 - 0   test/aliyun/vod.py

@@ -0,0 +1,128 @@
# -*- coding: UTF-8 -*-
import json
import traceback
from aliyunsdkcore.client import AcsClient
from aliyunsdkvod.request.v20170321 import CreateUploadVideoRequest
from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest
from vodsdk.AliyunVodUtils import *
from vodsdk.AliyunVodUploader import AliyunVodUploader
from vodsdk.UploadVideoRequest import UploadVideoRequest

# Fill in the AccessKey information
def init_vod_client(accessKeyId, accessKeySecret):
regionId = 'cn-shanghai'  # VOD service access region
connectTimeout = 3  # connection timeout, in seconds
return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)

def create_upload_video(clt):
request = CreateUploadVideoRequest.CreateUploadVideoRequest()
request.set_Title('dddddd')
request.set_FileName('/home/thsw/chenyukun/video/111111.mp4')
request.set_Description('Video Description')
# CoverURL example: http://192.168.0.0/16/tps/TB1qnJ1PVXXXXXCXXXXXXXXXXXX-700-700.png
# request.set_CoverURL('<your Cover URL>')
# request.set_Tags('tag1,tag2')
# request.set_CateId(0)

# request.set_accept_format('JSON')
response = json.loads(clt.do_action_with_exception(request))
return response

try:
clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
uploadInfo = create_upload_video(clt)
print(json.dumps(uploadInfo, ensure_ascii=False, indent=4))

except Exception as e:
print(e)
print(traceback.format_exc())
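CreateUploadVideo responses normally carry a VideoId plus Base64-encoded UploadAddress and UploadAuth JSON blobs (treated here as an assumption about the VOD API); decoding the uploadInfo printed above would look roughly like:

    import base64
    import json

    def decode_upload_info(upload_info):
        # Assumes UploadAddress and UploadAuth are Base64-encoded JSON strings,
        # as returned by the VOD CreateUploadVideo API.
        return {
            "video_id": upload_info.get("VideoId"),
            "upload_address": json.loads(base64.b64decode(upload_info["UploadAddress"])),
            "upload_auth": json.loads(base64.b64decode(upload_info["UploadAuth"])),
        }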

# Refresh the audio/video upload credentials
# from aliyunsdkvod.request.v20170321 import RefreshUploadVideoRequest
# def refresh_upload_video(clt, videoId):
# request = RefreshUploadVideoRequest.RefreshUploadVideoRequest()
# request.set_VideoId(videoId)
# request.set_accept_format('JSON')
# return json.loads(clt.do_action_with_exception(request))
#
# try:
# clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# uploadInfo = refresh_upload_video(clt, "d6c419c33da245758f71e362b5ee8b56")
# print(json.dumps(uploadInfo, ensure_ascii=False, indent=4))
#
# except Exception as e:
# print(e)
# print(traceback.format_exc())
#
#
# # Get the playback URL
# def init_vod_client(accessKeyId, accessKeySecret):
# regionId = 'cn-shanghai' # 点播服务接入地域
# connectTimeout = 3 # 连接超时,单位为秒
# return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)
# def get_play_info(clt, videoId):
# request = GetPlayInfoRequest.GetPlayInfoRequest()
# request.set_accept_format('JSON')
# request.set_VideoId(videoId)
# request.set_AuthTimeout(3600*5)
# response = json.loads(clt.do_action_with_exception(request))
# return response
#
# try:
# clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# playInfo = get_play_info(clt, uploadInfo["VideoId"])
# print(json.dumps(playInfo, ensure_ascii=False, indent=4))
#
# except Exception as e:
# print(e)
# print(traceback.format_exc())
#
# # Get the video playback auth token
# from aliyunsdkvod.request.v20170321 import GetVideoPlayAuthRequest
# def get_video_playauth(clt, videoId):
# request = GetVideoPlayAuthRequest.GetVideoPlayAuthRequest()
# request.set_accept_format('JSON')
# request.set_VideoId(videoId)
# request.set_AuthInfoTimeout(3000)
# response = json.loads(clt.do_action_with_exception(request))
# return response
#
# try:
# clt = init_vod_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
# playAuth = get_video_playauth(clt, uploadInfo["VideoId"])
# print(json.dumps(playAuth, ensure_ascii=False, indent=4))
#
# except Exception as e:
# print(e)
# print(traceback.format_exc())







# accessKeyId='LTAI5tSJ62TLMUb4SZuf285A'
# accessKeySecret='MWYynm30filZ7x0HqSHlU3pdLVNeI7'
# filePath="/home/thsw/chenyukun/video/111111.mp4"
# # Test uploading a local audio/video file
# def testUploadLocalVideo(accessKeyId, accessKeySecret, filePath, storageLocation=None):
# try:
# # 可以指定上传脚本部署的ECS区域。如果ECS区域和视频点播存储区域相同,则自动使用内网上传,上传更快且更省公网流量。
# # ecsRegionId ="cn-shanghai"
# # uploader = AliyunVodUploader(accessKeyId, accessKeySecret, ecsRegionId)
# # 不指定上传脚本部署的ECS区域。
# uploader = AliyunVodUploader(accessKeyId, accessKeySecret)
# uploadVideoRequest = UploadVideoRequest(filePath, 'aiOnLineVideo')
# # 可以设置视频封面,如果是本地或网络图片可使用UploadImageRequest上传图片到视频点播,获取到ImageURL
# #ImageURL示例:https://example.com/sample-****.jpg
# #uploadVideoRequest.setCoverURL('<your Image URL>')
# # 标签
# # uploadVideoRequest.setTags('taa')
# if storageLocation:
# uploadVideoRequest.setStorageLocation(storageLocation)
# videoId = uploader.uploadLocalVideo(uploadVideoRequest)
# print("videoId: %s" % (videoId))
#
# except AliyunVodException as e:
# print(e)
# testUploadLocalVideo(accessKeyId, accessKeySecret, filePath)

+ 0 - 0   test/aliyun/vodTest.py


Since many files were changed in this diff, some of them are not shown.
