
dsp: support image analysis

tags/V2.4.0
chenyukun 1 year ago
parent commit f37e9a4ce1
54 changed files with 1731 additions and 1433 deletions
  1. +0 -96    concurrency/CommonProcess.py
  2. +39 -0    concurrency/FeedbackThread.py
  3. +10 -10   concurrency/FileUpdateThread.py
  4. +31 -34   concurrency/HeartbeatThread.py
  5. +536 -291 concurrency/IntelligentRecognitionProcess.py
  6. +0 -61    concurrency/MessagePollingThread.py
  7. +250 -0   concurrency/PullVideoStreamProcess.py
  8. BIN       concurrency/__pycache__/CommonProcess.cpython-38.pyc
  9. BIN       concurrency/__pycache__/HeartbeatThread.cpython-38.pyc
  10. BIN      concurrency/__pycache__/MessagePollingThread.cpython-38.pyc
  11. +27 -66  dsp_application.yml
  12. +9 -2    enums/AnalysisTypeEnum.py
  13. +4 -0    enums/ExceptionEnum.py
  14. +151 -109 service/Dispatcher.py
  15. +0 -11   test/Producer2.py
  16. +0 -0    test/aliyun/__init__.py
  17. +0 -0    test/aliyun/vod.py
  18. +0 -0    test/aliyun/vodTest.py
  19. +0 -0    test/aliyun/vodtest1.py
  20. +0 -0    test/aliyun/vodtest2.py
  21. +0 -147  test/experimental.py
  22. +179 -0  test/ffmpeg11/aa.py
  23. +0 -0    test/ffmpeg11/cv2test.py
  24. +0 -0    test/ffmpeg11/cv2test1.py
  25. +22 -14  test/ffmpeg11/ffmpeg11.py
  26. +0 -0    test/ffmpeg11/ffmpeg2.py
  27. +0 -0    test/ffmpeg11/ffmpeg3.py
  28. +0 -0    test/gpu/__init__.py
  29. +0 -0    test/gpu/gputest.py
  30. +0 -0    test/gpu/gputest1.py
  31. +0 -0    test/kafka/__init__.py
  32. +149 -0  test/kafka/producer_start.py
  33. +0 -0    test/kafka/producer_stop.py
  34. BIN      test/minio1/__pycache__/minio_test.cpython-310.pyc
  35. +31 -0   test/minio1/minio_test.py
  36. +0 -9    test/mysqltest.py
  37. +0 -103  test/producer_start.py
  38. +0 -20   test/read.py
  39. +0 -47   test/test1.py
  40. +0 -318  test/torch_utils.py
  41. +24 -0   test/协程/协程.py
  42. +13 -0   test/协程/协程1.py
  43. +49 -0   test/协程/协程2.py
  44. +10 -0   test/协程/协程3.py
  45. +0 -0    test/字典/__init__.py
  46. +3 -0    test/字典/字典.py
  47. +0 -0    test/水印/same1.py
  48. +0 -0    test/水印/same2.py
  49. +0 -0    test/水印/same3.py
  50. +18 -0   test/装饰器/装饰器.py
  51. +5 -3    util/AliyunSdk.py
  52. +65 -34  util/Cv2Utils.py
  53. +8 -1    util/ImageUtils.py
  54. +98 -57  util/KafkaUtils.py

+0 -96  concurrency/CommonProcess.py

@@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
import json
from multiprocessing import Process, Queue
from util import LogUtils
from util import KafkaUtils
from loguru import logger
from concurrency.FileUpdateThread import ImageFileUpdate
from concurrency.HeartbeatThread import Heartbeat
import time


class CommonProcess(Process):
def __init__(self, fbQueue, hbQueue, content, msg, imageQueue, mode_service):
super().__init__()
self.fbQueue = fbQueue
self.hbQueue = hbQueue
self.content = content
self.msg = msg
self.mode_service = mode_service
self.imageQueue = imageQueue


def getFeedback(self):
eBody = None
try:
eBody = self.fbQueue.get(block=False)
except Exception as e:
pass
return eBody

# 推送执行结果
def sendImageResult(self, result):
while self.imageQueue.full():
logger.info("图片上传队列已满, 2秒后重试! requestId:{}", self.msg.get("request_id"))
time.sleep(2)
self.imageQueue.put(result)

def run(self):
# 初始化日志配置
LogUtils.init_log(self.content)
logger.info("心跳、图片上传,反馈进程开始执行, requestId:{}", self.msg.get("request_id"))
# 启动心跳线程
hb = Heartbeat(self.fbQueue, self.hbQueue, self.msg.get("request_id"), self.mode_service)
hb.setDaemon(True)
hb.start()
# 图片上传线程
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, self.mode_service)
imageFileUpdate.setDaemon(True)
imageFileUpdate.start()
kafkaProducer = KafkaUtils.CustomerKafkaProducer(self.content, self.msg.get("request_id"))
# 心跳线程检测
heartbeat_num = 0
# 图片上传线程检测
imageFileUpdate_num = 0
while True:
try:
if heartbeat_num == 0 and not hb.is_alive():
logger.error("未检测到心跳线程活动,心跳线程可能出现异常, reuqestId:{}", self.msg.get("request_id"))
break
if imageFileUpdate_num == 0 and not imageFileUpdate.is_alive():
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id"))
break
fb = self.getFeedback()
if fb is not None and len(fb) > 0:
feedback = fb.get("feedback")
command = fb.get("command")
if feedback is not None and len(feedback) > 0:
kafkaProducer.get_producer()
kafkaProducer.sender(self.content["kafka"]["topic"]["dsp-alg-results-topic"],
feedback["request_id"], feedback, 1)
if command is not None and len(command) > 0:
# 接收心跳线程和图片上传停止指令
if 'stop_heartbeat_imageFileUpdate' == command:
heartbeat_num += 1
imageFileUpdate_num += 1
hb.run_status = False
self.sendImageResult({"command": "stop"})
hb.join(60 * 5)
imageFileUpdate.join(60 * 5)
# 接收进程停止指令
if 'stop' == command:
heartbeat_num += 1
imageFileUpdate_num += 1
hb.run_status = False
self.sendImageResult({"command": "stop"})
hb.join(60 * 5)
imageFileUpdate.join(60 * 5)
break
else:
time.sleep(1)
except Exception as e:
logger.exception("结果反馈异常:{}, requestId:{}", e, self.msg.get("request_id"))
logger.info("心跳、图片上传,反馈进程执行完成, requestId:{}", self.msg.get("request_id"))




+39 -0  concurrency/FeedbackThread.py

@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
import time
from threading import Thread
from loguru import logger
from util import KafkaUtils

'''
问题反馈线程
'''


class FeedbackThread(Thread):

def __init__(self, fbQueue, content):
super().__init__()
self.fbQueue = fbQueue
self.content = content

def getFeedback(self):
return self.fbQueue.get()

def run(self):
logger.info("启动问题反馈线程")
kafkaProducer = KafkaUtils.CustomerKafkaProducer(self.content)
while True:
logger.info("问题反馈发送消息循环")
feedback = {}
try:
fb = self.getFeedback()
if fb is not None and len(fb) > 0:
feedback = fb.get("feedback")
if feedback is not None and len(feedback) > 0:
kafkaProducer.sender(self.content["kafka"]["topic"]["dsp-alg-results-topic"],
feedback["request_id"], feedback, 1)
else:
time.sleep(1)
except Exception as e:
logger.exception("问题反馈异常:{}, requestId:{}", e, feedback.get("request_id"))
logger.info("问题反馈进程执行完成")

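The new FeedbackThread decouples Kafka delivery from the worker processes: anything that needs to report a result puts a {"feedback": ...} dict on the shared fbQueue, and this single thread forwards it to dsp-alg-results-topic. A minimal sketch of the producing side, assuming fbQueue is the multiprocessing.Queue handed to the thread and reusing the message_feedback helper this commit already imports elsewhere:

from multiprocessing import Queue
from entity.FeedBack import message_feedback
from util import TimeUtils

fbQueue = Queue()

def report_status(request_id, status, mode_service):
    # FeedbackThread only forwards entries whose "feedback" value is non-empty.
    fbQueue.put({"feedback": message_feedback(request_id, status, mode_service,
                                              analyse_time=TimeUtils.now_date_to_str())})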
+10 -10  concurrency/FileUpdateThread.py

@@ -1,3 +1,4 @@
import asyncio
import time
from threading import Thread
from loguru import logger
@@ -33,16 +34,14 @@ class FileUpdate(Thread):
self.fbQueue.put(result)


def build_image_name(base_dir, time_now, current_frame, last_frame, descrition, random_num, mode_type,
requestId, image_type):
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_type-{descrition}_{random_num}-{" \
"mode_type}-{base_dir}-{requestId}_{image_type}.jpg"
def build_image_name(base_dir, time_now, current_frame, last_frame, random_num, mode_type, requestId, image_type):
image_format = "{base_dir}/{time_now}_frame-{current_frame}-{last_frame}_{random_num}-{mode_type}-{base_dir}" \
"-{requestId}_{image_type}.jpg"
image_name = image_format.format(
base_dir=base_dir,
time_now=time_now,
current_frame=current_frame,
last_frame=last_frame,
descrition=descrition,
random_num=random_num,
mode_type=mode_type,
requestId=requestId,
@@ -56,6 +55,8 @@ class ImageFileUpdate(FileUpdate):
logger.info("开始启动图片上传线程, requestId:{}", self.msg.get("request_id"))
aliyunOssSdk = AliyunOssSdk(self.content, logger, self.msg.get("request_id"))
aliyunOssSdk.get_oss_bucket()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
while True:
try:
image_msg = self.getImageQueue()
@@ -76,21 +77,19 @@ class ImageFileUpdate(FileUpdate):
or_image_name = build_image_name(self.msg.get('results_base_dir'), time_now,
str(image_dict.get("current_frame")),
str(image_dict.get("last_frame")),
image_dict.get("model_detection_code"),
random_num,
image_dict.get("mode_service"),
self.msg.get('request_id'), "OR")
ai_image_name = build_image_name(self.msg.get('results_base_dir'), time_now,
str(image_dict.get("current_frame")),
str(image_dict.get("last_frame")),
image_dict.get("model_detection_code"),
random_num,
image_dict.get("mode_service"),
self.msg.get('request_id'), "AI")

task = loop.create_task(aliyunOssSdk.upload_file(or_image_name, or_image.tobytes()))
task1 = loop.create_task(aliyunOssSdk.upload_file(ai_image_name, ai_image.tobytes()))
loop.run_until_complete(asyncio.wait([task, task1]))
# 上传原图片
aliyunOssSdk.upload_file(or_image_name, or_image.tobytes())
aliyunOssSdk.upload_file(ai_image_name, ai_image.tobytes())
# aliyunOssSdk.upload_file(or_image_name, Image.fromarray(np.uint8(or_image)).tobytes())
# aliyunOssSdk.upload_file(ai_image_name, Image.fromarray(np.uint8(ai_image)).tobytes())
# 发送kafka消息
@@ -105,4 +104,5 @@ class ImageFileUpdate(FileUpdate):
TimeUtils.now_date_to_str())})
except Exception as e:
logger.exception("图片上传异常:{}, requestId:{}", e, self.msg.get("request_id"))
loop.close()
logger.info("结束图片上传线程, requestId:{}", self.msg.get("request_id"))

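This diff moves the OR/AI image uploads onto a per-thread asyncio event loop so both objects go to OSS concurrently instead of back to back. The pattern, reduced to a runnable sketch (the upload coroutine below is a stand-in for aliyunOssSdk.upload_file, which this example does not depend on):

import asyncio

async def upload(name, data):
    await asyncio.sleep(0)  # placeholder for the real OSS upload coroutine
    print("uploaded", name, len(data), "bytes")

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
    # Mirrors the thread body above: schedule both uploads, then drive the
    # loop until both tasks have finished.
    task = loop.create_task(upload("OR.jpg", b"\xff\xd8"))
    task1 = loop.create_task(upload("AI.jpg", b"\xff\xd8"))
    loop.run_until_complete(asyncio.wait([task, task1]))
finally:
    loop.close()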
+31 -34  concurrency/HeartbeatThread.py

@@ -1,22 +1,20 @@
# -*- coding: utf-8 -*-
from queue import Queue
from threading import Thread
import time
from loguru import logger
from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus
from entity.FeedBack import message_feedback
from enums.AnalysisTypeEnum import AnalysisType


class Heartbeat(Thread):
def __init__(self, fbQueue, hbQueue, request_id, mode_service):
def __init__(self, fbQueue, request_id, mode_service):
super().__init__()
self.fbQueue = fbQueue
self.hbQueue = hbQueue
self.hbQueue = Queue()
self.request_id = request_id
self.mode_service = mode_service
self.run_status = True
self.progress = "0.0000"

def getHbQueue(self):
eBody = None
@@ -30,35 +28,34 @@ class Heartbeat(Thread):
def sendResult(self, result):
self.fbQueue.put(result)

def sendHbQueue(self, result):
self.hbQueue.put(result)

def sendhbMessage(self, analysisStatus):
self.sendResult({"feedback": message_feedback(self.request_id,
analysisStatus,
self.mode_service,
progress='',
analyse_time=TimeUtils.now_date_to_str())})

def run(self):
logger.info("开始启动心跳线程!requestId:{}", self.request_id)
# 发送waiting状态信息
feedback = message_feedback(self.request_id, AnalysisStatus.WAITING.value, self.mode_service,
analyse_time=TimeUtils.now_date_to_str())
self.sendResult({"feedback": feedback})
time.sleep(5)
num = 0
while self.run_status:
hb_value = self.getHbQueue()
if hb_value is not None:
cf = hb_value.get("cf")
af = hb_value.get("af")
if cf is not None and af is not None:
self.progress = str(format(float(cf) / float(af), '.4f'))
else:
num += 3
time.sleep(3)
else:
if num % 30 == 0:
if self.mode_service == AnalysisType.OFFLINE.value:
feedback = message_feedback(self.request_id, AnalysisStatus.RUNNING.value, self.mode_service,
progress=self.progress,
analyse_time=TimeUtils.now_date_to_str())
if self.mode_service == AnalysisType.ONLINE.value:
feedback = message_feedback(self.request_id, AnalysisStatus.RUNNING.value, self.mode_service,
analyse_time=TimeUtils.now_date_to_str())
self.sendResult({"feedback": feedback})
num += 3
time.sleep(3)

hb_init_num = 0
while True:
try:
time.sleep(2)
hb_msg = self.getHbQueue()
if hb_msg is not None and len(hb_msg) > 0:
command = hb_msg.get("command")
hb_value = hb_msg.get("hb_value")
if 'stop' == command:
logger.info("开始终止心跳线程, requestId:{}", self.request_id)
break
if hb_value is not None:
self.progress = hb_value
if hb_init_num % 20 == 0:
self.sendhbMessage(AnalysisStatus.RUNNING.value)
hb_init_num += 2
except Exception as e:
logger.exception("心跳线程异常:{}, requestId:{}", e, self.request_id)
logger.info("心跳线程停止完成!requestId:{}", self.request_id)

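The reworked Heartbeat now owns its queue and understands exactly two message shapes: {"hb_value": "<progress>"} updates the progress it reports, and {"command": "stop"} ends the loop; with the 2-second sleep and hb_init_num += 2, a RUNNING feedback goes out roughly every 20 seconds. A usage sketch from the caller's side, with an illustrative request id:

from multiprocessing import Queue
from concurrency.HeartbeatThread import Heartbeat

fbQueue = Queue()
hb = Heartbeat(fbQueue, "req-123", "online")  # request id/mode are illustrative
hb.setDaemon(True)
hb.start()

hb.sendHbQueue({"hb_value": "0.2500"})  # progress picked up on the next tick
hb.sendHbQueue({"command": "stop"})     # ask the thread to exit
hb.join(60 * 3)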
+536 -291  concurrency/IntelligentRecognitionProcess.py

@@ -1,23 +1,32 @@
# -*- coding: utf-8 -*-
import asyncio
import os
import time
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed

import cv2

from common import Constant
from multiprocessing import Process, Queue
from loguru import logger

from concurrency.FileUpdateThread import build_image_name
from concurrency.HeartbeatThread import Heartbeat
from concurrency.PullVideoStreamProcess import OnlinePullVideoStreamProcess, OfflinePullVideoStreamProcess
from entity import FeedBack
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.AnalysisTypeEnum import AnalysisType
from enums.ExceptionEnum import ExceptionType
from enums.ModelTypeEnum import ModelType
from util import LogUtils, TimeUtils, ModelUtils, ImageUtils
from util.AliyunSdk import AliyunOssSdk
from util.Cv2Utils import Cv2Util
from entity.FeedBack import message_feedback
from util import AliyunSdk
from concurrency.CommonThread import Common
from concurrency.CommonProcess import CommonProcess
from exception.CustomerException import ServiceException
from util.ImageUtils import PictureWaterMark
from util.ImageUtils import PictureWaterMark, url2Array


class IntelligentRecognitionProcess(Process):
@@ -26,54 +35,99 @@ class IntelligentRecognitionProcess(Process):
# 初始化日志
self.fbQueue = cfg.get("fbQueue")
self.eventQueue = Queue()
self.imageQueue = Queue()
self.pullQueue = Queue(120)
self.content = cfg.get("content")
self.msg = cfg.get("msg")
self.imageQueue = cfg.get("imageQueue")
self.gpu_ids = cfg.get("gpu_ids")
self.pic = PictureWaterMark()
self.type = cfg.get("type")
# 定义原视频、AI视频保存名称
random_time = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
self.orFilePath = "%s%s%s%s%s" % (self.content["video"]["file_path"], random_time, "_on_or_",
self.msg.get("request_id"), ".mp4")
self.aiFilePath = "%s%s%s%s%s" % (self.content["video"]["file_path"], random_time, "_on_ai_",
self.msg.get("request_id"), ".mp4")
self.step = int(self.content["service"]["frame_step"])
self.service_timeout = int(self.content["service"]["timeout"])
self.frame_score = float(self.content["service"]["frame_score"])
self.picture_similarity = self.content["service"]["filter"]["picture_similarity"]
self.similarity = self.content["service"]["filter"]["similarity"]

# 给本进程发送事件
# 给本进程发送事件
def sendEvent(self, eBody):
self.eventQueue.put(eBody)

# 获取下一个事件
def getEvent(self):
eBody = None
try:
eBody = self.eventQueue.get(block=False)
return eBody
except Exception as e:
pass
return eBody

def getPullQueue(self):
eBody = None
try:
eBody = self.pullQueue.get(block=False)
return eBody
except Exception as e:
pass
return eBody

# 推送执行结果
def sendResult(self, result):
self.fbQueue.put(result)

def sendhbMessage(self, analysisStatus, progress, mode_service):
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"),
analysisStatus,
mode_service,
progress=progress,
analyse_time=TimeUtils.now_date_to_str())})


'''
实时任务进程
'''


def process(frame):
try:
p_result, timeOut = frame[1].process(copy.deepcopy(frame[0].get("frame")), int(frame[0].get("width") / 2))
if frame[2]["video"]["video_add_water"]:
frame[0]["frame"] = frame[3].common_water_1(frame[0].get("frame"), frame[3].logo)
p_result[1] = frame[3].common_water_1(p_result[1], frame[3].logo)
frame_merge = frame[4].video_merge(frame[0].get("frame"), p_result[1])
return p_result, frame, frame_merge
except Exception as e:
logger.exception("模型分析异常: {}", e)
return None


class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):

def __init__(self, cfg):
super(OnlineIntelligentRecognitionProcess, self).__init__(cfg)
# 定义原视频、AI视频保存名称
random_time = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
self.orFilePath = "%s%s%s%s%s" % (self.content["video"]["file_path"], random_time, "_on_or_",
self.msg.get("request_id"), ".mp4")
self.aiFilePath = "%s%s%s%s%s" % (self.content["video"]["file_path"], random_time, "_on_ai_",
self.msg.get("request_id"), ".mp4")
self.pull_stream_timeout = int(self.content["service"]["cv2_pull_stream_timeout"])

# 停止任务方法
def stop_task(self, cv2tool, orFilePath, aiFilePath, snalysisStatus):
def stop_task(self, cv2tool, pullProcess, snalysisStatus):
# 停止cv2相关配置
cv2tool.close()
if not os.path.exists(orFilePath) or not os.path.exists(aiFilePath):
if not os.path.exists(self.orFilePath) or not os.path.exists(self.aiFilePath):
logger.error("原视频或AI视频不存在!requestId:{}", self.msg.get("request_id"))
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
raise ServiceException(ExceptionType.VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.VIDEO_ADDRESS_EXCEPTION.value[1])
params1 = (orFilePath, "orOnLineVideo", self.content, logger, self.msg.get("request_id"))
hb = Heartbeat(self.fbQueue, self.msg.get("request_id"), AnalysisType.ONLINE.value)
hb.setDaemon(True)
hb.start()
params1 = (self.orFilePath, "orOnLineVideo", self.content, logger, self.msg.get("request_id"))
or_update_thread = Common(content=self.content, func=AliyunSdk.get_play_url, args=params1)
params2 = (aiFilePath, "aiOnLineVideo", self.content, logger, self.msg.get("request_id"))
params2 = (self.aiFilePath, "aiOnLineVideo", self.content, logger, self.msg.get("request_id"))
ai_update_thread = Common(content=self.content, func=AliyunSdk.get_play_url, args=params2)
or_update_thread.setDaemon(True)
ai_update_thread.setDaemon(True)
@@ -86,7 +140,10 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
ai_play_url, self.msg.get("request_id"))
raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
hb.sendHbQueue({"command": "stop"})
hb.join(60 * 3)
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), snalysisStatus,
AnalysisType.ONLINE.value,
progress=Constant.success_progess,
@@ -96,163 +153,158 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):

def run(self):
cv2tool = None
commonProcess = None
feedback = None
pullProcess = None
loop = None
try:

# 程序开始时间
start_time = time.time()
start_time_1 = time.time()
start_time_2 = time.time()
LogUtils.init_log(self.content)
self.sendhbMessage(AnalysisStatus.WAITING.value, "0.0000", AnalysisType.ONLINE.value)
mod, model_type_code = get_model((str(self.gpu_ids[0]), self.msg["models"]))
# 结果反馈进程启动
commonProcess = CommonProcess(self.fbQueue, None, self.content, self.msg, self.imageQueue, self.type)
commonProcess.daemon = True
commonProcess.start()
cv2tool = Cv2Util(self.msg.get('pull_url'), self.msg.get('push_url'), self.orFilePath, self.aiFilePath,
pullProcess = OnlinePullVideoStreamProcess(self.msg, self.content, self.pullQueue, self.fbQueue,
self.imageQueue)
pullProcess.daemon = True
pullProcess.start()
cv2tool = Cv2Util(None, self.msg.get('push_url'), self.orFilePath, self.aiFilePath,
self.msg.get("request_id"))

cv2_init_num = 1
cv2_init1_num = 1
high_score_image = {}
step = int(self.content["service"]["frame_step"])
concurrent_frame = 1
service_timeout = int(self.content["service"]["timeout"])
while True:
end_time = time.time()
create_task_time = int(end_time - start_time)
if create_task_time > service_timeout:
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id"))
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
if not commonProcess.is_alive():
logger.info("图片上传、心跳、问题反馈进程停止异常, requestId: {}", self.msg.get("request_id"))
raise Exception("图片上传、心跳、问题反馈进程异常停止")
eBody = self.getEvent()
if eBody is not None and len(eBody) > 0:
cmdStr = eBody.get("command")
# 接收到停止指令
if 'stop' == cmdStr:
if high_score_image is not None and len(high_score_image) > 0:

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
pullProcess_timeout = None
task_frame = None
self.sendhbMessage(AnalysisStatus.RUNNING.value, "0.0000", AnalysisType.ONLINE.value)
with ThreadPoolExecutor(max_workers=6) as t:
while True:
if not pullProcess.is_alive():
if pullProcess_timeout is None:
pullProcess_timeout = time.time()
if time.time() - pullProcess_timeout > 300:
logger.info("拉流进程停止异常, requestId: {}", self.msg.get("request_id"))
raise Exception("拉流进程异常停止")
eBody = self.getEvent()
if eBody is not None and len(eBody) > 0:
cmdStr = eBody.get("command")
# 接收到停止指令
if 'stop' == cmdStr:
logger.info("实时任务开始停止, requestId: {}", self.msg.get("request_id"))
pullProcess.sendCommand({"command": "stop_pull_stream"})
frames = []
status = None
if task_frame is not None:
frames, status = task_frame.result()
task_frame = t.submit(buildFrame, self.pullQueue, cv2tool, mod, self.content, self.pic)
if frames is not None and len(frames) > 0:
for result in t.map(process, frames):
if result is not None:
p_result, frame_all, frame_merge = result
task = loop.create_task(cv2tool.push_stream(frame_merge))
task1 = loop.create_task(cv2tool.video_write(frame_all[0].get("frame"), frame_merge))
loop.run_until_complete(asyncio.wait([task, task1]))
if frame_all[0].get("cct_frame") % 400 == 0:
self.sendhbMessage(AnalysisStatus.RUNNING.value, '', AnalysisType.ONLINE.value)
# # 问题图片加入队列, 暂时写死,后期修改为真实问题
if p_result[2] is not None and len(p_result[2]) > 0:
for ai_analyse_result in p_result[2]:
order = str(int(ai_analyse_result[0]))
high_result = high_score_image.get(order)
conf_c = ai_analyse_result[5]
if high_result is None and conf_c >= self.frame_score:
high_score_image[order] = {
"or_frame": frame_all[0].get("frame"),
"ai_frame": p_result[1],
"current_frame": frame_all[0].get("cct_frame"),
"last_frame": frame_all[0].get("cct_frame") + self.step,
"progress": "",
"mode_service": "online",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
else:
if conf_c >= self.frame_score and conf_c > high_result.get("socre"):
high_score_image[order] = {
"or_frame": frame_all[0].get("frame"),
"ai_frame": p_result[1],
"current_frame": frame_all[0].get("cct_frame"),
"last_frame": frame_all[0].get("cct_frame") + self.step,
"progress": "",
"mode_service": "online",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
if frame_all[0].get("cct_frame") % self.step == 0 and len(high_score_image) > 0:
if self.picture_similarity:
for key in list(high_score_image.keys()):
hash1 = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
hash2 = ImageUtils.dHash(p_result[1])
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity = 1 - dist * 1.0 / 64
if similarity < self.similarity:
self.imageQueue.put({"image": high_score_image.pop(key)})
else:
for value in high_score_image.values():
self.imageQueue.put({"image": value})
high_score_image.clear()

else:
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image.pop(key)})
pullProcess.sendCommand({"command": "stop_ex"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.TIMEOUT.value)
break
if status is None:
continue
if status.get("status") == "1":
raise ServiceException(status.get("error").get("code"), status.get("error").get("msg"))
elif status.get("status") == "3":
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image[key]})
del high_score_image[key]
logger.info("实时任务开始停止, requestId: {}", self.msg.get("request_id"))
self.stop_task(cv2tool, self.orFilePath, self.aiFilePath, AnalysisStatus.SUCCESS.value)
self.imageQueue.put({"image": high_score_image.pop(key)})
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.TIMEOUT.value)
break
if cv2tool.checkconfig() or cv2tool.pull_p is None:
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
pull_stream_init_timeout = time.time() - start_time_1
if pull_stream_init_timeout > int(self.content["service"]["cv2_pull_stream_timeout"]):
logger.info("开始拉流超时, 超时时间: {}, requestId:{}", pull_stream_init_timeout,
self.msg.get("request_id"))
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(1)
cv2tool.get_video_info()
cv2tool.build_pull_p()
continue
start_time_1 = time.time()
cv2_init_num = 1
frame = cv2tool.read()
if frame is None:
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", cv2_init1_num, self.msg.get("request_id"))
pull_stream_read_timeout = time.time() - start_time_2
if pull_stream_read_timeout > int(self.content["service"]["cv2_read_stream_timeout"]):
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout, self.msg.get("request_id"))
if high_score_image is not None and len(high_score_image) > 0:
elif status.get("status") == "9":
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image[key]})
del high_score_image[key]
self.stop_task(cv2tool, self.orFilePath, self.aiFilePath, AnalysisStatus.TIMEOUT.value)
self.imageQueue.put({"image": high_score_image.pop(key)})
logger.info("实时任务正常结束:requestId: {}", self.msg.get("request_id"))
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.SUCCESS.value)
break
cv2_init1_num += 1
time.sleep(0.5)
cv2tool.build_pull_p()
continue
cv2_init1_num = 1
start_time_2 = time.time()
# time00 = time.time()
# 调用AI模型
p_result, timeOut = mod.process(copy.deepcopy(frame), int(cv2tool.width/2))
# time11 = time.time()
# if time11 - time00 > 1:
# logger.info("算法模型调度时间:{}s, requestId:{}", int(time11-time00), self.msg.get("request_id"))
# AI推流
if self.content["video"]["video_add_water"]:
frame = self.pic.common_water_1(frame, self.pic.logo)
p_result[1] = self.pic.common_water_1(p_result[1], self.pic.logo)
frame_merge = cv2tool.video_merge(frame, p_result[1])
cv2tool.push_stream(frame_merge)
cv2tool.video_write(frame, frame_merge)
# # 问题图片加入队列, 暂时写死,后期修改为真实问题
if p_result[2] is not None and len(p_result[2]) > 0:
for ai_analyse_result in p_result[2]:
order = str(int(ai_analyse_result[0]))
high_result = high_score_image.get(order)
conf_c = ai_analyse_result[5]
if high_result is None and conf_c >= float(self.content["service"]["frame_score"]):
high_score_image[order] = {
"or_frame": frame,
"ai_frame": p_result[1],
"current_frame": concurrent_frame,
"last_frame": concurrent_frame + step,
"progress": "",
"mode_service": "online",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
else:
if conf_c >= float(self.content["service"]["frame_score"]) and conf_c > high_result.get("socre"):
high_score_image[order] = {
"or_frame": frame,
"ai_frame": p_result[1],
"current_frame": concurrent_frame,
"last_frame": concurrent_frame + step,
"progress": "",
"mode_service": "online",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
if concurrent_frame % step == 0 and len(high_score_image) > 0:
if self.content["service"]["filter"]["picture_similarity"]:
for key in list(high_score_image.keys()):
hash1 = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
hash2 = ImageUtils.dHash(p_result[1])
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity = 1 - dist * 1.0 / 64
if similarity < self.content["service"]["filter"]["similarity"]:
self.imageQueue.put({"image": high_score_image[key]})
del high_score_image[key]
else:
for value in high_score_image.values():
self.imageQueue.put({"image": value})
high_score_image.clear()
concurrent_frame += 1
logger.info("实时进程任务完成,requestId:{}", self.msg.get("request_id"))
logger.info("实时进程任务完成,requestId:{}", self.msg.get("request_id"))
except ServiceException as s:
self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId: {}", s.code, s.msg, self.msg.get("request_id"))
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.ONLINE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())})
feedback = {"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.ONLINE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())}
except Exception as e:
self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
logger.exception("服务异常: {}, requestId: {},", e, self.msg.get("request_id"))
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.ONLINE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())})
feedback = {"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.ONLINE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}
finally:
if cv2tool is not None:
if cv2tool:
cv2tool.close()
self.sendResult({"command": "stop"})
commonProcess.join(60 * 5)
if loop:
loop.close()
if pullProcess is not None and pullProcess.is_alive():
pullProcess.sendCommand({"command": "stop_ex"})
pullProcess.join(60 * 3)
if feedback:
self.sendResult(feedback)
# 删除本地视频文件
if self.orFilePath is not None and os.path.exists(self.orFilePath):
logger.info("开始删除原视频, orFilePath: {}, requestId: {}", self.orFilePath, self.msg.get("request_id"))
@@ -264,15 +316,52 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", self.aiFilePath, self.msg.get("request_id"))


def getPullResultQueue(pullQueue):
eBody = None
try:
eBody = pullQueue.get(block=False)
return eBody
except Exception as e:
pass
return eBody


def buildFrame(pullQueue, cv2tool, mod, content, pic):
frames = []
status = None
for i in range(pullQueue.qsize()):
frame_result = getPullResultQueue(pullQueue)
if frame_result is None:
time.sleep(0.01)
continue
if frame_result.get("status") == '4':
cv2tool.getFrameConfig(int(frame_result.get("fps")), int(frame_result.get("width")),
int(frame_result.get("height")))
frames.append((frame_result, mod, content, pic, cv2tool))
else:
status = frame_result
return frames, status


class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):

def stop_task(self, cv2tool, aiFilePath, analysisStatus):
def __init__(self, cfg):
super(OfflineIntelligentRecognitionProcess, self).__init__(cfg)
# 定义原视频、AI视频保存名称
random_time = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
self.aiFilePath = "%s%s%s%s%s" % (self.content["video"]["file_path"], random_time, "_on_ai_",
self.msg.get("request_id"), ".mp4")

def stop_task(self, cv2tool, pullProcess, analysisStatus):
cv2tool.close()
if not os.path.exists(aiFilePath):
if not os.path.exists(self.aiFilePath):
logger.error("AI视频不存在!requestId:{}", self.msg.get("request_id"))
raise ServiceException(ExceptionType.VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.VIDEO_ADDRESS_EXCEPTION.value[1])
params2 = (aiFilePath, "aiOffLineVideo", self.content, logger, self.msg.get("request_id"))
hb = Heartbeat(self.fbQueue, self.msg.get("request_id"), AnalysisType.OFFLINE.value)
hb.setDaemon(True)
hb.start()
params2 = (self.aiFilePath, "aiOffLineVideo", self.content, logger, self.msg.get("request_id"))
ai_update_thread = Common(content=self.content, func=AliyunSdk.get_play_url, args=params2)
ai_update_thread.setDaemon(True)
ai_update_thread.start()
@@ -282,7 +371,10 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
self.msg.get("request_id"), ai_play_url)
raise ServiceException(ExceptionType.GET_VIDEO_URL_EXCEPTION.value[0],
ExceptionType.GET_VIDEO_URL_EXCEPTION.value[1])
self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(180)
hb.sendHbQueue({"command": "stop"})
hb.join(180)
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), analysisStatus,
AnalysisType.OFFLINE.value,
progress=Constant.success_progess,
@@ -291,156 +383,172 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):

def run(self):
cv2tool = None
commonProcess = None
pullProcess = None
loop = None
feedback = None
try:
# 程序开始时间
start_time = time.time()
LogUtils.init_log(self.content)
self.sendhbMessage(AnalysisStatus.WAITING.value, "0.0000", AnalysisType.OFFLINE.value)
mod, model_type_code = get_model((str(self.gpu_ids[0]), self.msg["models"]))
hbQueue = Queue()
# 结果反馈进程启动
commonProcess = CommonProcess(self.fbQueue, hbQueue, self.content, self.msg, self.imageQueue, self.type)
commonProcess.daemon = True
commonProcess.start()
cv2tool = Cv2Util(self.msg.get('original_url'), self.msg.get('push_url'), aiFilePath=self.aiFilePath,
pullProcess = OfflinePullVideoStreamProcess(self.msg, self.content, self.pullQueue, self.fbQueue,
self.imageQueue)
pullProcess.daemon = True
pullProcess.start()
cv2tool = Cv2Util(None, self.msg.get('push_url'), aiFilePath=self.aiFilePath,
requestId=self.msg.get("request_id"))
# cv2重试初始化次数
cv2_init_num = 0

high_score_image = {}
step = int(self.content["service"]["frame_step"])
# 当前帧数
concurrent_frame = 1
cv2tool.get_video_info()
while True:
end_time = time.time()
create_task_time = end_time - start_time
if create_task_time > int(self.content["service"]["timeout"]):
logger.error("分析超时,分析超时时间: {}s, requestId: {}", create_task_time, self.msg.get("request_id"))
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
if not commonProcess.is_alive():
logger.info("图片上传、心跳、问题反馈进程异常停止, requestId: {}", self.msg.get("request_id"))
raise Exception("图片上传、心跳、问题反馈进程异常停止")

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
pullProcess_timeout = None
self.sendhbMessage(AnalysisStatus.RUNNING.value, "", AnalysisType.OFFLINE.value)
task_frame = None
with ThreadPoolExecutor(max_workers=6) as t:
while True:
start = time.time()
if not pullProcess.is_alive():
if pullProcess_timeout is None:
pullProcess_timeout = time.time()
if time.time() - pullProcess_timeout > 300:
logger.info("拉流进程停止异常, requestId: {}", self.msg.get("request_id"))
raise Exception("拉流进程异常停止")
# 检查是否获取到视频信息
eBody = self.getEvent()
if eBody is not None and len(eBody) > 0:
cmdStr = eBody.get("command")
if 'stop' == cmdStr:
if high_score_image is not None and len(high_score_image) > 0:
eBody = self.getEvent()
if eBody is not None and len(eBody) > 0:
cmdStr = eBody.get("command")
if 'stop' == cmdStr:
logger.info("离线任务开始停止分析, requestId: {}", self.msg.get("request_id"))
pullProcess.sendCommand({"command": "stop_pull_stream"})
frames = []
status = None
if task_frame is not None:
frames, status = task_frame.result()
task_frame = t.submit(buildFrame, self.pullQueue, cv2tool, mod, self.content, self.pic)
logger.info("帧数:{}, requestId: {}", len(frames), self.msg.get("request_id"))
if len(frames) > 0:
for result in t.map(process, frames):
if result is not None:
p_result, frame_all, frame_merge = result
task = loop.create_task(cv2tool.push_stream(frame_merge))
task1 = loop.create_task(cv2tool.video_write(None, frame_merge))
loop.run_until_complete(asyncio.wait([task, task1]))
if frame_all[0].get("cct_frame") % 600 == 0:
task_process = str(format(
float(frame_all[0].get("cct_frame")) / float(frame_all[0].get("all_frame")),
'.4f'))
self.sendhbMessage(AnalysisStatus.RUNNING.value, task_process,
AnalysisType.OFFLINE.value)
if p_result[2] is not None and len(p_result[2]) > 0:
for ai_analyse_result in p_result[2]:
order = str(int(ai_analyse_result[0]))
high_result = high_score_image.get(order)
conf_c = ai_analyse_result[5]
if high_result is None and conf_c >= self.frame_score:
high_score_image[order] = {
"or_frame": frame_all[0].get("frame"),
"ai_frame": p_result[1],
"current_frame": frame_all[0].get("cct_frame"),
"last_frame": frame_all[0].get("cct_frame") + self.step,
"progress": "",
"mode_service": "offline",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
else:
if conf_c >= self.frame_score and conf_c > high_result.get("socre"):
high_score_image[order] = {
"or_frame": frame_all[0].get("frame"),
"ai_frame": p_result[1],
"current_frame": frame_all[0].get("cct_frame"),
"last_frame": frame_all[0].get("cct_frame") + self.step,
"progress": "",
"mode_service": "offline",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
if frame_all[0].get("cct_frame") % self.step == 0 and len(high_score_image) > 0:
if self.picture_similarity:
for key in list(high_score_image.keys()):
hash1 = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
hash2 = ImageUtils.dHash(p_result[1])
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity = 1 - dist * 1.0 / 64
if similarity < self.similarity:
self.imageQueue.put({"image": high_score_image.pop(key)})
else:
for value in high_score_image.values():
self.imageQueue.put({"image": value})
high_score_image.clear()

else:
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image.pop(key)})
pullProcess.sendCommand({"command": "stop_ex"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.TIMEOUT.value)
break

logger.info("执行时间1111: {}, 队列大小:{}, requestId: {}", time.time() - start, self.pullQueue.qsize(), self.msg.get("request_id"))
if status is None:
continue
if status.get("status") == "1":
raise ServiceException(status.get("error").get("code"), status.get("error").get("msg"))
elif status.get("status") == "2":
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image.pop(key)})
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.SUCCESS.value)
break
elif status.get("status") == "3":
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image[key]})
del high_score_image[key]
logger.info("离线任务开始停止分析, requestId: {}", self.msg.get("request_id"))
self.stop_task(cv2tool, self.aiFilePath, AnalysisStatus.SUCCESS.value)
self.imageQueue.put({"image": high_score_image.pop(key)})
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.TIMEOUT.value)
break
if cv2tool.checkconfig() or cv2tool.pull_p is None:
logger.info("视频信息获取次数:{}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
if cv2_init_num >= 3:
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(0.5)
cv2tool.get_video_info()
cv2tool.build_pull_p()
continue
frame = cv2tool.read()
if frame is None:
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", cv2tool.all_frames,
concurrent_frame, self.msg.get("request_id"))
if high_score_image is not None and len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image[key]})
del high_score_image[key]
if concurrent_frame < cv2tool.all_frames - 100:
logger.info("离线异常结束:requestId: {}", self.msg.get("request_id"))
self.stop_task(cv2tool, self.aiFilePath, AnalysisStatus.TIMEOUT.value)
elif status.get("status") == "9":
if len(high_score_image) > 0:
for key in list(high_score_image.keys()):
self.imageQueue.put({"image": high_score_image.pop(key)})
logger.info("实时任务正常结束:requestId: {}", self.msg.get("request_id"))
pullProcess.sendCommand({"command": "stop_image"})
pullProcess.join(60 * 3)
self.stop_task(cv2tool, pullProcess, AnalysisStatus.SUCCESS.value)
break
logger.info("任务开始结束分析, requestId: {}", self.msg.get("request_id"))
self.stop_task(cv2tool, self.aiFilePath, AnalysisStatus.SUCCESS.value)
break
#time00 = time.time()
# 调用AI模型
p_result, timeOut = mod.process(copy.deepcopy(frame), int(cv2tool.width/2))
#logger.info("算法模型调度时间:{}s, requestId:{}", time.time() - time00, self.msg.get("request_id"))
# 原视频保存本地、AI视频保存本地
if self.content["video"]["video_add_water"]:
frame = self.pic.common_water_1(frame, self.pic.logo)
p_result[1] = self.pic.common_water_1(p_result[1], self.pic.logo)
frame_merge = cv2tool.video_merge(frame, p_result[1])
cv2tool.push_stream(frame_merge)
cv2tool.video_write(None, frame_merge)
if concurrent_frame % 400 == 0:
hbQueue.put({"cf": concurrent_frame, "af": cv2tool.all_frames})
if p_result[2] is not None and len(p_result[2]) > 0:
for ai_analyse_result in p_result[2]:
order = str(int(ai_analyse_result[0]))
high_result = high_score_image.get(order)
conf_c = ai_analyse_result[5]
if high_result is None and conf_c >= float(self.content["service"]["frame_score"]):
high_score_image[order] = {
"or_frame": frame,
"ai_frame": p_result[1],
"current_frame": concurrent_frame,
"last_frame": concurrent_frame + step,
"progress": "",
"mode_service": "offline",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
else:
if conf_c >= float(self.content["service"]["frame_score"]) and conf_c > high_result.get("socre"):
high_score_image[order] = {
"or_frame": frame,
"ai_frame": p_result[1],
"current_frame": concurrent_frame,
"last_frame": concurrent_frame + step,
"progress": "",
"mode_service": "offline",
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
if concurrent_frame % step == 0 and len(high_score_image) > 0:
if self.content["service"]["filter"]["picture_similarity"]:
for key in list(high_score_image.keys()):
hash1 = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
hash2 = ImageUtils.dHash(p_result[1])
dist = ImageUtils.Hamming_distance(hash1, hash2)
similarity = 1 - dist * 1.0 / 64
if similarity < self.content["service"]["filter"]["similarity"]:
self.imageQueue.put({"image": high_score_image[key]})
del high_score_image[key]
else:
for value in high_score_image.values():
self.imageQueue.put({"image": value})
high_score_image.clear()
hbQueue.put({"cf": concurrent_frame, "af": cv2tool.all_frames})
concurrent_frame += 1
# logger.info("分析总时间{},requestId:{}", time.time()-end_time,self.msg.get("request_id"))
logger.info("离线进程任务完成,requestId:{}", self.msg.get("request_id"))
except ServiceException as s:
self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
logger.error("服务异常,异常编号:{}, 异常描述:{}, requestId:{}", s.code, s.msg, self.msg.get("request_id"))
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.OFFLINE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())})
feedback = {"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.OFFLINE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())}
except Exception as e:
self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
logger.exception("服务异常: {}, requestId:{}", e, self.msg.get("request_id"))
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.OFFLINE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())})
feedback = {"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.OFFLINE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}
finally:
if cv2tool is not None:
cv2tool.close()
self.sendResult({"command": "stop"})
commonProcess.join(60*5)
if loop:
loop.close()
if pullProcess is not None and pullProcess.is_alive():
pullProcess.sendCommand({"command": "stop_ex"})
pullProcess.join(60 * 3)
if feedback is not None:
self.sendResult(feedback)
# 删除本地视频文件
if self.aiFilePath is not None and os.path.exists(self.aiFilePath):
logger.info("开始删除AI视频, aiFilePath: {}, requestId: {}", self.aiFilePath, self.msg.get("request_id"))
@@ -448,8 +556,145 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
logger.info("删除AI视频成功, aiFilePath: {}, requestId: {}", self.aiFilePath, self.msg.get("request_id"))


def image_recognition(imageUrl, mod, content, pic, msg, fbQueue, model_type_code):
loop = None
try:
image = url2Array(imageUrl)
# 调用AI模型
p_result, timeOut = mod.process(copy.deepcopy(image), int(image.shape[1]))
# logger.info("算法模型调度时间:{}s, requestId:{}", time.time() - time00, self.msg.get("request_id"))
# 原视频保存本地、AI视频保存本地
if content["video"]["video_add_water"]:
frame = pic.common_water_1(image, pic.logo)
p_result[1] = pic.common_water_1(p_result[1], pic.logo)
aliyunOssSdk = AliyunOssSdk(content, logger, msg.get("request_id"))
aliyunOssSdk.get_oss_bucket()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
or_image = cv2.imencode(".jpg", frame)[1]
ai_image = cv2.imencode(".jpg", p_result[1])[1]
# 定义上传图片名称
random_num = TimeUtils.now_date_to_str(TimeUtils.YMDHMSF)
time_now = TimeUtils.now_date_to_str("%Y-%m-%d-%H-%M-%S")
# 图片名称待后期修改
or_image_name = build_image_name(msg.get('results_base_dir'), time_now,
str(0),
str(0),
random_num,
'image',
msg.get('request_id'), "OR")
ai_image_name = build_image_name(msg.get('results_base_dir'), time_now,
str(0),
str(0),
random_num,
'image',
msg.get('request_id'), "AI")
if p_result[2] is not None and len(p_result[2]) > 0:
loop.run_until_complete(asyncio.wait([upload_file(aliyunOssSdk, or_image_name, or_image),
upload_file(aliyunOssSdk, ai_image_name, ai_image)]))
for ai_analyse_result in p_result[2]:
order = str(int(ai_analyse_result[0]))
conf_c = ai_analyse_result[5]
# 图片帧数编码
# 上传原图片
# loop.run_until_complete(asyncio.gather(upload_file(aliyunOssSdk, or_image_name, or_image),
# upload_file(aliyunOssSdk, ai_image_name, ai_image_name)))

# aliyunOssSdk.upload_file(or_image_name, or_image.tobytes())
# aliyunOssSdk.upload_file(ai_image_name, ai_image.tobytes())
# 发送kafka消息
fbQueue.put({"feedback": FeedBack.message_feedback(msg.get('request_id'),
AnalysisStatus.RUNNING.value,
AnalysisType.IMAGE.value, "", "",
'',
or_image_name,
ai_image_name,
model_type_code,
order,
TimeUtils.now_date_to_str())})
return True
# else:
# fbQueue.put({"feedback": FeedBack.message_feedback(msg.get('request_id'),
# AnalysisStatus.RUNNING.value,
# AnalysisType.IMAGE.value, "", "",
# '',
# or_image_name,
# ai_image_name,
# model_type_code,
# 'None',
# TimeUtils.now_date_to_str())})
except Exception as e:
logger.exception("模型分析异常: {}, requestId: {}", e, msg.get("request_id"))
return False
finally:
if loop is not None:
loop.close()


async def upload_file(aliyunOssSdk, image_name, image):
try:
await aliyunOssSdk.upload_file(image_name, image.tobytes())
except Exception as e:
logger.exception("阿里云上传文件失败: {}", e)


'''
图片识别
'''


class PhotosIntelligentRecognitionProcess(IntelligentRecognitionProcess):
pass

def run(self):
try:
# 初始化日志
LogUtils.init_log(self.content)
# 发送开始识别等待
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"),
AnalysisStatus.WAITING.value,
AnalysisType.IMAGE.value,
progress='0.0000',
analyse_time=TimeUtils.now_date_to_str())})
# 加载模型
mod, model_type_code = get_model((str(self.gpu_ids[0]), self.msg["models"]))
imageUrls = self.msg.get("image_urls")
result = True
with ThreadPoolExecutor(max_workers=5) as t:
obj_list = []
for imageUrl in imageUrls:
obj = t.submit(image_recognition, imageUrl, mod, self.content, self.pic, self.msg,
self.fbQueue, model_type_code)
obj_list.append(obj)
for future in as_completed(obj_list):
data = future.result()
if not data:
result = False
if result:
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.SUCCESS.value,
AnalysisType.IMAGE.value,
progress=Constant.success_progess,
analyse_time=TimeUtils.now_date_to_str())})
else:
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.IMAGE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())})
logger.info("图片进程任务完成,requestId:{}", self.msg.get("request_id"))
except ServiceException as s:
logger.error("图片分析异常,异常编号:{}, 异常描述:{}, requestId:{}", s.code, s.msg, self.msg.get("request_id"))
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.IMAGE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())})
except Exception as e:
logger.exception("图片分析异常: {}, requestId:{}", e, self.msg.get("request_id"))
self.sendResult({"feedback": message_feedback(self.msg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.IMAGE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())})


'''

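Both run loops above throttle near-duplicate alarm frames with a 64-bit difference hash: similarity = 1 - Hamming(hash1, hash2) / 64, and a buffered high-score image is flushed to imageQueue only when similarity drops below the configured threshold. A self-contained sketch of that filter; dhash and hamming are reimplemented here to approximate ImageUtils.dHash and ImageUtils.Hamming_distance, whose exact internals this diff does not show:

import cv2

def dhash(image, size=8):
    # 8x9 grayscale thumbnail -> compare horizontal neighbours -> 64 bits
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (size + 1, size))
    bits = (small[:, 1:] > small[:, :-1]).flatten()
    return "".join("1" if b else "0" for b in bits)

def hamming(h1, h2):
    return sum(c1 != c2 for c1, c2 in zip(h1, h2))

def should_flush(saved_ai_frame, current_ai_frame, threshold):
    dist = hamming(dhash(saved_ai_frame), dhash(current_ai_frame))
    similarity = 1 - dist / 64.0
    return similarity < threshold  # flush only once the scene has changed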
+0 -61  concurrency/MessagePollingThread.py

@@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
import time
from loguru import logger
from queue import Queue
from threading import Thread
from util import KafkaUtils

'''
实时、离线消息拉取线程
'''


class MessagePollingThread(Thread):
# 实时流分析消息拉取线程
def __init__(self, name, cfg):
super().__init__()
self.name = name
self.cfg = cfg
self.msgQueue = Queue(1)

def getMsgQueue(self):
eBody = None
try:
eBody = self.msgQueue.get(block=False)
except Exception as e:
pass
return eBody

def run(self):
logger.info("{} 线程任务开始执行", self.name)
# 指令消息消费
customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.cfg["content"])
customerKafkaConsumer.subscribe(topics=self.cfg["topics"])
while True:
try:
customerKafkaConsumer.subscribe(topics=self.cfg["topics"])
# 拉取消息问题 1:后面运行会吃力,建议每次一条一拉
msg = customerKafkaConsumer.customerConsumer.poll()
if msg is not None and len(msg) > 0:
for k, v in msg.items():
for m in v:
logger.info("添加拉取消息")
self.msgQueue.put(m.value)
customerKafkaConsumer.commit_offset(m, topics=self.cfg["topics"])
except Exception as e:
logger.exception("消息监听异常: {}", e)
customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.cfg["content"])
time.sleep(1)

def poll(self):
return self.getMsgQueue()


class OnlineMessagePollingThread(MessagePollingThread):
# 实时流分析消息拉取线程
pass


class OfflineMessagePollingThread(MessagePollingThread):
# 实时流分析消息拉取线程
pass

+250 -0  concurrency/PullVideoStreamProcess.py

@@ -0,0 +1,250 @@
# -*- coding: utf-8 -*-
import time
from multiprocessing import Process, Queue

from loguru import logger

from concurrency.FileUpdateThread import ImageFileUpdate
from enums.AnalysisTypeEnum import AnalysisType
from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException
from util.Cv2Utils import Cv2Util


class PullVideoStreamProcess(Process):
def __init__(self, msg, content, pullQueue, fbQueue, imageQueue):
super().__init__()
self.command = Queue()
self.msg = msg
self.content = content
self.pullQueue = pullQueue
self.fbQueue = fbQueue
self.imageQueue = imageQueue
self.step = int(self.content["service"]["frame_step"])
self.pull_stream_timeout = int(self.content["service"]["cv2_pull_stream_timeout"])
self.read_stream_timeout = int(self.content["service"]["cv2_read_stream_timeout"])
self.service_timeout = int(self.content["service"]["timeout"])


def getCommand(self):
eBody = None
try:
eBody = self.command.get(block=False)
except Exception as e:
pass
return eBody

def sendCommand(self, result):
self.command.put(result)

def sendPullQueue(self, result):
self.pullQueue.put(result)

def sendImageResult(self, result):
self.imageQueue.put(result)


class OnlinePullVideoStreamProcess(PullVideoStreamProcess):

def run(self):
cv2tool = None
imageFileUpdate = None
try:
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, AnalysisType.ONLINE.value)
imageFileUpdate.setDaemon(True)
imageFileUpdate.start()
logger.info("开启视频拉流线程, requestId:{}", self.msg.get("request_id"))
cv2tool = Cv2Util(self.msg.get('pull_url'), requestId=self.msg.get("request_id"))
cv2_init_num = 1
init_pull_num = 1
start_time = time.time()
start_time_1 = time.time()
start_time_2 = time.time()
concurrent_frame = 1
stop_imageFile = False
while True:
end_time = time.time()
create_task_time = int(end_time - start_time)
if create_task_time > self.service_timeout:
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id"))
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
if not imageFileUpdate.is_alive():
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id"))
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")
body = self.getCommand()
if body is not None and len(body) > 0:
if 'stop_pull_stream' == body.get("command"):
self.sendPullQueue({"status": "9"}) # 9 停止拉流
stop_imageFile = True
cv2tool.close()
continue
if 'stop_image' == body.get("command"):
self.sendImageResult({"command": "stop"})
imageFileUpdate.join(60*3)
logger.error("图片线程停止完成, reuqestId:{}", self.msg.get("request_id"))
break
if 'stop_ex' == body.get("command"):
self.sendImageResult({"command": "stop"})
imageFileUpdate.join(60*3)
self.pullQueue.cancel_join_thread()
logger.error("图片线程停止完成, reuqestId:{}", self.msg.get("request_id"))
break
if stop_imageFile:
time.sleep(1)
continue
if self.pullQueue.full():
time.sleep(0.1)
continue
if cv2tool.checkconfig() or cv2tool.pull_p is None:
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
pull_stream_init_timeout = time.time() - start_time_1
if pull_stream_init_timeout > self.pull_stream_timeout:
logger.info("开始拉流超时, 超时时间:{}, requestId:{}", pull_stream_init_timeout,
self.msg.get("request_id"))
raise ServiceException(ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[0],
ExceptionType.PULLSTREAM_TIMEOUT_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(2)
cv2tool.get_video_info()
cv2tool.build_pull_p()
continue
start_time_1 = time.time()
cv2_init_num = 1
frame = cv2tool.read()
if frame is None:
logger.info("获取帧为空, 开始重试: {}次, requestId: {}", init_pull_num, self.msg.get("request_id"))
pull_stream_read_timeout = time.time() - start_time_2
if pull_stream_read_timeout > self.read_stream_timeout:
logger.info("拉流过程中断了重试超时, 超时时间: {}, requestId: {}", pull_stream_read_timeout,
self.msg.get("request_id"))
self.sendPullQueue({"status": "3"}) # 3 超时
stop_imageFile = True
cv2tool.close()
continue
init_pull_num += 1
time.sleep(1)
cv2tool.build_pull_p()
continue
init_pull_num = 1
start_time_2 = time.time()
self.sendPullQueue({"status": "4",
"frame": frame,
"cct_frame": concurrent_frame,
"width": cv2tool.width,
"height": cv2tool.height,
"fps": cv2tool.fps,
"all_frame": cv2tool.all_frames})
concurrent_frame += 1
except ServiceException as s:
self.sendPullQueue({"status": "1", "error": {"code": s.code, "msg": s.msg}})
except Exception as e:
logger.exception("实时拉流异常: {}, requestId:{}", e, self.msg.get("request_id"))
self.sendPullQueue({"status": "1", "error": {"code": ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
"msg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1]}})
finally:
if cv2tool:
cv2tool.close()
if imageFileUpdate:
self.sendImageResult({"command": "stop"})
imageFileUpdate.join(60*3)
logger.info("实时拉流线程结束, requestId: {}", self.msg.get("request_id"))


class OfflinePullVideoStreamProcess(PullVideoStreamProcess):

def run(self):
cv2tool = None
imageFileUpdate = None
try:
imageFileUpdate = ImageFileUpdate(self.fbQueue, self.content, self.msg, self.imageQueue, AnalysisType.ONLINE.value)
imageFileUpdate.setDaemon(True)
imageFileUpdate.start()
cv2tool = Cv2Util(pullUrl=self.msg.get('original_url'), requestId=self.msg.get("request_id"))
cv2_init_num = 1
start_time = time.time()
cv2tool.get_video_info()
concurrent_frame = 1
stop_imageFile = False
while True:
end_time = time.time()
create_task_time = int(end_time - start_time)
if create_task_time > self.service_timeout:
logger.error("分析超时, 超时时间:{}, requestId: {}", create_task_time, self.msg.get("request_id"))
raise ServiceException(ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[0],
ExceptionType.ANALYSE_TIMEOUT_EXCEPTION.value[1])
if not imageFileUpdate.is_alive():
logger.error("未检测到图片上传线程活动,图片上传线程可能出现异常, reuqestId:{}", self.msg.get("request_id"))
raise Exception("未检测到图片上传线程活动,图片上传线程可能出现异常!")
body = self.getCommand()
if body is not None and len(body) > 0:
if 'stop_pull_stream' == body.get("command"):
self.sendPullQueue({"status": "9"}) # 9 停止拉流
stop_imageFile = True
cv2tool.close()
continue
if 'stop_image' == body.get("command"):
self.sendImageResult({"command": "stop"})
imageFileUpdate.join(60*3)
logger.error("图片线程停止完成, reuqestId:{}", self.msg.get("request_id"))
break
if 'stop_ex' == body.get("command"):
self.sendImageResult({"command": "stop"})
imageFileUpdate.join(60*3)
self.pullQueue.cancel_join_thread()
logger.error("图片线程停止完成, reuqestId:{}", self.msg.get("request_id"))
break
if stop_imageFile:
time.sleep(1)
continue
if self.pullQueue.full():
time.sleep(1)
continue
if cv2tool.checkconfig() or cv2tool.pull_p is None:
logger.info("开始重新获取视频信息: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
if cv2_init_num > 3:
logger.info("视频信息获取失败, 重试: {}次, requestId: {}", cv2_init_num, self.msg.get("request_id"))
raise ServiceException(ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[0],
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])
cv2_init_num += 1
time.sleep(2)
cv2tool.get_video_info()
cv2tool.build_pull_p()
continue
frame = cv2tool.read()
if frame is None:
logger.info("总帧数: {}, 当前帧数: {}, requestId: {}", cv2tool.all_frames, concurrent_frame,
self.msg.get("request_id"))
# Allow a tolerance of up to 100 frames when deciding whether the stream ended early
if concurrent_frame < cv2tool.all_frames - 100:
logger.info("离线拉流异常结束:requestId: {}", self.msg.get("request_id"))
self.sendPullQueue({"status": "3"})
stop_imageFile = True
cv2tool.close()
continue
logger.info("离线拉流线程结束, requestId: {}", self.msg.get("request_id"))
self.sendPullQueue({"status": "2"})
cv2tool.close()
stop_imageFile = True
continue
self.sendPullQueue({"status": "4",
"frame": frame,
"cct_frame": concurrent_frame,
"width": cv2tool.width,
"height": cv2tool.height,
"fps": cv2tool.fps,
"all_frame": cv2tool.all_frames})
concurrent_frame += 1
except ServiceException as s:
self.sendPullQueue({"status": "1", "error": {"code": s.code, "msg": s.msg}})
except Exception as e:
logger.exception("离线拉流异常: {}, requestId:{}", e, self.msg.get("request_id"))
self.sendPullQueue({"status": "1", "error": {"code": ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
"msg": ExceptionType.SERVICE_INNER_EXCEPTION.value[1]}})
finally:
if cv2tool is not None:
cv2tool.close()
if imageFileUpdate:
self.sendImageResult({"command": "stop"})
imageFileUpdate.join(60*3)
logger.info("离线拉流线程结束, requestId: {}", self.msg.get("request_id"))

BIN
concurrency/__pycache__/CommonProcess.cpython-38.pyc View File


BIN
concurrency/__pycache__/HeartbeatThread.cpython-38.pyc View File


BIN
concurrency/__pycache__/MessagePollingThread.cpython-38.pyc View File


+ 27
- 66
dsp_application.yml View File

@@ -4,15 +4,16 @@ kafka:
topic:
dsp-alg-online-tasks-topic: dsp-alg-online-tasks
dsp-alg-offline-tasks-topic: dsp-alg-offline-tasks
dsp-alg-image-tasks-topic: dsp-alg-image-tasks
dsp-alg-results-topic: dsp-alg-task-results
local:
bootstrap_servers: ['192.168.10.11:9092']
dsp-alg-online-tasks:
partition: [0]
dsp-alg-offline-tasks:
partition: [0]
dsp-alg-task-results:
partition: [0]
# dsp-alg-online-tasks:
# partition: [0]
# dsp-alg-offline-tasks:
# partition: [0]
# dsp-alg-task-results:
# partition: [0]
producer:
acks: -1
retries: 3
@@ -27,12 +28,12 @@ kafka:
max_poll_records: 1
dev:
bootstrap_servers: ['192.168.11.13:9092']
dsp-alg-online-tasks:
partition: [0]
dsp-alg-offline-tasks:
partition: [0]
dsp-alg-task-results:
partition: [0]
# dsp-alg-online-tasks:
# partition: [0]
# dsp-alg-offline-tasks:
# partition: [0]
# dsp-alg-task-results:
# partition: [0]
producer:
acks: -1
retries: 3
@@ -47,12 +48,12 @@ kafka:
max_poll_records: 1
test:
bootstrap_servers: ['192.168.11.242:9092']
dsp-alg-online-tasks:
partition: [0]
dsp-alg-offline-tasks:
partition: [0]
dsp-alg-task-results:
partition: [0]
# dsp-alg-online-tasks:
# partition: [0]
# dsp-alg-offline-tasks:
# partition: [0]
# dsp-alg-task-results:
# partition: [0]
producer:
acks: -1
retries: 3
@@ -65,54 +66,14 @@ kafka:
auto_offset_reset: latest
enable_auto_commit: False
max_poll_records: 1
test1:
bootstrap_servers: ['192.168.11.242:9092']
dsp-alg-online-tasks:
partition: [1]
dsp-alg-offline-tasks:
partition: [1]
dsp-alg-task-results:
partition: [1]
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: dsp_ai_server
group_id: dsp-ai-test
auto_offset_reset: latest
enable_auto_commit: False
max_poll_records: 1
prod12:
bootstrap_servers: ['101.132.127.1:19092']
dsp-alg-online-tasks:
partition: [0]
dsp-alg-offline-tasks:
partition: [0]
dsp-alg-task-results:
partition: [0]
producer:
acks: -1
retries: 3
linger_ms: 50
retry_backoff_ms: 1000
max_in_flight_requests_per_connection: 5
consumer:
client_id: dsp_ai_server
group_id: dsp-ai-prod
auto_offset_reset: latest
enable_auto_commit: False
max_poll_records: 1
prod13:
prod:
bootstrap_servers: ['101.132.127.1:19092']
dsp-alg-online-tasks:
partition: [1]
dsp-alg-offline-tasks:
partition: [1]
dsp-alg-task-results:
partition: [1]
# dsp-alg-online-tasks:
# partition: [1]
# dsp-alg-offline-tasks:
# partition: [1]
# dsp-alg-task-results:
# partition: [1]
producer:
acks: -1
retries: 3
@@ -170,7 +131,7 @@ service:
# 日志设置
log:
# 是否开启文件输出 True:开启 False:关闭
enable_file_log: False
enable_file_log: True
# 是否开启控制台日志输出 True:开启 False:关闭
enable_stderr: True
# 日志打印文件夹
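As a quick sanity check of the structure above, a minimal sketch of loading this file with PyYAML (the environment key "dev" is just an example):

import yaml  # assumes PyYAML is available

with open("dsp_application.yml", encoding="utf-8") as f:
    content = yaml.safe_load(f)

topics = content["kafka"]["topic"]
bootstrap = content["kafka"]["dev"]["bootstrap_servers"]
print(topics["dsp-alg-image-tasks-topic"], bootstrap)  # dsp-alg-image-tasks ['192.168.11.13:9092']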

+ 9
- 2
enums/AnalysisTypeEnum.py View File

@@ -4,9 +4,16 @@ from enum import Enum, unique
# 分析类型枚举
@unique
class AnalysisType(Enum):

# 在线
ONLINE = "1"

# 离线s
# 离线
OFFLINE = "2"

# 图片
IMAGE = "3"






+ 4
- 0
enums/ExceptionEnum.py View File

@@ -51,4 +51,8 @@ class ExceptionType(Enum):

ILLEGAL_PARAMETER_FORMAT = ("SP022", "Illegal Parameter Format!")

REQUEST_TYPE_NOT_MATCHED = ("SP023", "Request Type Not Matched!")

MODEL_ANALYSIS_EXCEPTION = ("SP024", "Model Analysis Exception!")

SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常, 请联系工程师定位处理!")
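Each member is a (code, message) pair; the rest of the service raises them through ServiceException with the value[0]/value[1] convention, as in this illustrative snippet:

from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException

# value[0] is the error code, value[1] the human-readable message.
raise ServiceException(ExceptionType.MODEL_ANALYSIS_EXCEPTION.value[0],
                       ExceptionType.MODEL_ANALYSIS_EXCEPTION.value[1])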

+ 151
- 109
service/Dispatcher.py View File

@@ -2,6 +2,7 @@
import time
import GPUtil

from concurrency.FeedbackThread import FeedbackThread
from entity.FeedBack import message_feedback
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.AnalysisTypeEnum import AnalysisType
@@ -11,8 +12,7 @@ from util import YmlUtils, FileUtils, LogUtils, KafkaUtils, TimeUtils
from loguru import logger
from multiprocessing import Queue
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, \
OfflineIntelligentRecognitionProcess
from concurrency.MessagePollingThread import OfflineMessagePollingThread, OnlineMessagePollingThread
OfflineIntelligentRecognitionProcess, PhotosIntelligentRecognitionProcess
from util import GPUtils

'''
@@ -20,7 +20,7 @@ from util import GPUtils
'''


class DispatcherService():
class DispatcherService:

# 初始化
def __init__(self):
@@ -35,112 +35,90 @@ class DispatcherService():
# 记录当前正在执行的离线视频分析任务
self.offlineProcesses = {}
# 记录当前正在执行的图片分析任务
# self.photoProcesses = {}
self.onlineMpt = None
self.offlineMpt = None

self.photoProcesses = {}
self.fbQueue = Queue()
self.online_topic = self.content["kafka"]["topic"]["dsp-alg-online-tasks-topic"]
self.offline_topic = self.content["kafka"]["topic"]["dsp-alg-offline-tasks-topic"]
self.image_topic = self.content["kafka"]["topic"]["dsp-alg-image-tasks-topic"]
self.topics = [self.online_topic, self.offline_topic, self.image_topic]
self.analysisType = {
self.online_topic: (AnalysisType.ONLINE.value, lambda x, y: self.online(x, y)),
self.offline_topic: (AnalysisType.OFFLINE.value, lambda x, y: self.offline(x, y)),
self.image_topic: (AnalysisType.IMAGE.value, lambda x, y: self.image(x, y))
}

# 服务调用启动方法
def start_service(self):
# 启动实时,离线kafka消息拉取线程
self.Kafka_message_listening()
# 启动问题反馈线程
feedbackThread = self.start_feedback_thread()
# 初始化kafka监听者
customerKafkaConsumer = KafkaUtils.CustomerKafkaConsumer(self.content, topics=self.topics)
print("(♥◠‿◠)ノ゙ DSP【算法调度服务】启动成功 ლ(´ڡ`ლ)゙")
# 循环消息处理
while True:
time.sleep(1)
# Check running task processes and drop the ones that are no longer alive
self.check_process_task()
# 校验问题反馈线程是否正常
if not feedbackThread.is_alive():
logger.error("======================问题反馈线程异常停止======================")
break
# 获取当前可用gpu使用数量
gpu_ids = GPUtils.get_gpu_ids(self.content)
if gpu_ids is not None and len(gpu_ids) > 0:
################## 消息驱动实时流分析进程执行 ##################
onlineMsg = self.onlineMpt.poll()
if onlineMsg is not None and len(onlineMsg) > 0:
try:
GPUtil.showUtilization()
# 校验kafka消息
check_result = self.check_online_msg(onlineMsg)
if not check_result:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
if 'start' == onlineMsg.get("command"):
logger.info("开始实时分析")
self.startOnlineProcess(onlineMsg, self.content, gpu_ids)
elif 'stop' == onlineMsg.get("command"):
self.stopOnlineProcess(onlineMsg)
else:
pass
except ServiceException as s:
logger.exception("实时消息监听异常:{}, requestId: {}", s.msg, onlineMsg.get("request_id"))
feedback = {
"feedback": message_feedback(onlineMsg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.ONLINE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())}
self.send_error_message(onlineMsg, feedback)
except Exception as e:
logger.exception("实时消息监听异常:{}, requestId: {}", e, onlineMsg.get("request_id"))
feedback = {
"feedback": message_feedback(onlineMsg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.ONLINE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}
self.send_error_message(onlineMsg, feedback)

################## 消息驱动离线视频分析进程执行 ##################
offlineMsg = self.offlineMpt.poll()
if offlineMsg is not None and len(offlineMsg) > 0:
try:
GPUtil.showUtilization()
# 校验kafka消息
check_result = self.check_offline_msg(offlineMsg)
if not check_result:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
if 'start' == offlineMsg.get("command"):
logger.info("开始离线分析")
self.startOfflineProcess(offlineMsg, self.content, gpu_ids)
elif 'stop' == offlineMsg.get("command"):
self.stopOfflineProcess(offlineMsg)
else:
pass
except ServiceException as s:
logger.exception("离线消息监听异常:{}, requestId: {}", s.msg, offlineMsg.get("request_id"))
feedback = {
"feedback": message_feedback(offlineMsg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.OFFLINE.value,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())}
self.send_error_message(offlineMsg, feedback)
except Exception as e:
logger.exception("离线消息监听异常:{}, requestId: {}", e, offlineMsg.get("request_id"))
feedback = {
"feedback": message_feedback(offlineMsg.get("request_id"), AnalysisStatus.FAILED.value,
AnalysisType.OFFLINE.value,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}
self.send_error_message(offlineMsg, feedback)
msg = customerKafkaConsumer.poll()
if msg is not None and len(msg) > 0:
for k, v in msg.items():
for m in v:
message = m.value
analysisType = self.analysisType.get(m.topic)[0]
try:
customerKafkaConsumer.commit_offset(m)
logger.info("当前拉取到的消息, topic:{}, offset:{}, partition: {}, body: {}, requestId:{}",
m.topic, m.offset, m.partition, message, message.get("request_id"))
self.analysisType.get(m.topic)[1](message, gpu_ids)
except ServiceException as s:
logger.exception("消息监听异常:{}, requestId: {}", s.msg, message.get("request_id"))
if analysisType is not None:
feedback = {
"feedback": message_feedback(message.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
s.code,
s.msg,
analyse_time=TimeUtils.now_date_to_str())}
self.fbQueue.put(feedback)
except Exception as e:
logger.exception("消息监听异常:{}, requestId: {}", e, message.get("request_id"))
if analysisType is not None:
feedback = {
"feedback": message_feedback(message.get("request_id"),
AnalysisStatus.FAILED.value,
analysisType,
ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
ExceptionType.SERVICE_INNER_EXCEPTION.value[1],
analyse_time=TimeUtils.now_date_to_str())}
self.fbQueue.put(feedback)
else:
time.sleep(1)
else:
logger.info("当前可用gpu数量: {}", gpu_ids)
GPUtil.showUtilization()
time.sleep(5)

def startOnlineProcess(self, msg, content, gpu_ids):
# 开启实时进程
def startOnlineProcess(self, msg, gpu_ids):
# Do not run again if a task with the same requestId is already executing
if self.onlineProcesses.get(msg.get("request_id")):
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
cfg = {"fbQueue": Queue(), "imageQueue": Queue(), "content": content, "msg": msg, "gpu_ids": gpu_ids,
"type": AnalysisType.ONLINE.value}
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids}
# Create the online recognition process and start it
oirp = OnlineIntelligentRecognitionProcess(cfg)
oirp.start()
# 记录请求与进程映射
self.onlineProcesses[msg.get("request_id")] = oirp

# 结束实时进程
def stopOnlineProcess(self, msg):
ps = self.onlineProcesses.get(msg.get("request_id"))
if ps is None:
@@ -156,20 +134,23 @@ class DispatcherService():
for requestId in list(self.offlineProcesses.keys()):
if not self.offlineProcesses[requestId].is_alive():
del self.offlineProcesses[requestId]
for requestId in list(self.photoProcesses.keys()):
if not self.photoProcesses[requestId].is_alive():
del self.photoProcesses[requestId]

# 开启离线进程
def startOfflineProcess(self, msg, content, gpu_ids):
def startOfflineProcess(self, msg, gpu_ids):
# Do not run again if a task with the same requestId is already executing
if self.offlineProcesses.get(msg.get("request_id")):
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
cfg = {"fbQueue": Queue(), "imageQueue": Queue(), "content": content, "msg": msg, "gpu_ids": gpu_ids,
"type": AnalysisType.OFFLINE.value}
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids}
# Create the offline recognition process and start it
ofirp = OfflineIntelligentRecognitionProcess(cfg)
ofirp.start()
self.offlineProcesses[msg.get("request_id")] = ofirp

# 结束离线进程
def stopOfflineProcess(self, msg):
ps = self.offlineProcesses.get(msg.get("request_id"))
if ps is None:
@@ -177,6 +158,18 @@ class DispatcherService():
return
ps.sendEvent({'command': 'stop'})

# 开启图片分析进程
def startImageProcess(self, msg, gpu_ids):
# Do not run again if a task with the same requestId is already executing
if self.photoProcesses.get(msg.get("request_id")):
logger.warning("重复任务,请稍后再试!requestId:{}", msg.get("request_id"))
return
cfg = {"fbQueue": self.fbQueue, "content": self.content, "msg": msg, "gpu_ids": gpu_ids}
# Create the image recognition process and start it
imagep = PhotosIntelligentRecognitionProcess(cfg)
imagep.start()
self.photoProcesses[msg.get("request_id")] = imagep

# 校验实时kafka消息
def check_online_msg(self, msg):
requestId = msg.get("request_id")
@@ -236,27 +229,76 @@ class DispatcherService():
return False
return True

# 实时、离线kafka消息监听
def Kafka_message_listening(self):
# 实时流分析消息拉取
self.onlineMpt = OnlineMessagePollingThread('online_thread', {'content': self.content,
'topics': [self.content["kafka"]["topic"][
"dsp-alg-online-tasks-topic"]]})
# 校验图片kafka消息
def check_image_msg(self, msg):
requestId = msg.get("request_id")
models = msg.get("models")
command = msg.get("command")
image_urls = msg.get("image_urls")
results_base_dir = msg.get("results_base_dir")
if command is None:
return False
if requestId is None:
return False
if command == 'start' and models is None:
return False
if models is not None:
for model in models:
if model.get("code") is None:
return False
if model.get("categories") is None:
return False
if command == 'start' and image_urls is None:
return False
if command == 'start' and results_base_dir is None:
return False
return True

'''
开启问题反馈线程
'''
def start_feedback_thread(self):
feedbackThread = FeedbackThread(self.fbQueue, self.content)
feedbackThread.setDaemon(True)
feedbackThread.start()
return feedbackThread

def online(self, message, gpu_ids):
check_result = self.check_online_msg(message)
if not check_result:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
if 'start' == message.get("command"):
logger.info("开始实时分析")
self.startOnlineProcess(message, gpu_ids)
elif 'stop' == message.get("command"):
self.stopOnlineProcess(message)
else:
pass

# 离线视频分析消息拉取
self.offlineMpt = OfflineMessagePollingThread('offline_thread', {'content': self.content,
'topics': [self.content["kafka"]["topic"][
"dsp-alg-offline-tasks-topic"]]})
self.onlineMpt.setDaemon(True)
self.offlineMpt.setDaemon(True)
# 开启监听线程
self.onlineMpt.start()
self.offlineMpt.start()
def offline(self, message, gpu_ids):
check_result = self.check_offline_msg(message)
if not check_result:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
if 'start' == message.get("command"):
logger.info("开始离线分析")
self.startOfflineProcess(message, gpu_ids)
time.sleep(3)
elif 'stop' == message.get("command"):
self.stopOfflineProcess(message)
else:
pass

def send_error_message(self, msg, feedback):
try:
kafkaProducer = KafkaUtils.CustomerKafkaProducer(self.content, msg.get("request_id"))
kafkaProducer.sender(self.content["kafka"]["topic"]["dsp-alg-results-topic"],
feedback["request_id"], feedback, 1)
except Exception as e:
logger.exception("发送kafka消息异常: {}, requestId:{}", e, msg.get("request_id"))
def image(self, message, gpu_ids):
check_result = self.check_image_msg(message)
if not check_result:
raise ServiceException(ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[0],
ExceptionType.ILLEGAL_PARAMETER_FORMAT.value[1])
if 'start' == message.get("command"):
logger.info("开始图片分析")
self.startImageProcess(message, gpu_ids)
# elif 'stop' == message.get("command"):
# self.stopImageProcess(message)
else:
pass
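For reference, a hypothetical image-task payload that passes check_image_msg above, with all required fields present (command, request_id, models with code and categories, image_urls, results_base_dir); the URL is an illustrative example:

image_task = {
    "request_id": "d4c909912ac741ce81ccef03fd1b2ec46",
    "command": "start",
    "models": [{"code": "001", "categories": [{"id": "0", "config": {}}]}],
    "image_urls": ["https://image.t-aaron.com/P20221112103326614/example_OR.jpg"],
    "results_base_dir": "P20220802133841159"
}
# check_image_msg(image_task) would return True for this shape.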

+ 0
- 11
test/Producer2.py View File

@@ -1,11 +0,0 @@
from kafka import KafkaProducer
import json

topicName = 'alg_online_tasks'
eBody = {"request_id": "16446e0d79fb4497b390d1a7f49f3079","command":"stop"}
producer = KafkaProducer(bootstrap_servers=[
'localhost:9092'],
value_serializer=lambda m: json.dumps(m).encode('utf-8'))
future = producer.send(topicName, key=b'16446e0d79fb4497b390d1a7f49f3079', value=eBody)
result = future.get(timeout=10)
print(result)

test/__init__.py → test/aliyun/__init__.py View File


test/vod.py → test/aliyun/vod.py View File


test/vodTest.py → test/aliyun/vodTest.py View File


test/vodtest1.py → test/aliyun/vodtest1.py View File


test/vodtest2.py → test/aliyun/vodtest2.py View File


+ 0
- 147
test/experimental.py View File

@@ -1,147 +0,0 @@
# YOLOv5 experimental modules

import numpy as np # numpy矩阵操作模块
import torch # PyTorch深度学习模块
import torch.nn as nn # PYTorch模块函数库
import os
from models.common import Conv, DWConv
from utils.google_utils import attempt_download


class CrossConv(nn.Module):
# Cross Convolution Downsample
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
super(CrossConv, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
self.add = shortcut and c1 == c2

def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class Sum(nn.Module):
# Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, n, weight=False): # n: number of inputs
super(Sum, self).__init__()
self.weight = weight # apply weights boolean
self.iter = range(n - 1) # iter object
if weight:
self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights

def forward(self, x):
y = x[0] # no weight
if self.weight:
w = torch.sigmoid(self.w) * 2
for i in self.iter:
y = y + x[i + 1] * w[i]
else:
for i in self.iter:
y = y + x[i + 1]
return y

"""
Ghost Convolution 幻象卷积 轻量化网络卷积模块
论文: https://arxiv.org/abs/1911.11907
源码: https://github.com/huawei-noah/ghostnet
"""
class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super(GhostConv, self).__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)

def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)


class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()

def forward(self, x):
return self.conv(x) + self.shortcut(x)


class MixConv2d(nn.Module):
# Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
super(MixConv2d, self).__init__()
groups = len(k)
if equal_ch: # equal c_ per group
i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
else: # equal weight.numel() per group
b = [c2] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b

self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
self.bn = nn.BatchNorm2d(c2)
self.act = nn.LeakyReLU(0.1, inplace=True)

def forward(self, x):
return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
# Ensemble of models
def __init__(self):
super(Ensemble, self).__init__()

def forward(self, x, augment=False):
y = []
for module in self:
y.append(module(x, augment)[0])
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 1) # nms ensemble
return y, None # inference, train output


"""用在val.py、detect.py、train.py等文件中 一般用在测试、验证阶段
加载模型权重文件并构建模型(可以构造普通模型或者集成模型)
Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
:params weights: 模型的权重文件地址 默认weights/yolov5s.pt
可以是[a]也可以是list格式[a, b] 如果是list格式将调用上面的模型集成函数 多模型运算 提高最终模型的泛化误差
:params map_location: attempt_download函数参数 表示模型运行设备device
:params inplace: pytorch 1.7.0 compatibility设置
"""
def attempt_load(weights, map_location=None):
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
#attempt_download(w)
assert os.path.exists(w),"%s not exists"
ckpt = torch.load(w, map_location=map_location) # load
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model

# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
m.inplace = True # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility

if len(model) == 1:
return model[-1] # return model
else:
print('Ensemble created with %s\n' % weights)
for k in ['names', 'stride']:
setattr(model, k, getattr(model[-1], k))
return model # return ensemble

+ 179
- 0
test/ffmpeg11/aa.py View File

@@ -0,0 +1,179 @@
import json
import time
import subprocess as sp
import ffmpeg
import cv2
import sys

import numpy as np



"""
获取视频基本信息
"""


def get_video_info(in_file):
try:
probe = ffmpeg.probe(in_file)
# format = probe['format']
# size = int(format['size'])/1024/1024
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
if video_stream is None:
print('No video stream found', file=sys.stderr)
return
width = int(video_stream['width'])
height = int(video_stream['height'])
# num_frames = int(video_stream['nb_frames'])
# up, down = str(video_stream['r_frame_rate']).split('/')
# fps = eval(up) / eval(down)
# duration = float(video_stream['duration'])
bit_rate = int(video_stream['bit_rate'])/1000
print('width: {}'.format(width))
print('height: {}'.format(height))
# print('num_frames: {}'.format(num_frames))
print('bit_rate: {}k'.format(bit_rate))
# print('fps: {}'.format(fps))
# print('size: {}MB'.format(size))
# print('duration: {}'.format(duration))
return video_stream



except Exception as err:
raise err


if __name__ == '__main__':
file_path = 'https://vod.play.t-aaron.com/430122abaedc42188e73763b57e33c3c/cd64c5ca5c454c84859d86e7dbaef7c8-e721e81dba2469bca32bce44ee238c44-hd.mp4'
#file_path = 'https://vod.play.t-aaron.com/customerTrans/edc96ea2115a0723a003730956208134/40b416f7-183b57f6be0-0004-f90c-f2c-7ec68.mp4'
#file_path = 'https://vod.play.t-aaron.com/3301fc8e166f45be88f2214e7a8f4a9d/e29535365b54434d9ed2e8c3b0a175da-fba35541b31a1049ca05b145a283c33a-hd.mp4'
video_info = get_video_info(file_path)
print(json.dumps(video_info))
# total_frames = int(video_info['nb_frames'])
# print('总帧数:' + str(total_frames))
# random_frame = random.randint(1, total_frames)
# print('随机帧:' + str(random_frame))
# out = read_frame_as_jpeg(file_path, i)
# image_array = numpy.asarray(bytearray(out), dtype="uint8")
# image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
# kwargs={'fflags': 'nobuffer', 'flags': 'low_delay'}
kwargs={
# "hwaccel": "nvdec",
# "vcodec": "h264_cuvid",
# "c:v": "h264_cuvid"
}
output_args = {
# "vcodec": "hevc_nvenc",
# "c:v": "hevc_nvenc",
# "preset": "fast",
}
# i = 1
# process1 = (
# ffmpeg
# .input(file_path, **kwargs)
# .output('pipe:', format='rawvideo', pix_fmt='rgb24', **output_args)
# # .global_args("-an")
# .overwrite_output()
# .run_async(pipe_stdout=True, pipe_stderr=True)
# )
width = int(video_info['width'])
height = int(video_info['height'])
command = ['ffmpeg',
# '-hwaccel', 'cuvid',
'-c:v', 'h264_cuvid',
# '-hwaccel_output_format', 'cuda',
# '-resize', '960x540',
'-i', file_path,
# '-c:v', 'hevc_nvenc',
# '-pix_fmt', 'yuv420p',
'-f', 'rawvideo',
# '-pix_fmt', 'bgr24',
'-an',
'-']
p = sp.Popen(command, stdout=sp.PIPE)
# ai_video_file = cv2.VideoWriter(r"C:\Users\chenyukun\Desktop\shipin\aa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 30,
# (width, height))

command1 = ['ffmpeg',
# '-loglevel', 'debug',
'-y', # 不经过确认,输出时直接覆盖同名文件。
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'nv12',
# '-hwaccel', 'cuvid',
# '-c:v', 'h264_cuvid',
# '-hwaccel_output_format', 'cuda',
# '-s', "{}x{}".format(self.width * 2, self.height),
'-s', "{}x{}".format(int(1920), int(1080)),
'-r', str(25),
'-i', '-', # 指定输入文件
'-g', str(5),
# '-maxrate', '15000k',
# '-profile:v', 'high',
# '-b:v', '4000k',
# '-crf', '18',
'-rc:v', 'vbr',
'-cq:v', '30',
'-qmin', '30',
'-qmax', '30',
'-bufsize', '1',
'-c:v', 'h264_nvenc', #

# '-c:v', 'libx264', # 指定视频编码器
# '-tune', 'zerolatency', # 加速编码速度
# '-sc_threshold', '0',
'-pix_fmt', 'yuv420p',
"-an",
# '-flvflags', 'no_duration_filesize',
'-preset', 'fast',
# '-flvflags', 'no_duration_filesize',
# '-preset', 'fast', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast,
# superfast, veryfast, faster, fast, medium, slow, slower, veryslow。
'-f', 'flv',
'rtmp://live.push.t-aaron.com/live/THSAr']

# # 管道配置
p1 = sp.Popen(command1, stdin=sp.PIPE)
start1 = time.time()
num = 0
while True:
num += 1
# if num ==100:
# print(time.time()-start1)
# break
start = time.time()
in_bytes = p.stdout.read(int(width * height * 3))
# in_bytes = p.stdout.read()
if not in_bytes:
print(in_bytes)
# ai_video_file.release()
p.stdout.close()
p.wait()
break
# print("aedsdaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", len(in_bytes))
# break
# 转成ndarray
in_frame = (np.frombuffer(in_bytes, np.uint8).reshape([int(height), int(width), 3]))
# print(in_frame, time.time()-start)

p1.stdin.write(in_frame.tostring())
# p1.stdin.write(in_frame.tostring())
# frame
# .astype(np.uint8)
# .tobytes()
# frame = cv2.resize(in_frame, (1280, 720)) # 改变图片尺寸
# frame = cv2.cvtColor(in_frame, cv2.COLOR_RGB2BGR) # 转成BGR
# i += 1
# print(round(time.time()-start, 5))
#
# ai_video_file.write(in_frame)
# if time.time() - start1 > 60:
# ai_video_file.release()
# p.stdout.close()
# p.wait()
# break
# cv2.imshow('frame', frame)
# time.sleep(1111)
p.kill()

test/cv2test.py → test/ffmpeg11/cv2test.py View File


test/cv2test1.py → test/ffmpeg11/cv2test1.py View File


+ 22
- 14
test/ffmpeg11/ffmpeg11.py View File

@@ -2,7 +2,6 @@ import json
import time
import subprocess as sp
import ffmpeg
import numpy
import cv2
import sys
import random
@@ -60,7 +59,7 @@ def get_video_info(in_file):


if __name__ == '__main__':
file_path = 'rtmp://live.play.t-aaron.com/live/THSAk'
file_path = 'https://vod.play.t-aaron.com/0bc905ef5651439da2bfba8427fe467e/a76a7ebb6e3b44ef9c0c7820c7e9c574-f2d7ee90cba11aa91971d58e06d295d2-4k.mp4'
#file_path = 'https://vod.play.t-aaron.com/customerTrans/edc96ea2115a0723a003730956208134/40b416f7-183b57f6be0-0004-f90c-f2c-7ec68.mp4'
#file_path = 'https://vod.play.t-aaron.com/3301fc8e166f45be88f2214e7a8f4a9d/e29535365b54434d9ed2e8c3b0a175da-fba35541b31a1049ca05b145a283c33a-hd.mp4'
video_info = get_video_info(file_path)
@@ -95,31 +94,39 @@ if __name__ == '__main__':
width = int(video_info['width'])
height = int(video_info['height'])
command = ['ffmpeg',
'-vcodec', 'h264_cuvid',
'-resize', '1920x1080',
# '-hwaccel_output_format', 'bgr24',
'-i', file_path,
# '-vf', "hwdownload,format=bgr24",
# "-vcodec", "h264_nvenc", # hevc_nvenc h264_nvenc
'-f', 'rawvideo',
'-pix_fmt', 'bgr24',
'-vcodec','rawvideo',
'-s', "{}x{}".format(int(width/2), int(height/2)),
'-an',
# '-g', '5',
# '-pix_fmt', 'bgr24',
# '-hwaccel_output_format', 'bgr24',
# '-an',
'-']
p = sp.Popen(command, stdout=sp.PIPE)
ai_video_file = cv2.VideoWriter(r"C:\Users\chenyukun\Desktop\shipin\aa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 30,
(width, height))
# ai_video_file = cv2.VideoWriter(r"C:\Users\chenyukun\Desktop\shipin\aa.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 30,
# (width, height))
start1 = time.time()
while True:
start = time.time()
in_bytes = p.stdout.read(int(width * height * 3/4))
# in_bytes = p.stdout.read()
in_bytes = p.stdout.read(int(width * height * 3 // 8))
# in_bytes = p.stdout.read(int(width * height * 3/4))
if not in_bytes:
print(in_bytes)
ai_video_file.release()
# ai_video_file.release()
p.stdout.close()
p.wait()
break
# 转成ndarray
in_frame = (np.frombuffer(in_bytes, np.uint8).reshape([int(height/2), int(width/2), 3]))
print("拉流时间:", time.time() - start)
img = (np.frombuffer(in_bytes, np.uint8)).reshape((int(height/2 * 3 // 2), int(width/2)))
bgr_img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12)
# in_frame = (np.frombuffer(in_bytes, np.uint8).reshape([int(height/2), int(width/2), 3]))
# print("拉流时间:", time.time() - start)
# frame = cv2.resize(in_frame, (1280, 720)) # 改变图片尺寸
# frame = cv2.cvtColor(in_frame, cv2.COLOR_RGB2BGR) # 转成BGR
# i += 1
# print(round(time.time()-start, 5))
#
@@ -129,6 +136,7 @@ if __name__ == '__main__':
# p.stdout.close()
# p.wait()
# break
# cv2.imshow('frame', frame)
# cv2.imshow('frame', bgr_img)
# cv2.waitKey(1)
# time.sleep(1111)
p.kill()
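The read size above, int(width * height * 3 // 8), follows from asking the decoder to resize to half width and half height while emitting NV12, which packs 1.5 bytes per pixel; a small sketch of the arithmetic:

# (width/2) * (height/2) * 3/2 == width * height * 3/8 bytes per NV12 frame.
width, height = 3840, 2160                  # hypothetical source resolution
half_w, half_h = width // 2, height // 2
nv12_bytes = half_w * half_h * 3 // 2
assert nv12_bytes == width * height * 3 // 8
# The flat buffer is then reshaped to (half_h * 3 // 2, half_w) before
# cv2.cvtColor(..., cv2.COLOR_YUV2BGR_NV12) converts it to BGR.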

test/ffmpeg2.py → test/ffmpeg11/ffmpeg2.py View File


test/ffmpeg3.py → test/ffmpeg11/ffmpeg3.py View File


+ 0
- 0
test/gpu/__init__.py View File


test/gputest.py → test/gpu/gputest.py View File


test/gputest1.py → test/gpu/gputest1.py View File


+ 0
- 0
test/kafka/__init__.py View File


+ 149
- 0
test/kafka/producer_start.py View File

@@ -0,0 +1,149 @@
# import sys
# sys.path.extend(["..", "../util"])
# from util.AliyunSdk import AliyunVodSdk
# from concurrency.CommonThread import Common
from kafka import KafkaProducer
import json
import threading

# topicName = 'dsp-alg-online-tasks'
# eBody = {
# "request_id": "d4c909912ac741ce81ccef03fd1b2ec45",
# "models": [
# {
# "code": "001",
# "categories": [{
# "id": "0",
# "config": {}
# },
# {
# "id": "1",
# "config": {}
# },
# {
# "id": "2",
# "config": {}
# },
# {
# "id": "3",
# "config": {}
# },
# {
# "id": "4",
# "config": {}
# },
# {
# "id": "5",
# "config": {}
# }
# ]
# }],
# "command": "start",
# "pull_url": "rtmp://live.play.t-aaron.com/live/THSAh",
# "push_url": "rtmp://live.push.t-aaron.com/live/THSAg",
# "results_base_dir": "P20220802133841159"
# }
# producer = KafkaProducer(bootstrap_servers=['192.168.11.13:9092'],
# value_serializer=lambda m: json.dumps(m).encode('utf-8'))
# future = producer.send(topicName, key=b'd4c909912ac741ce81ccef03fd1b2ec45', value=eBody)
# result = future.get(timeout=10)
# print(result)

# topicName = 'dsp-alg-image-tasks'
# eBody = {
# "request_id": "d4c909912ac741ce81ccef03fd1b2ec46",
# "models": [
# {
# "code": "001",
# "categories": [
# {
# "id": "0",
# "config": {}
# },
# {
# "id": "1",
# "config": {}
# },
# {
# "id": "2",
# "config": {}
# },
# {
# "id": "3",
# "config": {}
# },
# {
# "id": "4",
# "config": {}
# },
# {
# "id": "5",
# "config": {}
# },
# {
# "id": "6",
# "config": {}
# },
# {
# "id": "7",
# "config": {}
# }
# ]
# }],
# "command": "start",
# "image_urls": ["https://image.t-aaron.com/P20221112103326614/2022-11-12-10-37-02_frame-3991-4291_20221112103702452021-offline-P20221112103326614-eb4467a3fe8f405ebf4c44f1b48a7e4b_OR.jpg",
# "https://image.t-aaron.com/P20221112103326614/2022-11-12-10-35-09_frame-1785-2085_20221112103509952824-offline-P20221112103326614-eb4467a3fe8f405ebf4c44f1b48a7e4b_OR.jpg"],
# "results_base_dir": "P20220802133841159"
# }
topicName = 'dsp-alg-offline-tasks'
eBody = {
"request_id": "d4c909912ac741ce81ccef03fd1b2ec46",
"models": [
{
"code": "001",
"categories": [
{
"id": "0",
"config": {}
},
{
"id": "1",
"config": {}
},
{
"id": "2",
"config": {}
},
{
"id": "3",
"config": {}
},
{
"id": "4",
"config": {}
},
{
"id": "5",
"config": {}
},
{
"id": "6",
"config": {}
},
{
"id": "7",
"config": {}
}
]
}],
"command": "start",
"original_url": "https://vod.play.t-aaron.com/0bc905ef5651439da2bfba8427fe467e/a76a7ebb6e3b44ef9c0c7820c7e9c574-f2d7ee90cba11aa91971d58e06d295d2-4k.mp4",
"original_type": ".mp4",
"push_url": "rtmp://live.push.t-aaron.com/live/THSAr",
"results_base_dir": "P20220802133841159"
}
producer = KafkaProducer(bootstrap_servers=['192.168.11.13:9092'],
value_serializer=lambda m: json.dumps(m).encode('utf-8'))
future = producer.send(topicName, key=b'd4c909912ac741ce81ccef03fd1b2ec46', value=eBody)
result = future.get(timeout=10)
print(result)

test/producer_stop.py → test/kafka/producer_stop.py View File


BIN
test/minio1/__pycache__/minio_test.cpython-310.pyc View File


+ 31
- 0
test/minio1/minio_test.py View File

@@ -0,0 +1,31 @@
import io
import json

import cv2
import urllib3
from minio import Minio

# MinIO

minioClient = Minio('192.168.12.5:9000',
access_key='RoZc1NbZ6oAC4lTV',
secret_key='wC01Lup6SIoahwfSrYcoF2gSF3mPy6gJ',
secure=False)
aa = r'C:\Users\chenyukun\Desktop\shipin\DJI_20211229100908_0001_S.mp4'
# # 创建存储桶
# img = cv2.imread(r"D:\work\alg\tuoheng_alg\test\image\1.jpg")
# ai_result, ai_image = cv2.imencode(".jpg", img)
# # minioClient.make_bucket("test/bb")
# print(len(ai_image))
# minioClient.put_object('test', '1.jpg', io.BytesIO(ai_image.tobytes()), len(ai_image))
result = minioClient.fput_object("test", "aa3.mp4", aa)
print(result.__dict__)
# 查看桶信息
# buckets = minioClient.list_buckets()
# for bucket in buckets:
# print(bucket.name, bucket.creation_date)
# 判断桶是否存在
if minioClient.bucket_exists("my-bucket"):
print("my-bucket exists")
else:
print("my-bucket does not exist")

+ 0
- 9
test/mysqltest.py View File

@@ -1,9 +0,0 @@
from util.MyConnectionPool import MySqLHelper
from util import YmlUtils
import json

if __name__=="__main__":
content = YmlUtils.getConfigs()
sql = MySqLHelper(content)
res = sql.selectall("select id, name, code, description, create_user, create_time, update_user, update_time, mark from dsp_model_classification where mark = %s", 1)
print(res)

+ 0
- 103
test/producer_start.py View File

@@ -1,103 +0,0 @@
# import sys
# sys.path.extend(["..", "../util"])
# from util.AliyunSdk import AliyunVodSdk
# from concurrency.CommonThread import Common
from kafka import KafkaProducer
import json
import threading

topicName = 'dsp-alg-online-tasks'
eBody = {
"request_id": "d4c909912ac741ce81ccef03fd1b2ec45",
"models": [
{
"code": "001",
"categories": [{
"id": "0",
"config": {}
},
{
"id": "1",
"config": {}
},
{
"id": "2",
"config": {}
},
{
"id": "3",
"config": {}
},
{
"id": "4",
"config": {}
},
{
"id": "5",
"config": {}
}
]
}],
"command": "start",
"pull_url": "rtmp://live.play.t-aaron.com/live/THSAh",
"push_url": "rtmp://live.push.t-aaron.com/live/THSAg",
"results_base_dir": "P20220802133841159"
}
producer = KafkaProducer(bootstrap_servers=['192.168.11.13:9092'],
value_serializer=lambda m: json.dumps(m).encode('utf-8'))
future = producer.send(topicName, key=b'd4c909912ac741ce81ccef03fd1b2ec45', value=eBody)
result = future.get(timeout=10)
print(result)

# topicName = 'dsp-alg-offline-tasks'
# eBody = {
# "request_id": "d4c909912ac741ce81ccef03fd1b2ec46",
# "models": [
# {
# "code": "001",
# "categories": [
# {
# "id": "0",
# "config": {}
# },
# {
# "id": "1",
# "config": {}
# },
# {
# "id": "2",
# "config": {}
# },
# {
# "id": "3",
# "config": {}
# },
# {
# "id": "4",
# "config": {}
# },
# {
# "id": "5",
# "config": {}
# },
# {
# "id": "6",
# "config": {}
# },
# {
# "id": "7",
# "config": {}
# }
# ]
# }],
# "command": "start",
# "original_url": "https://vod.play.t-aaron.com/430122abaedc42188e73763b57e33c3c/cd64c5ca5c454c84859d86e7dbaef7c8-e721e81dba2469bca32bce44ee238c44-hd.mp4",
# "original_type": ".mp4",
# "push_url": "rtmp://live.push.t-aaron.com/live/THSAg",
# "results_base_dir": "P20220802133841159"
# }
# producer = KafkaProducer(bootstrap_servers=['192.168.11.13:9092'],
# value_serializer=lambda m: json.dumps(m).encode('utf-8'))
# future = producer.send(topicName, key=b'd4c909912ac741ce81ccef03fd1b2ec45', value=eBody)
# result = future.get(timeout=10)
# print(result)

+ 0
- 20
test/read.py View File

@@ -1,20 +0,0 @@
import cv2
import time
def aa():
start = time.time()
vid = cv2.VideoCapture("http://live.play.t-aaron.com/live/THSAk_hd.m3u8")
print("aaaaaaa", time.time() - start)
while True:
if vid is None or not vid.isOpened():
print("11111111111111111111111", vid)
continue
return_value, frame = vid.read()
if return_value is not None and return_value:
print("拉流时间", time.time()-start)
# break




if __name__=="__main__":
aa()

+ 0
- 47
test/test1.py View File

@@ -1,47 +0,0 @@
import subprocess as sp
from PIL import Image
import time
import cv2
import oss2
import numpy as np
# 推流
if __name__== "__main__":

cap = cv2.VideoCapture("/home/DATA/chenyukun/DJI_20211229100908_0001_S.mp4")

# Get video information
fps = int(cap.get(cv2.CAP_PROP_FPS))
print(fps)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(width)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(height)
# ffmpeg command
command = ['/usr/bin/ffmpeg',
'-y', # 不经过确认,输出时直接覆盖同名文件。
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'bgr24',
# '-s', "{}x{}".format(self.width * 2, self.height),
'-s', "{}x{}".format(width, height),
'-r', str(15),
'-i', '-', # 指定输入文件
'-g', '25',
'-b:v', '3000k',
'-c:v', 'libx264', # 指定视频编码器
'-pix_fmt', 'yuv420p',
'-preset', 'ultrafast', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast,
# superfast, veryfast, faster, fast, medium, slow, slower, veryslow。
'-f', 'flv',
"rtmp://live.push.t-aaron.com/live/THSAe"]

# 管道配置
p = sp.Popen(command, stdin=sp.PIPE, shell=False)
while(cap.isOpened()):
ret, frame = cap.read()
if not ret:
print("Opening camera is failed")
break
time.sleep(0.03)
p.stdin.write(frame.tostring())


+ 0
- 318
test/torch_utils.py View File

@@ -1,318 +0,0 @@
# YOLOv5 PyTorch utils

import datetime # 时间模块 基于time进行了封装 更高级
import logging # 日志功能生成模块
import math # 数学函数模块
import os # 与操作系统进行交互的模块
import platform # 提供获取操作系统相关信息的模块
import subprocess # 子进程定义及操作的模块
import time # 时间模块 更底层
from contextlib import contextmanager # 用于进行上下文管理的模块
from copy import deepcopy # 实现深度复制的模块
from pathlib import Path # Path将str转换为Path对象 使字符串路径易于操作的模块

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision

try:
import thop # 用于Pytorch模型的FLOPS计算工具模块
except ImportError:
thop = None
logger = logging.getLogger(__name__)


@contextmanager
def torch_distributed_zero_first(local_rank: int):
"""
Decorator to make all processes in distributed training wait for each local_master to do something.
"""
if local_rank not in [-1, 0]:
torch.distributed.barrier()
yield
if local_rank == 0:
torch.distributed.barrier()


def init_torch_seeds(seed=0):
# Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
torch.manual_seed(seed)
if seed == 0: # slower, more reproducible
cudnn.benchmark, cudnn.deterministic = False, True
else: # faster, less reproducible
cudnn.benchmark, cudnn.deterministic = True, False


def date_modified(path=__file__):
# return human-readable file modification date, i.e. '2021-3-26'
t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
return f'{t.year}-{t.month}-{t.day}'


def git_describe(path=Path(__file__).parent): # path must be a directory
# return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
s = f'git -C {path} describe --tags --long --always'
try:
return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
except subprocess.CalledProcessError as e:
return '' # not a git repository

# 这个函数才是主角,用于自动选择本机模型训练的设备,并输出日志信息。
def select_device(device='', batch_size=None):
# device = 'cpu' or '0' or '0,1,2,3'
"""广泛用于train.py、test.py、detect.py等文件中
用于选择模型训练的设备 并输出日志信息
:params device: 输入的设备 device = 'cpu' or '0' or '0,1,2,3'
:params batch_size: 一个批次的图片个数
"""
# git_describe(): 返回当前文件父文件的描述信息(yolov5) date_modified(): 返回当前文件的修改日期
# s: 之后要加入logger日志的显示信息
s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
# 如果device输入为cpu cpu=True device.lower(): 将device字符串全部转为小写字母
cpu = device.lower() == 'cpu'
if cpu:
# 如果cpu=True 就强制(force)使用cpu 令torch.cuda.is_available() = False
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
# 如果输入device不为空 device=GPU 直接设置 CUDA environment variable = device 加入CUDA可用设备
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
# 检查cuda的可用性 如果不可用则终止程序
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
# 输入device为空 自行根据计算机情况选择相应设备 先看GPU 没有就CPU
# 如果cuda可用 且 输入device != cpu 则 cuda=True 反正cuda=False
cuda = not cpu and torch.cuda.is_available()
if cuda:
n = torch.cuda.device_count()
if n > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * len(s)
for i, d in enumerate(device.split(',') if device else range(n)):
# p: 每个可用显卡的相关属性
p = torch.cuda.get_device_properties(i)
# 显示信息s加上每张显卡的属性信息
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
s += 'CPU\n'

logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
# 如果cuda可用就返回第一张显卡的的名称 如: GeForce RTX 2060 反之返回CPU对应的名称
return torch.device('cuda:0' if cuda else 'cpu')


def time_synchronized():
# pytorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time()


def profile(x, ops, n=100, device=None):
# profile a pytorch module or list of modules. Example usage:
# x = torch.randn(16, 3, 640, 640) # input
# m1 = lambda x: x * torch.sigmoid(x)
# m2 = nn.SiLU()
# profile(x, [m1, m2], n=100) # profile speed over 100 iterations

device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
x = x.to(device)
x.requires_grad = True
print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
for m in ops if isinstance(ops, list) else [ops]:
m = m.to(device) if hasattr(m, 'to') else m # device
m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type
dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward
try:
flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS
except:
flops = 0

for _ in range(n):
t[0] = time_synchronized()
y = m(x)
t[1] = time_synchronized()
try:
_ = y.sum().backward()
t[2] = time_synchronized()
except: # no backward method
t[2] = float('nan')
dtf += (t[1] - t[0]) * 1000 / n # ms per op forward
dtb += (t[2] - t[1]) * 1000 / n # ms per op backward

s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')


def is_parallel(model):
return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)


def intersect_dicts(da, db, exclude=()):
# Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}


def initialize_weights(model):
for m in model.modules():
t = type(m)
if t is nn.Conv2d:
pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif t is nn.BatchNorm2d:
m.eps = 1e-3
m.momentum = 0.03
elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
m.inplace = True


def find_modules(model, mclass=nn.Conv2d):
# Finds layer indices matching module class 'mclass'
return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]


def sparsity(model):
# Return global model sparsity
a, b = 0., 0.
for p in model.parameters():
a += p.numel()
b += (p == 0).sum()
return b / a


def prune(model, amount=0.3):
# Prune model to requested global sparsity
import torch.nn.utils.prune as prune
print('Pruning model... ', end='')
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.l1_unstructured(m, name='weight', amount=amount) # prune
prune.remove(m, 'weight') # make permanent
print(' %.3g global sparsity' % sparsity(model))


def fuse_conv_and_bn(conv, bn):
# Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
fusedconv = nn.Conv2d(conv.in_channels,
conv.out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
groups=conv.groups,
bias=True).requires_grad_(False).to(conv.weight.device)

# prepare filters
w_conv = conv.weight.clone().view(conv.out_channels, -1)
w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

# prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

return fusedconv


def model_info(model, verbose=False, img_size=640):
# Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
if verbose:
print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
(i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

try: # FLOPS
from thop import profile
stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS
img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS
except (ImportError, Exception):
fs = ''

logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")


def load_classifier(name='resnet101', n=2):
# Loads a pretrained model reshaped to n-class output
model = torchvision.models.__dict__[name](pretrained=True)

# ResNet model properties
# input_size = [3, 224, 224]
# input_space = 'RGB'
# input_range = [0, 1]
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]

# Reshape output to n classes
filters = model.fc.weight.shape[1]
model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
model.fc.out_features = n
return model


def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
# scales img(bs,3,y,x) by ratio constrained to gs-multiple
if ratio == 1.0:
return img
else:
h, w = img.shape[2:]
s = (int(h * ratio), int(w * ratio)) # new size
img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
if not same_shape: # pad/crop img
h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean


def copy_attr(a, b, include=(), exclude=()):
# Copy attributes from b to a, options to only include [...] and to exclude [...]
for k, v in b.__dict__.items():
if (len(include) and k not in include) or k.startswith('_') or k in exclude:
continue
else:
setattr(a, k, v)


class ModelEMA:
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
"""

def __init__(self, model, decay=0.9999, updates=0):
# Create EMA
self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
# if next(model.parameters()).device.type != 'cpu':
# self.ema.half() # FP16 EMA
self.updates = updates # number of EMA updates
self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
for p in self.ema.parameters():
p.requires_grad_(False)

def update(self, model):
# Update EMA parameters
with torch.no_grad():
self.updates += 1
d = self.decay(self.updates)

msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
for k, v in self.ema.state_dict().items():
if v.dtype.is_floating_point:
v *= d
v += (1. - d) * msd[k].detach()

def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
# Update EMA attributes
copy_attr(self.ema, model, include, exclude)

+ 24
- 0
test/协程/协程.py View File

@@ -0,0 +1,24 @@
#-*- coding:utf8 -*-
def consumer():
r = ''
while True:
n = yield r
if not n:
return
print('[CONSUMER]Consuming %s...' % n)
r = '200 OK'

def producer(c):
# 启动生成器
c.send(None)
n = 0
while n < 5:
n = n + 1
print('[PRODUCER]Producing %s...' % n)
r = c.send(n)
print('[PRODUCER]Consumer return: %s' % r)
c.close()

if __name__ == '__main__':
c = consumer()
producer(c)

+ 13
- 0
test/协程/协程1.py View File

@@ -0,0 +1,13 @@
import asyncio

@asyncio.coroutine
def test(i):
print('test_1', i)
r = yield from asyncio.sleep(1)
print('test_2', i)

if __name__ == '__main__':
loop = asyncio.get_event_loop()
tasks = [test(i) for i in range(3)]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
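@asyncio.coroutine and yield from have been deprecated since Python 3.8 (the decorator was removed in 3.11); a sketch of the equivalent native-coroutine form:

import asyncio

async def test(i):
    print('test_1', i)
    await asyncio.sleep(1)
    print('test_2', i)

async def main():
    # Run the three coroutines concurrently, replacing loop.run_until_complete.
    await asyncio.gather(*(test(i) for i in range(3)))

if __name__ == '__main__':
    asyncio.run(main())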

+ 49
- 0
test/协程/协程2.py View File

@@ -0,0 +1,49 @@
# import asyncio
# import threading
# import time
#
#
# async def print_hello(d):
# print("aaaaaaaaaaa")
# await asyncio.sleep(d)
# print("nnnnnnnnnnnnn")
# return 11
#
# async def print_hello1(d):
# print("ssssssssssssssss")
# await asyncio.sleep(2)
# print("qqqqqqqqqqqqq")
# return 22
# def aa(d):
# loop = asyncio.new_event_loop()
# asyncio.set_event_loop(loop)
# start = time.time()
# aaaa = loop.run_until_complete(asyncio.gather(print_hello(1), print_hello(2)))
# print(aaaa)
# # aa = loop.run_until_complete(asyncio.wait(tasks1))
# # asyncio.gather()
# # print(aa[0].pop().result())
# # asyncio.run(print_hello())
# # asyncio.run(print_hello1())
# # async def main():
# # task = asyncio.create_task(print_hello())
# # task1 = asyncio.create_task(print_hello1())
# # # await task
# # # await task1
# # asyncio.run(main())
# print("ddddddddddddddddddddddd", time.time() - start)
#
# threading.Thread(target=aa, args=(1,)).start()
# # threading.Thread(target=aa, args=(loop, 2)).start()
# # threading.Thread(target=aa, args=(loop, 2)).start()
# # loop.close()
# # loop.close()
# # 打印结果
# # cccccccccccccccccccccc
# # aaaaaaaaaaa
# # nnnnnnnnnnnnn
# # ssssssssssssssss
# # qqqqqqqqqqqqq
# # ddddddddddddddddddddddd
a = lambda x: print(x)
a(1)

+ 10
- 0
test/协程/协程3.py View File

@@ -0,0 +1,10 @@
import time
from concurrent.futures import ThreadPoolExecutor
def aa(a):
print("aaaaaa")
time.sleep(2)

with ThreadPoolExecutor(max_workers=2) as t:
while True:
time.sleep(10)
t.submit(aa, 1)

+ 0
- 0
test/字典/__init__.py View File


+ 3
- 0
test/字典/字典.py View File

@@ -0,0 +1,3 @@
aa = {"aa": "aa", "a1": "a1", "a2": "a2"}
print(aa.pop("aa"))
print(aa)

test/same1.py → test/水印/same1.py View File


test/same2.py → test/水印/same2.py View File


test/same3.py → test/水印/same3.py View File


+ 18
- 0
test/装饰器/装饰器.py View File

@@ -0,0 +1,18 @@
import functools


def log(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator

@log('execute')
def now():
print('2015-3-25')

now = log('execute')(now)
print(now.__name__)

+ 5
- 3
util/AliyunSdk.py View File

@@ -1,6 +1,5 @@
import oss2
import time
from voduploadsdk.AliyunVodUtils import *
from exception.CustomerException import ServiceException
from enums.ExceptionEnum import ExceptionType
import json
@@ -27,14 +26,17 @@ class AliyunOssSdk():
self.content["aliyun"]["oss"]["bucket"],
connect_timeout=self.content["aliyun"]["oss"]["connect_timeout"])

def upload_file(self, updatePath, fileByte):
async def put_object(self, updatePath, fileByte):
self.bucket.put_object(updatePath, fileByte)

async def upload_file(self, updatePath, fileByte):
self.logger.info("开始上传文件到oss, requestId:{}", self.requestId)
self.get_oss_bucket()
MAX_RETRIES = 3
retry_count = 0
while True:
try:
self.bucket.put_object(updatePath, fileByte)
await self.put_object(updatePath, fileByte)
self.logger.info("上传文件到oss成功! requestId:{}", self.requestId)
break
except Exception as e:
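Note that put_object above is declared async but still calls the blocking oss2 API directly, so it will stall the event loop while the upload runs. One possible sketch (an assumption, not necessarily the project's intent) that offloads the call to a worker thread:

import asyncio

async def put_object(self, updatePath, fileByte):
    # Run the blocking oss2 call in the default executor so the loop stays responsive.
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, self.bucket.put_object, updatePath, fileByte)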

+ 65
- 34
util/Cv2Utils.py View File

@@ -10,6 +10,7 @@ from loguru import logger
from exception.CustomerException import ServiceException
from enums.ExceptionEnum import ExceptionType


class Cv2Util():

def __init__(self, pullUrl, pushUrl=None, orFilePath=None, aiFilePath=None, requestId=None):
@@ -24,13 +25,28 @@ class Cv2Util():
self.fps = None
self.width = None
self.height = None
self.wah = None
self.wh = None
self.h = None
self.hn = None
self.w = None
self.all_frames = None
self.bit_rate = None
self.pull_p = None
self.requestId = requestId
self.p_push_retry_num = 0

def getFrameConfig(self, fps, width, height):
if self.fps is None:
self.fps = fps
self.width = width
self.height = height
self.wh = int(width * height * 3 // 8)
self.wah = '%sx%s' % (int(self.width/2), int(self.height/2))
self.h = int(self.height/2 * 3 // 2)
self.w = int(self.width/2)
self.hn = int(self.height/2)

'''
获取视频信息
'''
@@ -60,7 +76,11 @@ class Cv2Util():
if height:
self.height = int(height)
if width is not None and height is not None:
self.wh = int(width * height * 3 / 4)
self.wh = int(width * height * 3 // 8)
self.wah = '%sx%s' % (int(self.width/2), int(self.height/2))
self.h = int(self.height/2 * 3 // 2)
self.w = int(self.width/2)
self.hn = int(self.height/2)
if nb_frames:
self.all_frames = int(nb_frames)
if fps:
@@ -78,25 +98,19 @@ class Cv2Util():
except Exception as e:
logger.exception("获取视频信息异常:{}, requestId:{}", e, self.requestId)



'''
拉取视频
'''

def build_pull_p(self):
try:
if self.width is None or self.height is None:
if self.wah is None:
return
if self.pull_p:
logger.info("重试, 关闭拉流管道, requestId:{}", self.requestId)
self.pull_p.stdout.close()
self.pull_p.terminate()
self.pull_p.wait()
if self.pullUrl is None:
logger.error("拉流地址不能为空, requestId:{}", self.requestId)
raise ServiceException(ExceptionType.PULL_STREAM_URL_EXCEPTION.value[0],
ExceptionType.PULL_STREAM_URL_EXCEPTION.value[1])
# command = ['ffmpeg',
# # '-b:v', '3000k',
# '-i', self.pullUrl,
@@ -106,12 +120,11 @@ class Cv2Util():
# # '-s', "{}x{}".format(int(width), int(height)),
# '-an',
# '-']
aa = {'loglevel': 'error'}
input_config = {'c:v': 'h264_cuvid', 'resize': self.wah}
process = (
ffmpeg
.input(self.pullUrl, **aa)
.output('pipe:', format='rawvideo', pix_fmt='bgr24', loglevel='error',
s="{}x{}".format(int(self.width / 2), int(self.height / 2)))
.input(self.pullUrl, **input_config)
.output('pipe:', format='rawvideo') # pix_fmt='bgr24'
.overwrite_output()
.global_args('-an')
.run_async(pipe_stdout=True)
@@ -138,7 +151,10 @@ class Cv2Util():
# ExceptionType.PULL_PIPELINE_INIT_EXCEPTION.value[1])
in_bytes = self.pull_p.stdout.read(self.wh)
if in_bytes is not None and len(in_bytes) > 0:
result = (np.frombuffer(in_bytes, np.uint8).reshape([int(self.height / 2), int(self.width / 2), 3]))
# result = (np.frombuffer(in_bytes, np.uint8).reshape([int(self.height), int(self.width), 3]))
img = (np.frombuffer(in_bytes, np.uint8)).reshape((self.h, self.w))
result = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_NV12)
# result = cv2.resize(result, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
except ServiceException as s:
logger.exception("读流异常: {}, requestId:{}", s, self.requestId)
except Exception as e:
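
Reading the stream now means pulling exactly one NV12 frame's worth of bytes from the pipe and letting OpenCV do the colorspace conversion. A self-contained sketch of that step, with the parameter names assumed:

import cv2
import numpy as np

def read_nv12_frame(pipe_stdout, w, nv12_rows):
    # Read one NV12 frame (nv12_rows x w bytes) from the ffmpeg rawvideo pipe
    # and convert it to a BGR image; returns None when the stream ends.
    in_bytes = pipe_stdout.read(w * nv12_rows)
    if not in_bytes or len(in_bytes) < w * nv12_rows:
        return None
    yuv = np.frombuffer(in_bytes, np.uint8).reshape((nv12_rows, w))
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_NV12)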
@@ -243,23 +259,29 @@ class Cv2Util():
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'bgr24',
'-thread_queue_size', '16',
# '-s', "{}x{}".format(self.width * 2, self.height),
'-s', "{}x{}".format(int(self.width), int(self.height / 2)),
'-s', "{}x{}".format(int(self.width), int(self.hn)),
'-r', str(self.fps),
'-i', '-', # read the input from stdin
'-g', str(self.fps),
# '-maxrate', '15000k',
# '-profile:v', 'high',
'-b:v', '4000k',
# '-b:v', '4000k',
# '-crf', '18',
'-bufsize', '4000k',
'-c:v', 'libx264', # video encoder
'-tune', 'zerolatency', # speed up encoding
'-sc_threshold', '0',
'-rc:v', 'vbr',
'-cq:v', '30',
'-qmin', '30',
'-qmax', '30',
'-c:v', 'h264_nvenc', #
# '-bufsize', '4000k',
# '-c:v', 'libx264', # video encoder
# '-tune', 'zerolatency', # speed up encoding
# '-sc_threshold', '0',
'-pix_fmt', 'yuv420p',
"-an",
# '-flvflags', 'no_duration_filesize',
'-preset', 'veryfast', # output quality/speed trade-off; affects how fast the file is produced. Available values: ultrafast,
'-preset', 'fast', # output quality/speed trade-off; affects how fast the file is produced. Available values: ultrafast,
# superfast, veryfast, faster, fast, medium, slow, slower, veryslow.
'-f', 'flv',
self.pushUrl]
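
The push command moves from CPU libx264 with a fixed bitrate to GPU h264_nvenc pinned to a constant quantizer (`-rc:v vbr -cq:v 30 -qmin 30 -qmax 30`), trading bitrate control for stable quality and lower CPU load. Since h264_nvenc only exists on builds with NVIDIA support, a deployment might probe for it first; a hypothetical helper:

import subprocess

def has_nvenc():
    # List the encoders this ffmpeg build knows about and look for h264_nvenc.
    out = subprocess.run(["ffmpeg", "-hide_banner", "-encoders"],
                         capture_output=True, text=True).stdout
    return "h264_nvenc" in out

encoder = "h264_nvenc" if has_nvenc() else "libx264"  # fall back to CPU encoding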
@@ -306,11 +328,14 @@ class Cv2Util():
except Exception as e:
logger.exception("初始化p管道异常:{}, requestId:{}", e, self.requestId)

def push_stream(self, frame):
async def push_stream_write(self, frame):
self.p.stdin.write(frame.tostring())

async def push_stream(self, frame):
if self.p is None:
self.build_p()
try:
self.p.stdin.write(frame.tostring())
await self.push_stream_write(frame)
except Exception as ex:
logger.exception("推流进管道异常:{}, requestId: {}", ex, self.requestId)
current_retry_num = 0
@@ -324,7 +349,7 @@ class Cv2Util():
self.p_push_retry_num += 1
time.sleep(10)
self.build_p()
self.p.stdin.write(frame.tostring())
await self.push_stream_write(frame)
logger.info("构建p管道重试成功, 当前重试次数: {}, requestId: {}", current_retry_num,
self.requestId)
break
@@ -339,21 +364,27 @@ class Cv2Util():
raise ServiceException(ExceptionType.PUSH_STREAM_URL_E_EXCEPTION.value[0],
ExceptionType.PUSH_STREAM_URL_E_EXCEPTION.value[1])
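
`push_stream` now funnels every write through the awaitable `push_stream_write` and rebuilds the pipe between attempts. The retry strategy distills to the pattern below, a sketch with hypothetical names and limits:

import time

def write_with_retry(build_pipe, write_frame, frame, max_retries=3, wait_s=10):
    # Try the write; on failure, wait, rebuild the ffmpeg pipe, and try again,
    # re-raising once the retry budget is exhausted.
    for attempt in range(1, max_retries + 1):
        try:
            write_frame(frame)
            return
        except Exception:
            if attempt == max_retries:
                raise
            time.sleep(wait_s)
            build_pipe()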

def video_write(self, or_frame, ai_frame):
async def video_frame_write(self, or_frame, ai_frame):
if or_frame is not None:
self.or_video_file.write(or_frame)
if ai_frame is not None:
self.ai_video_file.write(ai_frame)

async def video_write(self, or_frame, ai_frame):
try:
self.build_write()
if or_frame is not None and len(or_frame) > 0:
self.or_video_file.write(or_frame)
await self.video_frame_write(or_frame, None)
if ai_frame is not None and len(ai_frame) > 0:
self.ai_video_file.write(ai_frame)
await self.video_frame_write(None, ai_frame)
except Exception as ex:
ai_retry_num = 0
while True:
try:
if or_frame is not None and len(or_frame) > 0:
self.or_video_file.write(or_frame)
await self.video_frame_write(or_frame, None)
if ai_frame is not None and len(ai_frame) > 0:
self.ai_video_file.write(ai_frame)
await self.video_frame_write(None, ai_frame)
logger.info("重新写入离线分析后视频到本地, 当前重试次数: {}, requestId: {}", ai_retry_num,
self.requestId)
break
@@ -373,13 +404,13 @@ class Cv2Util():
ExceptionType.VIDEO_CONFIG_EXCEPTION.value[1])
if self.orFilePath is not None and self.or_video_file is None:
self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps,
(int(self.width / 2), int(self.height / 2)))
(int(self.w), int(self.hn)))
if self.or_video_file is None:
raise ServiceException(ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[0],
ExceptionType.OR_WRITE_OBJECT_EXCEPTION.value[1])
if self.aiFilePath is not None and self.ai_video_file is None:
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps,
(int(self.width), int(self.height / 2)))
(int(self.width), int(self.hn)))
if self.ai_video_file is None:
raise ServiceException(ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[0],
ExceptionType.AI_WRITE_OBJECT_EXCEPTION.value[1])
@@ -391,10 +422,10 @@ class Cv2Util():
raise e

def video_merge(self, frame1, frame2):
frameLeft = cv2.resize(frame1, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
frameRight = cv2.resize(frame2, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
frame_merge = np.hstack((frameLeft, frameRight))
# frame_merge = np.hstack((frame1, frame2))
# frameLeft = cv2.resize(frame1, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
# frameRight = cv2.resize(frame2, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
# frame_merge = np.hstack((frameLeft, frameRight))
frame_merge = np.hstack((frame1, frame2))
return frame_merge
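
Dropping the two `cv2.resize` calls in `video_merge` assumes both inputs already arrive at the same half-resolution size; `np.hstack` raises if heights or dtypes differ. A quick shape check under that assumption:

import numpy as np

left = np.zeros((540, 960, 3), np.uint8)    # original half-resolution frame (hn x w)
right = np.zeros((540, 960, 3), np.uint8)   # AI-annotated frame, same shape
merged = np.hstack((left, right))
assert merged.shape == (540, 1920, 3)       # (hn, width, 3): matches the ai writer size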

def getP(self):

+ 8
- 1
util/ImageUtils.py View File

@@ -1,6 +1,8 @@
import time
from io import BytesIO

import cv2
import requests
from PIL import Image, ImageDraw, ImageFont
import numpy as np

@@ -180,7 +182,12 @@ def Hamming_distance(hash1,hash2):
num += 1
return num


def url2Array(url):
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image1 = np.array(image)
img_bgr = cv2.cvtColor(image1, cv2.COLOR_RGB2BGR)
return img_bgr

if __name__ == '__main__':
# img = cv2.imread("../test/a.jpg", -1)
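
The new `url2Array` helper fetches an image over HTTP and flips it to OpenCV's BGR channel order. A hardened variant might bound the request and normalize the color mode before conversion; a sketch, with the timeout value an assumption:

from io import BytesIO

import cv2
import numpy as np
import requests
from PIL import Image

def url2array_safe(url, timeout=10):
    # Fail fast on slow hosts and HTTP errors, and force RGB so RGBA or
    # greyscale sources do not break the RGB->BGR conversion.
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    image = Image.open(BytesIO(response.content)).convert("RGB")
    return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)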

+ 98
- 57
util/KafkaUtils.py View File

@@ -1,3 +1,5 @@
import time

from kafka import KafkaProducer, KafkaConsumer, TopicPartition, OffsetAndMetadata
from kafka.errors import kafka_errors
import json
@@ -7,52 +9,66 @@ from loguru import logger
# Producer
class CustomerKafkaProducer():

def __init__(self, content, requestId):
def __init__(self, content):
self.content = content
self.configs = self.content["kafka"][self.content["dsp"]["active"]]["producer"]
self.customerProducer = None
self.requestId = requestId
self.get_producer()

# fetch (lazily create) the kafka producer
def get_producer(self):
if self.customerProducer is None:
logger.info("配置kafka生产者, requestId:{}", self.requestId)
self.customerProducer = KafkaProducer(bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
acks=self.configs["acks"],
retries=self.configs["retries"],
linger_ms=self.configs["linger_ms"],
retry_backoff_ms=self.configs["retry_backoff_ms"],
max_in_flight_requests_per_connection=self.configs[
"max_in_flight_requests_per_connection"],
key_serializer=lambda m: json.dumps(m).encode('utf-8'),
value_serializer=lambda m: json.dumps(m).encode('utf-8'))
logger.info("配置kafka生产者!")
self.customerProducer = KafkaProducer(
bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
acks=self.configs["acks"],
retries=self.configs["retries"],
linger_ms=self.configs["linger_ms"],
retry_backoff_ms=self.configs["retry_backoff_ms"],
max_in_flight_requests_per_connection=self.configs[
"max_in_flight_requests_per_connection"],
key_serializer=lambda m: json.dumps(m).encode('utf-8'),
value_serializer=lambda m: json.dumps(m).encode('utf-8'))

# mode 1: asynchronous send 2: synchronous send
# def on_send_success(record_metadata): success callback
# def on_send_error(excp): failure callback
def sender(self, topic, key, message, mode, customer_send_success=None, customer_send_error=None):
self.get_producer()
logger.info("kafka发送信息,topic:{}|key:{}|message:{}|mode:{}|requestId:{}", topic, key, message, mode, self.requestId)
if mode == 1:
if not customer_send_success:
customer_send_success = CustomerKafkaProducer.on_send_success
if not customer_send_error:
customer_send_error = CustomerKafkaProducer.on_send_error
self.customerProducer.send(topic=topic, key=key, value=message).add_callback(
customer_send_success, self.requestId).add_errback(customer_send_error, self.requestId)
if mode == 2:
retry_send_num = 0
while True:
try:
self.customerProducer.send(topic=topic, key=key, value=message).get(timeout=30)
logger.info("kafka同步发送信息成功, requestId:{}", self.requestId)
except kafka_errors as e:
logger.exception("kafka同步发送消息异常: {}, requestId:{}", e, self.requestId)
raise e
self.get_producer()
logger.info("kafka发送信息,topic:{}|key:{}|message:{}|mode:{}|requestId:{}", topic, key, message, mode,
message.get("request_id"))
if mode == 1:
if not customer_send_success:
customer_send_success = CustomerKafkaProducer.on_send_success
if not customer_send_error:
customer_send_error = CustomerKafkaProducer.on_send_error
self.customerProducer.send(topic=topic, key=key, value=message) \
.add_callback(customer_send_success, message.get("request_id")) \
.add_errback(customer_send_error, message.get("request_id"))
if mode == 2:
try:
self.customerProducer.send(topic=topic, key=key, value=message).get(timeout=30)
logger.info("kafka同步发送信息成功, requestId:{}", message.get("request_id"))
except kafka_errors as e:
logger.exception("kafka同步发送消息异常: {}, requestId:{}", e, message.get("request_id"))
raise e
break
except Exception as e:
logger.exception("kafka发送消息异常: {}, requestId:{}", e, message.get("request_id"))
self.customerProducer = None
retry_send_num += 1
if retry_send_num > 3:
logger.exception("kafka发送消息重试失败: {}, requestId:{}", e, message.get("request_id"))
raise e
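
With the requestId now taken from each message instead of the constructor, one producer instance can serve all requests. A minimal usage sketch, assuming a loaded config dict `content` and a hypothetical topic name:

producer = CustomerKafkaProducer(content)
msg = {"request_id": "req-123", "status": "running"}               # hypothetical payload
producer.sender("dsp-alg-result", msg["request_id"], msg, mode=2)  # mode 2: synchronous
producer.close_producer()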


def close_producer(self):
self.customerProducer.flush()
self.customerProducer.close()
logger.info("kafka生产者关闭完成, requestId:{}", self.requestId)
logger.info("kafka生产者关闭完成!")

def on_send_success(requestId, record_metadata):
logger.info("kafka异步发送信息成功,topic:{}|partition:{}|offset:{}|requestId:{}", record_metadata.topic,
@@ -65,48 +81,73 @@ class CustomerKafkaProducer():
# Consumer
class CustomerKafkaConsumer():

def __init__(self, content):
def __init__(self, content, topics=()):
logger.info("初始化消费者")
self.content = content
self.configs = self.content["kafka"][self.content["dsp"]["active"]]["consumer"]
self.customerConsumer = None
self.topics = topics
self.subscribe()
logger.info("初始化消费者完成")

def subscribe(self, topics=()):
def subscribe(self):
if self.customerConsumer is None:
logger.info("获取消费者!")
self.customerConsumer = KafkaConsumer(bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
client_id=self.configs["client_id"],
group_id=self.configs["group_id"],
auto_offset_reset=self.configs["auto_offset_reset"],
enable_auto_commit=self.configs["enable_auto_commit"],
max_poll_records=self.configs["max_poll_records"],
value_deserializer=lambda m: json.loads(m.decode('utf-8')))
logger.info("kafka生产者订阅topic:{}", topics)
if topics is None or len(topics) == 0:
logger.error("消费者订阅topic不能为空!")
raise Exception("消费者订阅topic不能为空!")
# manually assign partitions
customer_partition = []
for topic in topics:
for p in self.content["kafka"][self.content["dsp"]["active"]][topic]["partition"]:
customer_partition.append(TopicPartition(topic, p))
self.customerConsumer.assign(customer_partition)
self.customerConsumer = KafkaConsumer(
bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
client_id=self.configs["client_id"],
group_id=self.configs["group_id"],
auto_offset_reset=self.configs["auto_offset_reset"],
enable_auto_commit=self.configs["enable_auto_commit"],
max_poll_records=self.configs["max_poll_records"],
value_deserializer=lambda m: json.loads(m.decode('utf-8')))
logger.info("kafka生产者订阅topic:{}", self.topics)
# if self.topics is None or len(self.topics) == 0:
# logger.error("消费者订阅topic不能为空!")
# raise Exception("消费者订阅topic不能为空!")
# # manually assign partitions
# customer_partition = []
# for topic in self.topics:
# for p in self.content["kafka"][self.content["dsp"]["active"]][topic]["partition"]:
# customer_partition.append(TopicPartition(topic, p))
# self.customerConsumer.assign(customer_partition)
# automatic subscription
# self.customerConsumer.subscribe(topics=topics)
self.customerConsumer.subscribe(topics=self.topics)
logger.info("kafka生产者订阅topic完成")

def commit_offset(self, message, topics=()):
self.subscribe(topics)
logger.info("消费者开始提交offset,topic:{}|offset:{}|partition:{}", message.topic, message.offset + 1,
message.partition)
tp = TopicPartition(topic=message.topic, partition=message.partition)
self.customerConsumer.commit(offsets={tp: (OffsetAndMetadata(message.offset + 1, None))})
logger.info("消费者提交offset完成,topic:{}|offset:{}|partition:{}", message.topic, message.offset + 1,
message.partition)
def poll(self):
msg = None
try:
self.subscribe()
msg = self.customerConsumer.poll()
except Exception as e:
self.customerConsumer = None
logger.exception("消费者拉取消息异常: {}", e)
return msg

def commit_offset(self, message):
retry_num = 1
while True:
try:
self.subscribe()
logger.info("消费者开始提交offset,topic:{}|offset:{}|partition:{}", message.topic, message.offset + 1,
message.partition)
tp = TopicPartition(topic=message.topic, partition=message.partition)
self.customerConsumer.commit(offsets={tp: (OffsetAndMetadata(message.offset + 1, None))})
logger.info("消费者提交offset完成,topic:{}|offset:{}|partition:{}", message.topic, message.offset + 1,
message.partition)
break
except Exception as e:
self.customerConsumer = None
logger.exception("消费者提交offset异常: {}, 重试次数: {}", e, retry_num)
time.sleep(1)
retry_num += 1
if retry_num > 3:
logger.exception("消费者提交offset重试失败: {}", e)
break
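
`poll()` returns the kafka-python mapping of TopicPartition to record batches (or None after an error), so a consumption loop pairs it with the new self-healing `commit_offset`. A sketch, with the topic and handler hypothetical:

consumer = CustomerKafkaConsumer(content, topics=("dsp-alg-task",))
while True:
    records = consumer.poll()
    if not records:
        continue
    for tp, messages in records.items():
        for message in messages:
            handle(message.value)            # hypothetical message handler
            consumer.commit_offset(message)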

# if __name__=="__main__":
# try:
# 1/0
# except Exception as e:
# logger.exception("aaaaa:{} {}", e, "11111")
# logger.exception("aaaaa:{} {}", e, "11111")
