zsl #14

Merged
jiangchaoqing merged 7 commits from zsl into master 2025-08-25 11:20:50 +08:00
11 changed files with 605 additions and 333 deletions

View File

@ -16,7 +16,6 @@ success_progess = "1.0000"
width = 1400 width = 1400
COLOR = ( COLOR = (
[0, 0, 255],
[255, 0, 0], [255, 0, 0],
[211, 0, 148], [211, 0, 148],
[0, 127, 0], [0, 127, 0],
@ -35,7 +34,8 @@ COLOR = (
[8, 101, 139], [8, 101, 139],
[171, 130, 255], [171, 130, 255],
[139, 112, 74], [139, 112, 74],
[205, 205, 180]) [205, 205, 180],
[0, 0, 255],)
ONLINE = "online" ONLINE = "online"
OFFLINE = "offline" OFFLINE = "offline"

View File

@ -3,6 +3,7 @@ from concurrent.futures import ThreadPoolExecutor
from threading import Thread from threading import Thread
from time import sleep, time from time import sleep, time
from traceback import format_exc from traceback import format_exc
import numpy as np
from loguru import logger from loguru import logger
import cv2 import cv2
@ -14,18 +15,18 @@ from util.AliyunSdk import AliyunOssSdk
from util.MinioSdk import MinioSdk from util.MinioSdk import MinioSdk
from util import TimeUtils from util import TimeUtils
from enums.AnalysisStatusEnum import AnalysisStatus from enums.AnalysisStatusEnum import AnalysisStatus
from util.PlotsUtils import draw_painting_joint, draw_name_ocr, draw_name_crowd from util.PlotsUtils import draw_painting_joint, draw_name_ocr, draw_name_crowd,draw_transparent_red_polygon
from util.QueUtil import put_queue, get_no_block_queue, clear_queue from util.QueUtil import put_queue, get_no_block_queue, clear_queue
import io import io
from util.LocationUtils import locate_byMqtt from util.LocationUtils import locate_byMqtt
class FileUpload(Thread): class FileUpload(Thread):
__slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg', '_mqtt_list') __slots__ = ('_fb_queue', '_context', '_image_queue', '_analyse_type', '_msg')
def __init__(self, *args): def __init__(self, *args):
super().__init__() super().__init__()
self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type, self._mqtt_list = args self._fb_queue, self._context, self._msg, self._image_queue, self._analyse_type = args
self._storage_source = self._context['service']['storage_source'] self._storage_source = self._context['service']['storage_source']
self._algStatus = False # 默认关闭 self._algStatus = False # 默认关闭
@ -64,16 +65,29 @@ class ImageFileUpload(FileUpload):
模型编号modeCode 模型编号modeCode
检测目标detectTargetCode 检测目标detectTargetCode
''' '''
print('*' * 100, ' mqtt_list:', len(self._mqtt_list)) aFrame = frame.copy()
igH, igW = aFrame.shape[0:2]
model_info = [] model_info = []
mqttPares= det_xywh['mqttPares']
border = None
gps = [None, None]
camParas = None
if mqttPares is not None:
if mqttPares[0] == 1:
border = mqttPares[1]
elif mqttPares[0] == 0:
camParas = mqttPares[1]
if border is not None:
aFrame = draw_transparent_red_polygon(aFrame, np.array(border, np.int32), alpha=0.25)
det_xywh.pop('mqttPares')
# 更加模型编码解析数据 # 更加模型编码解析数据
for code, det_list in det_xywh.items(): for code, det_list in det_xywh.items():
if len(det_list) > 0: if len(det_list) > 0:
for cls, target_list in det_list.items(): for cls, target_list in det_list.items():
if len(target_list) > 0: if len(target_list) > 0:
aFrame = frame.copy()
for target in target_list: for target in target_list:
if camParas is not None:
gps = locate_byMqtt(target[1], igW, igH, camParas, outFormat='wgs84')
# 自研车牌模型判断 # 自研车牌模型判断
if ModelType.CITY_CARPLATE_MODEL.value[1] == str(code): if ModelType.CITY_CARPLATE_MODEL.value[1] == str(code):
draw_name_ocr(target[1], aFrame, target[4]) draw_name_ocr(target[1], aFrame, target[4])
@ -82,15 +96,7 @@ class ImageFileUpload(FileUpload):
draw_name_crowd(target[1], aFrame, target[4]) draw_name_crowd(target[1], aFrame, target[4])
else: else:
draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config, draw_painting_joint(target[1], aFrame, target[3], target[2], target[4], font_config,
target[5]) target[5],border)
igH, igW = aFrame.shape[0:2]
if len(self._mqtt_list) >= 1:
# camParas = self._mqtt_list[0]['data']
camParas = self._mqtt_list[0]
gps = locate_byMqtt(target[1], igW, igH, camParas, outFormat='wgs84')
else:
gps = [None, None]
model_info.append( model_info.append(
{"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame, 'gps': gps}) {"modelCode": str(code), "detectTargetCode": str(cls), "aFrame": aFrame, 'gps': gps})
if len(model_info) > 0: if len(model_info) > 0:
@ -133,7 +139,6 @@ class ImageFileUpload(FileUpload):
# 获取队列中的消息 # 获取队列中的消息
image_msg = get_no_block_queue(image_queue) image_msg = get_no_block_queue(image_queue)
if image_msg is not None: if image_msg is not None:
if image_msg[0] == 2: if image_msg[0] == 2:
logger.info("图片上传线程收到命令:{}, requestId: {}", image_msg[1], request_id) logger.info("图片上传线程收到命令:{}, requestId: {}", image_msg[1], request_id)
if 'stop' == image_msg[1]: if 'stop' == image_msg[1]:

View File

@ -64,6 +64,7 @@ class IntelligentRecognitionProcess(Process):
self._analyse_type, progress=init_progess), timeout=2, is_ex=True) self._analyse_type, progress=init_progess), timeout=2, is_ex=True)
self._storage_source = self._context['service']['storage_source'] self._storage_source = self._context['service']['storage_source']
self._algStatus = False self._algStatus = False
def sendEvent(self, eBody): def sendEvent(self, eBody):
put_queue(self.event_queue, eBody, timeout=2, is_ex=True) put_queue(self.event_queue, eBody, timeout=2, is_ex=True)
@ -127,6 +128,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
or_url = upload_video_thread_or.get_result() or_url = upload_video_thread_or.get_result()
ai_url = upload_video_thread_ai.get_result() ai_url = upload_video_thread_ai.get_result()
return or_url, ai_url return or_url, ai_url
''' '''
@staticmethod @staticmethod
def upload_video(base_dir, env, request_id, orFilePath, aiFilePath): def upload_video(base_dir, env, request_id, orFilePath, aiFilePath):
@ -306,7 +308,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
else: else:
model_param = model_conf[1] model_param = model_conf[1]
# (modeType, model_param, allowedList, names, rainbows) # (modeType, model_param, allowedList, names, rainbows)
MODEL_CONFIG[code][2](frame_list[0].shape[1], frame_list[0].shape[0], MODEL_CONFIG[code][2](frame_list[0][0].shape[1], frame_list[0][0].shape[0],
model_conf) model_conf)
if draw_config.get("font_config") is None: if draw_config.get("font_config") is None:
draw_config["font_config"] = model_param['font_config'] draw_config["font_config"] = model_param['font_config']
@ -322,7 +324,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
# print_cpu_status(requestId=request_id,lineNum=inspect.currentframe().f_lineno) # print_cpu_status(requestId=request_id,lineNum=inspect.currentframe().f_lineno)
# 多线程并发处理, 经过测试两个线程最优 # 多线程并发处理, 经过测试两个线程最优
det_array = [] det_array = []
for i, frame in enumerate(frame_list): for i, [frame,_] in enumerate(frame_list):
det_result = t.submit(self.obj_det, self, model_array, frame, task_status, det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
frame_index_list[i], tt, request_id) frame_index_list[i], tt, request_id)
det_array.append(det_result) det_array.append(det_result)
@ -469,6 +471,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
ai_url = upload_video_thread_ai.get_result() ai_url = upload_video_thread_ai.get_result()
return ai_url return ai_url
''' '''
@staticmethod @staticmethod
def ai_normal_dtection(model, frame, request_id): def ai_normal_dtection(model, frame, request_id):
model_conf, code = model model_conf, code = model
@ -634,7 +637,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
else: else:
model_param = model_conf[1] model_param = model_conf[1]
# (modeType, model_param, allowedList, names, rainbows) # (modeType, model_param, allowedList, names, rainbows)
MODEL_CONFIG[code][2](frame_list[0].shape[1], frame_list[0].shape[0], MODEL_CONFIG[code][2](frame_list[0][0].shape[1], frame_list[0][0].shape[0],
model_conf) model_conf)
if draw_config.get("font_config") is None: if draw_config.get("font_config") is None:
draw_config["font_config"] = model_param['font_config'] draw_config["font_config"] = model_param['font_config']
@ -648,7 +651,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
det_array = [] det_array = []
for i, frame in enumerate(frame_list): for i, [frame,_] in enumerate(frame_list):
det_result = t.submit(self.obj_det, self, model_array, frame, task_status, det_result = t.submit(self.obj_det, self, model_array, frame, task_status,
frame_index_list[i], tt, request_id) frame_index_list[i], tt, request_id)
det_array.append(det_result) det_array.append(det_result)
@ -1051,6 +1054,8 @@ class PhotosIntelligentRecognitionProcess(Process):
ai_result_list = p_result[2] ai_result_list = p_result[2]
for ai_result in ai_result_list: for ai_result in ai_result_list:
box, score, cls = xywh2xyxy2(ai_result) box, score, cls = xywh2xyxy2(ai_result)
if ModelType.CITY_FIREAREA_MODEL.value[1] == str(code):
box.append(ai_result[-1])
# 如果检测目标在识别任务中,继续处理 # 如果检测目标在识别任务中,继续处理
if cls in allowedList: if cls in allowedList:
label_array = label_arraylist[cls] label_array = label_arraylist[cls]
@ -1208,7 +1213,8 @@ class PhotosIntelligentRecognitionProcess(Process):
image_thread.setDaemon(True) image_thread.setDaemon(True)
image_thread.start() image_thread.start()
return image_thread return image_thread
def check_ImageUrl_Vaild(self,url,timeout=1):
def check_ImageUrl_Vaild(self, url, timeout=1):
try: try:
# 发送 HTTP 请求,尝试访问图片 # 发送 HTTP 请求,尝试访问图片
response = requests.get(url, timeout=timeout) # 设置超时时间为 10 秒 response = requests.get(url, timeout=timeout) # 设置超时时间为 10 秒
@ -1239,8 +1245,7 @@ class PhotosIntelligentRecognitionProcess(Process):
ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[0], ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[0],
ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[1]), timeout=2) ExceptionType.URL_ADDRESS_ACCESS_FAILED.value[1]), timeout=2)
return return
with ThreadPoolExecutor(max_workers=1) as t: with ThreadPoolExecutor(max_workers=1) as t:
try: try:
@ -1318,6 +1323,7 @@ class ScreenRecordingProcess(Process):
recording_feedback(self._msg["request_id"], RecordingStatus.RECORDING_WAITING.value[0]), recording_feedback(self._msg["request_id"], RecordingStatus.RECORDING_WAITING.value[0]),
timeout=1, is_ex=True) timeout=1, is_ex=True)
self._storage_source = self._context['service']['storage_source'] self._storage_source = self._context['service']['storage_source']
def sendEvent(self, result): def sendEvent(self, result):
put_queue(self._event_queue, result, timeout=2, is_ex=True) put_queue(self._event_queue, result, timeout=2, is_ex=True)
@ -1495,6 +1501,7 @@ class ScreenRecordingProcess(Process):
upload_video_thread_ai.start() upload_video_thread_ai.start()
or_url = upload_video_thread_ai.get_result() or_url = upload_video_thread_ai.get_result()
return or_url return or_url
''' '''
@staticmethod @staticmethod
def upload_video(base_dir, env, request_id, orFilePath): def upload_video(base_dir, env, request_id, orFilePath):

View File

@ -1,142 +1,163 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from threading import Thread from threading import Thread
from time import sleep, time from time import sleep, time
from traceback import format_exc from traceback import format_exc
from loguru import logger from loguru import logger
from common.YmlConstant import mqtt_yml_path from common.YmlConstant import mqtt_yml_path
from util.RWUtils import getConfigs from util.RWUtils import getConfigs
from common.Constant import init_progess from common.Constant import init_progess
from enums.AnalysisStatusEnum import AnalysisStatus from enums.AnalysisStatusEnum import AnalysisStatus
from entity.FeedBack import message_feedback from entity.FeedBack import message_feedback
from enums.ExceptionEnum import ExceptionType from enums.ExceptionEnum import ExceptionType
from exception.CustomerException import ServiceException from exception.CustomerException import ServiceException
from util.QueUtil import get_no_block_queue, put_queue, clear_queue from util.QueUtil import get_no_block_queue, put_queue, clear_queue
from multiprocessing import Process, Queue from multiprocessing import Process, Queue
import paho.mqtt.client as mqtt import paho.mqtt.client as mqtt
import json,os import json,os
class PullMqtt(Thread): class PullMqtt(Thread):
__slots__ = ('__fb_queue', '__mqtt_list', '__request_id', '__analyse_type', "_context") __slots__ = ('__fb_queue', '__mqtt_list', '__request_id', '__analyse_type', "_context" ,'__business')
def __init__(self, *args): def __init__(self, *args):
super().__init__() super().__init__()
self.__fb_queue, self.__mqtt_list, self.__request_id, self.__analyse_type, self._context = args self.__fb_queue, self.__mqtt_list, self.__request_id, self.__analyse_type, self._context, self.__business = args
base_dir, env = self._context["base_dir"], self._context["env"] base_dir, env = self._context["base_dir"], self._context["env"]
self.__config = getConfigs(os.path.join(base_dir, mqtt_yml_path % env)) self.__config = getConfigs(os.path.join(base_dir, mqtt_yml_path % env))
if self.__business == 0:
self.__broker = self.__config["broker"] self.__broker = self.__config['location']["broker"]
self.__port = self.__config["port"] self.__port = self.__config['location']["port"]
self.__topic = self.__config["topic"] self.__topic = self.__config['location']["topic"]
self.__lengthMqttList = self.__config["length"] elif self.__business == 1:
self.__broker = self.__config['invade']["broker"]
self.__port = self.__config['invade']["port"]
def put_queue(self,__queue,data): self.__topic = self.__config['invade']["topic"]
if __queue.full(): self.__lengthMqttList = self.__config["length"]
a = __queue.get()
__queue.put( data,block=True, timeout=2 )
def on_connect(self,client,userdata,flags,rc): def put_queue(self,__queue,data):
client.subscribe(self.__topic) if __queue.full():
a = __queue.get()
__queue.put( data,block=True, timeout=2 )
def on_connect(self,client,userdata,flags,rc):
# 当接收到MQTT消息时回调函数 client.subscribe(self.__topic)
def on_message(self,client, userdata, msg):
# 将消息解码为JSON格式
payload = msg.payload.decode('utf-8')
data = json.loads(payload) # 当接收到MQTT消息时回调函数
#logger.info(str(data)) def on_location(self,client, userdata, msg):
# 将消息解码为JSON格式
payload = msg.payload.decode('utf-8')
# 解析位姿信息 data = json.loads(payload)
lon = data.get("lon") #logger.info(str(data))
lat = data.get("lat") # 解析位姿信息
alt = data.get("alt") lon = data.get("lon")
yaw = data.get("yaw") lat = data.get("lat")
pitch = data.get("pitch") alt = data.get("alt")
roll = data.get("roll") yaw = data.get("yaw")
pitch = data.get("pitch")
if len(self.__mqtt_list) == self.__lengthMqttList: roll = data.get("roll")
self.__mqtt_list.pop(0)
self.__mqtt_list.append(data) if len(self.__mqtt_list) == self.__lengthMqttList:
self.__mqtt_list.pop(0)
self.__mqtt_list.append([self.__business,data])
# 打印无人机的位姿信息
#print(f"Longitude: {lon}, Latitude: {lat}, Altitude: {alt}, sat:{data.get('satcount')} , list length:{len(self.__mqtt_list)}")
# 打印无人机的位姿信息
def mqtt_connect(self): #print(f"Longitude: {lon}, Latitude: {lat}, Altitude: {alt}, sat:{data.get('satcount')} , list length:{len(self.__mqtt_list)}")
# 创建客户端
self.client = mqtt.Client() def on_invade(self, client, userdata, msg):
self.client.on_connect = self.on_connect # 将消息解码为JSON格式
# 设置回调函数 payload = msg.payload.decode('utf-8')
self.client.on_message = self.on_message data = json.loads(payload)
# logger.info(str(data))
# 连接到 Broker # 解析位姿信息
self.client.connect(self.__broker, self.__port) points = data.get("points")
# 订阅主题 if len(self.__mqtt_list) == self.__lengthMqttList:
self.client.subscribe(self.__topic) self.__mqtt_list.pop(0)
# 循环等待并处理网络事件 self.__mqtt_list.append([self.__business,points])
self.client.loop_forever()
# 打印无人机的位姿信息
def mqtt_disconnect(self): # print(f"Longitude: {lon}, Latitude: {lat}, Altitude: {alt}, sat:{data.get('satcount')} , list length:{len(self.__mqtt_list)}")
start_time = time()
while True: def mqtt_connect(self):
if time() - start_time > service_timeout: # 创建客户端
logger.error("MQTT读取超时, requestId: %s,限定时间:%.1s , 已运行:%.1fs"%(request_id,service_timeout, time() - start_time)) self.client = mqtt.Client()
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0], self.client.on_connect = self.on_connect
ExceptionType.TASK_EXCUTE_TIMEOUT.value[1]) if self.__business == 0:
client.loop_stop() # 停止循环 # 设置回调函数
client.disconnect() # 断开连接 self.client.on_message = self.on_location
elif self.__business == 1:
def run(self): # 设置回调函数
request_id, mqtt_list, progress = self.__request_id, self.__mqtt_list, init_progess self.client.on_message = self.on_invade
analyse_type, fb_queue = self.__analyse_type, self.__fb_queue
#service_timeout = int(self.__config["service"]["timeout"]) + 120 # 连接到 Broker
self.client.connect(self.__broker, self.__port)
try:
logger.info("开始MQTT读取线程requestId:{}", request_id) # 订阅主题
mqtt_init_num = 0 self.client.subscribe(self.__topic)
self.mqtt_connect() # 循环等待并处理网络事件
self.client.loop_forever()
except Exception:
logger.error("MQTT线程异常:{}, requestId:{}", format_exc(), request_id) def mqtt_disconnect(self):
finally: start_time = time()
mqtt_list = [] while True:
logger.info("MQTT线程停止完成requestId:{}", request_id) if time() - start_time > service_timeout:
logger.error("MQTT读取超时, requestId: %s,限定时间:%.1s , 已运行:%.1fs"%(request_id,service_timeout, time() - start_time))
raise ServiceException(ExceptionType.TASK_EXCUTE_TIMEOUT.value[0],
def start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context): ExceptionType.TASK_EXCUTE_TIMEOUT.value[1])
mqtt_thread = PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context) client.loop_stop() # 停止循环
mqtt_thread.setDaemon(True) client.disconnect() # 断开连接
mqtt_thread.start()
return mqtt_thread def run(self):
def start_PullVideo(mqtt_list): request_id, mqtt_list, progress = self.__request_id, self.__mqtt_list, init_progess
for i in range(1000): analyse_type, fb_queue = self.__analyse_type, self.__fb_queue
sleep(1) #service_timeout = int(self.__config["service"]["timeout"]) + 120
if len(mqtt_list)>=10:
print( mqtt_list[4]) try:
print(i,len(mqtt_list)) logger.info("开始MQTT读取线程requestId:{}", request_id)
if __name__=="__main__": mqtt_init_num = 0
#context = {'service':{'timeout':3600},'mqtt':{ self.mqtt_connect()
# 'broker':"101.133.163.127",'port':1883,'topic':"test/topic","length":10}
# } except Exception:
context = { logger.error("MQTT线程异常:{}, requestId:{}", format_exc(), request_id)
'base_dir':'/home/th/WJ/test/tuoheng_algN', finally:
'env':'test' mqtt_list = []
logger.info("MQTT线程停止完成requestId:{}", request_id)
}
analyse_type = '1'
request_id = '123456789' def start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context):
event_queue, pull_queue, mqtt_list, image_queue, push_queue, push_ex_queue = Queue(), Queue(10), [], Queue(), Queue(), Queue() mqtt_thread = PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context)
fb_queue = Queue() mqtt_thread.setDaemon(True)
mqtt_thread = start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context) mqtt_thread.start()
return mqtt_thread
def start_PullVideo(mqtt_list):
start_PullVideo(mqtt_list) for i in range(1000):
print('---line117--') sleep(1)
if len(mqtt_list)>=10:
print( mqtt_list[4])
print(i,len(mqtt_list))
#mqtt_thread.join() if __name__=="__main__":
#context = {'service':{'timeout':3600},'mqtt':{
# 'broker':"101.133.163.127",'port':1883,'topic':"test/topic","length":10}
# }
context = {
'base_dir':'/home/th/WJ/test/tuoheng_algN',
'env':'test'
}
analyse_type = '1'
request_id = '123456789'
event_queue, pull_queue, mqtt_list, image_queue, push_queue, push_ex_queue = Queue(), Queue(10), [], Queue(), Queue(), Queue()
fb_queue = Queue()
mqtt_thread = start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context)
start_PullVideo(mqtt_list)
print('---line117--')
#mqtt_thread.join()

View File

@ -35,15 +35,15 @@ class PullVideoStreamProcess(Process):
put_queue(self._command_queue, result, timeout=2, is_ex=True) put_queue(self._command_queue, result, timeout=2, is_ex=True)
@staticmethod @staticmethod
def start_File_upload(fb_queue, context, msg, image_queue, analyse_type,mqtt_list): def start_File_upload(fb_queue, context, msg, image_queue, analyse_type):
image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type,mqtt_list) image_thread = ImageFileUpload(fb_queue, context, msg, image_queue, analyse_type)
image_thread.setDaemon(True) image_thread.setDaemon(True)
image_thread.start() image_thread.start()
return image_thread return image_thread
@staticmethod @staticmethod
def start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context): def start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context,business):
mqtt_thread = PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context) mqtt_thread = PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context,business)
mqtt_thread.setDaemon(True) mqtt_thread.setDaemon(True)
mqtt_thread.start() mqtt_thread.start()
return mqtt_thread return mqtt_thread
@ -81,13 +81,14 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess):
# 初始化日志 # 初始化日志
init_log(base_dir, env) init_log(base_dir, env)
logger.info("开启启动实时视频拉流进程, requestId:{},pid:{},ppid:{}", request_id,os.getpid(),os.getppid()) logger.info("开启启动实时视频拉流进程, requestId:{},pid:{},ppid:{}", request_id,os.getpid(),os.getppid())
#开启mqtt # 开启mqtt
if service["mqtt_flag"]==1: if service['mqtt']["flag"] == 1:
mqtt_thread = self.start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context) business = service['mqtt']["business"]
mqtt_thread = self.start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context, business)
# 开启图片上传线程 # 开启图片上传线程
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type,mqtt_list) image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1 cv2_init_num, init_pull_num, concurrent_frame = 0, 1, 1
start_time, pull_stream_start_time, read_start_time, full_timeout = time(), None, None, None start_time, pull_stream_start_time, read_start_time, full_timeout = time(), None, None, None
while True: while True:
@ -129,7 +130,7 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess):
frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3, frame, pull_p, width, height = pull_read_video_stream(pull_p, pull_url, width, height, width_height_3,
w_2, h_2, request_id) w_2, h_2, request_id)
if pull_queue.full(): if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id) #logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
if full_timeout is None: if full_timeout is None:
full_timeout = time() full_timeout = time()
if time() - full_timeout > 180: if time() - full_timeout > 180:
@ -171,7 +172,7 @@ class OnlinePullVideoStreamProcess(PullVideoStreamProcess):
sleep(1) sleep(1)
continue continue
init_pull_num, read_start_time = 1, None init_pull_num, read_start_time = 1, None
frame_list.append(frame) frame_list.append([frame, mqtt_list])
frame_index_list.append(concurrent_frame) frame_index_list.append(concurrent_frame)
if len(frame_list) >= frame_num: if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True) put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)
@ -222,10 +223,11 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
def run(self): def run(self):
msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type msg, context, frame_num, analyse_type = self._msg, self._context, self._frame_num, self._analyse_type
request_id, base_dir, env, pull_url = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"] request_id, base_dir, env, pull_url, service = msg["request_id"], context['base_dir'], context['env'], msg["pull_url"], context["service"]
ex, service_timeout, full_timeout = None, int(context["service"]["timeout"]) + 120, None ex, service_timeout, full_timeout = None, int(context["service"]["timeout"]) + 120, None
command_queue, pull_queue, image_queue, fb_queue = self._command_queue, self._pull_queue, self._image_queue, \
self._fb_queue command_queue, pull_queue, image_queue, fb_queue, mqtt_list = self._command_queue, self._pull_queue, self._image_queue, \
self._fb_queue, self._mqtt_list
image_thread, pull_p = None, None image_thread, pull_p = None, None
width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None width, height, width_height_3, all_frames, w_2, h_2 = None, None, None, 0, None, None
frame_list, frame_index_list = [], [] frame_list, frame_index_list = [], []
@ -235,8 +237,12 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
init_log(base_dir, env) init_log(base_dir, env)
logger.info("开启离线视频拉流进程, requestId:{}", request_id) logger.info("开启离线视频拉流进程, requestId:{}", request_id)
#开启mqtt
if service['mqtt']["flag"]==1:
business = service['mqtt']["business"]
mqtt_thread = self.start_PullMqtt(fb_queue, mqtt_list, request_id, analyse_type, context, business)
# 开启图片上传线程 # 开启图片上传线程
image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type,[]) image_thread = self.start_File_upload(fb_queue, context, msg, image_queue, analyse_type)
# 初始化拉流工具类 # 初始化拉流工具类
cv2_init_num, concurrent_frame = 0, 1 cv2_init_num, concurrent_frame = 0, 1
@ -269,7 +275,7 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id) width, height, width_height_3, all_frames, w_2, h_2 = build_video_info(pull_url, request_id)
continue continue
if pull_queue.full(): if pull_queue.full():
logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id) #logger.info("pull拉流队列满了:{}, requestId: {}", os.getppid(), request_id)
if full_timeout is None: if full_timeout is None:
full_timeout = time() full_timeout = time()
if time() - full_timeout > 180: if time() - full_timeout > 180:
@ -306,7 +312,7 @@ class OfflinePullVideoStreamProcess(PullVideoStreamProcess):
ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1]) ExceptionType.READSTREAM_TIMEOUT_EXCEPTION.value[1])
logger.info("离线拉流线程结束, requestId: {}", request_id) logger.info("离线拉流线程结束, requestId: {}", request_id)
break break
frame_list.append(frame) frame_list.append([frame,mqtt_list])
frame_index_list.append(concurrent_frame) frame_index_list.append(concurrent_frame)
if len(frame_list) >= frame_num: if len(frame_list) >= frame_num:
put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True) put_queue(pull_queue, (4, (frame_list, frame_index_list, all_frames)), timeout=1, is_ex=True)

View File

@ -23,7 +23,7 @@ from util.Cv2Utils import video_conjuncing, write_or_video, write_ai_video, push
from util.ImageUtils import url2Array, add_water_pic from util.ImageUtils import url2Array, add_water_pic
from util.LogUtils import init_log from util.LogUtils import init_log
from util.PlotsUtils import draw_painting_joint, filterBox, xywh2xyxy2, xy2xyxy, draw_name_joint, plot_one_box_auto, draw_name_ocr,draw_name_crowd from util.PlotsUtils import draw_painting_joint, filterBox, xywh2xyxy2, xy2xyxy, draw_name_joint, plot_one_box_auto, draw_name_ocr,draw_name_crowd,draw_transparent_red_polygon
from util.QueUtil import get_no_block_queue, put_queue, clear_queue from util.QueUtil import get_no_block_queue, put_queue, clear_queue
@ -36,11 +36,10 @@ class PushStreamProcess(Process):
# 传参 # 传参
self._msg, self._push_queue, self._image_queue, self._push_ex_queue, self._hb_queue, self._context = args self._msg, self._push_queue, self._image_queue, self._push_ex_queue, self._hb_queue, self._context = args
self._algStatus = False # 默认关闭 self._algStatus = False # 默认关闭
self._algSwitch = self._context['service']['algSwitch'] self._algSwitch = self._context['service']['algSwitch']
# 0521:
#0521: default_enabled = str(self._msg.get("defaultEnabled", "True")).lower() == "true"
default_enabled = str(self._msg.get("defaultEnabled", "True")).lower() == "true"
if default_enabled: if default_enabled:
print("执行默认程序defaultEnabled=True") print("执行默认程序defaultEnabled=True")
self._algSwitch = True self._algSwitch = True
@ -131,15 +130,26 @@ class OnPushStreamProcess(PushStreamProcess):
if push_r is not None: if push_r is not None:
if push_r[0] == 1: if push_r[0] == 1:
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1] frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
for i, frame in enumerate(frame_list): # 处理每1帧
for i, [frame,mqtt_list] in enumerate(frame_list):
# mqtt传参
border = None
mqttPares = None
if len(mqtt_list) >= 1:
mqttPares = mqtt_list[0]
if mqttPares[0] == 1:
border = mqttPares[1]
pix_dis = int((frame.shape[0]//10)*1.2) pix_dis = int((frame.shape[0]//10)*1.2)
# 复制帧用来画图 # 复制帧用来画图
copy_frame = frame.copy() copy_frame = frame.copy()
if border is not None:
copy_frame = draw_transparent_red_polygon(copy_frame, np.array(border, np.int32),alpha=0.25)
det_xywh, thread_p = {}, [] det_xywh, thread_p = {}, []
det_xywh2 = {} det_xywh2 = {'mqttPares':mqttPares}
# 所有问题的矩阵集合 # 所有问题的矩阵集合
qs_np = None qs_np = None
qs_reurn = [] qs_reurn = []
bp_np = None
for det in push_objs[i]: for det in push_objs[i]:
code, det_result = det code, det_result = det
# 每个单独模型处理 # 每个单独模型处理
@ -168,18 +178,21 @@ class OnPushStreamProcess(PushStreamProcess):
else: else:
try: # 应对NaN情况 try: # 应对NaN情况
box, score, cls = xywh2xyxy2(qs) box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score:
continue
if ModelType.CITY_FIREAREA_MODEL.value[1] == str(code):
# 借score作为points点集
box.append(qs[-1])
except: except:
continue continue
if cls not in allowedList or score < frame_score:
continue
label_array, color = label_arrays[cls], rainbows[cls] label_array, color = label_arrays[cls], rainbows[cls]
if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2: if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2:
rr = t.submit(draw_name_joint, box, copy_frame, rr = t.submit(draw_name_joint, box, copy_frame,
draw_config[code]["label_dict"], score, color, draw_config[code]["label_dict"], score, color,
font_config, qs[6]) font_config, qs[6])
else: else:
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config, border=border)
score, color, font_config)
thread_p.append(rr) thread_p.append(rr)
if det_xywh.get(code) is None: if det_xywh.get(code) is None:
@ -189,17 +202,24 @@ class OnPushStreamProcess(PushStreamProcess):
if cd is None: if cd is None:
det_xywh[code][cls] = [[cls, box, score, label_array, color]] det_xywh[code][cls] = [[cls, box, score, label_array, color]]
else: else:
det_xywh[code][cls].append([cls, box, score, label_array, color]) det_xywh[code][cls].append([cls, box, score, label_array, color])
if qs_np is None: if qs_np is None:
qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1], qs_np = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1], box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32) score, cls, code],dtype=np.float32)
else: else:
result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1], result_li = np.array([box[0][0], box[0][1], box[1][0], box[1][1],
box[2][0], box[2][1], box[3][0], box[3][1], box[2][0], box[2][1], box[3][0], box[3][1],
score, cls, code],dtype=np.float32) score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li)) qs_np = np.row_stack((qs_np, result_li))
if ModelType.CITY_FIREAREA_MODEL.value[1] == str(code):
if bp_np is None:
bp_np = np.array([box[0][0], box[0][1], box[-1]], dtype=object)
else:
bp_li = np.array([box[0][0], box[0][1], box[-1]], dtype=object)
bp_np = np.row_stack((bp_np, bp_li))
if logo: if logo:
frame = add_water_pic(frame, logo, request_id) frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id) copy_frame = add_water_pic(copy_frame, logo, request_id)
@ -230,7 +250,7 @@ class OnPushStreamProcess(PushStreamProcess):
if picture_similarity: if picture_similarity:
qs_np_tmp = qs_np_id.copy() qs_np_tmp = qs_np_id.copy()
b = np.zeros(qs_np.shape[0]) b = np.zeros(qs_np.shape[0])
qs_reurn = np.column_stack((qs_np,b)) qs_reurn = np.column_stack((qs_np,b))
else: else:
qs_reurn = filterBox(qs_np, qs_np_tmp, pix_dis) qs_reurn = filterBox(qs_np, qs_np_tmp, pix_dis)
if picture_similarity: if picture_similarity:
@ -256,8 +276,14 @@ class OnPushStreamProcess(PushStreamProcess):
score = q[8] score = q[8]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
label_array, color = label_arrays[cls], rainbows[cls] label_array, color = label_arrays[cls], rainbows[cls]
box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])), box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])),
(int(q[4]), int(q[5])), (int(q[6]), int(q[7]))] (int(q[4]), int(q[5])), (int(q[6]), int(q[7]))]
if bp_np is not None:
if len(bp_np.shape)==1:
bp_np = bp_np[np.newaxis, ...]
for bp in bp_np:
if np.array_equal(bp[:2], np.array([int(q[0]), int(q[1])])):
box.append(bp[-1])
is_new = False is_new = False
if q[11] == 1: if q[11] == 1:
is_new = True is_new = True
@ -281,7 +307,7 @@ class OnPushStreamProcess(PushStreamProcess):
if push_r[0] == 2: if push_r[0] == 2:
logger.info("拉流进程收到控制命令为:{}, requestId: {}",push_r[1] ,request_id) logger.info("拉流进程收到控制命令为:{}, requestId: {}",push_r[1] ,request_id)
if 'algStart' == push_r[1]: self._algStatus = True;logger.info("算法识别开启, requestId: {}", request_id) if 'algStart' == push_r[1]: self._algStatus = True;logger.info("算法识别开启, requestId: {}", request_id)
if 'algStop' == push_r[1]: self._algStatus = False;logger.info("算法识别关闭, requestId: {}", request_id) if 'algStop' == push_r[1]: self._algStatus = False;logger.info("算法识别关闭, requestId: {}", request_id)
if 'stop' == push_r[1]: if 'stop' == push_r[1]:
logger.info("停止推流进程, requestId: {}", request_id) logger.info("停止推流进程, requestId: {}", request_id)
break break
@ -368,23 +394,33 @@ class OffPushStreamProcess(PushStreamProcess):
# [(2, 操作指令)] 指令操作 # [(2, 操作指令)] 指令操作
if push_r[0] == 1: if push_r[0] == 1:
frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1] frame_list, frame_index_list, all_frames, draw_config, push_objs = push_r[1]
# 处理每一帧图片 # 处理每一帧图片
for i, frame in enumerate(frame_list): for i, [frame,mqtt_list] in enumerate(frame_list):
# mqtt传参
border = None
mqttPares = None
if len(mqtt_list) >= 1:
mqttPares = mqtt_list[0]
if mqttPares[0] == 1:
border = mqttPares[1]
pix_dis = int((frame.shape[0]//10)*1.2) pix_dis = int((frame.shape[0]//10)*1.2)
if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames: if frame_index_list[i] % 300 == 0 and frame_index_list[i] <= all_frames:
task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames)) task_process = "%.2f" % (float(frame_index_list[i]) / float(all_frames))
put_queue(hb_queue, {"hb_value": task_process}, timeout=2) put_queue(hb_queue, {"hb_value": task_process}, timeout=2)
# 复制帧用来画图 # 复制帧用来画图
copy_frame = frame.copy() copy_frame = frame.copy()
if border is not None:
copy_frame = draw_transparent_red_polygon(copy_frame, np.array(border, np.int32),alpha=0.25)
# 所有问题记录字典 # 所有问题记录字典
det_xywh, thread_p = {}, [] det_xywh, thread_p = {}, []
det_xywh2 = {} det_xywh2 = {'mqttPares':mqttPares}
# 所有问题的矩阵集合 # 所有问题的矩阵集合
qs_np = None qs_np = None
qs_reurn = [] qs_reurn = []
bp_np = None
for det in push_objs[i]: for det in push_objs[i]:
code, det_result = det code, det_result = det
# 每个单独模型处理 # 每个单独模型处理
# 模型编号、100帧的所有问题, 检测目标、颜色、文字图片 # 模型编号、100帧的所有问题, 检测目标、颜色、文字图片
if len(det_result) > 0: if len(det_result) > 0:
@ -414,14 +450,16 @@ class OffPushStreamProcess(PushStreamProcess):
box, score, cls = xywh2xyxy2(qs) box, score, cls = xywh2xyxy2(qs)
if cls not in allowedList or score < frame_score: if cls not in allowedList or score < frame_score:
continue continue
if ModelType.CITY_FIREAREA_MODEL.value[1] == str(code):
box.append(qs[-1])
label_array, color = label_arrays[cls], rainbows[cls] label_array, color = label_arrays[cls], rainbows[cls]
if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2: if ModelType.CHANNEL2_MODEL.value[1] == str(code) and cls == 2:
rr = t.submit(draw_name_joint, box, copy_frame, draw_config[code]["label_dict"], score, color, font_config, qs[6]) rr = t.submit(draw_name_joint, box, copy_frame, draw_config[code]["label_dict"], score, color, font_config, qs[6])
else: else:
rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config) rr = t.submit(draw_painting_joint, box, copy_frame, label_array, score, color, font_config, border=border)
thread_p.append(rr) thread_p.append(rr)
if det_xywh.get(code) is None: if det_xywh.get(code) is None:
det_xywh[code] = {} det_xywh[code] = {}
cd = det_xywh[code].get(cls) cd = det_xywh[code].get(cls)
@ -440,6 +478,13 @@ class OffPushStreamProcess(PushStreamProcess):
score, cls, code],dtype=np.float32) score, cls, code],dtype=np.float32)
qs_np = np.row_stack((qs_np, result_li)) qs_np = np.row_stack((qs_np, result_li))
if ModelType.CITY_FIREAREA_MODEL.value[1]== str(code):
if bp_np is None:
bp_np = np.array([box[0][0], box[0][1],box[-1]],dtype=object)
else:
bp_li = np.array([box[0][0], box[0][1],box[-1]],dtype=object)
bp_np = np.row_stack((bp_np, bp_li))
if logo: if logo:
frame = add_water_pic(frame, logo, request_id) frame = add_water_pic(frame, logo, request_id)
copy_frame = add_water_pic(copy_frame, logo, request_id) copy_frame = add_water_pic(copy_frame, logo, request_id)
@ -467,7 +512,7 @@ class OffPushStreamProcess(PushStreamProcess):
if picture_similarity: if picture_similarity:
qs_np_tmp = qs_np_id.copy() qs_np_tmp = qs_np_id.copy()
b = np.zeros(qs_np.shape[0]) b = np.zeros(qs_np.shape[0])
qs_reurn = np.column_stack((qs_np,b)) qs_reurn = np.column_stack((qs_np,b))
else: else:
qs_reurn = filterBox(qs_np, qs_np_tmp, pix_dis) qs_reurn = filterBox(qs_np, qs_np_tmp, pix_dis)
if picture_similarity: if picture_similarity:
@ -494,8 +539,14 @@ class OffPushStreamProcess(PushStreamProcess):
score = q[8] score = q[8]
rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"] rainbows, label_arrays = draw_config[code]["rainbows"], draw_config[code]["label_arrays"]
label_array, color = label_arrays[cls], rainbows[cls] label_array, color = label_arrays[cls], rainbows[cls]
box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])), box = [(int(q[0]), int(q[1])), (int(q[2]), int(q[3])),
(int(q[4]), int(q[5])), (int(q[6]), int(q[7]))] (int(q[4]), int(q[5])), (int(q[6]), int(q[7]))]
if bp_np is not None:
if len(bp_np.shape)==1:
bp_np = bp_np[np.newaxis, ...]
for bp in bp_np:
if np.array_equal(bp[:2], np.array([int(q[0]), int(q[1])])):
box.append(bp[-1])
is_new = False is_new = False
if q[11] == 1: if q[11] == 1:
is_new = True is_new = True
@ -509,7 +560,7 @@ class OffPushStreamProcess(PushStreamProcess):
else: else:
det_xywh2[code][cls].append( det_xywh2[code][cls].append(
[cls, box, score, label_array, color, is_new]) [cls, box, score, label_array, color, is_new])
if len(det_xywh2) > 0: if len(det_xywh2) > 1:
put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames, draw_config["font_config"]])) put_queue(image_queue, (1, [det_xywh2, frame, frame_index_list[i], all_frames, draw_config["font_config"]]))
push_p = push_stream_result.result(timeout=60) push_p = push_stream_result.result(timeout=60)
ai_video_file = write_ai_video_result.result(timeout=60) ai_video_file = write_ai_video_result.result(timeout=60)
@ -517,7 +568,7 @@ class OffPushStreamProcess(PushStreamProcess):
if push_r[0] == 2: if push_r[0] == 2:
logger.info("拉流进程收到控制命令为:{}, requestId: {}",push_r[1] ,request_id) logger.info("拉流进程收到控制命令为:{}, requestId: {}",push_r[1] ,request_id)
if 'algStart' == push_r[1]: self._algStatus = True;logger.info("算法识别开启, requestId: {}", request_id) if 'algStart' == push_r[1]: self._algStatus = True;logger.info("算法识别开启, requestId: {}", request_id)
if 'algStop' == push_r[1]: self._algStatus = False;logger.info("算法识别关闭, requestId: {}", request_id) if 'algStop' == push_r[1]: self._algStatus = False;logger.info("算法识别关闭, requestId: {}", request_id)
if 'stop' == push_r[1]: if 'stop' == push_r[1]:
logger.info("停止推流进程, requestId: {}", request_id) logger.info("停止推流进程, requestId: {}", request_id)
break break

View File

@ -1,10 +1,21 @@
mqtt_flag: true mqtt_flag: true
broker : "58.213.148.44" # 业务0为经纬度定位业务1为入侵算法开关
port : 1883 business: 1
username: "admin" # 经纬度定位
password: "admin##123" location:
#topic: "/topic/v1/airportFly/%s/aiDroneData" broker : "58.213.148.44"
topic: "/topic/v1/airportDrone/THJSQ03B2309TPCTD5QV/realTime/data" port : 1883
# 存储多少条消息到list里 username: "admin"
length: 10 password: "admin##123"
#topic: "/topic/v1/airportFly/%s/aiDroneData"
topic: "/topic/v1/airportDrone/THJSQ03B2309TPCTD5QV/realTime/data"
# 入侵
invade:
broker : "192.168.11.8"
port : 2883
#topic: "/topic/v1/airportFly/%s/aiDroneData"
topic: "test000/topic"
# 存储多少条消息到list里
length: 30

View File

@ -33,7 +33,8 @@ service:
#storage source,0--aliyun,1--minio #storage source,0--aliyun,1--minio
storage_source: 0 storage_source: 0
#是否启用mqtt0--不用1--启用 #是否启用mqtt0--不用1--启用
mqtt_flag: 0 mqtt:
flag: 0
business: 1
#是否启用alg控制功能 #是否启用alg控制功能
algSwitch: False algSwitch: False

View File

@ -14,6 +14,7 @@ from utilsK.drownUtils import mixDrowing_water_postprocess
from utilsK.noParkingUtils import mixNoParking_road_postprocess from utilsK.noParkingUtils import mixNoParking_road_postprocess
from utilsK.illParkingUtils import illParking_postprocess from utilsK.illParkingUtils import illParking_postprocess
from utilsK.pannelpostUtils import pannel_post_process from utilsK.pannelpostUtils import pannel_post_process
from utilsK.securitypostUtils import security_post_process
from stdc import stdcModel from stdc import stdcModel
from yolov5 import yolov5Model from yolov5 import yolov5Model
from p2pNet import p2NnetModel from p2pNet import p2NnetModel
@ -63,7 +64,7 @@ class ModelType(Enum):
"classes": 5, "classes": 5,
"rainbows": COLOR "rainbows": COLOR
}, },
'fiterList':[2],
'Detweights': "../weights/trt/AIlib2/river/yolov5_%s_fp16.engine" % gpuName, 'Detweights': "../weights/trt/AIlib2/river/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../weights/trt/AIlib2/river/stdc_360X640_%s_fp16.engine' % gpuName 'Segweights': '../weights/trt/AIlib2/river/stdc_360X640_%s_fp16.engine' % gpuName
}) })
@ -99,10 +100,8 @@ class ModelType(Enum):
'weight':"../weights/trt/AIlib2/forest2/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径 'weight':"../weights/trt/AIlib2/forest2/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } }, 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False},
} }
], ],
'postFile': { 'postFile': {
@ -112,11 +111,10 @@ class ModelType(Enum):
"classes": 5, "classes": 5,
"rainbows": COLOR "rainbows": COLOR
}, },
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示、输出 "score_byClass": {0: 0.25, 1: 0.3, 2: 0.3, 3: 0.3},
'fiterList': [5],
'segRegionCnt':2,###分割模型结果需要保留的等值线数目 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
"pixScale": 1.2, "pixScale": 1.2,
}) })
@ -162,7 +160,8 @@ class ModelType(Enum):
"classes": 10, "classes": 10,
"rainbows": COLOR "rainbows": COLOR
}, },
'allowedList':[0,1,2,3,4,5,6,7,8,9,10,11,12,16,17,18,19,20,21,22], 'score_byClass':{11:0.75,12:0.75},
'fiterList': [13,14,15,16,17,18,19,20,21,22],
'Detweights': "../weights/trt/AIlib2/highWay2/yolov5_%s_fp16.engine" % gpuName, 'Detweights': "../weights/trt/AIlib2/highWay2/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../weights/trt/AIlib2/highWay2/stdc_360X640_%s_fp16.engine' % gpuName 'Segweights': '../weights/trt/AIlib2/highWay2/stdc_360X640_%s_fp16.engine' % gpuName
}) })
@ -231,7 +230,7 @@ class ModelType(Enum):
"classes": 5, "classes": 5,
"rainbows": COLOR "rainbows": COLOR
}, },
'Segweights': None 'Segweights': None,
}) })
ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: { ANGLERSWIMMER_MODEL = ("9", "009", "钓鱼游泳模型", 'AnglerSwimmer', lambda device, gpuName: {
@ -345,7 +344,8 @@ class ModelType(Enum):
'function': riverDetSegMixProcess, 'function': riverDetSegMixProcess,
'pars': { 'pars': {
'slopeIndex': [1, 3, 4, 7], 'slopeIndex': [1, 3, 4, 7],
'riverIou': 0.1 'riverIou': 0.1,
'scale': 0.25
} }
} }
}, },
@ -377,10 +377,11 @@ class ModelType(Enum):
'weight':'../weights/trt/AIlib2/cityMangement3/yolov5_%s_fp16.engine'%(gpuName), 'weight':'../weights/trt/AIlib2/cityMangement3/yolov5_%s_fp16.engine'%(gpuName),
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3,4,5,6,7],'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':True, "score_byClass":{"0":0.8,"1":0.4,"2":0.5,"3":0.5 } } 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':True}
}, },
{ {
'weight':'../weights/pth/AIlib2/cityMangement3/dmpr.pth', 'weight':'../weights/trt/AIlib2/cityMangement3/dmpr_3090.engine',
#'weight':'../weights/pth/AIlib2/cityMangement3/dmpr.pth',
'par':{ 'par':{
'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.1, 'dmprimg_size':640, 'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.1, 'dmprimg_size':640,
'name':'dmpr' 'name':'dmpr'
@ -403,7 +404,7 @@ class ModelType(Enum):
"classes": 8, "classes": 8,
"rainbows": COLOR "rainbows": COLOR
}, },
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示、输出 "score_byClass":{0:0.8, 1:0.4, 2:0.5, 3:0.5},
'segRegionCnt':2,###分割模型结果需要保留的等值线数目 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
"pixScale": 1.2, "pixScale": 1.2,
}) })
@ -568,10 +569,10 @@ class ModelType(Enum):
'weight':'../weights/trt/AIlib2/channel2/yolov5_%s_fp16.engine'%(gpuName), 'weight':'../weights/trt/AIlib2/channel2/yolov5_%s_fp16.engine'%(gpuName),
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.1,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.7,"1":0.7,"2":0.8,"3":0.6} } 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.1,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False}
}, },
{ {
'weight' : '../weights/pth/AIlib2/ocr2/crnn_ch.pth', 'weight' : '../weights/trt/AIlib2/ocr2/crnn_ch_%s_fp16_192X32.engine'%(gpuName),
'name':'ocr', 'name':'ocr',
'model':ocrModel, 'model':ocrModel,
'par':{ 'par':{
@ -587,7 +588,6 @@ class ModelType(Enum):
}, },
} }
], ],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6]],
'segPar': None, 'segPar': None,
'postFile': { 'postFile': {
"name": "post_process", "name": "post_process",
@ -597,6 +597,8 @@ class ModelType(Enum):
"rainbows": COLOR "rainbows": COLOR
}, },
'Segweights': None, 'Segweights': None,
"score_byClass": {0: 0.7, 1: 0.7, 2: 0.8, 3: 0.6}
}) })
RIVERT_MODEL = ("25", "025", "河道检测模型(T)", 'riverT', lambda device, gpuName: { RIVERT_MODEL = ("25", "025", "河道检测模型(T)", 'riverT', lambda device, gpuName: {
@ -642,7 +644,7 @@ class ModelType(Enum):
'weight':"../weights/trt/AIlib2/forestCrowd/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径 'weight':"../weights/trt/AIlib2/forestCrowd/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{ "0":0.25,"1":0.25,"2":0.6,"3":0.6,'4':0.6 ,'5':0.6 } }, 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False},
} }
@ -655,7 +657,7 @@ class ModelType(Enum):
"classes": 5, "classes": 5,
"rainbows": COLOR "rainbows": COLOR
}, },
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示、输出 "score_byClass":{0:0.25,1:0.25,2:0.6,3:0.6,4:0.6 ,5:0.6},
'segRegionCnt':2,###分割模型结果需要保留的等值线数目 'segRegionCnt':2,###分割模型结果需要保留的等值线数目
"pixScale": 1.2, "pixScale": 1.2,
@ -703,6 +705,7 @@ class ModelType(Enum):
"classes": 10, "classes": 10,
"rainbows": COLOR "rainbows": COLOR
}, },
'fiterltList': [11,12,13,14,15,16,17],
'Detweights': "../weights/trt/AIlib2/highWay2T/yolov5_%s_fp16.engine" % gpuName, 'Detweights': "../weights/trt/AIlib2/highWay2T/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../weights/trt/AIlib2/highWay2T/stdc_360X640_%s_fp16.engine' % gpuName 'Segweights': '../weights/trt/AIlib2/highWay2T/stdc_360X640_%s_fp16.engine' % gpuName
}) })
@ -716,7 +719,7 @@ class ModelType(Enum):
'weight':"../weights/trt/AIlib2/smartSite/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径 'weight':"../weights/trt/AIlib2/smartSite/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } }, 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':False},
} }
@ -724,6 +727,7 @@ class ModelType(Enum):
'postFile': { 'postFile': {
"rainbows": COLOR "rainbows": COLOR
}, },
"score_byClass": {0: 0.25, 1: 0.3, 2: 0.3, 3: 0.3}
}) })
@ -736,7 +740,7 @@ class ModelType(Enum):
'weight':"../weights/trt/AIlib2/rubbish/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径 'weight':"../weights/trt/AIlib2/rubbish/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } }, 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':False},
} }
@ -744,6 +748,7 @@ class ModelType(Enum):
'postFile': { 'postFile': {
"rainbows": COLOR "rainbows": COLOR
}, },
"score_byClass": {0: 0.25, 1: 0.3, 2: 0.3, 3: 0.3}
}) })
@ -756,7 +761,7 @@ class ModelType(Enum):
'weight':"../weights/trt/AIlib2/firework/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径 'weight':"../weights/trt/AIlib2/firework/yolov5_%s_fp16.engine"%(gpuName),###检测模型路径
'name':'yolov5', 'name':'yolov5',
'model':yolov5Model, 'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } }, 'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'segRegionCnt':1, 'trtFlag_det':True,'trtFlag_seg':False },
} }
@ -764,7 +769,6 @@ class ModelType(Enum):
'postFile': { 'postFile': {
"rainbows": COLOR "rainbows": COLOR
}, },
}) })
TRAFFIC_SPILL_MODEL = ("50", "501", "高速公路抛洒物模型", 'highWaySpill', lambda device, gpuName: { TRAFFIC_SPILL_MODEL = ("50", "501", "高速公路抛洒物模型", 'highWaySpill', lambda device, gpuName: {
@ -806,7 +810,7 @@ class ModelType(Enum):
"classes": 2, "classes": 2,
"rainbows": COLOR "rainbows": COLOR
}, },
'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0]], 'fiterList': [1],
###控制哪些检测类别显示、输出 ###控制哪些检测类别显示、输出
'Detweights': "../weights/trt/AIlib2/highWaySpill/yolov5_%s_fp16.engine" % gpuName, 'Detweights': "../weights/trt/AIlib2/highWaySpill/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../weights/trt/AIlib2/highWaySpill/stdc_360X640_%s_fp16.engine' % gpuName 'Segweights': '../weights/trt/AIlib2/highWaySpill/stdc_360X640_%s_fp16.engine' % gpuName
@ -851,7 +855,7 @@ class ModelType(Enum):
"classes": 4, "classes": 4,
"rainbows": COLOR "rainbows": COLOR
}, },
'detModelpara': [{"id": str(x), "config": {"k1": "v1", "k2": "v2"}} for x in [0]], 'fiterList':[1,2,3],
###控制哪些检测类别显示、输出 ###控制哪些检测类别显示、输出
'Detweights': "../weights/trt/AIlib2/highWayCthc/yolov5_%s_fp16.engine" % gpuName, 'Detweights': "../weights/trt/AIlib2/highWayCthc/yolov5_%s_fp16.engine" % gpuName,
'Segweights': '../weights/trt/AIlib2/highWayCthc/stdc_360X640_%s_fp16.engine' % gpuName 'Segweights': '../weights/trt/AIlib2/highWayCthc/stdc_360X640_%s_fp16.engine' % gpuName
@ -867,14 +871,15 @@ class ModelType(Enum):
'name': 'yolov5', 'name': 'yolov5',
'model': yolov5Model, 'model': yolov5Model,
'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45, 'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
'allowedList': [0,1,2], 'segRegionCnt': 1, 'trtFlag_det': True, 'segRegionCnt': 1, 'trtFlag_det': True,
'trtFlag_seg': False, "score_byClass": {"0": 0.25, "1": 0.3, "2": 0.3, "3": 0.3}}, 'trtFlag_seg': False},
} }
], ],
'postFile': { 'postFile': {
"rainbows": COLOR "rainbows": COLOR
}, },
'fiterList':[0]
}) })
@ -884,25 +889,27 @@ class ModelType(Enum):
'rainbows': COLOR, 'rainbows': COLOR,
'models': [ 'models': [
{ {
'trtFlag_det': False, #'weight': '../weights/pth/AIlib2/carplate/plate_yolov5s_v3.jit',
'weight': '../weights/pth/AIlib2/carplate/plate_yolov5s_v3.jit', 'weight': '../weights/trt/AIlib2/carplate/yolov5_%s_fp16.engine' % (gpuName),
'name': 'yolov5', 'name': 'yolov5',
'model': yolov5Model, 'model': yolov5Model,
'par': { 'par': {
'trtFlag_det': True,
'device': 'cuda:0', 'device': 'cuda:0',
'half': False, 'half': True,
'conf_thres': 0.4, 'conf_thres': 0.4,
'iou_thres': 0.45, 'iou_thres': 0.45,
'nc': 1, 'nc': 1,
'plate':8,
'plate_dilate': (0.5, 0.1) 'plate_dilate': (0.5, 0.1)
}, },
}, },
{ {
'trtFlag_ocr': False, 'weight' : '../weights/trt/AIlib2/ocr2/crnn_ch_%s_fp16_192X32.engine'%(gpuName),
'weight': '../weights/pth/AIlib2/ocr2/crnn_ch.pth',
'name': 'ocr', 'name': 'ocr',
'model': ocrModel, 'model': ocrModel,
'par': { 'par': {
'trtFlag_ocr': True,
'char_file': '../AIlib2/conf/ocr2/benchmark.txt', 'char_file': '../AIlib2/conf/ocr2/benchmark.txt',
'mode': 'ch', 'mode': 'ch',
'nc': 3, 'nc': 3,
@ -926,10 +933,8 @@ class ModelType(Enum):
'name': 'yolov5', 'name': 'yolov5',
'model': yolov5Model, 'model': yolov5Model,
'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.50, 'iou_thres': 0.45, 'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.50, 'iou_thres': 0.45,
'allowedList': list(range(20)), 'segRegionCnt': 1, 'trtFlag_det': True, 'segRegionCnt': 1, 'trtFlag_det': True,'trtFlag_seg': False},
'trtFlag_seg': False, "score_byClass": {"0": 0.50, "1": 0.3, "2": 0.3, "3": 0.3}},
} }
], ],
'postFile': { 'postFile': {
"rainbows": COLOR "rainbows": COLOR
@ -947,8 +952,7 @@ class ModelType(Enum):
'name': 'yolov5', 'name': 'yolov5',
'model': yolov5Model, 'model': yolov5Model,
'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.50, 'iou_thres': 0.45, 'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.50, 'iou_thres': 0.45,
'allowedList': list(range(20)), 'segRegionCnt': 1, 'trtFlag_det': True, 'segRegionCnt': 1, 'trtFlag_det': True, 'trtFlag_seg': False},
'trtFlag_seg': False, "score_byClass": {"0": 0.50, "1": 0.3, "2": 0.3, "3": 0.3}},
} }
], ],
@ -995,8 +999,7 @@ class ModelType(Enum):
'name': 'yolov5', 'name': 'yolov5',
'model': yolov5Model, 'model': yolov5Model,
'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.50, 'iou_thres': 0.45, 'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.50, 'iou_thres': 0.45,
'allowedList': list(range(20)), 'segRegionCnt': 1, 'trtFlag_det': True, 'segRegionCnt': 1, 'trtFlag_det': True, 'trtFlag_seg': False},
'trtFlag_seg': False, "score_byClass": {"0": 0.50, "1": 0.3, "2": 0.3, "3": 0.3}},
} }
], ],
@ -1016,8 +1019,7 @@ class ModelType(Enum):
'name': 'yolov5', 'name': 'yolov5',
'model': yolov5Model, 'model': yolov5Model,
'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45, 'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
'allowedList': [0,1,2], 'segRegionCnt': 1, 'trtFlag_det': True, 'segRegionCnt': 1, 'trtFlag_det': True, 'trtFlag_seg': False},
'trtFlag_seg': False, "score_byClass": {"0": 0.25, "1": 0.3, "2": 0.3, "3": 0.3}},
}, },
{ {
'trtFlag_det': False, 'trtFlag_det': False,
@ -1042,6 +1044,54 @@ class ModelType(Enum):
}], }],
}) })
CITY_FIREAREA_MODEL = ("30", "307", "火焰面积模型", 'FireArea', lambda device, gpuName: {
'device': device,
'gpu_name': gpuName,
'labelnames': ["火焰"],
'seg_nclass': 2, # 分割模型类别数目默认2类
'segRegionCnt': 0,
'trtFlag_det': True,
'trtFlag_seg': False,
'Detweights': "../weights/trt/AIlib2/smogfire/yolov5_%s_fp16.engine" % gpuName, # 0fire 1smoke
'Samweights': "../weights/pth/AIlib2/firearea/sam_vit_b_01ec64.pth", #分割模型
'ksize':(7,7),
'sam_type':'vit_b',
'slopeIndex': [],
'segPar': None,
'postFile': {
"name": "post_process",
"conf_thres": 0.25,
"iou_thres": 0.45,
"classes": 5,
"rainbows": COLOR
},
'Segweights': None,
'fiterList':[1],
"score_byClass": {0: 0.1}
})
CITY_SECURITY_MODEL = ("30", "308", "安防模型", 'SECURITY', lambda device, gpuName: {
'labelnames': ["带安全帽","安全帽","攀爬","斗殴","未戴安全帽"],
'postProcess': {'function': security_post_process, 'pars': {'objs': [0,1],'iou':0.25,'unhelmet':4}},
'models':
[
{
'weight': "../weights/trt/AIlib2/security/yolov5_%s_fp16.engine" % (gpuName), ###检测模型路径
'name': 'yolov5',
'model': yolov5Model,
'par': {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
'segRegionCnt': 1, 'trtFlag_det': True, 'trtFlag_seg': False},
}
],
'postFile': {
"rainbows": COLOR
},
'fiterList': [0,1],
"score_byClass": {"0": 0.50}
})
@staticmethod @staticmethod
def checkCode(code): def checkCode(code):
for model in ModelType: for model in ModelType:

View File

@ -27,6 +27,7 @@ import torch
import tensorrt as trt import tensorrt as trt
from utilsK.jkmUtils import pre_process, post_process, get_return_data from utilsK.jkmUtils import pre_process, post_process, get_return_data
from DMPR import DMPRModel from DMPR import DMPRModel
from segment_anything import SamPredictor, sam_model_registry
FONT_PATH = "../AIlib2/conf/platech.ttf" FONT_PATH = "../AIlib2/conf/platech.ttf"
@ -36,6 +37,7 @@ class OneModel:
def __init__(self, device, allowedList=None, requestId=None, modeType=None, gpu_name=None, base_dir=None, env=None): def __init__(self, device, allowedList=None, requestId=None, modeType=None, gpu_name=None, base_dir=None, env=None):
try: try:
start = time.time()
logger.info("########################加载{}########################, requestId:{}", modeType.value[2], logger.info("########################加载{}########################, requestId:{}", modeType.value[2],
requestId) requestId)
par = modeType.value[4](str(device), gpu_name) par = modeType.value[4](str(device), gpu_name)
@ -68,10 +70,11 @@ class OneModel:
'ovlap_thres_crossCategory': postFile.get("ovlap_thres_crossCategory"), 'ovlap_thres_crossCategory': postFile.get("ovlap_thres_crossCategory"),
'iou_thres': postFile["iou_thres"], 'iou_thres': postFile["iou_thres"],
# 对高速模型进行过滤 # 对高速模型进行过滤
'allowedList': par['allowedList'] if modeType.value[0] == '3' else [],
'segRegionCnt': par['segRegionCnt'], 'segRegionCnt': par['segRegionCnt'],
'trtFlag_det': par['trtFlag_det'], 'trtFlag_det': par['trtFlag_det'],
'trtFlag_seg': par['trtFlag_seg'] 'trtFlag_seg': par['trtFlag_seg'],
'score_byClass':par['score_byClass'] if 'score_byClass' in par.keys() else None,
'fiterList': par['fiterList'] if 'fiterList' in par.keys() else []
} }
model_param = { model_param = {
"model": model, "model": model,
@ -86,6 +89,7 @@ class OneModel:
logger.error("模型加载异常:{}, requestId:{}", format_exc(), requestId) logger.error("模型加载异常:{}, requestId:{}", format_exc(), requestId)
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
logger.info("模型初始化时间:{}, requestId:{}", time.time() - start, requestId)
# 纯分类模型 # 纯分类模型
class cityManagementModel: class cityManagementModel:
__slots__ = "model_conf" __slots__ = "model_conf"
@ -103,6 +107,8 @@ class cityManagementModel:
model_param = { model_param = {
"modelList": modelList, "modelList": modelList,
"postProcess": postProcess, "postProcess": postProcess,
"score_byClass":par['score_byClass'] if 'score_byClass' in par.keys() else None,
"fiterList":par['fiterList'] if 'fiterList' in par.keys() else [],
} }
self.model_conf = (modeType, model_param, allowedList, names, rainbows) self.model_conf = (modeType, model_param, allowedList, names, rainbows)
except Exception: except Exception:
@ -111,15 +117,14 @@ class cityManagementModel:
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
def detSeg_demo2(args): def detSeg_demo2(args):
model_conf, frame, request_id = args model_conf, frame, request_id = args
modelList, postProcess = model_conf[1]['modelList'], model_conf[1]['postProcess'] modelList, postProcess,score_byClass,fiterList = (
model_conf[1]['modelList'], model_conf[1]['postProcess'],model_conf[1]['score_byClass'], model_conf[1]['fiterList'])
try: try:
result = [[ None, None, AI_process_N([frame], modelList, postProcess)[0] ] ] # 为了让返回值适配统一的接口而写的shi result = [[ None, None, AI_process_N([frame], modelList, postProcess,score_byClass,fiterList)[0] ] ] # 为了让返回值适配统一的接口而写的shi
return result return result
except ServiceException as s: except ServiceException as s:
raise s raise s
except Exception: except Exception:
# self.num += 1
# cv2.imwrite('/home/th/tuo_heng/dev/img%s.jpg' % str(self.num), frame)
logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id) logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id)
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])
@ -127,11 +132,6 @@ def detSeg_demo2(args):
def model_process(args): def model_process(args):
model_conf, frame, request_id = args model_conf, frame, request_id = args
model_param, names, rainbows = model_conf[1], model_conf[3], model_conf[4] model_param, names, rainbows = model_conf[1], model_conf[3], model_conf[4]
# modeType, model_param, allowedList, names, rainbows = model_conf
# segmodel, names, label_arraylist, rainbows, objectPar, font, segPar, mode, postPar, requestId = args
# model_param['digitFont'] = digitFont
# model_param['label_arraylist'] = label_arraylist
# model_param['font_config'] = font_config
try: try:
return AI_process([frame], model_param['model'], model_param['segmodel'], names, model_param['label_arraylist'], return AI_process([frame], model_param['model'], model_param['segmodel'], names, model_param['label_arraylist'],
rainbows, objectPar=model_param['objectPar'], font=model_param['digitFont'], rainbows, objectPar=model_param['objectPar'], font=model_param['digitFont'],
@ -164,7 +164,13 @@ class TwoModel:
Detweights = par['Detweights'] Detweights = par['Detweights']
with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime: with open(Detweights, "rb") as f, trt.Runtime(trt.Logger(trt.Logger.ERROR)) as runtime:
model = runtime.deserialize_cuda_engine(f.read()) model = runtime.deserialize_cuda_engine(f.read())
segmodel = None if modeType == ModelType.CITY_FIREAREA_MODEL:
sam = sam_model_registry[par['sam_type']](checkpoint=par['Samweights'])
sam.to(device=device)
segmodel = SamPredictor(sam)
else:
segmodel = None
postFile = par['postFile'] postFile = par['postFile']
conf_thres = postFile["conf_thres"] conf_thres = postFile["conf_thres"]
iou_thres = postFile["iou_thres"] iou_thres = postFile["iou_thres"]
@ -178,7 +184,10 @@ class TwoModel:
"conf_thres": conf_thres, "conf_thres": conf_thres,
"iou_thres": iou_thres, "iou_thres": iou_thres,
"trtFlag_det": par['trtFlag_det'], "trtFlag_det": par['trtFlag_det'],
"otc": otc "otc": otc,
"ksize":par['ksize'] if 'ksize' in par.keys() else None,
"score_byClass": par['score_byClass'] if 'score_byClass' in par.keys() else None,
"fiterList": par['fiterList'] if 'fiterList' in par.keys() else []
} }
self.model_conf = (modeType, model_param, allowedList, names, rainbows) self.model_conf = (modeType, model_param, allowedList, names, rainbows)
except Exception: except Exception:
@ -186,16 +195,15 @@ class TwoModel:
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId) logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId)
def forest_process(args): def forest_process(args):
model_conf, frame, request_id = args model_conf, frame, request_id = args
model_param, names, rainbows = model_conf[1], model_conf[3], model_conf[4] model_param, names, rainbows = model_conf[1], model_conf[3], model_conf[4]
try: try:
return AI_process_forest([frame], model_param['model'], model_param['segmodel'], names, return AI_process_forest([frame], model_param['model'], model_param['segmodel'], names,
model_param['label_arraylist'], rainbows, model_param['half'], model_param['device'], model_param['label_arraylist'], rainbows, model_param['half'], model_param['device'],
model_param['conf_thres'], model_param['iou_thres'], [], font=model_param['digitFont'], model_param['conf_thres'], model_param['iou_thres'],font=model_param['digitFont'],
trtFlag_det=model_param['trtFlag_det'], SecNms=model_param['otc']) trtFlag_det=model_param['trtFlag_det'], SecNms=model_param['otc'],ksize = model_param['ksize'],
score_byClass=model_param['score_byClass'],fiterList=model_param['fiterList'])
except ServiceException as s: except ServiceException as s:
raise s raise s
except Exception: except Exception:
@ -204,7 +212,6 @@ def forest_process(args):
logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id) logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id)
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])
class MultiModel: class MultiModel:
__slots__ = "model_conf" __slots__ = "model_conf"
@ -223,6 +230,8 @@ class MultiModel:
model_param = { model_param = {
"modelList": modelList, "modelList": modelList,
"postProcess": postProcess, "postProcess": postProcess,
"score_byClass": par['score_byClass'] if 'score_byClass' in par.keys() else None,
"fiterList": par['fiterList'] if 'fiterList' in par.keys() else []
} }
self.model_conf = (modeType, model_param, allowedList, names, rainbows) self.model_conf = (modeType, model_param, allowedList, names, rainbows)
except Exception: except Exception:
@ -230,13 +239,13 @@ class MultiModel:
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId) logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId)
def channel2_process(args): def channel2_process(args):
model_conf, frame, request_id = args model_conf, frame, request_id = args
modelList, postProcess = model_conf[1]['modelList'], model_conf[1]['postProcess'] modelList, postProcess,score_byClass,fiterList = (
model_conf[1]['modelList'], model_conf[1]['postProcess'],model_conf[1]['score_byClass'], model_conf[1]['fiterList'])
try: try:
start = time.time() start = time.time()
result = [[None, None, AI_process_C([frame], modelList, postProcess)[0]]] # 为了让返回值适配统一的接口而写的shi result = [[None, None, AI_process_C([frame], modelList, postProcess,score_byClass,fiterList)[0]]] # 为了让返回值适配统一的接口而写的shi
# print("AI_process_C use time = {}".format(time.time()-start)) # print("AI_process_C use time = {}".format(time.time()-start))
return result return result
except ServiceException as s: except ServiceException as s:
@ -245,7 +254,6 @@ def channel2_process(args):
logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id) logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id)
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])
def get_label_arraylist(*args): def get_label_arraylist(*args):
width, height, names, rainbows = args width, height, names, rainbows = args
# line = int(round(0.002 * (height + width) / 2) + 1) # line = int(round(0.002 * (height + width) / 2) + 1)
@ -266,8 +274,6 @@ def get_label_arraylist(*args):
'label_location': 'leftTop'} 'label_location': 'leftTop'}
label_arraylist = get_label_arrays(names, rainbows, fontSize=text_height, fontPath=FONT_PATH) label_arraylist = get_label_arrays(names, rainbows, fontSize=text_height, fontPath=FONT_PATH)
return digitFont, label_arraylist, (line, text_width, text_height, fontScale, tf) return digitFont, label_arraylist, (line, text_width, text_height, fontScale, tf)
# 船只模型 # 船只模型
class ShipModel: class ShipModel:
__slots__ = "model_conf" __slots__ = "model_conf"
@ -293,8 +299,6 @@ class ShipModel:
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId) logger.info("模型初始化时间:{}, requestId:{}", time.time() - s, requestId)
def obb_process(args): def obb_process(args):
model_conf, frame, request_id = args model_conf, frame, request_id = args
model_param = model_conf[1] model_param = model_conf[1]
@ -309,7 +313,6 @@ def obb_process(args):
logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id) logger.error("算法模型分析异常:{}, requestId:{}", format_exc(), request_id)
raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_ANALYSE_EXCEPTION.value[0],
ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1]) ExceptionType.MODEL_ANALYSE_EXCEPTION.value[1])
# 车牌分割模型、健康码、行程码分割模型 # 车牌分割模型、健康码、行程码分割模型
class IMModel: class IMModel:
__slots__ = "model_conf" __slots__ = "model_conf"
@ -333,7 +336,7 @@ class IMModel:
new_device = torch.device(par['device']) new_device = torch.device(par['device'])
model = torch.jit.load(par[img_type]['weights']) model = torch.jit.load(par[img_type]['weights'])
logger.info("########################加载 jit 模型成功 成功 ########################, requestId:{}", logger.info("########################加载 jit 模型成功 成功 ########################, requestId:{}",
requestId) requestId)
self.model_conf = (modeType, allowedList, new_device, model, par, img_type) self.model_conf = (modeType, allowedList, new_device, model, par, img_type)
except Exception: except Exception:
@ -387,15 +390,12 @@ class CARPLATEModel:
detpar = par['models'][0]['par'] detpar = par['models'][0]['par']
# new_device = torch.device(par['device']) # new_device = torch.device(par['device'])
# modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ] # modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ]
logger.info("########################加载 plate_yolov5s_v3.jit 成功 ########################, requestId:{}",
requestId)
self.model_conf = (modeType, device, modelList, detpar, par['rainbows']) self.model_conf = (modeType, device, modelList, detpar, par['rainbows'])
except Exception: except Exception:
logger.error("模型加载异常:{}, requestId:{}", format_exc(), requestId) logger.error("模型加载异常:{}, requestId:{}", format_exc(), requestId)
raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0], raise ServiceException(ExceptionType.MODEL_LOADING_EXCEPTION.value[0],
ExceptionType.MODEL_LOADING_EXCEPTION.value[1]) ExceptionType.MODEL_LOADING_EXCEPTION.value[1])
class DENSECROWDCOUNTModel: class DENSECROWDCOUNTModel:
__slots__ = "model_conf" __slots__ = "model_conf"
@ -763,4 +763,18 @@ MODEL_CONFIG = {
None, None,
lambda x: cc_process(x) lambda x: cc_process(x)
), ),
# 加载火焰面积模型
ModelType.CITY_FIREAREA_MODEL.value[1]: (
lambda x, y, r, t, z, h: TwoModel(x, y, r, ModelType.CITY_FIREAREA_MODEL, t, z, h),
ModelType.CITY_FIREAREA_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: forest_process(x)
),
# 加载安防模型
ModelType.CITY_SECURITY_MODEL.value[1]: (
lambda x, y, r, t, z, h: cityManagementModel(x, y, r, ModelType.CITY_SECURITY_MODEL, t, z, h),
ModelType.CITY_SECURITY_MODEL,
lambda x, y, z: one_label(x, y, z),
lambda x: detSeg_demo2(x)
),
} }

View File

@ -5,7 +5,7 @@ import unicodedata
from loguru import logger from loguru import logger
FONT_PATH = "../AIlib2/conf/platech.ttf" FONT_PATH = "../AIlib2/conf/platech.ttf"
zhFont = ImageFont.truetype(FONT_PATH, 20, encoding="utf-8") zhFont = ImageFont.truetype(FONT_PATH, 20, encoding="utf-8")
def get_label_array(color=None, label=None, font=None, fontSize=40, unify=False): def get_label_array(color=None, label=None, font=None, fontSize=40, unify=False):
if unify: if unify:
@ -24,7 +24,6 @@ def get_label_array(color=None, label=None, font=None, fontSize=40, unify=False)
im_array = cv2.resize(im_array, (0, 0), fx=scale, fy=scale) im_array = cv2.resize(im_array, (0, 0), fx=scale, fy=scale)
return im_array return im_array
def get_label_arrays(labelNames, colors, fontSize=40, fontPath="platech.ttf"): def get_label_arrays(labelNames, colors, fontSize=40, fontPath="platech.ttf"):
font = ImageFont.truetype(fontPath, fontSize, encoding='utf-8') font = ImageFont.truetype(fontPath, fontSize, encoding='utf-8')
label_arraylist = [get_label_array(colors[i % 20], label_name, font, fontSize) for i, label_name in label_arraylist = [get_label_array(colors[i % 20], label_name, font, fontSize) for i, label_name in
@ -50,6 +49,48 @@ def get_label_array_dict(colors, fontSize=40, fontPath="platech.ttf"):
zh_dict[code] = arr zh_dict[code] = arr
return zh_dict return zh_dict
def get_label_left(x0, y1, label_array, img):
    """Anchor a label patch at the top-left corner (x0, y1) of a detection box.

    The patch is tentatively placed directly above the corner and then shifted
    back inside the image whenever it would spill over any border.

    Args:
        x0: x of the box corner the label attaches to.
        y1: y of the box corner the label attaches to.
        label_array: rendered label image; only its height/width are used.
        img: target image; only its height/width are used.

    Returns:
        (x0, y0, x1, y1) pixel bounds where the label patch should be pasted.
    """
    img_h, img_w = img.shape[0:2]
    patch_h, patch_w = label_array.shape[0:2]
    # Tentative placement: the patch sits just above the anchor corner.
    x1 = x0 + patch_w
    y0 = y1 - patch_h
    # Shift down if the patch pokes out above the image.
    if y0 < 0:
        y0, y1 = 0, patch_h
    # Shift up if it extends below the bottom edge.
    if y1 > img_h:
        y1, y0 = img_h, img_h - patch_h
    # Shift right if it starts left of the image.
    if x0 < 0:
        x0, x1 = 0, patch_w
    # Shift left if it runs past the right edge.
    if x1 > img_w:
        x1, x0 = img_w, img_w - patch_w
    return x0, y0, x1, y1
def get_label_right(x1, y0, label_array):
    """Anchor a label patch above the top-right corner (x1, y0) of a box.

    The patch extends left of and above the anchor and is pushed back inside
    the top/left edges if it would go negative. No image bounds are applied
    on the right/bottom (no image is passed in).

    Note: the result is in (left, top, right, bottom) order but is returned
    as ``(x0, y1, x1, y0)`` — the names follow the anchor corner rather than
    the usual rectangle convention.
    """
    patch_h, patch_w = label_array.shape[0:2]
    # Tentative placement: patch to the left of and above the anchor.
    x0 = x1 - patch_w
    y1 = y0 - patch_h
    # Pull down to the top edge if it sticks out above the image.
    if y0 < 0 or y1 < 0:
        y1 = 0
        y0 = patch_h
    # Pull right to the left edge if it sticks out on the left.
    if x0 < 0 or x1 < 0:
        x0 = 0
        x1 = patch_w
    return x0, y1, x1, y0
def xywh2xyxy(box): def xywh2xyxy(box):
if not isinstance(box[0], (list, tuple, np.ndarray)): if not isinstance(box[0], (list, tuple, np.ndarray)):
@ -75,42 +116,24 @@ def xy2xyxy(box):
box = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)] box = [(x1, y1), (x2, y1), (x2, y2), (x1, y2)]
return box return box
def draw_painting_joint(box, img, label_array, score=0.5, color=None, config=None, isNew=False): def draw_painting_joint(box, img, label_array, score=0.5, color=None, config=None, isNew=False, border=None):
# 识别问题描述图片的高、宽 # 识别问题描述图片的高、宽
lh, lw = label_array.shape[0:2]
# 图片的长度和宽度 # 图片的长度和宽度
imh, imw = img.shape[0:2] if border is not None:
border = np.array(border,np.int32)
color,label_array=draw_name_border(box,color,label_array,border)
#img = draw_transparent_red_polygon(img,border,'',alpha=0.1)
lh, lw = label_array.shape[0:2]
tl = config[0]
if isinstance(box[-1], np.ndarray):
return draw_name_points(img,box,color)
label = ' %.2f' % score
box = xywh2xyxy(box) box = xywh2xyxy(box)
# 框框左上的位置 # 框框左上的位置
x0, y1 = box[0][0], box[0][1] x0, y1 = box[0][0], box[0][1]
# if score_location == 'leftTop': x0, y0, x1, y1 = get_label_left(x0, y1, label_array, img)
# x0, y1 = box[0][0], box[0][1]
# # 框框左下的位置
# elif score_location == 'leftBottom':
# x0, y1 = box[3][0], box[3][1]
# else:
# x0, y1 = box[0][0], box[0][1]
# x1 框框左上x位置 + 描述的宽
# y0 框框左上y位置 - 描述的高
x1, y0 = x0 + lw, y1 - lh
# 如果y0小于0, 说明超过上边框
if y0 < 0:
y0 = 0
# y1等于文字高度
y1 = y0 + lh
# 如果y1框框的高大于图片高度
if y1 > imh:
# y1等于图片高度
y1 = imh
# y0等于y1减去文字高度
y0 = y1 - lh
# 如果x0小于0
if x0 < 0:
x0 = 0
x1 = x0 + lw
if x1 > imw:
x1 = imw
x0 = x1 - lw
# box_tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1 # box_tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1
''' '''
1. imgarray 为ndarray类型可以为cv.imread直接读取的数据 1. imgarray 为ndarray类型可以为cv.imread直接读取的数据
@ -120,14 +143,12 @@ def draw_painting_joint(box, img, label_array, score=0.5, color=None, config=Non
5. thicknessint画线的粗细 5. thicknessint画线的粗细
6. shift顶点坐标中小数的位数 6. shift顶点坐标中小数的位数
''' '''
tl = config[0] img[y0:y1, x0:x1, :] = label_array
box1 = np.asarray(box, np.int32) box1 = np.asarray(box, np.int32)
cv2.polylines(img, [box1], True, color, tl) cv2.polylines(img, [box1], True, color, tl)
img[y0:y1, x0:x1, :] = label_array
pts_cls = [(x0, y0), (x1, y1)] pts_cls = [(x0, y0), (x1, y1)]
# 把英文字符score画到类别旁边 # 把英文字符score画到类别旁边
# tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1 # tl = max(int(round(imw / 1920 * 3)), 1) or round(0.002 * (imh + imw) / 2) + 1
label = ' %.2f' % score
# tf = max(tl, 1) # tf = max(tl, 1)
# fontScale = float(format(imw / 1920 * 1.1, '.2f')) or tl * 0.33 # fontScale = float(format(imw / 1920 * 1.1, '.2f')) or tl * 0.33
# fontScale = tl * 0.33 # fontScale = tl * 0.33
@ -230,7 +251,6 @@ def draw_name_ocr(box, img, color, line_thickness=2, outfontsize=40):
# (color=None, label=None, font=None, fontSize=40, unify=False) # (color=None, label=None, font=None, fontSize=40, unify=False)
label_zh = get_label_array(color, box[0], font, outfontsize) label_zh = get_label_array(color, box[0], font, outfontsize)
return plot_one_box_auto(box[1], img, color, line_thickness, label_zh) return plot_one_box_auto(box[1], img, color, line_thickness, label_zh)
def filterBox(det0, det1, pix_dis): def filterBox(det0, det1, pix_dis):
# det0为 (m1, 11) 矩阵 # det0为 (m1, 11) 矩阵
# det1为 (m2, 12) 矩阵 # det1为 (m2, 12) 矩阵
@ -276,6 +296,7 @@ def plot_one_box_auto(box, img, color=None, line_thickness=2, label_array=None):
# print("省略 :%s, lh:%s, lw:%s"%('+++' * 10, lh, lw)) # print("省略 :%s, lh:%s, lw:%s"%('+++' * 10, lh, lw))
# 图片的长度和宽度 # 图片的长度和宽度
imh, imw = img.shape[0:2] imh, imw = img.shape[0:2]
points = None
box = xy2xyxy(box) box = xy2xyxy(box)
# 框框左上的位置 # 框框左上的位置
x0, y1 = box[0][0], box[0][1] x0, y1 = box[0][0], box[0][1]
@ -316,7 +337,6 @@ def plot_one_box_auto(box, img, color=None, line_thickness=2, label_array=None):
return img, box return img, box
def draw_name_crowd(dets, img, color, outfontsize=20): def draw_name_crowd(dets, img, color, outfontsize=20):
font = ImageFont.truetype(FONT_PATH, outfontsize, encoding='utf-8') font = ImageFont.truetype(FONT_PATH, outfontsize, encoding='utf-8')
if len(dets) == 2: if len(dets) == 2:
@ -367,4 +387,90 @@ def draw_name_crowd(dets, img, color, outfontsize=20):
img[y0:y1, x0:x1, :] = label_arr img[y0:y1, x0:x1, :] = label_arr
return img, dets return img, dets
def draw_name_points(img, box, color):
    """Draw a flame contour with a '火焰' tag and an area caption onto ``img``.

    ``box`` carries four corner points followed by the contour point array as
    its last element; ``color`` is the BGR colour used for text and outlines.
    Returns the mutated image and the rectangle drawn around the detection.
    """
    zh_font = ImageFont.truetype(FONT_PATH, 6, encoding='utf-8')
    contour = box[-1]
    # Caption: contour area in scientific notation next to the class name.
    contour_area = cv2.contourArea(contour)
    area_patch = get_label_array(color, '面积:%s' % f"{contour_area:.1e}", zh_font, 10)
    name_patch = get_label_array(color, '火焰', zh_font, 10)
    area_h, area_w = area_patch.shape[0:2]
    corners = box[:4]
    # Name anchors just above the box top edge (3px gap), clamped at 0.
    left_x = corners[0][0]
    top_y = max(corners[0][1] - area_h - 3, 0)
    right_x, anchor_y = corners[1][0], corners[1][1]
    nx0, ny0, nx1, ny1 = get_label_left(left_x, top_y, name_patch, img)
    ax0, ay0, ax1, ay1 = get_label_right(right_x, anchor_y, area_patch)
    # Paste the name patch, then the area patch, then stroke the contour.
    img[ny0:ny1, nx0:nx1, :] = name_patch
    img[ay0:ay1, ax0:ax1, :] = area_patch
    cv2.polylines(img, [contour], False, color, 2)
    # Surrounding rectangle: fall back to the (shifted) name-patch x when the
    # box is narrower than the area caption.
    if area_w < corners[1][0] - corners[0][0]:
        rect = [(left_x, top_y), (right_x, top_y),
                (right_x, corners[2][1]), (left_x, corners[2][1])]
    else:
        rect = [(nx0, top_y), (right_x, top_y),
                (right_x, corners[2][1]), (nx0, corners[2][1])]
    rect = np.asarray(rect, np.int32)
    cv2.polylines(img, [rect], True, color, 2)
    return img, rect
def draw_name_border(box, color, label_array, border):
    """Recolour a detection whose centre falls inside the ``border`` polygon.

    When the box centre lies strictly inside the polygon, the draw colour is
    switched to red (BGR [0, 0, 255]) and every non-white pixel of the label
    patch is repainted in that colour, keeping the white background intact.

    Returns the (possibly replaced) colour and label patch.
    """
    corners = xywh2xyxy(box[:4])
    centre_x = int((corners[0][0] + corners[2][0]) / 2)
    centre_y = int((corners[0][1] + corners[2][1]) / 2)
    # pointPolygonTest with measureDist=False: +1 inside, -1 outside, 0 on edge.
    inside = cv2.pointPolygonTest(border, (int(centre_x), int(centre_y)), False)
    if inside == 1:
        color = [0, 0, 255]
        # Pixels within 30 of pure white (255, 255, 255) count as background.
        white_lo = np.array([225, 225, 225], dtype=np.uint8)
        white_hi = np.array([255, 255, 255], dtype=np.uint8)
        background = cv2.inRange(label_array, white_lo, white_hi)
        repaint = np.full_like(label_array, color, dtype=np.uint8)
        # Keep background pixels, recolour everything else.
        label_array = np.where(background[..., None], label_array, repaint)
    return color, label_array
def draw_transparent_red_polygon(img, points, alpha=0.5):
    """Blend a semi-transparent red polygon into ``img`` and return the result.

    Args:
        img: BGR image (``np.uint8``) to draw on; must not be ``None``.
        points: ``np.ndarray`` of polygon vertices; reshaped internally to the
            ``(-1, 1, 2)`` layout that ``cv2.fillPoly`` expects.
        alpha: overlay opacity in [0, 1]; smaller values are more transparent.

    Returns:
        A new ``np.uint8`` image with the polygon blended in (``img`` itself
        is not modified in place).

    Raises:
        ValueError: if ``img`` is ``None``.
    """
    if img is None:
        raise ValueError("无法读取图像")
    # RGBA scratch layer the same size as the image; the polygon is drawn
    # here rather than directly on the original.
    overlay = np.zeros((img.shape[0], img.shape[1], 4), dtype=np.uint8)
    pts = points.reshape((-1, 1, 2))
    # Channels are treated as RGBA, so (255, 0, 0, a) becomes red after the
    # RGBA->BGR conversion below; the 4th channel carries the opacity.
    cv2.fillPoly(overlay, [pts], (255, 0, 0, int(alpha * 255)))
    overlay_bgr = cv2.cvtColor(overlay, cv2.COLOR_RGBA2BGR)
    # Per-pixel blend weight from the alpha channel, expanded to 3 channels.
    mask = overlay[:, :, 3] / 255.0
    mask = np.stack([mask] * 3, axis=-1)
    # Classic alpha composite: original where mask is 0, red where it is 1.
    blended = img * (1 - mask) + overlay_bgr * mask
    return blended.astype(np.uint8)