from loguru import logger
import json, cv2, time, os, torch, glob
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import torch.nn.functional as F
from copy import deepcopy
from scipy import interpolate

from DrGraph.util import yoloHelper, torchHelper
from DrGraph.util.drHelper import *

from DrGraph.util.segutils.trtUtils import segtrtEval, yolov5Trtforward, OcrTrtForward

def getDetectionsFromPreds(pred, img, im0, conf_thres=0.2, iou_thres=0.45, ovlap_thres=0.6, padInfos=None):
    '''
    Post-process the raw YOLO predictions: NMS, coordinate restoration and format conversion.

    Args:
        pred (torch.Tensor): raw detector output, a tensor holding boxes, confidences and class scores.
        img (torch.Tensor): the image tensor that was fed to the detector, used as the coordinate reference.
        im0 (numpy.ndarray): the original image, used to map the boxes back to the original resolution.
        conf_thres (float): confidence threshold of the first non-maximum suppression (NMS) pass, default 0.2.
        iou_thres (float): IoU threshold of the first NMS pass, default 0.45.
        ovlap_thres (float): IoU threshold of the optional second NMS pass; skipped if 0, default 0.6.
        padInfos (list or None): padding information from the image resize, used to restore box positions accurately.

    Returns:
        list with four elements:
            - im0 (numpy.ndarray): the original image.
            - im0 (numpy.ndarray): the same image again, kept as a duplicate for interface compatibility.
            - det_xywh (list of lists): detections, each entry is [x0, y0, x1, y1, score, cls].
            - 0 (int): meaningless placeholder, kept only so the result has the same four elements as the old interface.
    '''
    with TimeDebugger('prediction post-processing') as td:
        # First non-maximum suppression: drop low-confidence and heavily overlapping boxes
        pred = yoloHelper.non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
        # If a second NMS threshold is given, suppress overlapping boxes across classes
        if ovlap_thres:
            pred = yoloHelper.overlap_box_suppression(pred, ovlap_thres)
        td.addStep("NMS")
        i = 0; det = pred[0]  # one image is processed per call
        det_xywh = []

        # If there are detections, restore coordinates and convert the format
        if len(det) > 0:
            # Scale the boxes back to the original image size
            H, W = im0.shape[0:2]
            det[:, :4] = imgHelper.scale_back( det[:, :4], padInfos).round() \
                if padInfos \
                else imgHelper.scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

            # Convert the coordinate format and move the tensors to numpy on the CPU
            for *xyxy, conf, cls in reversed(det):
                cls_c = cls.cpu().numpy()
                conf_c = conf.cpu().numpy()
                tt = [ int(x.cpu()) for x in xyxy]
                x0, y0, x1, y1 = tt[0:4]
                x0 = max(0, x0); y0 = max(0, y0)
                x1 = min(W-1, x1); y1 = min(H-1, y1)
                #line = [float(cls_c), *tt, float(conf_c)] # old label format
                line = [ x0, y0, x1, y1, float(conf_c), float(cls_c)]  # label format, changed 2023.08.03
                det_xywh.append(line)

        td.addStep('ScaleBack')
    return [im0, im0, det_xywh, 0]  # the trailing 0 has no meaning; it only keeps the 4-element layout of the old interface

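# Usage sketch (illustrative only): how getDetectionsFromPreds is typically wired up after a
# plain PyTorch forward pass. `model`, `im0` and the preprocessing shown here are assumptions
# made for the sketch, not part of this module; the call itself follows the signature above.
#
#   im0 = cv2.imread('frame.jpg')                                   # original BGR image
#   img = imgHelper.letterbox(im0, 640, auto=True, stride=32)[0]    # resize with padding
#   img = np.ascontiguousarray(img[:, :, ::-1].transpose(2, 0, 1))  # BGR->RGB, HWC->CHW
#   img = torch.from_numpy(img).unsqueeze(0).float().to('cuda:0') / 255.0
#   pred = model(img, augment=False)[0]
#   _, _, det_xywh, _ = getDetectionsFromPreds(pred, img, im0, conf_thres=0.25, iou_thres=0.45)
#   # det_xywh: [[x0, y0, x1, y1, score, cls], ...] in original-image pixel coordinates
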
def score_filter_byClass(pdetections, score_para_2nd):
    """
    Filter detections with class-specific confidence thresholds.

    Args:
        pdetections: list of detections, each entry in the format [x1, y1, x2, y2, score, class].
        score_para_2nd: dict mapping a class id (int or str) to its confidence threshold.

    Returns:
        ret: filtered detection list, keeping only the boxes whose score exceeds the threshold of their class.
    """
    ret = []
    for det in pdetections:
        # Confidence and class of the current box
        score, cls = det[4], det[5]
        # Look up the threshold for this class: integer key first, then string key, otherwise fall back to 0.7
        if int(cls) in score_para_2nd.keys():
            score_th = score_para_2nd[int(cls)]
        elif str(int(cls)) in score_para_2nd.keys():
            score_th = score_para_2nd[str(int(cls))]
        else:
            score_th = 0.7
        # Keep only boxes whose score exceeds the threshold
        if score > score_th:
            ret.append(det)
    return ret

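# Example (illustrative, values are made up): per-class thresholds with an integer key, a string
# key and an unconfigured class that falls back to the default threshold of 0.7.
#
#   dets = [[10, 10, 50, 50, 0.30, 0],   # class 0, kept (0.30 > 0.2)
#           [20, 20, 60, 60, 0.40, 1],   # class 1, dropped (0.40 <= 0.5)
#           [30, 30, 70, 70, 0.75, 9]]   # class 9 not configured, kept (0.75 > 0.7 default)
#   score_filter_byClass(dets, {0: 0.2, '1': 0.5})
#   # -> [[10, 10, 50, 50, 0.30, 0], [30, 30, 70, 70, 0.75, 9]]
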
def AI_process(im0s, model, segmodel, names, label_arraylist, rainbows,
               objectPar={ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False,'score_byClass':{x:0.1 for x in range(30)} },
               font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},
               segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True},
               mode='others', postPar=None):
    # logger.info("AI_process(\n\rim0s={}, \n\rmodel={},\n\rsegmodel={},\n\rnames={},\n\rrainbows={},\n\robjectPar={},\n\rfont={},\n\rsegPar={},\n\rmode={},\n\rpostPar={})", \
    #             im0s, model, segmodel, names, rainbows, \
    #             objectPar, font, segPar, mode, postPar)
    """
    Run detection (and optionally segmentation) on the input image and return the processed results.

    Args:
        im0s (list): list of original images.
        model: detection model object.
        segmodel: segmentation model object, or None if segmentation is not used.
        names (list): class name list.
        label_arraylist: list of rendered label arrays.
        rainbows: colour map parameters used for drawing.
        objectPar (dict): detection parameters, by default containing:
            - half (bool): whether to run in half precision (FP16).
            - device (str): device to use (e.g. 'cuda:0').
            - conf_thres (float): confidence threshold.
            - iou_thres (float): IoU threshold.
            - allowedList (list): class ids that are allowed to be detected.
            - segRegionCnt (int): number of segmentation regions.
            - trtFlag_det (bool): whether the detector runs through TensorRT.
            - trtFlag_seg (bool): whether the segmenter runs through TensorRT.
            - score_byClass (dict): minimum confidence threshold per class.
        font (dict): font and drawing parameters.
        segPar (dict): segmentation model parameters.
        mode (str): processing mode identifier.
        postPar: post-processing parameters, currently unused.

    Returns:
        tuple with two elements:
            - list: result list in the format [original image, processed image, detections, frame id].
              The detections are a list where each element describes one target as
              [x0, y0, x1, y1, conf_c, cls_c]: box corners, confidence conf_c and class id cls_c
              (output format changed 2023.08.03).
            - str: timing string for the individual processing stages.
    """

    # Extract the key parameters from objectPar
    half, device, conf_thres, iou_thres = objectPar['half'], objectPar['device'], objectPar['conf_thres'], objectPar['iou_thres']

    trtFlag_det, trtFlag_seg, segRegionCnt = objectPar['trtFlag_det'], objectPar['trtFlag_seg'], objectPar['segRegionCnt']
    if 'ovlap_thres_crossCategory' in objectPar.keys(): ovlap_thres = objectPar['ovlap_thres_crossCategory']
    else: ovlap_thres = None

    if 'score_byClass' in objectPar.keys(): score_byClass = objectPar['score_byClass']
    else: score_byClass = None

    with TimeDebugger('AI_process') as td:  # enabled logAtExit - prints the timing breakdown when the block exits
        # Image preprocessing: pad to a fixed size for TensorRT, otherwise letterbox
        if trtFlag_det:
            img, padInfos = imgHelper.img_pad(im0s[0], size=(640,640,3))
            img = [img]
        else:
            #print('####line72:',im0s[0][10:12,10:12,2])
            img = [imgHelper.letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
            padInfos = None
        img_height, img_width = img[0].shape[0:2]  # height and width of the preprocessed image
        #print('####line74:',img[0][10:12,10:12,2])
        # Stack the images and convert to the model input layout (BGR to RGB, HWC to CHW)
        img = np.stack(img, 0)
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)
        td.addStep("img_pad" if trtFlag_det else "letterbox")

        # Convert to a PyTorch tensor and normalise to [0, 1]
        img = torch.from_numpy(img)
        td.addStep(f"from_numpy({img_height} x {img_width})")
        img = img.to(device)
        td.addStep(f"to GPU({img_height} x {img_width})" )

        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0
        # td.addStep("seg")

        # If a segmentation model is supplied, run the segmentation inference
        if segmodel:
            seg_pred, segstr = segmodel.eval(im0s[0] )
            segFlag = True
        else:
            seg_pred = None; segFlag = False; segstr = 'Not implemented'
        td.addStep("infer")
        # Run the detection inference
        if trtFlag_det:
            pred = yolov5Trtforward(model, img)
        else:
            #print('####line96:',img[0,0,10:12,10:12])
            pred = model(img, augment=False)[0]
        td.addStep('yolov5Trtforward' if trtFlag_det else 'model')

        # Post-process the detections: NMS and coordinate restoration
        p_result = getDetectionsFromPreds(pred, img, im0s[0], conf_thres=conf_thres, iou_thres=iou_thres, ovlap_thres=ovlap_thres, padInfos=padInfos)
        # Apply the per-class confidence thresholds
        if score_byClass:
            p_result[2] = score_filter_byClass(p_result[2], score_byClass)
        td.addStep('post-processing')
        #print('-'*10,p_result[2])
        #if mode=='highWay3.0':
        #if segmodel:
        # If a mix post-processing function is configured (e.g. refining boxes with the segmentation result), run it
        if segPar and segPar.get('mixFunction', {}).get('function'):  # .get avoids a KeyError when segPar has no 'mixFunction' entry
            mixFunction = segPar['mixFunction']['function']
            H, W = im0s[0].shape[0:2]
            parMix = segPar['mixFunction']['pars']  #print('###line117:',parMix,p_result[2])
            parMix['imgSize'] = (W, H)
            #print(' -----------line149: ',p_result[2] ,'\n', seg_pred, parMix ,' sumpSeg:',np.sum(seg_pred))
            logger.warning('mix post-processing function enabled')
            p_result[2], timeMixPost = mixFunction(p_result[2], seg_pred, pars=parMix )
            #print(' -----------line112: ',p_result[2] )
            p_result.append(seg_pred)
        else:
            timeMixPost = ':0 ms'
        time_info = td.getReportInfo()
    return p_result, time_info

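# Call sketch (illustrative, not a verified configuration): running AI_process with a plain
# PyTorch detector, no segmentation model and no mix post-processing. `frame`, `det_model`,
# `names`, `label_arraylist` and `rainbows` are assumed to be prepared by the caller elsewhere.
#
#   objectPar = {'half': True, 'device': 'cuda:0', 'conf_thres': 0.25, 'iou_thres': 0.45,
#                'allowedList': [0, 1, 2, 3], 'segRegionCnt': 1,
#                'trtFlag_det': False, 'trtFlag_seg': False,
#                'score_byClass': {0: 0.3, 1: 0.5}}
#   p_result, time_info = AI_process([frame], det_model, None, names, label_arraylist, rainbows,
#                                    objectPar=objectPar, segPar=None)
#   detections = p_result[2]   # [[x0, y0, x1, y1, conf, cls], ...]
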
def AI_process_N(im0s, modelList, postProcess):
    # Inputs:
    #   im0s -- list of original images
    #   modelList -- all models; each model is a class whose eval() returns that model's inference result
    #   postProcess -- dict holding the post-processing function and its parameters
    # Outputs:
    #   ret[0] -- detection results
    #   ret[1] -- timing information
    # See the call sketch after this function for the assumed layout of postProcess.

    modelRets = [ model.eval(im0s[0]) for model in modelList]

    timeInfos = [ x[1] for x in modelRets]
    timeInfos = ''.join(timeInfos)

    # postProcess['function'] -- the post-processing function; its input is the list of all model outputs
    mixFunction = postProcess['function']
    predsList = [ modelRet[0] for modelRet in modelRets ]
    H, W = im0s[0].shape[0:2]
    postProcess['pars']['imgSize'] = (W, H)

    # ret is the result of the mixed post-processing
    ret = mixFunction( predsList, postProcess['pars'])

    return ret[0], timeInfos + ret[1]

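# Assumed shape of the postProcess argument (a sketch inferred from the accesses above, not a
# documented contract): 'function' receives the list of per-model outputs plus the parameter
# dict and must return (detections, timing_string); 'pars' carries whatever that function needs,
# with 'imgSize' filled in automatically by AI_process_N.
#
#   def my_mix(predsList, pars):
#       dets = predsList[0]          # e.g. keep only the detector output
#       return dets, ' mix:0.0ms'
#
#   postProcess = {'function': my_mix, 'pars': {}}
#   dets, time_infos = AI_process_N([frame], [det_model_wrapper, seg_model_wrapper], postProcess)
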
def getMaxScoreWords(detRets0):
    # Return the index of the detection with the highest confidence (column 4) in detRets0.
    maxScore = -1; maxId = 0
    for i, detRet in enumerate(detRets0):
        if detRet[4] > maxScore:
            maxId = i
            maxScore = detRet[4]
    return maxId

def AI_process_C(im0s, modelList, postProcess):
    # Why this dedicated function exists:
    #   The previous pipeline was:
    #     image --> model1 --> result1; image --> model2 --> result2; [result1, result2] --> post-processing
    #   This function instead runs:
    #     image --> model1 --> result1; [image, result1] --> model2 --> result2; [result1, result2] --> post-processing
    #   i.e. the input of model2 is determined by the output of model1. For example, when model2 is an OCR model,
    #   the ship-name boxes detected by model1 are cropped out of the image and fed to model2.
    #   In the earlier pipelines model2 was a segmentation model whose input was the raw image, independent of model1.
    # Inputs:
    #   im0s -- list of original images
    #   modelList -- all models; each model is a class whose eval() returns that model's inference result
    #   postProcess -- dict holding the post-processing function and its parameters
    # Outputs:
    #   ret[0] -- detection results
    #   ret[1] -- timing information

    t0 = time.time()
    detRets0 = modelList[0].eval(im0s[0])

    #detRets0=[[12, 46, 1127, 1544, 0.2340087890625, 2.0], [1884, 1248, 2992, 1485, 0.64208984375, 1.0]]
    detRets0 = detRets0[0]
    parsIn = postProcess['pars']

    _detRets0_obj = list(filter(lambda x: x[5] in parsIn['objs'], detRets0 ))
    _detRets0_others = list(filter(lambda x: x[5] not in parsIn['objs'], detRets0 ))
    _detRets0 = []
    if postProcess['name'] == 'channel2':
        # Keep only the highest-scoring object box (e.g. the ship name) for the second stage
        if len(_detRets0_obj) > 0:
            maxId = getMaxScoreWords(_detRets0_obj)
            _detRets0 = _detRets0_obj[maxId:maxId+1]
    else:
        _detRets0 = detRets0

    t1 = time.time()
    # Crop the selected boxes out of the image and feed each patch to the second model
    imagePatches = [ im0s[0][int(x[1]):int(x[3]), int(x[0]):int(x[2])] for x in _detRets0 ]
    detRets1 = [modelList[1].eval(patch) for patch in imagePatches]
    print('###line240:', detRets1)
    if postProcess['name'] == 'crackMeasurement':
        detRets1 = [x[0]*255 for x in detRets1]
        t2 = time.time()
        mixFunction = postProcess['function']
        crackInfos = [mixFunction(patchMask, par=parsIn) for patchMask in detRets1]

        rets = [ _detRets0[i] + crackInfos[i] for i in range(len(imagePatches)) ]
        t3 = time.time()
        outInfos = 'total:%.1f (det:%.1f %d segs:%.1f mixProcess:%.1f) '%( (t3-t0)*1000, (t1-t0)*1000, len(detRets1), (t2-t1)*1000, (t3-t2)*1000 )
    elif postProcess['name'] == 'channel2':
        H, W = im0s[0].shape[0:2]; parsIn['imgSize'] = (W, H)
        mixFunction = postProcess['function']
        _detRets0_others = mixFunction([_detRets0_others], parsIn)
        ocrInfo = 'no ocr'
        if len(_detRets0_obj) > 0:
            res_real = detRets1[0][0]
            # Keep only Chinese characters and digits from the OCR output
            res_real = "".join( list(filter(lambda x: (ord(x) > 19968 and ord(x) < 63865 ) or (ord(x) > 47 and ord(x) < 58 ), res_real)))

            #detRets1[0][0]="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),detRets1[0][0])))
            _detRets0_obj[maxId].append(res_real )
            _detRets0_obj = [_detRets0_obj[maxId]]  # only output the ship-name box that has an OCR result
            ocrInfo = detRets1[0][1]
        print( ' _detRets0_obj:{} _detRets0_others:{} '.format( _detRets0_obj, _detRets0_others ) )
        rets = _detRets0_obj + _detRets0_others
        t3 = time.time()
        outInfos = 'total:%.1f ,where det:%.1f, ocr:%s'%( (t3-t0)*1000, (t1-t0)*1000, ocrInfo)

    #print('###line233:',detRets1,detRets0 )

    return rets, outInfos

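# Call sketch (illustrative, parameter values are assumptions): the 'channel2' flow, where the
# first model detects ship-name regions, the best-scoring region is cropped and passed to the
# second (OCR) model, and the remaining boxes go through the mix post-processing function.
#
#   postProcess = {'name': 'channel2',
#                  'function': my_mix,        # hypothetical post-processing callable
#                  'pars': {'objs': [2.0]}}   # class ids treated as ship-name boxes
#   rets, out_infos = AI_process_C([frame], [det_model_wrapper, ocr_model_wrapper], postProcess)
#   # when a ship-name box is found, rets[0] ends with the filtered OCR string appended to it
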
def post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, iframe,
                  ObjectPar={ 'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7] ,'segmodel':True,'segRegionCnt':1 },
                  font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},
                  padInfos=None, ovlap_thres=None):
    object_config, slopeIndex, segmodel, segRegionCnt = ObjectPar['object_config'], ObjectPar['slopeIndex'], ObjectPar['segmodel'], ObjectPar['segRegionCnt']
    ## Input: the data produced by the dataset generator, the model predictions pred, and the NMS parameters
    ## Main steps: NMS ---> coordinate conversion ---> drawing
    ## Output: the original image, the AI-annotated image and the detection results
    time0 = time.time()
    path, img, im0s, vid_cap, pred, seg_pred = datas[0:6]
    #segmodel=True
    pred = yoloHelper.non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    if ovlap_thres:
        pred = yoloHelper.overlap_box_suppression(pred, ovlap_thres)
    time1 = time.time()
    i = 0; det = pred[0]  # one image is processed per call
    time1_1 = time.time()
    #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
    p, s, im0 = path[i], '%g: ' % i, im0s[i]
    time1_2 = time.time()
    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
    time1_3 = time.time()
    det_xywh = []
    #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR);
    if segmodel:
        if len(seg_pred) == 2:
            im0, water = illBuildings(seg_pred, im0)
        else:
            river = { 'color':font['waterLineColor'],'line_width':font['waterLineWidth'],'segRegionCnt':segRegionCnt,'segLineShow':font['segLineShow'] }
            im0, water = drawWater(seg_pred, im0, river)
    time2 = time.time()
    #plt.imshow(im0);plt.show()
    if len(det) > 0:
        # Rescale boxes from img_size to im0 size
        if not padInfos:
            det[:, :4] = imgHelper.scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
        else:
            #print('####line131:',det[:, :])
            det[:, :4] = imgHelper.scale_back( det[:, :4], padInfos).round()
            #print('####line133:',det[:, :])
        # Use the segmentation result to decide which boxes are valid and to get the river contour
        if segmodel:
            cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32)
            ## Flag the targets that belong to the bank-slope classes
            slope_flag = np.array([x in slopeIndex for x in cls_indexs ] )

            det_c = det.clone(); det_c = det_c.cpu().numpy()
            try:
                # Fraction of each box's area that lies on water pixels
                area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c] )
            except:
                print('*****************************line143: error:', det_c)
                area_factors = np.zeros(len(det_c))  # fall back to zero overlap so the flag below is still defined
            water_flag = np.array(area_factors > 0.1)
            det = det[water_flag | slope_flag]  # water targets must overlap the water mask by more than 0.1; bank-slope targets are always kept
        # Draw the detection boxes

        for *xyxy, conf, cls in reversed(det):
            xywh = (mathHelper.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            cls_c = cls.cpu().numpy()

            conf_c = conf.cpu().numpy()
            tt = [ int(x.cpu()) for x in xyxy]
            #line = [float(cls_c), *tt, float(conf_c)] # old label format
            line = [*tt, float(conf_c), float(cls_c)]  # label format
            det_xywh.append(line)
            label = f'{names[int(cls)]} {conf:.2f}'
            #print('- '*20, ' line165:',xyxy,cls,conf )
            if int(cls_c) not in object_config:  # classes that were not requested are not drawn
                continue
            #print('- '*20, ' line168:',xyxy,cls,conf )
            im0 = drawHelper.draw_painting_joint(xyxy, im0, label_arraylist[int(cls)], score=conf, color=rainbows[int(cls)%20], font=font)
    time3 = time.time()
    strout = 'nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s '% ( \
        timeHelper.deltaTime_MS(time0, time1),
        timeHelper.deltaTime_MS(time1, time2),
        timeHelper.deltaTime_MS(time1_1, time1_2),
        timeHelper.deltaTime_MS(time1_2, time1_3),
        timeHelper.deltaTime_MS(time2, time3) )
    return [im0s[0], im0, det_xywh, iframe], strout

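# Sketch of the water-overlap filter used above (standalone illustration with made-up data):
# a box is kept as a water target when more than 10% of its area falls on water pixels,
# while bank-slope classes bypass the check entirely.
#
#   water = np.zeros((100, 100), dtype=np.uint8)
#   water[40:100, :] = 1                                   # lower half of the frame is water
#   box = [10, 50, 30, 90]                                 # x0, y0, x1, y1
#   overlap = np.sum(water[box[1]:box[3], box[0]:box[2]]) / ((box[2]-box[0]) * (box[3]-box[1]) + 1e-5)
#   keep = overlap > 0.1                                   # True for this box
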
def AI_process_forest(im0s, model, segmodel, names, label_arraylist, rainbows, half=True, device='cuda:0', conf_thres=0.25, iou_thres=0.45,
                      allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3}, trtFlag_det=False, SecNms=None):
    # Inputs:
    #   im0s -- list of original images
    #   model -- detection model, segmodel -- segmentation model (None if not used)
    # Output: a tuple of two elements (list, string): [im0s[0], im0, det_xywh, iframe], strout
    #   In [im0s[0], im0, det_xywh, iframe]:
    #     im0s[0] -- original image, im0 -- AI-annotated image, iframe -- frame id (not needed for now).
    #     det_xywh -- detection results, a list.
    #       Each element describes one target as [x0, y0, x1, y1, float(conf_c), float(cls_c)]  # output format changed 2023.08.03
    #       cls_c -- class id, e.g. 0,1,2,3; x0,y0,x1,y1 -- box corners; conf_c -- score in the range 0-1
    #   strout -- timing of the individual AI processing stages

    # Letterbox
    time0 = time.time()
    if trtFlag_det:
        img, padInfos = imgHelper.img_pad(im0s[0], size=(640,640,3)); img = [img]
    else:
        img = [imgHelper.letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]; padInfos = None
    #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32

    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if segmodel:
        seg_pred, segstr = segmodel.eval(im0s[0] )
        segFlag = True
    else:
        seg_pred = None; segFlag = False
    time1 = time.time()
    pred = yolov5Trtforward(model, img) if trtFlag_det else model(img, augment=False)[0]

    time2 = time.time()
    datas = [[''], img, im0s, None, pred, seg_pred, 10]

    ObjectPar = { 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 }
    p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, 10, ObjectPar=ObjectPar, font=font, padInfos=padInfos, ovlap_thres=SecNms)
    #print('###line274:',p_result[2])
    #p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos)
    time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000, (time2-time1)*1000 )
    return p_result, time_info + timeOut

def AI_det_track( im0s_in, modelPar, processPar, sort_tracker, segPar=None):
    im0s, iframe = im0s_in[0], im0s_in[1]
    model = modelPar['det_Model']
    segmodel = modelPar['seg_Model']
    half, device, conf_thres, iou_thres, trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'], processPar['trtFlag_det']
    if 'score_byClass' in processPar.keys(): score_byClass = processPar['score_byClass']
    else: score_byClass = None

    iou2nd = processPar['iou2nd']
    time0 = time.time()

    if trtFlag_det:
        img, padInfos = imgHelper.img_pad(im0s[0], size=(640,640,3))
        img = [img]
    else:
        img = [imgHelper.letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]; padInfos = None
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32

    img /= 255.0  # 0 - 255 to 0.0 - 1.0

    seg_pred = None; segFlag = False
    time1 = time.time()
    pred = yolov5Trtforward(model, img) if trtFlag_det else model(img, augment=False)[0]

    time2 = time.time()

    #p_result,timeOut = getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos)
    p_result = getDetectionsFromPreds(pred, img, im0s[0], conf_thres=conf_thres, iou_thres=iou_thres, ovlap_thres=iou2nd, padInfos=padInfos)
    timeOut = ''  # getDetectionsFromPreds returns only the result list; its timing is already covered by time_info below
    if score_byClass:
        p_result[2] = score_filter_byClass(p_result[2], score_byClass)
    if segmodel:
        seg_pred, segstr = segmodel.eval(im0s[0] )
        segFlag = True
    else:
        seg_pred = None; segFlag = False; segstr = 'No segmodel'

    if segPar and segPar.get('mixFunction', {}).get('function'):
        mixFunction = segPar['mixFunction']['function']

        H, W = im0s[0].shape[0:2]
        parMix = segPar['mixFunction']['pars']  #print('###line117:',parMix,p_result[2])
        parMix['imgSize'] = (W, H)

        p_result[2], timeInfos_post = mixFunction(p_result[2], seg_pred, pars=parMix )
        timeInfos_seg_post = 'segInfer:%s ,postMixProcess:%s'%( segstr, timeInfos_post )
    else:
        timeInfos_seg_post = ' '
    '''
    if segmodel:
        timeS1=time.time()
        #seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar) if segPar['trtFlag_seg'] else segmodel.eval(im0s[0] )
        seg_pred,segstr = segmodel.eval(im0s[0] )
        timeS2=time.time()
        mixFunction = segPar['mixFunction']['function']

        p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=segPar['mixFunction']['pars'] )

        timeInfos_seg_post = 'segInfer:%.1f ,postProcess:%s'%( (timeS2-timeS1)*1000, timeInfos_post )

    else:
        timeInfos_seg_post = ' '
    #print('######line341:',seg_pred.shape,np.max(seg_pred),np.min(seg_pred) , len(p_result[2]) )
    '''
    time_info = 'letterbox:%.1f, detinfer:%.1f, '%( (time1-time0)*1000, (time2-time1)*1000 )

    if sort_tracker:
        # The frequency with which the tracker is called can be adjusted here
        #..................USE TRACK FUNCTION....................
        #pass an empty array to sort
        dets_to_sort = np.empty((0,7), dtype=np.float32)

        # NOTE: We send in detected object class too
        #for detclass,x1,y1,x2,y2,conf in p_result[2]:
        for x1, y1, x2, y2, conf, detclass in p_result[2]:
            #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
            dets_to_sort = np.vstack((dets_to_sort,
                                      np.array([x1, y1, x2, y2, conf, detclass, iframe], dtype=np.float32) ))

        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
        tracks = sort_tracker.getTrackers()
        p_result.append(tracked_dets)  ###index=4
        p_result.append(tracks)  ###index=5

    return p_result, time_info + timeOut + timeInfos_seg_post

def AI_det_track_batch(imgarray_list, iframe_list, modelPar, processPar, sort_tracker, trackPar, segPar=None):
    '''
    Inputs:
        imgarray_list -- list of images
        iframe_list -- list of frame ids
        modelPar -- model parameters, a dict: modelPar={'det_Model':,'seg_Model':}
        processPar -- dict with the detection parameters: 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det'
        sort_tracker -- an initialised tracker object. For consistency it must be provided even for a single frame.
        trackPar -- tracking parameters, with the keys det_cnt and windowsize
        segPar -- segmentation parameters, or None if not used
    Outputs: retResults, timeInfos
        retResults: list
            retResults[0] -- imgarray_list
            retResults[1] -- all results as a numpy array with 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
            retResults[2] -- all results as a list; each element is the list of detections of one frame, and each
                             detection is a list [x0, y0, x1, y1, conf, cls, iframe, trackId], so retResults[2][j][k]
                             is the k-th box of the j-th frame. Output format changed 2023.08.03.
    '''

    det_cnt, windowsize = trackPar['det_cnt'], trackPar['windowsize']
    trackers_dic = {}
    index_list = list(range( 0, len(iframe_list), det_cnt ))
    if len(index_list) > 1 and index_list[-1] != iframe_list[-1]:
        index_list.append( len(iframe_list) - 1 )

    if len(imgarray_list) == 1:  # a single frame needs no tracking
        retResults = []
        p_result, timeOut = AI_det_track( [ [imgarray_list[0]], iframe_list[0] ], modelPar, processPar, None, segPar )
        ## The next few lines only exist to keep the output format consistent with the multi-frame case
        detArray = np.array(p_result[2])
        #print('##line371:',detArray)
        if len(p_result[2]) == 0: res = []
        else:
            cnt = detArray.shape[0]; trackIds = np.zeros((cnt,1)); iframes = np.zeros((cnt,1)) + iframe_list[0]

            #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
            detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6], iframes, trackIds ) )  ## input format changed 2023.08.03
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
        retResults = [imgarray_list, detArray, res ]
        #print('##line380:',retResults[2])
        return retResults, timeOut

    else:
        t0 = time.time()
        timeInfos_track = ''
        for iframe_index, index_frame in enumerate(index_list):
            p_result, timeOut = AI_det_track( [ [imgarray_list[index_frame]], iframe_list[index_frame] ], modelPar, processPar, sort_tracker, segPar )
            timeInfos_track = '%s:%s'%(timeInfos_track, timeOut)

            for tracker in p_result[5]:
                trackers_dic[tracker.id] = deepcopy(tracker)
        t1 = time.time()

        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history)
            if len(bbox_history) < 2: continue
            ### Convert (x0,y0,x1,y1) to (xc,yc,w,h)
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs; bbox_history[:,2:4] = whs

            arrays_box = bbox_history[:,0:7].transpose(); frames = bbox_history[:,6]
            # frame_min -- first frame of this batch of images, e.g. 1 for the batch [1,100] and 101 for [101,200]
            # frames[0] -- first frame in which this target appears, e.g. 1 for [1,11,21,31,41]; frames[0] may lie
            #              before frame_min when one track spans several batches.

            ## To restrict the interpolation range, take the intersection of [frame_min, frame_max] and [frames[0], frames[-1]]
            #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##

            ## To get the complete trajectory, interpolate from the frame in which the target first appears
            inter_frame_min = int(frames[0]); inter_frame_max = int(frames[-1])
            new_frames = np.linspace(inter_frame_min, inter_frame_max, inter_frame_max-inter_frame_min+1 )
            f_linear = interpolate.interp1d(frames, arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            move_cnt_use = (len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s) < windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im], move_cnt_use )

            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
            track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
            #print('#####line116:',trackId,frame_min,frame_max,'----------',interpolation_x0s.shape,track_det_result.shape ,'-----')

        ## Convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s
        detResults = []
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[ track_det_result[:,6] == iiframe ]
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
            #[ x0 ,y0 ,x1 ,y1 ,conf,cls,iframe,trackId ]
            #[iframe, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
            detResults.append( res )

        retResults = [imgarray_list, track_det_result, detResults ]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%( \
            timeHelper.deltaTime_MS(t1, t0), \
            timeHelper.deltaTime_MS(t2, t1), \
            timeInfos_track )
        return retResults, timeInfos

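# Call sketch (illustrative, names are assumptions): batch detection plus SORT tracking over a
# short clip. `modelPar`, `processPar` and `sort_tracker` must already be initialised elsewhere;
# det_cnt controls how often the detector runs and windowsize the smoothing window applied to the
# interpolated track coordinates (typically an odd number).
#
#   trackPar = {'det_cnt': 10, 'windowsize': 29}
#   frames = [cv2.imread(p) for p in sorted(glob.glob('clip/*.jpg'))]
#   iframes = list(range(len(frames)))
#   retResults, timeInfos = AI_det_track_batch(frames, iframes, modelPar, processPar,
#                                              sort_tracker, trackPar, segPar=None)
#   # retResults[1]: numpy array with columns x1, y1, x2, y2, conf, cls, iframe, trackId
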
def AI_det_track_N( im0s_in, modelList, postProcess, sort_tracker):
    im0s, iframe = im0s_in[0], im0s_in[1]
    dets = AI_process_N(im0s, modelList, postProcess)
    p_result = [[], [], dets[0], [] ]
    if sort_tracker:
        # The frequency with which the tracker is called can be adjusted here
        #..................USE TRACK FUNCTION....................
        #pass an empty array to sort
        dets_to_sort = np.empty((0,7), dtype=np.float32)

        # NOTE: We send in detected object class too
        #for detclass,x1,y1,x2,y2,conf in p_result[2]:
        for x1, y1, x2, y2, conf, detclass in p_result[2]:
            #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
            dets_to_sort = np.vstack((dets_to_sort,
                                      np.array([x1, y1, x2, y2, conf, detclass, iframe], dtype=np.float32) ))

        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
        tracks = sort_tracker.getTrackers()
        p_result.append(tracked_dets)  ###index=4
        p_result.append(tracks)  ###index=5

    return p_result, dets[1]

def get_tracker_cls(boxes, scId=4, clsId=5):
    # Normally one track carries a single class, but occasional detection errors put several classes on the same track.
    # The track's class is therefore chosen as the class whose boxes accumulate the highest total confidence.
    # Input boxes -- the box_history kept by the tracker, [[xc,yc,width,height,score,class,iframe],[...],[...]]
    #   scId=4 -- column index of the score; clsId=5 -- column index of the class
    # Output:
    #   the class id assigned to this track
    ids = list(set(boxes[:,clsId].tolist()))
    scores = [np.sum( boxes[:,scId][ boxes[:,clsId] == x ] ) for x in ids]
    maxScoreId = scores.index(np.max(scores))
    return int(ids[maxScoreId])

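# Example (illustrative, made-up history): class 1.0 appears twice with a total confidence of 0.9,
# class 2.0 once with 0.8, so the whole track is assigned class 1.
#
#   hist = np.array([[50, 50, 20, 40, 0.5, 1.0, 0],
#                    [52, 51, 20, 40, 0.4, 1.0, 1],
#                    [54, 52, 20, 40, 0.8, 2.0, 2]])
#   get_tracker_cls(hist)   # -> 1
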
def AI_det_track_batch_N(imgarray_list, iframe_list, modelList, postProcess, sort_tracker, trackPar):
    '''
    Inputs:
        imgarray_list -- list of images
        iframe_list -- list of frame ids
        modelList -- list of model objects; each model's eval() returns its inference result
        postProcess -- dict holding the post-processing function and its parameters
        sort_tracker -- an initialised tracker object. For consistency it must be provided even for a single frame.
        trackPar -- tracking parameters, with the keys det_cnt and windowsize
    Outputs: retResults, timeInfos
        retResults: list
            retResults[0] -- imgarray_list
            retResults[1] -- all results as a numpy array with 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
            retResults[2] -- all results as a list; each element is the list of detections of one frame, and each
                             detection is a list [x0, y0, x1, y1, conf, cls, iframe, trackId], so retResults[2][j][k]
                             is the k-th box of the j-th frame. Output format changed 2023.08.03.
    '''

    det_cnt, windowsize = trackPar['det_cnt'], trackPar['windowsize']
    trackers_dic = {}
    index_list = list(range( 0, len(iframe_list), det_cnt ))
    if len(index_list) > 1 and index_list[-1] != iframe_list[-1]:
        index_list.append( len(iframe_list) - 1 )

    if len(imgarray_list) == 1:  # a single frame needs no tracking
        retResults = []
        p_result, timeOut = AI_det_track_N( [ [imgarray_list[0]], iframe_list[0] ], modelList, postProcess, None )
        ## The next few lines only exist to keep the output format consistent with the multi-frame case
        detArray = np.array(p_result[2])
        if len(p_result[2]) == 0: res = []
        else:
            cnt = detArray.shape[0]; trackIds = np.zeros((cnt,1)); iframes = np.zeros((cnt,1)) + iframe_list[0]

            #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
            detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6], iframes, trackIds ) )  ## input format changed 2023.08.03
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
        retResults = [imgarray_list, detArray, res ]
        #print('##line380:',retResults[2])
        return retResults, timeOut

    else:
        t0 = time.time()
        timeInfos_track = ''
        for iframe_index, index_frame in enumerate(index_list):
            p_result, timeOut = AI_det_track_N( [ [imgarray_list[index_frame]], iframe_list[index_frame] ], modelList, postProcess, sort_tracker )
            timeInfos_track = '%s:%s'%(timeInfos_track, timeOut)

            for tracker in p_result[5]:
                trackers_dic[tracker.id] = deepcopy(tracker)
        t1 = time.time()

        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history).copy()
            if len(bbox_history) < 2: continue
            ### Convert (x0,y0,x1,y1) to (xc,yc,w,h)
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs; bbox_history[:,2:4] = whs

            # Added 2023.11.17: force every box on the same track chain to carry the same class id
            chainClsId = get_tracker_cls(bbox_history, scId=4, clsId=5)
            bbox_history[:,5] = chainClsId

            arrays_box = bbox_history[:,0:7].transpose(); frames = bbox_history[:,6]
            # frame_min -- first frame of this batch of images, e.g. 1 for the batch [1,100] and 101 for [101,200]
            # frames[0] -- first frame in which this target appears, e.g. 1 for [1,11,21,31,41]; frames[0] may lie
            #              before frame_min when one track spans several batches.

            ## To restrict the interpolation range, take the intersection of [frame_min, frame_max] and [frames[0], frames[-1]]
            #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##

            ## To get the complete trajectory, interpolate from the frame in which the target first appears
            inter_frame_min = int(frames[0]); inter_frame_max = int(frames[-1])
            new_frames = np.linspace(inter_frame_min, inter_frame_max, inter_frame_max-inter_frame_min+1 )
            f_linear = interpolate.interp1d(frames, arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            move_cnt_use = (len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s) < windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im], move_cnt_use )

            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
            track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
            #print('#####line116:',trackId,'----------',interpolation_x0s.shape,track_det_result.shape,bbox_history ,'-----')

        ## Convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s
        detResults = []
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[ track_det_result[:,6] == iiframe ]
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
            #[ x0 ,y0 ,x1 ,y1 ,conf,cls,iframe,trackId ]
            #[iframe, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
            detResults.append( res )

        retResults = [imgarray_list, track_det_result, detResults ]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%( \
            timeHelper.deltaTime_MS(t1, t0), \
            timeHelper.deltaTime_MS(t2, t1), \
            timeInfos_track )
        return retResults, timeInfos

def ocr_process(pars):

    img_patch, engine, context, converter, AlignCollate_normal, device = pars[0:6]
    time1 = time.time()
    img_tensor = AlignCollate_normal([ Image.fromarray(img_patch, 'L') ])
    img_input = img_tensor.to('cuda:0')
    time2 = time.time()

    preds, trtstr = OcrTrtForward(engine, [img_input], context)
    time3 = time.time()

    batch_size = preds.size(0)
    preds_size = torch.IntTensor([preds.size(1)] * batch_size)

    ######## filter ignore_char, rebalance
    preds_prob = F.softmax(preds, dim=2)
    preds_prob = preds_prob.cpu().detach().numpy()
    pred_norm = preds_prob.sum(axis=2)
    preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)
    preds_prob = torch.from_numpy(preds_prob).float().to(device)
    _, preds_index = preds_prob.max(2)
    preds_index = preds_index.view(-1)
    time4 = time.time()
    preds_str = converter.decode_greedy(preds_index.data.cpu().detach().numpy(), preds_size.data)
    time5 = time.time()

    info_str = ('pre-process:%.2f TRTforward:%.2f (%s) postProcess:%.2f decoder:%.2f, Total:%.2f , pred:%s'%(\
        timeHelper.deltaTime_MS(time2, time1 ), \
        timeHelper.deltaTime_MS(time3, time2 ), trtstr, \
        timeHelper.deltaTime_MS(time4, time3 ), \
        timeHelper.deltaTime_MS(time5, time4 ), \
        timeHelper.deltaTime_MS(time5, time1 ), preds_str ) )
    return preds_str, info_str
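
# Call sketch (illustrative, all names are caller-side assumptions): ocr_process expects a
# 6-element sequence with the grayscale patch, the TensorRT engine and execution context, a
# label converter providing decode_greedy(), the AlignCollate preprocessing object and the device.
#
#   pars = [gray_patch,            # numpy uint8 array, single-channel crop of the text region
#           ocr_engine,            # TensorRT engine passed to OcrTrtForward
#           ocr_context,           # execution context created from that engine
#           converter,             # CTC label converter with decode_greedy()
#           AlignCollate_normal,   # preprocessing collate that resizes/normalises the patch
#           torch.device('cuda:0')]
#   text, info = ocr_process(pars)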