AIdemo2/demo.py

1595 lines
91 KiB
Python
Raw Permalink Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

import sys, yaml
from easydict import EasyDict as edict
from concurrent.futures import ThreadPoolExecutor
sys.path.extend(['..','../AIlib2' ])
from AI import AI_process,AI_process_forest,get_postProcess_para,get_postProcess_para_dic,ocr_process,AI_det_track,AI_det_track_batch,get_images_videos,AI_process_N,default_mix
from stdc import stdcModel
from yolov5 import yolov5Model
import cv2,os,time
from segutils.segmodel import SegModel
from segutils.trafficUtils import tracfficAccidentMixFunction
from models.experimental import attempt_load
from utils.torch_utils import select_device
from utils.plots import plot_one_box_PIL
from utilsK.queRiver import get_labelnames,get_label_arrays,save_problem_images,riverDetSegMixProcess,draw_painting_joint
from utilsK.drownUtils import mixDrowing_water_postprocess
from ocrUtils.ocrUtils import CTCLabelConverter,AlignCollate
from trackUtils.sort import Sort,track_draw_boxAndTrace,track_draw_trace_boxes,moving_average_wang,drawBoxTraceSimplied
from obbUtils.load_obb_model import load_model_decoder_OBB
from obbUtils.shipUtils import OBB_infer,draw_obb
import numpy as np
import torch,glob
import tensorrt as trt
from utilsK.masterUtils import get_needed_objectsIndex
from utilsK.noParkingUtils import mixNoParking_road_postprocess
from copy import deepcopy
from scipy import interpolate
#import warnings
#warnings.filterwarnings("error")
import inspect
import psutil
def check_cpu(current_line_number):
cpu_use = psutil.cpu_percent()
cpu_mem = psutil.virtual_memory().percent
cpu_swap = psutil.swap_memory().percent
mem = psutil.virtual_memory()
# 已经使用的内存量(包括缓存和缓冲区)
used_mem = mem.used/(1024**2)
#current_line_number = inspect.currentframe().f_lineno
print( '---line:{} ,CPUe使用率:{}, 内存使用:{},{:4.0f}M, SWAP内存使用率:{}'.format(current_line_number,cpu_use, cpu_mem,used_mem,cpu_swap) )
rainbows=[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
def view_bar(num, total,time1,prefix='prefix'):
rate = num / total
time_n=time.time()
rate_num = int(rate * 30)
rate_nums = np.round(rate * 100)
r = '\r %s %d / %d [%s%s] %.2f s'%(prefix,num,total, ">" * rate_num, " " * (30 - rate_num), time_n-time1 )
sys.stdout.write(r)
sys.stdout.flush()
'''
多线程
'''
def drawAllBox(preds,imgDraw,label_arraylist,rainbows,font):
for box in preds:
#cls,conf,xyxy = box[0],box[5], box[1:5]
#print('#'*20,'line47',box)
cls,conf,xyxy = box[5],box[4], box[0:4] ##2023.08.03,修改了格式
#print('#####line46 demo.py:', cls,conf,xyxy, len(label_arraylist),len(rainbows) )
imgDraw = draw_painting_joint(xyxy,imgDraw,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftTop")
return imgDraw
def process_v1(frame):
#try:
time00 = time.time()
H,W,C = frame[0][0].shape
#frmess---- (im0s,model,segmodel,names,label_arraylist,rainbows,objectPar,digitFont,os.path.basename(imgpath),segPar,mode,postPar)
#p_result[1] = draw_painting_joint(xyxy,p_result[1],label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftBottom")
p_result,timeOut = AI_process(frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],objectPar=frame[6],font=frame[7],segPar=frame[9],mode=frame[10],postPar=frame[11])
p_result[1] = drawAllBox(p_result[2],p_result[1],frame[4],frame[5],frame[7])
time11 = time.time()
image_array = p_result[1]
cv2.imwrite(os.path.join('images/results/',frame[8] ) ,image_array)
bname = frame[8].split('.')[0]
if frame[2]:
if len(p_result)==5:
image_mask = p_result[4]
if isinstance(image_mask,np.ndarray) and image_mask.shape[0]>0:
cv2.imwrite(os.path.join('images/results/',bname+'_mask.png' ) , (image_mask).astype(np.uint8))
boxes=p_result[2]
with open( os.path.join('images/results/',bname+'.txt' ),'w' ) as fp:
for box in boxes:
box_str=[str(x) for x in box]
out_str=','.join(box_str)+'\n'
fp.write(out_str)
time22 = time.time()
print('%s,%d*%d,AI-process: %.1f,image save:%.1f , %s'%(frame[8],H,W, (time11 - time00) * 1000.0, (time22-time11)*1000.0,timeOut))
return 'success'
#except Exception as e:
# return 'failed:'+str(e)
def get_video_para(cap):
fps = int(cap.get(cv2.CAP_PROP_FPS)+0.5)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH )+0.5)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)+0.5)
framecnt=int(cap.get(7)+0.5)
return fps,width,height,framecnt
def process_video(video,par0,mode='detSeg'):
cap=cv2.VideoCapture(video)
if not cap.isOpened():
print('#####error url:',video)
return False
#check_cpu(inspect.currentframe().f_lineno)
bname=os.path.basename(video).split('.')[0]
fps = int(cap.get(cv2.CAP_PROP_FPS)+0.5)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH )+0.5)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)+0.5)
framecnt=int(cap.get(7)+0.5)
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
save_path_AI = os.path.join(par0['outpth'],os.path.basename(video))
problem_image_dir= os.path.join( par0['outpth'], 'probleImages' )
os.makedirs(problem_image_dir,exist_ok=True)
vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width,height))
num=0
iframe=0;post_results=[];fpsample=30*10
#check_cpu(inspect.currentframe().f_lineno)
imgarray_list = []; iframe_list = []
#patch_cnt = par0['trackPar']['patchCnt']
##windowsize 对逐帧插值后的结果做平滑windowsize为平滑的长度,没隔det_cnt帧做一次跟踪。
trackPar={'det_cnt':10,'windowsize':29 }
##track_det_result_update= np.empty((0,8)) ###每100帧跑出来的结果放在track_det_result_update只保留当前100帧里有的tracker Id.
#while cap.isOpened():
while True:
ret, imgarray = cap.read() #读取摄像头画面
if not ret: break
iframe +=1
if not ret:break
if mode=='detSeg':
p_result,timeOut = AI_process([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],objectPar=par0['objectPar'],font=par0['digitFont'],segPar=par0['segPar'])
else:
p_result,timeOut = AI_process_forest([imgarray],par0['model'],par0['segmodel'],par0['names'],par0['label_arraylist'],par0['rainbows'],par0['half'],par0['device'],par0['conf_thres'], par0['iou_thres'],par0['allowedList'],font=par0['digitFont'],trtFlag_det=par0['trtFlag_det'])
p_result[1] = drawAllBox(p_result[2],p_result[1],par0['label_arraylist'],par0['rainbows'],par0['digitFont'])
#check_cpu(inspect.currentframe().f_lineno)
if mode != 'track':
image_array = p_result[1];num+=1
ret = vid_writer_AI.write(image_array)
view_bar(num, framecnt,time.time(),prefix=os.path.basename(video))
##每隔 fpsample帧处理一次如果有问题就保存图片
if (iframe % fpsample == 0) and (len(post_results)>0) :
parImage=save_problem_images(post_results,iframe,par0['names'],streamName=bname,outImaDir=problem_image_dir,imageTxtFile=False)
post_results=[]
if len(p_result[2] )>0:
post_results.append(p_result)
#imgarray.release()
vid_writer_AI.release();
def detSeg_demo(opt):
if opt['business'] == 'river':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/river/labelnames.json", ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示,输出
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':1,###分割模型结果需要保留的等值线数目
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,#分割模型预处理参数
'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} #分割和检测混合处理的函数
},
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
'postFile': '../AIlib2/conf/river/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':3},###显示框、线、数字设置
'testImgPath':'/mnt/thsw2/DSP2/videos/river/',
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'river2':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/river2/labelnames.json", ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示,输出
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':1,###分割模型结果需要保留的等值线数目
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,#分割模型预处理参数
'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} #分割和检测混合处理的函数
},
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
'postFile': '../AIlib2/conf/river2/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':3},###显示框、线、数字设置
'testImgPath':'images/river2/',
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'riverT':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/riverT/labelnames.json", ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6,7,8,9] ],###控制哪些检测类别显示,输出
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':1,###分割模型结果需要保留的等值线数目
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True,#分割模型预处理参数
'mixFunction':{'function':riverDetSegMixProcess,'pars':{'slopeIndex':[1,3,4,7], 'riverIou':0.1}} #分割和检测混合处理的函数
},
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
'postFile': '../AIlib2/conf/riverT/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':3},###显示框、线、数字设置
'testImgPath':'images/riverT/',
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'highWay2':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'seg_nclass':3,###分割模型类别数目默认2类
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'segPar':{
'modelSize':(640,360),
#'modelSize':(1920,1080),
'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数
'mixFunction':{'function':tracfficAccidentMixFunction,
'pars':{ 'RoadArea': 16000, 'roadVehicleAngle': 15, 'speedRoadVehicleAngleMax': 75, 'roundness': 1.0, 'cls': 9, 'vehicleFactor': 0.1, 'confThres':0.25,'roadIou':0.6,'radius': 50 ,'vehicleFlag':False,'distanceFlag': False}
}
},
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
#'Segweights' : "../weights/%s/AIlib2/%s/stdc_1080X1920_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
#'Segweights' :'/mnt/thsw2/DSP2/weights/highWay2/stdc_360X640.pth',
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
#'testImgPath':'images/highWayTest/',###测试图像的位置
'testImgPath':'images/tt',
#'testImgPath':'/mnt/thsw2/DSP2/highWay2/videos/',
'testOutPath':'images/results/',###输出测试图像位置
}
par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
if opt['business'] == 'drowning':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'Detweights':"/mnt/thsw2/DSP2/weights/drowning/yolov5.pt",
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数
'mixFunction':{'function':mixDrowing_water_postprocess,
'pars':{ }
}
},
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
#'Segweights' : "/mnt/thsw2/DSP2/weights/drowning/stdc_360X640_2080Ti_fp16.engine",
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
#'testImgPath':'/mnt/thsw2/DSP2/videos/drowning/',###测试图像的位置
'testImgPath':'images/drowning/',
'testOutPath':'images/results/',###输出测试图像位置
}
par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
if opt['business'] == 'noParking':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'seg_nclass':4,###分割模型类别数目默认2类
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'segPar':{'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,###分割模型预处理参数
'mixFunction':{'function':mixNoParking_road_postprocess,
'pars':{ 'roundness': 0.3, 'cls': 9, 'laneArea': 10, 'laneAngleCha': 5 ,'RoadArea': 16000,'fitOrder':2}
}
},
'Segweights' : "../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###分割模型权重位置
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
'testImgPath':'images/noParking/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
par['segPar']['mixFunction']['pars']['modelSize'] = par['segPar']['modelSize']
if opt['business'] == 'illParking':
from utilsK.illParkingUtils import illParking_postprocess
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'seg_nclass':4,###分割模型类别数目默认2类
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'segPar':{
'mixFunction':{'function':illParking_postprocess,
'pars':{ }
}
},
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
'testImgPath':'images/cityMangement',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'cityMangement2':
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'seg_nclass':4,###分割模型类别数目默认2类
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'segPar':{ 'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640,
'mixFunction':{'function':dmpr_yolo,
'pars':{'carCls':0 ,'illCls':3,'scaleRatio':0.5,'border':80}
}
},
'Segweights':"../weights/%s/AIlib2/%s/dmpr_%s.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'Segweights':"../AIlib2/conf/cityMangement2/dmpr.pth",###检测模型路径
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
'testImgPath':'images/cityMangement2/',
'testOutPath':'images/results/',###输出测试图像位置
}
if par['Segweights']:
par['trtFlag_seg']=True if par['Segweights'].endswith('.engine') else False
else:
par['trtFlag_seg']=False
par['trtFlag_det']=True if par['Detweights'].endswith('.engine') else False
mode = par['mode'] if 'mode' in par.keys() else 'others'
postPar = par['postPar'] if 'postPar' in par.keys() else None
device_=par['device']
labelnames = par['labelnames'] ##对应类别表
max_workers=par['max_workers'];
trtFlag_det=par['trtFlag_det'];trtFlag_seg=par['trtFlag_seg'];segRegionCnt=par['segRegionCnt']
device = select_device(device_)
names=get_labelnames(labelnames)
half = device.type != 'cpu' # half precision only supported on CUDA
if trtFlag_det:
Detweights = par['Detweights']##升级后的检测模型
logger = trt.Logger(trt.Logger.ERROR)
with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime:
model=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件返回ICudaEngine对象
print('############locad det model trtsuccess:',Detweights)
else:
Detweights = par['Detweights']
model = attempt_load(Detweights, map_location=device) # load FP32 model
print('############locad det model pth success:',Detweights)
if half: model.half()
#check_cpu(inspect.currentframe().f_lineno)
par['segPar']['seg_nclass'] = par['seg_nclass']
segPar=par['segPar']
if par['Segweights']:
if opt['business'] == 'cityMangement2':
segmodel = DMPRModel(weights=par['Segweights'], par = par['segPar'])
else:
segmodel = stdcModel(weights=par['Segweights'], par = par['segPar'])
else:
segmodel= None
print('############None seg model is loaded###########:' )
#check_cpu(inspect.currentframe().f_lineno)
postFile= par['postFile']
digitFont= par['digitFont']
#conf_thres,iou_thres,classes,rainbows=get_postProcess_para(postFile)
detPostPar = get_postProcess_para_dic(postFile)
conf_thres,iou_thres,classes,rainbows = detPostPar["conf_thres"],detPostPar["iou_thres"],detPostPar["classes"],detPostPar["rainbows"]
if 'ovlap_thres_crossCategory' in detPostPar.keys(): ovlap_thres_crossCategory=detPostPar['ovlap_thres_crossCategory']
else:ovlap_thres_crossCategory = None
if 'score_byClass' in detPostPar.keys(): score_byClass=detPostPar['score_byClass']
else: score_byClass = None
#check_cpu(inspect.currentframe().f_lineno)
####模型选择参数用如下:
mode_paras=par['detModelpara']
allowedList,allowedList_string=get_needed_objectsIndex(mode_paras)
#allowedList=[0,1,2,3]
##加载模型,准备好显示字符
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
#check_cpu(inspect.currentframe().f_lineno)
##图像测试
#impth = 'images/slope/'
impth = par['testImgPath']
outpth = par['testOutPath']
imgpaths=[]###获取文件里所有的图像
for postfix in ['.jpg','.JPG','.PNG','.png']:
imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
videopaths=[]###获取文件里所有的视频
for postfix in ['.MP4','.mp4','.avi']:
videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
#check_cpu(inspect.currentframe().f_lineno)
###先处理图像
frames=[]
for imgpath in imgpaths:
im0s=[cv2.imread(imgpath)]
objectPar={ 'half':half,'device':device,'conf_thres':conf_thres,'ovlap_thres_crossCategory':ovlap_thres_crossCategory,'iou_thres':iou_thres,'allowedList':allowedList,'segRegionCnt':segRegionCnt, 'trtFlag_det':trtFlag_det,'trtFlag_seg':trtFlag_seg ,'score_byClass':score_byClass}
#p_result[1] = draw_painting_joint(xyxy,p_result[1],label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftBottom")
frame=(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar,digitFont,os.path.basename(imgpath),segPar,mode,postPar)
frames.append(frame)
t1=time.time()
if max_workers==1:
for i in range(len(imgpaths)):
print('-'*20,imgpaths[i],'-'*20)
t5=time.time()
process_v1(frames[i])
t6=time.time()
#print('#######%s, ms:%.1f , accumetate time:%.1f, avage:%1.f '%(os.path.basename(imgpaths[i]), (t6-t5)*1000.0,(t6-t1)*1000.0, (t6-t1)*1000.0/(i+1)))
else:
with ThreadPoolExecutor(max_workers=max_workers) as t:
for result in t.map(process_v1, frames):
#print(result)
t=result
t2=time.time()
if len(imgpaths)>0:
print('All %d images time:%.1f ms ,each:%.1f ms, with %d threads'%(len(imgpaths),(t2-t1)*1000, (t2-t1)*1000.0/len(imgpaths) , max_workers) )
#check_cpu(inspect.currentframe().f_lineno)
objectPar={ 'half':half,'device':device,'conf_thres':conf_thres,'iou_thres':iou_thres,'allowedList':allowedList,'segRegionCnt':segRegionCnt, 'trtFlag_det':trtFlag_det,'trtFlag_seg':trtFlag_seg }
par0={ 'model':model,'segmodel':segmodel,'names':names,'label_arraylist':label_arraylist,'rainbows':rainbows,
'objectPar':objectPar,'digitFont':digitFont,'segPar':segPar,'outpth':outpth
}
###如果是视频文件
for video in videopaths:
process_video(video,par0)
print(' ')
def detSeg_demo2(opt):
if opt['business'] == 'cityMangement3':
from DMPR import DMPRModel
from DMPRUtils.jointUtil import dmpr_yolo_stdc
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{
'function':dmpr_yolo_stdc,
'pars':{'carCls':0 ,'illCls':5,'scaleRatio':0.5,'border':80,
#车辆","垃圾","商贩","裸土","占道经营","违停"--->
#"车辆","垃圾","商贩","违停","占道经营","裸土"
'classReindex':{ 0:0,1:1,2:2,3:5,4:4,5:3 }
}
},
'models':[
{
# 'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
#"weight":'/mnt/thsw2/DSP2/cityMangement3/weights/yolov5.pt',
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.55,'iou_thres':0.45,'allowedList':[0,1,2,3,4,5],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.5 ,"4":0.5,"5":0.5 } }
},
{
#'weight':"../weights/%s/AIlib2/%s/dmpr_%s.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/dmpr.pth'%(opt['business'] ),
#'weight':'/mnt/thsw2/DSP2/cityMangement3/weights/dmpr_20231202.pth',
'weight':'/mnt/thsw2/DSP2/cityMangement3/weights/dmpr.pth',
'par':{
'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.1, 'dmprimg_size':640,
'name':'dmpr'
},
'model':DMPRModel,
'name':'dmpr'
},
{
#'weight':"../weights/%s/AIlib2/%s/dmpr_%s.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'weight':'../weights/pth/AIlib2/%s/stdc_360X640.pth'%(opt['business'] ),
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
#'testImgPath':'images/%s/'%(opt['business']),
#'testImgPath':'images/tt',
'testImgPath':'/mnt/thsw2/DSP2/cityMangement3/images',
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'highWaySpill':
from utilsK.spillUtils import mixSpillage_postprocess
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{
'function':mixSpillage_postprocess,
'pars':{}
},
'models':[
{
# 'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
"weight":'../weights/pth/AIlib2/%s/yolov5.pt'%( opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.55,'iou_thres':0.45,'allowedList':[0,1,2,3,4,5],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.5 ,"4":0.5,"5":0.5 } }
},
{
#'weight':'../weights/pth/AIlib2/highWay2/stdc_360X640.pth',
'weight':'../weights/%s/AIlib2/highWay2/stdc_360X640_%s_fp16.engine'%(opt['gpu'],opt['gpu']),
'par':{
'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':3},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'segRegionCnt':2,###分割模型结果需要保留的等值线数目
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
#'testImgPath':'images/%s/'%(opt['business']),
#'testImgPath':'images/tt',
'testImgPath':'/mnt/thsw2/DSP2/%s/images'%(opt['business']),
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'forest2':
from utilsK.crowdGather import gather_post_process
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/forest2/labelnames.json", ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{'function':default_mix,'pars':{}},
'models':
[
{
#'weight':"../weights/%s/AIlib2/forest2/yolov5_%s_fp16.engine"%(opt['gpu'], opt['gpu'] ),###检测模型路径
'weight':'../weights/pth/AIlib2/forestCrowd/yolov5.pt',###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],
'postFile': '../AIlib2/conf/forest/para.json',###后处理参数文件
'txtFontSize':50,###文本字符的大小
'digitFont': { 'line_thickness':1,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'waterLineWidth':1},###显示框、线设置
#'testImgPath':'../AIdemo2/images/tt/',###测试图像的位置
#'testImgPath':'images/smogfire',
'testImgPath':'/mnt/thsw2/DSP2/forest2/videos',
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_cloud_FP',
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_visdrone',
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'forestCrowd':
from utilsK.crowdGather import gather_post_process
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/forestCrowd/labelnames.json", ###检测类别对照表
'max_workers':1, ###并行线程数
#'pedestrianId':行人的ID,'crowdThreshold':判断是否是人群时人的数量,'gatherId':人群的ID,'distancePersonScale':人与人之间的距离/人的身高
'postProcess':{'function':gather_post_process,'pars':{'pedestrianId':2,'crowdThreshold':4,'gatherId':5,'distancePersonScale':2.0}},
'models':
[
{
'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/forestCrowd/yolov5.pt',###检测模型路径
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{ "0":0.25,"1":0.25,"2":0.6,"3":0.6,'4':0.6 ,'5':0.6 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],
'postFile': '../AIlib2/conf/forest/para.json',###后处理参数文件
'txtFontSize':10,###文本字符的大小
'digitFont': { 'line_thickness':1,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'waterLineWidth':1},###显示框、线设置
'testImgPath':'../AIdemo2/images/tt/',###测试图像的位置
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_visdrone',
'testOutPath':'images/results/',###输出测试图像位置
}
##智慧工地-- "工人","塔式起重机","悬臂","起重机","压路机","推土机","挖掘机","卡车","装载机","泵车","混凝土搅拌车","打桩","其他车辆"
if opt['business'] == 'smartSite':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{'function':default_mix,'pars':{}},
'models':
[
{
'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'txtFontSize':50,###文本字符的大小
'digitFont': { 'line_thickness':1,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'waterLineWidth':1},###显示框、线设置
#'testImgPath':'../AIdemo2/images/tt/',###测试图像的位置
'testImgPath':'images/%s'%(opt['business'] ),
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_cloud_FP',
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_visdrone',
'testOutPath':'images/results/',###输出测试图像位置
}
##烟火检测-- 烟花
if opt['business'] == 'firework':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{'function':default_mix,'pars':{}},
'models':
[
{
'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in range(20) ],
'postFile': '../AIlib2/conf/forest/para.json',###后处理参数文件
'txtFontSize':50,###文本字符的大小
'digitFont': { 'line_thickness':1,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'waterLineWidth':1},###显示框、线设置
#'testImgPath':'../AIdemo2/images/tt/',###测试图像的位置
'testImgPath':'images/%s'%(opt['business'] ),
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_cloud_FP',
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_visdrone',
'testOutPath':'images/results/',###输出测试图像位置
}
###垃圾检测--"建筑垃圾","白色垃圾","其他垃圾"
if opt['business'] == 'rubbish':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{'function':default_mix,'pars':{}},
'models':
[
{
'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.25,"1":0.3,"2":0.3,"3":0.3 } },
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in range(20) ],
'postFile': '../AIlib2/conf/forest/para.json',###后处理参数文件
'txtFontSize':50,###文本字符的大小
'digitFont': { 'line_thickness':1,'boxLine_thickness':1, 'fontSize':0.5,'waterLineColor':(0,255,255),'waterLineWidth':1},###显示框、线设置
#'testImgPath':'../AIdemo2/images/tt/',###测试图像的位置
#'testImgPath':'images/%s'%(opt['business'] ),
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/images_cloud_FP',
'testImgPath':'/mnt/thsw2/DSP2/rubbish/images_TaiZhouFeedback',
'testOutPath':'images/results/',###输出测试图像位置
}
#第一步加载模型
modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ]
print(' load moder over')
#准备画图字体
labelnames = par['labelnames'] ##对应类别表
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
#图像测试
imgpaths,videopaths = get_images_videos( par['testImgPath'])
#开始测试
for imgUrl in imgpaths:
img = cv2.imread(imgUrl);bname = os.path.basename(imgUrl)
ret,timeInfos = AI_process_N([img],modelList,par['postProcess'])
print(ret)
timeInfos=bname+':'+timeInfos
print(timeInfos )
if len(ret)>0:
img0 = drawAllBox(ret,img,label_arraylist,rainbows,par['digitFont'])
else: img0= img
cv2.imwrite(os.path.join('images/results/',bname ) ,img0)
def det_demo(business ):
####森林巡检的参数
if opt['business'] == 'forest':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/forest/labelnames.json", ###检测类别对照表
'gpuname':'3090',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':0,###分割模型结果需要保留的等值线数目
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/forest/para.json',###后处理参数文件
'txtFontSize':80,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/forest/',###测试图像的位置
#'testImgPath':'/mnt/thsw2/DSP2/weights/forest2/',
'testOutPath':'images/results/',###输出测试图像位置
}
###车辆巡检参数
if opt['business'] == 'vehicle':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/vehicle/labelnames.json", ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':0,###分割模型结果需要保留的等值线数目
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/vehicle/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/vehicle/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###行人检测模型
if opt['business'] == 'pedestrian':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/pedestrian/labelnames.json", ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###分割模型类别数目默认2类
'segRegionCnt':0,###分割模型结果需要保留的等值线数目
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/pedestrian/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/pedestrian/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###烟雾火焰检测模型
if opt['business'] == 'smogfire':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/smogfire/labelnames.json", ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':False,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
#'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'Detweights':"../weights/pth/AIlib2/%s/yolov5.pt"%(opt['business']),
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/smogfire/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/smogfire/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###钓鱼游泳检测
if opt['business'] == 'AnglerSwimmer':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/AnglerSwimmer/labelnames.json", ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/AnglerSwimmer/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/AnglerSwimmer/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###航道应急,做落水人员检测, channelEmergency
if opt['business'] == 'channelEmergency':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/channelEmergency/labelnames.json", ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/channelEmergency/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/channelEmergency/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###乡村路违法种植
if opt['business'] == 'countryRoad':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/countryRoad/labelnames.json", ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/countryRoad/para.json',###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../AIdemo2/images/countryRoad/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###河道上大型船只
if opt['business'] == 'ship':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'gpuname':'2080T',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business']),###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'../../../data/XunHe/shipData/',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###城管项目,检测城市垃圾和车辆
if opt['business'] == 'cityMangement':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'gpuname':'2080Ti',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business']),###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'images/tmp',###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
###城管项目,检测道路情况,输入类别为五个:"护栏","交通标志","非交通标志","施工","施工“(第4第5类别合并名称相同)
###实际模型检测输出的类别为:"护栏","交通标志","非交通标志","锥桶","水马"
if opt['business'] == 'cityRoad':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'gpuname':'2080Ti',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business']),###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'images/%s'%(opt['business'] ),###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'pothole':
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'gpuname':'2080Ti',###显卡名称
'max_workers':1, ###并行线程数
'trtFlag_det':True,###检测模型是否采用TRT
'trtFlag_seg':False,###分割模型是否采用TRT
'Detweights':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,4,5,6] ],###控制哪些检测类别显示、输出
'slopeIndex':[],###岸坡类别(或者其它业务里的类别),不与河道(分割的前景区域)计算交并比,即不论是否在河道内都显示。
'seg_nclass':2,###没有分割模型,此处不用
'segRegionCnt':0,###没有分割模型,此处不用
'segPar':None,###分割模型预处理参数
'Segweights' : None,###分割模型权重位置
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business']),###后处理参数文件
'txtFontSize':40,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'waterLineWidth':3},###显示框、线设置
'testImgPath':'images/%s'%(opt['business'] ),###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
#segRegionCnt=par['segRegionCnt']
trtFlag_seg = par['trtFlag_seg'];segPar=par['segPar']
##使用森林,道路模型,business 控制['forest','road']
##预先设置的参数
gpuname=par['gpuname']#如果用trt就需要此参数只能是"3090" "2080Ti"
device_=par['device'] ##选定模型,可选 cpu,'0','1'
device = select_device(device_)
half = device.type != 'cpu' # half precision only supported on CUDA
trtFlag_det=par['trtFlag_det'] ###是否采用TRT模型加速
##以下参数目前不可改
imageW=1536 ####道路模型
digitFont= par['digitFont']
if trtFlag_det:
Detweights=par['Detweights']
logger = trt.Logger(trt.Logger.ERROR)
with open(Detweights, "rb") as f, trt.Runtime(logger) as runtime:
model=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件返回ICudaEngine对象
print('####load TRT model :%s'%(Detweights))
else:
Detweights=par['Detweights']
model = attempt_load(Detweights, map_location=device) # load FP32 model
if half: model.half()
labelnames = par['labelnames']
postFile= par['postFile']
print( Detweights,labelnames )
#conf_thres,iou_thres,classes,rainbows=get_postProcess_para(postFile)
detPostPar = get_postProcess_para_dic(postFile)
conf_thres,iou_thres,classes,rainbows = detPostPar["conf_thres"],detPostPar["iou_thres"],detPostPar["classes"],detPostPar["rainbows"]
if 'ovlap_thres_crossCategory' in detPostPar.keys(): ovlap_thres_crossCategory=detPostPar['ovlap_thres_crossCategory']
else:ovlap_thres_crossCategory = None
####模型选择参数用如下:
mode_paras=par['detModelpara']
allowedList,allowedList_string=get_needed_objectsIndex(mode_paras)
slopeIndex = par['slopeIndex']
##只加载检测模型,准备好显示字符
names=get_labelnames(labelnames)
#imageW=4915;###默认是1920在森林巡检的高清图像中是4920
outfontsize=int(imageW/1920*40);###
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
segmodel = None
##图像测试
#url='images/examples/20220624_响水河_12300_1621.jpg'
impth = par['testImgPath']
outpth = par['testOutPath']
imgpaths=[]###获取文件里所有的图像
for postfix in ['.jpg','.JPG','.PNG','.png']:
imgpaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
videopaths=[]###获取文件里所有的视频
for postfix in ['.MP4','.mp4','.avi']:
videopaths.extend(glob.glob('%s/*%s'%(impth,postfix )) )
imgpaths.sort()
for i in range(len(imgpaths)):
#for i in range(2):
#imgpath = os.path.join(impth, folders[i])
imgpath = imgpaths[i]
bname = os.path.basename(imgpath )
im0s=[cv2.imread(imgpath)]
time00 = time.time()
#使用不同的函数。每一个领域采用一个函数
p_result,timeOut = AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half,device,conf_thres, iou_thres,allowedList,font=digitFont,trtFlag_det=trtFlag_det,SecNms=ovlap_thres_crossCategory)
time11 = time.time()
image_array = p_result[1]
cv2.imwrite( os.path.join( outpth,bname ) ,image_array )
print('----image:%s, process:%.1f ,save:%.1f, %s'%(bname,(time11-time00) * 1000, (time.time() - time11) * 1000,timeOut ) )
##process video
print('##begin to process videos, total %d videos'%( len(videopaths)))
for i,video in enumerate(videopaths):
print('process video%d :%s '%(i,video))
par0={'model':model,'segmodel':segmodel, 'names':names,'label_arraylist':label_arraylist,'rainbows':rainbows,'outpth':par['testOutPath'],
'half':half,'device':device,'conf_thres':conf_thres, 'iou_thres':iou_thres,'allowedList':allowedList,'digitFont':digitFont,'trtFlag_det': trtFlag_det
}
process_video(video,par0,mode='det')
def OCR_demo2(opt):
from ocr import ocrModel
if opt['business'] == 'ocr_ch':
par={
#weights = '/home/thsw2/WJ/src/OCR/benchmarking-chinese-text-recognition/weights/scene_base.pth'
'weights' : '/mnt/thsw2/DSP2/weights/ocr2/crnn_ch_2080Ti_fp16_192X32.engine',
'modelpar':{
'char_file':'/home/thsw2/WJ/src/OCR/benchmarking-chinese-text-recognition/src/models/CRNN/data/benchmark.txt',
'mode':'ch',
'nc':3,
'imgH':32,
'imgW':192,
'hidden':256,
'mean':[0.5,0.5,0.5],
'std':[0.5,0.5,0.5],
'dynamic':False
},
'inputDir' : '/home/thsw2/WJ/src/OCR/shipNames'
}
if opt['business'] == 'ocr_en':
par={
'weights' : '/home/thsw2/WJ/src/DSP2/weights/pth/AIlib2/ocr2/crnn_448X32.pth',
#'weights' : '/mnt/thsw2/DSP2/weights/ocr2/crnn_en_2080Ti_fp16_448X32.engine',
'modelpar':{
#'cfg':'../AIlib2/conf/OCR_Ch/360CC_config.yaml',
'char_file':'/home/thsw2/WJ/src/DSP2/AIlib2/conf/ocr2/chars2.txt',
'mode':'en',
'nc':1,
'imgH':32,
'imgW':448,
'hidden':256,
'mean':[0.588,0.588,0.588],
'std':[0.193,0.193,0.193 ],
'dynamic':True
},
'inputDir':'/home/thsw2/WJ/src/DSP2/AIdemo2/images/ocr_en'
}
model = ocrModel(weights=par['weights'],par=par['modelpar'] )
imgUrls = glob.glob('%s/*.jpg'%(par['inputDir']))
for imgUrl in imgUrls[0:]:
img = cv2.imread(imgUrl)
res_real,timeInfos = model.eval(img)
#res_real="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),res_real)))
print(res_real,os.path.basename(imgUrl),timeInfos )
def OBB_demo(opt):
###倾斜框OBB的ship目标检测
par={
'model_size':(608,608), #width,height
'K':100, #Maximum of objects'
'conf_thresh':0.18,##Confidence threshold, 0.1 for general evaluation
'device':"cuda:0",
'down_ratio':4,'num_classes':15,
#'weights':'../AIlib2/weights/conf/ship2/obb_608X608.engine',
'weights':'../weights/%s/AIlib2/%s/obb_608X608_%s_fp16.engine'%(opt['gpu'],opt['business'],opt['gpu']),
'dataset':'dota',
'test_dir': 'images/ship/',
'result_dir': 'images/results',
'half': False,
'mean':(0.5, 0.5, 0.5),
'std':(1, 1, 1),
'model_size':(608,608),##width,height
'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1},
'decoder':None,
'test_flag':True,
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'drawBox':False,#####是否画框
'digitWordFont': { 'line_thickness':2,'boxLine_thickness':1,'wordSize':40, 'fontSize':1.0,'label_location':'leftTop'},
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business'] ), ###检测类别对照表
}
####加载模型
model,decoder2=load_model_decoder_OBB(par)
par['decoder']=decoder2
names=get_labelnames(par['labelnames']);par['labelnames']=names
conf_thres,iou_thres,classes,rainbows=get_postProcess_para(par['postFile']);par['rainbows']=rainbows
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['digitWordFont']['wordSize'],fontpath="../AIlib2/conf/platech.ttf")
par['label_array']=label_arraylist
img_urls=glob.glob('%s/*'%( par['test_dir'] ))
for img_url in img_urls:
#print(img_url)
ori_image=cv2.imread(img_url)
ori_image_list,infos = OBB_infer(model,ori_image,par)
ori_image_list[1] = draw_obb(ori_image_list[2] ,ori_image_list[1],par)
imgName = os.path.basename(img_url)
saveFile = os.path.join(par['result_dir'], imgName)
ret=cv2.imwrite(saveFile, ori_image_list[1] )
if not ret:
print(saveFile, ' not created ')
print( os.path.basename(img_url),':',infos)
def jkm_demo():
from utilsK.jkmUtils import pre_process,post_process,get_return_data
img_type = 'plate' ## code,plate
par={'code':{'weights':'../AIlib2/weights/jkm/health_yolov5s_v3.jit','img_type':'code','nc':10 },
'plate':{'weights':'../AIlib2/weights/jkm/plate_yolov5s_v3.jit','img_type':'plate','nc':1 },
'conf_thres': 0.4,
'iou_thres':0.45,
'device':'cuda:0',
'plate_dilate':(0.5,0.1)
}
###加载模型
device = torch.device(par['device'])
jit_weights = par['code']['weights']
model = torch.jit.load(jit_weights)
jit_weights = par['plate']['weights']
model_plate = torch.jit.load(jit_weights)
imgd='images/plate'
imgpaths = os.listdir(imgd)
for imgp in imgpaths[0:]:
#imgp = 'plate_IMG_20221030_100612.jpg'
imgpath = os.path.join(imgd,imgp)
im0 = cv2.imread(imgpath) #读取数据
img ,padInfos = pre_process(im0,device) ##预处理
if img_type=='code': pred = model(img) ##模型推理
else: pred = model_plate(img)
boxes = post_process(pred,padInfos,device,conf_thres= par['conf_thres'], iou_thres= par['iou_thres'],nc=par[img_type]['nc']) #后处理
dataBack=get_return_data(im0,boxes,modelType=img_type,plate_dilate=par['plate_dilate'])
print(imgp,boxes,dataBack['type'])
for key in dataBack.keys():
if isinstance(dataBack[key],list):
cv2.imwrite( 'images/results/%s_%s.jpg'%( imgp.replace('.jpg','').replace('.png',''),key),dataBack[key][0] ) ###返回值: dataBack
def crowd_demo(opt):
if opt['business']=='crowdCounting':
from crowd import crowdModel as Model
par={
'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225],'threshold':0.5,
'input_profile_shapes':[(1,3,256,256),(1,3,1024,1024),(1,3,2048,2048)],
'modelPar':{'backbone':'vgg16_bn', 'gpu_id':0,'anchorFlag':False, 'width':None,'height':None ,'line':2, 'row':2},
'weights':"../weights/%s/AIlib2/%s/crowdCounting_%s_dynamic.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'testImgPath':'images/%s'%(opt['business'] ),###测试图像的位置
'testOutPath':'images/results/',###输出测试图像位置
}
#weights='weights/best_mae.pth'
cmodel = Model(par['weights'],par)
img_path = par['testImgPath']
File = os.listdir(img_path)
targetList = []
for file in File[0:]:
COORlist = []
imgPath = img_path + os.sep + file
img_raw = cv2.cvtColor(cv2.imread(imgPath),cv2.COLOR_BGR2RGB)
# cmodel.eval---
# 输入读取的RGB数组
# 输出:list,0--原图1-人头坐标list,2-对接OBB的格式数据其中4个坐标均相同2-格式如下:
# [ [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score cls ], [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score cls ],........ ]
prets, infos = cmodel.eval(img_raw)
print(file,infos,' 人数:',len(prets[1]))
img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR)
# 打印预测图像中人头的个数
for p in prets[1]:
img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), 2, (0, 255, 0), -1)
COORlist.append((int(p[0]), int(p[1])))
# 将各测试图像中的人头坐标存储在targetList中, 格式:[[(x1, y1),(x2, y2),...], [(X1, Y1),(X2, Y2),..], ...]
targetList.append(COORlist)
#time.sleep(2)
# 保存预测图片
cv2.imwrite(os.path.join(par['testOutPath'], file), img_to_draw)
def customization_demo(opt):
from AI import AI_process_C
if opt['business'] == 'channel2':
from ocr import ocrModel
from utilsK.channel2postUtils import channel2_post_process
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{'function':channel2_post_process,'name':'channel2','pars':{
'objs':[2],'wRation':1/6.0,'hRation':1/6.0,'smallId':0, 'bigId':3, #船只
'newId':4, #未挂国旗船只
'uncoverId':5, #未封仓标签
'recScale':1.2,
'target_cls':3.0, #目标种类
'filter_cls':4.0 #被过滤的种类
}},
'models':[
{
#'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
#'weight':'/mnt/thsw2/DSP2/channel2/weights/yolov5.pt',
'weight':"/mnt/thsw2/DSP2/channel2/weights/yolov5_20250515.pt",
#'weight':'/mnt/thsw2/DSP2/weights/channel2/yolov5_2080Ti_fp16.engine',
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':list(range(20)),'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{ "0":0.2,"1":0.2,"2":0.2,"3":0.2 } }
},
{
#'weight' : '../weights/%s/AIlib2/ocr2/crnn_ch_%s_fp16_192X32.engine'%(opt['gpu'], opt['gpu']),
'weight':'../weights/pth/AIlib2/ocr2/crnn_ch.pth',
'name':'ocr',
'model':ocrModel,
'par':{
'char_file':'../AIlib2/conf/ocr2/benchmark.txt',
'weight' : '../weights/%s/AIlib2/ocr2/crnn_ch_%s_fp16_192X32.engine'%(opt['gpu'], opt['gpu']),
'mode':'ch',
'nc':3,
'imgH':32,
'imgW':192,
'hidden':256,
'mean':[0.5,0.5,0.5],
'std':[0.5,0.5,0.5],
'dynamic':False,
},
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
#'testImgPath':'/mnt/thsw2/DSP2/weights/channel_tmp/videos/',
#'testImgPath':'/home/thsw2/WJ/src/OCR/shipNames',
'testImgPath':'images/tt',
'testOutPath':'images/results/',###输出测试图像位置
}
if opt['business'] == 'crackMeasurement':
from utilsK.crackUtils import Crack_measure
print( '%s 只能测试图像,不能测试视频%s'%('#'*20,'#'*20))
par={
'device':'0', ###显卡号,如果用TRT模型只支持0单显卡
'labelnames':"../AIlib2/conf/%s/labelnames.json"%(opt['business']), ###检测类别对照表
'max_workers':1, ###并行线程数
'postProcess':{
'name':'crackMeasurement',
'function':Crack_measure,
'pars':{'dsx':(123-30)*1000/35*0.004387636 ,'objs':[0,1,2]}
},
'models':[
{
'weight':"../weights/%s/AIlib2/%s/yolov5_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
#'weight':'../weights/pth/AIlib2/%s/yolov5.pt'%(opt['business'] ),
'name':'yolov5',
'model':yolov5Model,
'par':{ 'half':True,'device':'cuda:0' ,'conf_thres':0.1,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False, "score_byClass":{"0":0.1,"1":0.1,"2":0.1 } }
},
{
#'weight':"../weights/%s/AIlib2/%s/stdc_360X640_%s_fp16.engine"%(opt['gpu'], opt['business'] ,opt['gpu'] ),###检测模型路径
'weight':'../weights/pth/AIlib2/%s/stdc_360X640.pth'%(opt['business'] ),
'par':{
#'modelSize':(640,360),
'modelSize':(1920,1080),
'dynamic':True,
'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'predResize':True,'numpy':False, 'RGB_convert_first':True,'seg_nclass':2},###分割模型预处理参数
'model':stdcModel,
'name':'stdc'
}
],
'detModelpara':[{"id":str(x),"config":{"k1":"v1","k2":"v2"}} for x in [0,1,2,3,5,6,7,8,9] ],###控制哪些检测类别显示、输出
'postFile': '../AIlib2/conf/%s/para.json'%(opt['business'] ),###后处理参数文件
'txtFontSize':20,###文本字符的大小
'digitFont': { 'line_thickness':2,'boxLine_thickness':1, 'fontSize':1.0,'waterLineColor':(0,255,255),'segLineShow':True,'waterLineWidth':2},###显示框、线设置
'testImgPath':'images/%s/'%(opt['business']),
#'testImgPath':'/mnt/thsw2/DSP2/weights/cityMangement2_1102/images/debug',
'testOutPath':'images/results/',###输出测试图像位置
}
#第一步加载模型
modelList=[ modelPar['model'](weights=modelPar['weight'],par=modelPar['par']) for modelPar in par['models'] ]
print(' load moder over')
#准备画图字体
labelnames = par['labelnames'] ##对应类别表
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=par['txtFontSize'],fontpath="../AIlib2/conf/platech.ttf")
#图像测试
imgpaths,videopaths = get_images_videos( par['testImgPath'])
print('imgs:',imgpaths,'\n videos:',videopaths)
#开始测试
for imgUrl in imgpaths[0:]:
img = cv2.imread(imgUrl);bname = os.path.basename(imgUrl)
if opt['business'] == 'crackMeasurement':
ret,timeInfos = AI_process_C([img],modelList,par['postProcess'])
#返回类型ret-list,[[ x0,y0,x1,y1,score,class,裂缝长度,平均宽度,最大宽度,最小宽度],[...],[...]]
for re in ret:
print('Summarized Cracklength = %.1f mm Mean crack width = %.1f mm Max crack width = %.1f mm Min crack width = %.1f mm '%( re[6], re[7],re[8],re[9] ) )
elif opt['business'] == 'channel2':
ret,timeInfos = AI_process_C([img],modelList,par['postProcess'])
timeInfos=bname+':'+timeInfos
print(timeInfos,ret )
img0 = img.copy()
if len(ret)>0:
img0 = drawAllBox(ret,img0,label_arraylist,rainbows,par['digitFont'])
'''
ret_shipName=list(filter(lambda x: int(x[5])==2, ret))
ret_ship=list(filter(lambda x: int(x[5]) in [3,4], ret))
ret_others = list(filter(lambda x: int(x[5]) not in [2,3,4], ret))
img0 = img.copy()
if len(ret_others)>0:
img0 = drawAllBox(ret_others,img0,label_arraylist,rainbows,par['digitFont'])
if len(ret_shipName) >0:
for rett in ret_shipName:
x0,y0,x1,y1=rett[0:4]
print(' shipName width:%d , height:%d'%( (x1-x0),(y1-y0) ))
img0= plot_one_box_PIL( rett[0:4], img0, color=rainbows[2], label=rett[6], line_thickness=par['digitFont']['line_thickness'])
if len(ret_ship) >0:
for rett in ret_ship:
label = '船只 %.2f: 已封仓'%( rett[4] )
img0= plot_one_box_PIL( rett[0:4], img0, color=rainbows[4], label=label, line_thickness=par['digitFont']['line_thickness'])
'''
cv2.imwrite(os.path.join('images/results/',bname ) ,img0)
#测试视频
###如果是视频文件
for video in videopaths:
cap=cv2.VideoCapture(video)
fps,width,height,framecnt = get_video_para(cap)
save_path_AI = os.path.join(par['testOutPath'],os.path.basename(video))
vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width,height))
num=0
iframe=0;post_results=[];fpsample=30*10
while cap.isOpened():
ret, img = cap.read() #读取摄像头画面
iframe +=1
if not ret:break
ret,timeInfos = AI_process_C([img],modelList,par['postProcess'])
ret_shipName=list(filter(lambda x: int(x[5])==2, ret))
ret_ship=list(filter(lambda x: int(x[5]) in [3,4], ret))
ret_others = list(filter(lambda x: int(x[5]) not in [2,3,4], ret))
img0 = img.copy()
if len(ret_others)>0:
img0 = drawAllBox(ret_others,img0,label_arraylist,rainbows,par['digitFont'])
if len(ret_shipName) >0:
for rett in ret_shipName:
x0,y0,x1,y1=rett[0:4]
#print(' shipName width:%d , height:%d'%( (x1-x0),(y1-y0) ))
if x1-x0>60 and y1-y0>20:
label = rett[6]
#else: label ='船名'
img0= plot_one_box_PIL( rett[0:4], img0, color=rainbows[2], label=label, line_thickness=par['digitFont']['line_thickness'])
if len(ret_ship) >0:
for rett in ret_ship:
label = '船只 %.2f: 已封仓'%( rett[4] )
img0= plot_one_box_PIL( rett[0:4], img0, color=rainbows[4], label=label, line_thickness=par['digitFont']['line_thickness'])
ret = vid_writer_AI.write(img0)
view_bar(iframe, framecnt,time.time(),prefix=os.path.basename(video))
vid_writer_AI.release();
def AI_process_C_multi( ps ):
return AI_process_C( ps[0] ,ps[1],ps[2] )
###用视频文件做一个多线程测试
'''
max_workers=4
bs=4
for video in videopaths:
cap=cv2.VideoCapture(video)
fps,width,height,framecnt = get_video_para(cap)
print('-'*10,' line1307 fps:{}, width:{},height:{},framecnt:{} '.format( fps,width,height,framecnt) )
iframe =0
parsIns=[]
while cap.isOpened():
ret, img = cap.read() #读取摄像头画面
if not ret:break
parsIns.append( [ [img],modelList,par['postProcess'] ] )
iframe+=1
if iframe%bs==0:
with ThreadPoolExecutor(max_workers=max_workers) as t:
results = t.map(AI_process_C_multi, parsIns)
results = list(results)
print(iframe,len( parsIns ))
parsIns=[]
view_bar(iframe, framecnt,time.time(),prefix=os.path.basename(video))
'''
if __name__=="__main__":
#jkm_demo()
businessAll=['river2','AnglerSwimmer', 'countryRoad','forest2', 'forestCrowd','pedestrian' , 'smogfire' , 'vehicle','ship2',"highWay2","channelEmergency","cityMangement","drowning","noParking","illParking",'cityMangement2',"cityRoad","crowdCounting",'cityMangement3','ocr_en','ocr_ch','pothole','crackMeasurement','channel2','riverT','rubbish','firework','smartSite','highWaySpill']
#businessAll=['crackMeasurement']
businessAll = ['highWaySpill']
# forest 、 ocr2 、ocr_en 、 river 、 road 、 ship ,目前都没有在用
for busi in businessAll:
print('-'*40,'beg to test ',busi,'-'*40)
opt={'gpu':'2080Ti','business':busi}
if opt['business'] in ['highWay2','river2','drowning','noParking','river',"illParking","cityMangement2","riverT"]:
detSeg_demo(opt)
elif opt['business'] in ['cityMangement3','forest2','forestCrowd','rubbish','firework','smartSite','highWaySpill'] :
detSeg_demo2(opt)
elif opt['business'] in ['crowdCounting'] :
crowd_demo(opt)
elif opt['business'] in ['ship2']:
OBB_demo(opt)
elif opt['business'] in ['ocr']:
OCR_demo(opt)
elif opt['business'] in ['crackMeasurement','channel2'] :
customization_demo(opt)
elif opt['business'] in ['ocr_en','ocr_ch']:
OCR_demo2(opt)
else:
det_demo( opt )