AIlib2文件初始化
This commit is contained in:
parent
447e66e2c2
commit
7e2adf6f75
|
|
@ -0,0 +1,808 @@
|
|||
import cv2,os,time,json
|
||||
from models.experimental import attempt_load
|
||||
from segutils.segmodel import SegModel,get_largest_contours
|
||||
from segutils.trtUtils import segtrtEval,yolov5Trtforward,OcrTrtForward
|
||||
from segutils.trafficUtils import tracfficAccidentMixFunction
|
||||
|
||||
from utils.torch_utils import select_device
|
||||
from utilsK.queRiver import get_labelnames,get_label_arrays,post_process_,img_pad,draw_painting_joint,detectDraw,getDetections,getDetectionsFromPreds
|
||||
from utilsK.jkmUtils import pre_process, post_process, get_return_data
|
||||
from trackUtils.sort import moving_average_wang
|
||||
|
||||
from utils.datasets import letterbox
|
||||
import numpy as np
|
||||
import torch
|
||||
import math
|
||||
from PIL import Image
|
||||
import torch.nn.functional as F
|
||||
from copy import deepcopy
|
||||
from scipy import interpolate
|
||||
import glob
|
||||
|
||||
def get_images_videos(impth, imageFixs=['.jpg','.JPG','.PNG','.png'],videoFixs=['.MP4','.mp4','.avi']):
    """Collect image and video paths from *impth*.

    If *impth* is a directory, glob it for every extension in *imageFixs* /
    *videoFixs*; otherwise treat it as a single file and classify it by its
    extension. Returns (image_paths, video_paths).
    """
    found_images = []
    found_videos = []
    if os.path.isdir(impth):
        for ext in imageFixs:
            found_images += glob.glob('%s/*%s'%(impth,ext ))
        for ext in videoFixs:
            found_videos += glob.glob('%s/*%s'%(impth,ext ))
    else:
        ext = os.path.splitext(impth)[-1]
        if ext in imageFixs:
            found_images = [impth]
        if ext in videoFixs:
            found_videos = [impth]

    print('%s: test Images:%d , test videos:%d '%(impth, len(found_images), len(found_videos)))
    return found_images, found_videos
|
||||
|
||||
def xywh2xyxy(box,iW=None,iH=None):
    """Convert a normalized [xc, yc, w, h] box to [x0, y0, x1, y1].

    Corners are clipped to [0, 1]; when iW / iH are truthy the x / y
    coordinates are scaled to pixel units.
    """
    cx, cy, bw, bh = box[0:4]
    half_w = bw / 2.0
    half_h = bh / 2.0
    left = max(0, cx - half_w)
    right = min(1, cx + half_w)
    top = max(0, cy - half_h)
    bottom = min(1, cy + half_h)
    if iW:
        left, right = left * iW, right * iW
    if iH:
        top, bottom = top * iH, bottom * iH
    return [left, top, right, bottom]
|
||||
|
||||
def get_ms(t2,t1):
    """Return the elapsed time from *t1* to *t2* in milliseconds."""
    delta_seconds = t2 - t1
    return delta_seconds * 1000.0
|
||||
def get_postProcess_para(parfile):
    """Load post-processing parameters from a JSON config file.

    Returns the tuple (conf_thres, iou_thres, classes, rainbows) taken from
    the file's 'post_process' section; asserts that the section exists.
    """
    with open(parfile) as cfg_fp:
        cfg = json.load(cfg_fp)
    assert 'post_process' in cfg.keys(), ' parfile has not key word:post_process'
    post_cfg = cfg['post_process']
    return (post_cfg["conf_thres"], post_cfg["iou_thres"],
            post_cfg["classes"], post_cfg["rainbows"])
|
||||
def get_postProcess_para_dic(parfile):
    """Load *parfile* (JSON) and return its 'post_process' section as a dict."""
    with open(parfile) as handle:
        whole_cfg = json.load(handle)
    return whole_cfg['post_process']
|
||||
def score_filter_byClass(pdetections,score_para_2nd):
    """Keep detections whose confidence exceeds a per-class threshold.

    Each detection is [..., conf, cls] (conf at index 4, cls at index 5).
    The threshold for a class is looked up in *score_para_2nd* first by the
    int class id, then by its string form; 0.7 is the fallback.
    """
    def _threshold_for(cls_value):
        key = int(cls_value)
        return score_para_2nd.get(key, score_para_2nd.get(str(key), 0.7))

    return [det for det in pdetections if det[4] > _threshold_for(det[5])]
|
||||
# Filter detections by class.
def filter_byClass(pdetections,allowedList):
    """Return only the detections whose class id (index 5) is allowed.

    Membership in *allowedList* is checked both as an int and as its
    string form, so mixed-type allow-lists work.
    """
    kept = []
    for det in pdetections:
        cls_int = int(det[5])
        if cls_int in allowedList or str(cls_int) in allowedList:
            kept.append(det)
    return kept
|
||||
|
||||
# Format an OCR-recognized licence-plate string.
def plat_format(ocr):
    """Normalize an OCR result into a licence-plate label.

    ocr[0] is the raw recognized string. Returns the cleaned, upper-cased,
    space-separated label (with '・' inserted after the region prefix), or
    None when the string does not look like a valid plate.
    """
    # Valid first characters of a plate: province abbreviations plus the
    # special prefixes '使' and '领'.
    carDct = ['黑','吉','辽','冀','晋','陕','甘','青','鲁','苏','浙','皖','闽','赣','豫','鄂',\
              '湘','粤','琼','川','贵','云','蒙','藏','宁','新','桂','京','津','沪','渝','使','领']
    label = ocr[0]
    # print(label)
    # Keep only CJK characters, lowercase latin letters, digits and the
    # characters with code points 33 / 73 / 65281 ('!', 'I', fullwidth '!').
    label = list(filter(lambda x: (ord(x) > 19968 and ord(x) < 63865) or (ord(x) > 96 and ord(x) < 123)
                        or (ord(x) > 47 and ord(x) < 58) or (ord(x) in [33, 73, 65281]), label))
    def spt(x):
        # Map characters OCR commonly confuses with the digit '1'.
        if x in ['I', 'i', '!', '!']:
            return '1'
        else:
            return x

    label = list(map(spt, label))
    # Only 7- or 8-character plates are accepted (presumably 8 covers
    # new-energy plates -- TODO confirm).
    if len(label) < 7 or len(label) >8:
        return None
    if not label[0] in carDct:
        return None

    # Insert the separator after the 2-character region prefix.
    label.insert(2, '・')
    label = ' '.join(label)
    # label = label.split('I','1').split('!','1').split('i','1').split('!','1')
    # label = label.split('I','1').split('!','1').split('i','1').split('!','1

    return label.upper()
|
||||
|
||||
def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar={ 'half':True,'device':'cuda:0' ,'conf_thres':0.25,'iou_thres':0.45,'allowedList':[0,1,2,3],'segRegionCnt':1, 'trtFlag_det':False,'trtFlag_seg':False,'score_byClass':{x:0.1 for x in range(30)} }, font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True},mode='others',postPar=None):
    """Run detection (and an optional segmentation mix step) on one image."""

    # Input parameters
    # im0s---list of original images
    # model---detection model; segmodel---segmentation model (None if unused)
    #
    # Output: a tuple (list, str): [im0s[0],im0,det_xywh,iframe], strout
    # in [im0s[0],im0,det_xywh,iframe]:
    # im0s[0]--original image; im0--image after AI processing; iframe--frame id (currently unused)
    # det_xywh--detection results, a list
    # each element is one target: [ xc,yc,w,h, float(conf_c),float(cls_c) ]  (output format changed 2023.08.03)
    # #cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates and size; conf_c--confidence in [0,1]
    # #strout---timing statistics for each stage
    # Letterbox

    half,device,conf_thres,iou_thres,allowedList = objectPar['half'],objectPar['device'],objectPar['conf_thres'],objectPar['iou_thres'],objectPar['allowedList']

    trtFlag_det,trtFlag_seg,segRegionCnt = objectPar['trtFlag_det'],objectPar['trtFlag_seg'],objectPar['segRegionCnt']
    # Optional cross-category NMS overlap threshold.
    if 'ovlap_thres_crossCategory' in objectPar.keys(): ovlap_thres = objectPar['ovlap_thres_crossCategory']
    else: ovlap_thres = None

    # Optional per-class score thresholds for a second filtering pass.
    if 'score_byClass' in objectPar.keys(): score_byClass = objectPar['score_byClass']
    else: score_byClass = None

    time0=time.time()
    if trtFlag_det:
        # TensorRT path: pad to the fixed 640x640 input, remember the padding.
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        #print('####line72:',im0s[0][10:12,10:12,2])
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
        #print('####line74:',img[0][10:12,10:12,2])
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0
    time01=time.time()

    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0] )
        segFlag=True
    else:
        seg_pred = None;segFlag=False;segstr='Not implemented'

    time1=time.time()
    if trtFlag_det:
        pred = yolov5Trtforward(model,img)
    else:
        #print('####line96:',img[0,0,10:12,10:12])
        pred = model(img,augment=False)[0]

    time2=time.time()

    p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos)
    if score_byClass:
        p_result[2] = score_filter_byClass(p_result[2],score_byClass)
    #if mode=='highWay3.0':
    #if segmodel:
    # Fuse boxes with the segmentation result via the configured mix function.
    if segPar and segPar['mixFunction']['function']:

        mixFunction = segPar['mixFunction']['function'];H,W = im0s[0].shape[0:2]
        parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2])
        parMix['imgSize'] = (W,H)
        #print(' -----------line110: ',p_result[2] ,'\n', seg_pred)
        p_result[2] , timeMixPost= mixFunction(p_result[2], seg_pred, pars=parMix )
        #print(' -----------line112: ',p_result[2] )
        p_result.append(seg_pred)

    else:
        timeMixPost=':0 ms'
    #print('#### line121: segstr:%s timeMixPost:%s timeOut:%s'%( segstr.strip(), timeMixPost,timeOut ))
    time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr.strip(),timeMixPost )
    if allowedList:
        p_result[2] = filter_byClass(p_result[2],allowedList)

    print('-'*10,p_result[2])
    return p_result,time_info
|
||||
def default_mix(predlist,par):
    """Identity mix function: return the first model's predictions and an
    empty timing string (fallback when no real post-mix step is configured)."""
    first_preds = predlist[0]
    return first_preds, ''
|
||||
def AI_process_N(im0s,modelList,postProcess):
    """Run every model in *modelList* on im0s[0] and merge the results."""

    # Input parameters
    ## im0s---list of original images
    ## modelList--all models
    # postProcess--dict holding the post-processing function and its parameters
    # Output
    ##ret[0]--detection results
    ##ret[1]--timing info

    # Each element of modelList is a class whose eval() returns that model's
    # inference result as (predictions, time_string).
    modelRets=[ model.eval(im0s[0]) for model in modelList]

    timeInfos = [ x[1] for x in modelRets]
    timeInfos=''.join(timeInfos)
    timeInfos=timeInfos

    #postProcess['function']--post-processing function; its input is the list of all model outputs
    mixFunction =postProcess['function']
    predsList = [ modelRet[0] for modelRet in modelRets ]
    H,W = im0s[0].shape[0:2]
    # The mix function needs the image size (width, height).
    postProcess['pars']['imgSize'] = (W,H)

    #ret is the merged result: (detections, time_string)
    ret = mixFunction( predsList, postProcess['pars'])
    return ret[0],timeInfos+ret[1]
|
||||
def getMaxScoreWords(detRets0):
    """Return the index of the detection with the highest confidence
    (element 4 of each box); returns 0 for an empty list.

    Ties keep the earliest detection, matching a strict greater-than scan.
    """
    if not detRets0:
        return 0
    return max(range(len(detRets0)), key=lambda i: detRets0[i][4])
|
||||
|
||||
def AI_process_C(im0s,modelList,postProcess):
    """Cascaded two-model pipeline where model2's input depends on model1's output."""
    # Why this function is customized:
    ## the previous pipeline was
    ## image ---> model1 --> result1; image ---> model2 -> result2; [result1,result2] ---> post-process
    ## this function's pipeline is
    ## image ---> model1 --> result1; [image,result1] ---> model2 -> result2; [result1,result2] ---> post-process
    ## i.e. model2's input is determined by model1's output. E.g. model2 is an OCR model and the
    ## ship-name boxes detected by model1 are cropped out and fed to model2.
    ## In the older pipelines model2 was a segmentation model fed the raw image, independent of model1.
    # Input parameters
    ## im0s---list of original images
    ## modelList--all models
    # postProcess--dict holding the post-processing function and its parameters
    # Output
    ##ret[0]--detection results
    ##ret[1]--timing info

    # Each element of modelList is a class whose eval() returns that model's inference result.

    t0=time.time()
    detRets0 = modelList[0].eval(im0s[0])

    #detRets0=[[12, 46, 1127, 1544, 0.2340087890625, 2.0], [1884, 1248, 2992, 1485, 0.64208984375, 1.0]]
    detRets0 = detRets0[0]
    parsIn=postProcess['pars']

    # Split detections into the target classes (to be cropped for model2) and the rest.
    _detRets0_obj = list(filter(lambda x: x[5] in parsIn['objs'], detRets0 ))
    _detRets0_others = list(filter(lambda x: x[5] not in parsIn['objs'], detRets0 ))
    _detRets0 = []
    if postProcess['name']=='channel2':
        # channel2: only the highest-scoring target box is sent to model2 (OCR).
        if len(_detRets0_obj)>0:
            maxId=getMaxScoreWords(_detRets0_obj)
            _detRets0 = _detRets0_obj[maxId:maxId+1]
    else: _detRets0 = detRets0


    t1=time.time()
    # Crop each selected box from the original image and run the second model on it.
    imagePatches = [ im0s[0][int(x[1]):int(x[3] ) ,int(x[0]):int(x[2])] for x in _detRets0 ]
    detRets1 = [modelList[1].eval(patch) for patch in imagePatches]
    print('###line240:',detRets1)
    if postProcess['name']=='crackMeasurement':
        # Scale each patch mask from [0,1] to [0,255] before measuring.
        detRets1 = [x[0]*255 for x in detRets1]
        t2=time.time()
        mixFunction =postProcess['function']
        crackInfos = [mixFunction(patchMask,par=parsIn) for patchMask in detRets1]

        # Append each patch's crack measurements to its detection box.
        rets = [detRets0[i]+ crackInfos[i] for i in range(len(imagePatches)) ]
        t3=time.time()
        outInfos='total:%.1f (det:%.1f %d次segs:%.1f mixProcess:%.1f) '%( (t3-t0)*1000, (t1-t0)*1000, len(detRets1),(t2-t1)*1000, (t3-t2)*1000 )
    elif postProcess['name']=='channel2':
        H,W = im0s[0].shape[0:2];parsIn['imgSize'] = (W,H)
        mixFunction =postProcess['function']
        _detRets0_others = mixFunction([_detRets0_others], parsIn)
        ocrInfo='no ocr'
        if len(_detRets0_obj)>0:
            res_real = detRets1[0][0]
            # Keep only CJK characters and digits from the OCR string.
            res_real="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),res_real)))

            #detRets1[0][0]="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),detRets1[0][0])))
            _detRets0_obj[maxId].append(res_real )
            _detRets0_obj = [_detRets0_obj[maxId]]  ## only output the ship-name box that has an OCR result
            ocrInfo=detRets1[0][1]
        print( ' _detRets0_obj:{} _detRets0_others:{} '.format( _detRets0_obj, _detRets0_others ) )
        rets=_detRets0_obj+_detRets0_others
        t3=time.time()
        outInfos='total:%.1f ,where det:%.1f, ocr:%s'%( (t3-t0)*1000, (t1-t0)*1000, ocrInfo)

    # NOTE(review): if postProcess['name'] is neither 'crackMeasurement' nor 'channel2',
    # 'rets'/'outInfos' are never assigned and the return below raises UnboundLocalError -- confirm intended.
    #print('###line233:',detRets1,detRets0 )

    return rets,outInfos
|
||||
|
||||
def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,trtFlag_det=False,SecNms=None):
    """Forest-scenario variant of AI_process using the post_process_ pipeline."""
    # NOTE(review): the default device ' cuda:0' contains a leading space -- confirm whether intended.
    # Input parameters
    # im0s---list of original images
    # model---detection model; segmodel---segmentation model (None if unused)
    # Output: a tuple (list, str): [im0s[0],im0,det_xywh,iframe], strout
    # in [im0s[0],im0,det_xywh,iframe]:
    # im0s[0]--original image; im0--image after AI processing; iframe--frame id (currently unused)
    # det_xywh--detection results, a list
    # each element is one target: [ xc,yc,w,h, float(conf_c),float(cls_c)]  #2023.08.03, output format changed
    # #cls_c--class id, e.g. 0,1,2,3; xc,yc,w,h--center coordinates and size; conf_c--confidence in [0,1]
    # #strout---timing statistics for each stage

    # Letterbox
    time0=time.time()
    if trtFlag_det:
        # TensorRT path: pad to the fixed 640x640 input, remember the padding.
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
        #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s]
    # Stack
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32

    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0] )
        segFlag=True
    else:
        seg_pred = None;segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]


    time2=time.time()
    datas = [[''], img, im0s, None,pred,seg_pred,10]

    ObjectPar={ 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 }
    p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos,ovlap_thres=SecNms)
    #print('###line274:',p_result[2])
    #p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos)
    time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
    return p_result,time_info+timeOut
|
||||
def AI_det_track( im0s_in,modelPar,processPar,sort_tracker,segPar=None):
    """Detect objects in one frame and optionally feed them to a SORT tracker.

    im0s_in = (image_list, frame_id). When *sort_tracker* is given, the
    tracked detections and tracker objects are appended to p_result as
    indices 4 and 5.
    """
    im0s,iframe=im0s_in[0],im0s_in[1]
    model = modelPar['det_Model']
    segmodel = modelPar['seg_Model']
    half,device,conf_thres, iou_thres,trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'],processPar['trtFlag_det']
    # Optional per-class score thresholds for a second filtering pass.
    if 'score_byClass' in processPar.keys(): score_byClass = processPar['score_byClass']
    else: score_byClass = None

    # Second-stage (cross-category) NMS threshold.
    iou2nd = processPar['iou2nd']
    time0=time.time()

    if trtFlag_det:
        # TensorRT path: pad to the fixed 640x640 input, remember the padding.
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)

    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32

    img /= 255.0  # 0 - 255 to 0.0 - 1.0

    seg_pred = None;segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]

    time2=time.time()

    #p_result,timeOut = getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos)
    p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=iou2nd,padInfos=padInfos)
    if score_byClass:
        p_result[2] = score_filter_byClass(p_result[2],score_byClass)
    if segmodel:
        seg_pred,segstr = segmodel.eval(im0s[0] )
        segFlag=True
    else:
        seg_pred = None;segFlag=False;segstr='No segmodel'


    # Fuse boxes with the segmentation result via the configured mix function.
    if segPar and segPar['mixFunction']['function']:
        mixFunction = segPar['mixFunction']['function']

        H,W = im0s[0].shape[0:2]
        parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2])
        parMix['imgSize'] = (W,H)


        p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=parMix )
        timeInfos_seg_post = 'segInfer:%s ,postMixProcess:%s'%( segstr, timeInfos_post )
    else:
        timeInfos_seg_post = ' '
    '''
    if segmodel:
        timeS1=time.time()
        #seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar) if segPar['trtFlag_seg'] else segmodel.eval(im0s[0] )
        seg_pred,segstr = segmodel.eval(im0s[0] )
        timeS2=time.time()
        mixFunction = segPar['mixFunction']['function']

        p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=segPar['mixFunction']['pars'] )

        timeInfos_seg_post = 'segInfer:%.1f ,postProcess:%s'%( (timeS2-timeS1)*1000, timeInfos_post )

    else:
        timeInfos_seg_post = ' '
    #print('######line341:',seg_pred.shape,np.max(seg_pred),np.min(seg_pred) , len(p_result[2]) )
    '''
    time_info = 'letterbox:%.1f, detinfer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )

    if sort_tracker:
        # Here the frequency of tracker invocation can be configured.
        #..................USE TRACK FUNCTION....................
        #pass an empty array to sort
        dets_to_sort = np.empty((0,7), dtype=np.float32)

        # NOTE: We send in detected object class too
        #for detclass,x1,y1,x2,y2,conf in p_result[2]:
        for x1,y1,x2,y2,conf, detclass in p_result[2]:
            #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
            dets_to_sort = np.vstack((dets_to_sort,
                        np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))

        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
        tracks =sort_tracker.getTrackers()
        p_result.append(tracked_dets)  ###index=4
        p_result.append(tracks)  ###index=5

    return p_result,time_info+timeOut+timeInfos_seg_post
|
||||
def AI_det_track_batch(imgarray_list, iframe_list ,modelPar,processPar,sort_tracker,trackPar,segPar=None):
    '''
    Inputs:
        imgarray_list -- list of images
        iframe_list -- list of frame ids
        modelPar -- model dict, modelPar={'det_Model':,'seg_Model':}
        processPar -- dict of detection params: 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det'
        sort_tracker -- initialized tracker object; present even for a single frame to keep the interface uniform
        trackPar -- tracking params, keys: det_cnt, windowsize
        segPar -- segmentation params, or None when unused
    Outputs: retResults, timeInfos
        retResults: list
        retResults[0] -- imgarray_list
        retResults[1] -- all results as one numpy array of 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
        retResults[2] -- all results as a list; element j is the box list of frame j, each box being
                         [ x0 ,y0 ,x1 ,y1 ,conf, cls ,ifrmae,trackId ]; retResults[2][j][k] is box k of
                         frame j. (output format changed 2023.08.03)
    '''

    det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize']
    trackers_dic={}
    # Run detection every det_cnt frames; make sure the last frame is included.
    index_list = list(range( 0, len(iframe_list) ,det_cnt ));
    # NOTE(review): this compares an index against a frame id -- confirm iframe_list is 0-based indices here.
    if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
        index_list.append( len(iframe_list) - 1 )

    if len(imgarray_list)==1:  # a single image needs no tracking
        retResults = []
        p_result,timeOut = AI_det_track( [ [imgarray_list[0]] ,iframe_list[0] ],modelPar,processPar,None,segPar )
        ## the next lines only pad the output into the 8-column tracking format
        detArray = np.array(p_result[2])
        #print('##line371:',detArray)
        if len(p_result[2])==0:res=[]
        else:
            cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0]

            #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
            detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) )  ## 2023.08.03 input format changed
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
        retResults=[imgarray_list,detArray,res ]
        #print('##line380:',retResults[2])
        return retResults,timeOut

    else:
        t0 = time.time()
        timeInfos_track=''
        # Detect+track the sampled frames, keeping the latest state of every tracker.
        for iframe_index, index_frame in enumerate(index_list):
            p_result,timeOut = AI_det_track( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelPar,processPar,sort_tracker,segPar )
            timeInfos_track='%s:%s'%(timeInfos_track,timeOut)

            for tracker in p_result[5]:
                trackers_dic[tracker.id]=deepcopy(tracker)
        t1 = time.time()

        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history)
            if len(bbox_history)<2: continue
            ### convert (x0,y0,x1,y1) to (xc,yc,w,h)
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs;

            arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6]
            # frame_min -- the first frame of this batch; e.g. batch [1,100] -> frame_min=1, [101,200] -> frame_min=101
            # frames[0] -- the first frame where this target appears; it may precede frame_min (a track spanning batches)

            ## to restrict interpolation to this batch, intersect [frame_min, frame_max] with [frames[0], frames[-1]]
            #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##

            ## to recover the complete trajectory, interpolate from the target's first appearance
            inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1])
            new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 )
            f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            # Smoothing window: the largest odd number <= track length, capped at windowsize.
            move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im],move_cnt_use )

            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
            track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
            #print('#####line116:',trackId,frame_min,frame_max,'----------',interpolation_x0s.shape,track_det_result.shape ,'-----')

        ## convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2 ; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2 ; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s;
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s;
        detResults=[]
        # Regroup the flat result array per input frame.
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ]
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
            #[ x0 ,y0 ,x1 ,y1 ,conf,cls,ifrmae,trackId ]
            #[ifrmae, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
            detResults.append( res )


        retResults=[imgarray_list,track_det_result,detResults ]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0),get_ms(t2,t1), timeInfos_track )
        return retResults,timeInfos
|
||||
def AI_det_track_N( im0s_in,modelList,postProcess,sort_tracker):
    """Multi-model variant of AI_det_track: detect via AI_process_N, then
    optionally run the SORT tracker on the merged detections.

    im0s_in = (image_list, frame_id). Returns (p_result, time_info); when
    tracking runs, tracked detections and tracker objects are appended to
    p_result as indices 4 and 5.
    """
    im0s,iframe=im0s_in[0],im0s_in[1]
    dets = AI_process_N(im0s,modelList,postProcess)
    # Pad into the [im0, im, det_list, iframe]-shaped result used elsewhere.
    p_result=[[],[],dets[0],[] ]
    if sort_tracker:
        # Here the frequency of tracker invocation can be configured.
        #..................USE TRACK FUNCTION....................
        #pass an empty array to sort
        dets_to_sort = np.empty((0,7), dtype=np.float32)

        # NOTE: We send in detected object class too
        #for detclass,x1,y1,x2,y2,conf in p_result[2]:
        for x1,y1,x2,y2,conf, detclass in p_result[2]:
            #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
            dets_to_sort = np.vstack((dets_to_sort,
                        np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))

        # Run SORT
        tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
        tracks =sort_tracker.getTrackers()
        p_result.append(tracked_dets)  ###index=4
        p_result.append(tracks)  ###index=5

    return p_result,dets[1]
|
||||
def get_tracker_cls(boxes,scId=4,clsId=5):
    """Pick the dominant class of one track chain.

    Normally a track chain holds a single class, but misdetections can mix
    several classes into one chain; the class whose boxes have the largest
    summed confidence wins.

    boxes -- the chain's box history, rows like [xc,yc,width,height,score,class,iframe]
    scId  -- column index of the score; clsId -- column index of the class
    Returns the winning class id as an int.
    """
    class_column = boxes[:, clsId]
    candidate_ids = list(set(class_column.tolist()))
    summed_scores = [np.sum(boxes[:, scId][class_column == cid]) for cid in candidate_ids]
    winner = summed_scores.index(np.max(summed_scores))
    return int(candidate_ids[winner])
|
||||
|
||||
def AI_det_track_batch_N(imgarray_list, iframe_list ,modelList,postProcess,sort_tracker,trackPar):
    '''
    Inputs:
        imgarray_list -- list of images
        iframe_list -- list of frame ids
        modelPar -- model dict, modelPar={'det_Model':,'seg_Model':}
        processPar -- dict of detection params: 'half', 'device', 'conf_thres', 'iou_thres', 'trtFlag_det'
        sort_tracker -- initialized tracker object; present even for a single frame to keep the interface uniform
        trackPar -- tracking params, keys: det_cnt, windowsize
        segPar -- segmentation params, or None when unused
    Outputs: retResults, timeInfos
        retResults: list
        retResults[0] -- imgarray_list
        retResults[1] -- all results as one numpy array of 8 columns: x1, y1, x2, y2, conf, detclass, iframe, trackId
        retResults[2] -- all results as a list; element j is the box list of frame j, each box being
                         [ x0 ,y0 ,x1 ,y1 ,conf, cls ,ifrmae,trackId ]; retResults[2][j][k] is box k of
                         frame j. (output format changed 2023.08.03)
    '''

    det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize']
    trackers_dic={}
    # Run detection every det_cnt frames; make sure the last frame is included.
    index_list = list(range( 0, len(iframe_list) ,det_cnt ));
    # NOTE(review): this compares an index against a frame id -- confirm iframe_list is 0-based indices here.
    if len(index_list)>1 and index_list[-1]!= iframe_list[-1]:
        index_list.append( len(iframe_list) - 1 )

    if len(imgarray_list)==1:  # a single image needs no tracking
        retResults = []
        p_result,timeOut = AI_det_track_N( [ [imgarray_list[0]] ,iframe_list[0] ],modelList,postProcess,None )
        ## the next lines only pad the output into the 8-column tracking format
        detArray = np.array(p_result[2])
        if len(p_result[2])==0:res=[]
        else:
            cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0]

            #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) )
            detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) )  ## 2023.08.03 input format changed
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ]
        retResults=[imgarray_list,detArray,res ]
        #print('##line380:',retResults[2])
        return retResults,timeOut

    else:
        t0 = time.time()
        timeInfos_track=''
        # Detect+track the sampled frames, keeping the latest state of every tracker.
        for iframe_index, index_frame in enumerate(index_list):
            p_result,timeOut = AI_det_track_N( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelList,postProcess,sort_tracker )
            timeInfos_track='%s:%s'%(timeInfos_track,timeOut)

            for tracker in p_result[5]:
                trackers_dic[tracker.id]=deepcopy(tracker)
        t1 = time.time()

        track_det_result = np.empty((0,8))
        for trackId in trackers_dic.keys():
            tracker = trackers_dic[trackId]
            bbox_history = np.array(tracker.bbox_history).copy()
            if len(bbox_history)<2: continue
            ### convert (x0,y0,x1,y1) to (xc,yc,w,h)
            xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
            whs = bbox_history[:,2:4] - bbox_history[:,0:2]
            bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs;

            # Added 2023.11.17: force every box on the chain to share one class id.
            chainClsId = get_tracker_cls(bbox_history,scId=4,clsId=5)
            bbox_history[:,5] = chainClsId

            arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6]
            # frame_min -- the first frame of this batch; e.g. batch [1,100] -> frame_min=1, [101,200] -> frame_min=101
            # frames[0] -- the first frame where this target appears; it may precede frame_min (a track spanning batches)

            ## to restrict interpolation to this batch, intersect [frame_min, frame_max] with [frames[0], frames[-1]]
            #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ##

            ## to recover the complete trajectory, interpolate from the target's first appearance
            inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1])
            new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 )
            f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
            # Smoothing window: the largest odd number <= track length, capped at windowsize.
            move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
            for im in range(4):
                interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im],move_cnt_use )

            cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
            interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
            track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
            #print('#####line116:',trackId,'----------',interpolation_x0s.shape,track_det_result.shape,bbox_history ,'-----')

        ## convert [xc,yc,w,h] back to [x0,y0,x1,y1]
        x0s = track_det_result[:,0] - track_det_result[:,2]/2 ; x1s = track_det_result[:,0] + track_det_result[:,2]/2
        y0s = track_det_result[:,1] - track_det_result[:,3]/2 ; y1s = track_det_result[:,1] + track_det_result[:,3]/2
        track_det_result[:,0] = x0s; track_det_result[:,1] = y0s;
        track_det_result[:,2] = x1s; track_det_result[:,3] = y1s;
        detResults=[]
        # Regroup the flat result array per input frame.
        for iiframe in iframe_list:
            boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ]
            res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in boxes_oneFrame ]
            #[ x0 ,y0 ,x1 ,y1 ,conf,cls,ifrmae,trackId ]
            #[ifrmae, x0 ,y0 ,x1 ,y1 ,conf,cls,trackId ]
            detResults.append( res )


        retResults=[imgarray_list,track_det_result,detResults ]
        t2 = time.time()
        timeInfos = 'detTrack:%.1f TrackPost:%.1f, %s'%(get_ms(t1,t0),get_ms(t2,t1), timeInfos_track )
        return retResults,timeInfos
|
||||
|
||||
def ocr_process(pars):
    """Run TensorRT OCR on one grayscale image patch and greedily decode it.

    pars[0:6] = (img_patch, engine, context, converter, AlignCollate_normal, device)
    Returns (preds_str, info_str) where info_str carries per-stage timings.
    """

    img_patch,engine,context,converter,AlignCollate_normal,device=pars[0:6]
    time1 = time.time()
    # Collate/normalize the patch into a batch tensor ('L' = 8-bit grayscale).
    img_tensor = AlignCollate_normal([ Image.fromarray(img_patch,'L') ])
    # NOTE(review): the input goes to hard-coded 'cuda:0' while later tensors
    # use the 'device' argument -- confirm the two always match.
    img_input = img_tensor.to('cuda:0')
    time2 = time.time()

    preds,trtstr=OcrTrtForward(engine,[img_input],context)
    time3 = time.time()

    batch_size = preds.size(0)
    preds_size = torch.IntTensor([preds.size(1)] * batch_size)

    ######## filter ignore_char, rebalance
    # Renormalize the per-step probabilities so each row sums to 1 again.
    preds_prob = F.softmax(preds, dim=2)
    preds_prob = preds_prob.cpu().detach().numpy()
    pred_norm = preds_prob.sum(axis=2)
    preds_prob = preds_prob/np.expand_dims(pred_norm, axis=-1)
    preds_prob = torch.from_numpy(preds_prob).float().to(device)
    # Greedy decoding: take the argmax class at every time step.
    _, preds_index = preds_prob.max(2)
    preds_index = preds_index.view(-1)
    time4 = time.time()
    preds_str = converter.decode_greedy(preds_index.data.cpu().detach().numpy(), preds_size.data)
    time5 = time.time()

    info_str= ('pre-process:%.2f TRTforward:%.2f (%s) postProcess:%2.f decoder:%.2f, Total:%.2f , pred:%s'%(get_ms(time2,time1 ),get_ms(time3,time2 ),trtstr, get_ms(time4,time3 ), get_ms(time5,time4 ), get_ms(time5,time1 ), preds_str ) )
    return preds_str,info_str
|
||||
|
||||
def AI_process_Ocr(im0s,modelList,device,detpar):
    """Detect license plates with modelList[0], then OCR each crop with modelList[1].

    Args:
        im0s: list whose first element is the input image (numpy array).
        modelList: [detector, ocrModel]; both expose an ``eval`` method.
        device: device selector passed to torch.device (e.g. '0' or 'cpu').
        detpar: dict with 'conf_thres', 'iou_thres' and 'nc' for post_process.
    Returns:
        ([image, image, dets, 0], time_info) where dets is a list of
        [label, xyxy] pairs and time_info is a timing summary string.
    """
    timeMixPost = ':0 ms'
    new_device = torch.device(device)
    time0 = time.time()
    # Prepare the frame for the detector (resize/normalize -- see pre_process).
    img, padInfos = pre_process(im0s[0], new_device)
    ocrModel = modelList[1]
    time1 = time.time()
    preds,timeOut = modelList[0].eval(img)
    time2 = time.time()
    boxes = post_process(preds, padInfos, device, conf_thres=detpar['conf_thres'], iou_thres=detpar['iou_thres'],
                         nc=detpar['nc']) # post-processing (NMS + rescale -- TODO confirm)
    # Crop every detected box out of the original image for OCR.
    imagePatches = [im0s[0][int(x[1]):int(x[3]), int(x[0]):int(x[2])] for x in boxes]

    detRets1 = [ocrModel.eval(patch) for patch in imagePatches]
    time3 = time.time()
    dets = []
    for i, (box, ocr) in enumerate(zip(boxes, detRets1)):
        # plat_format presumably normalizes/validates the raw OCR string and
        # returns a falsy value for invalid plates -- TODO confirm.
        label = plat_format(ocr)
        if label:
            xyxy = box[0:4]
            dets.append([label, xyxy])

    time_info = 'pre_process:%.1f, det:%.1f , ocr:%.1f ,timeMixPost:%s ' % (
        (time1 - time0) * 1000, (time2 - time1) * 1000, (time3 - time2) * 1000, timeMixPost)

    return [im0s[0],im0s[0],dets,0],time_info
|
||||
|
||||
|
||||
def AI_process_Crowd(im0s,model,device,postPar):
    """Run crowd counting on one frame; return the head points and the count.

    Args:
        im0s: list whose first element is the input image.
        model: crowd model exposing ``eval``; its output dict must contain
            'pred_logits' and 'pred_points'.
        device: device selector passed to torch.device.
        postPar: dict with the confidence threshold under key 'conf'.
    Returns:
        ([image, image, dets, 0], time_info) where dets holds one entry:
        [count label string, list of [x, y] head points].
    """
    timeMixPost = ':0 ms'
    new_device = torch.device(device)
    t_start = time.time()
    preds = model.eval(im0s[0])
    t_infer = time.time()

    # Per-query probability of the "person" class (logit index 1).
    scores = torch.nn.functional.softmax(preds['pred_logits'], -1)[:, :, 1][0]
    pred_points = preds['pred_points'][0]

    keep = scores > postPar['conf']
    points = pred_points[keep].detach().cpu().numpy().tolist()
    predict_cnt = int(keep.sum())
    t_post = time.time()

    Calc_label = '当前人数: %d' % (predict_cnt)

    dets = [[Calc_label, points]]
    time_info = 'det:%.1f , post:%.1f ,timeMixPost:%s ' % (
        (t_infer - t_start) * 1000, (t_post - t_infer) * 1000, timeMixPost)

    return [im0s[0],im0s[0],dets,0],time_info
|
||||
|
||||
|
||||
def main():
    """Smoke test: run joint detection + segmentation over a folder of images."""
    ## Preset parameters
    device_='1' ##device selector, one of 'cpu', '0', '1'

    ## The parameters below are currently fixed
    Detweights = "weights/yolov5/class5/best_5classes.pt"
    seg_nclass = 2
    Segweights = "weights/BiSeNet/checkpoint.pth"
    conf_thres,iou_thres,classes= 0.25,0.45,5
    labelnames = "weights/yolov5/class5/labelnames.json"
    # One BGR color per class used when drawing boxes and labels.
    rainbows = [ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
    allowedList=[0,1,2,3]

    ## Load the models and pre-render the label bitmaps for drawing
    device = select_device(device_)
    names=get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names,rainbows,outfontsize=40,fontpath="conf/platech.ttf")
    half = device.type != 'cpu' # half precision only supported on CUDA
    model = attempt_load(Detweights, map_location=device) # load FP32 model
    if half: model.half()
    segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)

    ## Image test: process every file in impth and write results to outpth
    #url='images/examples/20220624_响水河_12300_1621.jpg'
    impth = 'images/examples/'
    outpth = 'images/results/'
    folders = os.listdir(impth)
    for i in range(len(folders)):
        imgpath = os.path.join(impth, folders[i])
        im0s=[cv2.imread(imgpath)]
        time00 = time.time()
        # AI_process is defined elsewhere in this module; runs det + seg and
        # returns drawn results in p_result[1].
        p_result,timeOut = AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,half,device,conf_thres, iou_thres,allowedList,fontSize=1.0)
        time11 = time.time()
        image_array = p_result[1]
        cv2.imwrite( os.path.join( outpth,folders[i] ) ,image_array )
        #print('----process:%s'%(folders[i]), (time.time() - time11) * 1000)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Script entry point: run the folder smoke test above.
if __name__=="__main__":
    main()
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
from DMPRUtils.DMPR_process import DMPR_process
|
||||
import tensorrt as trt
|
||||
import sys,os
|
||||
#from DMPRUtils.model.detector import DirectionalPointDetector
|
||||
from DMPRUtils.yolo_net import Model
|
||||
import torch
|
||||
|
||||
class DMPRModel(object):
    """Wrapper around the DMPR parking-slot point detector.

    Supports PyTorch weights (.pth/.pt) and TensorRT engines (.engine);
    ``eval`` runs DMPR_process and returns numpy detections.
    """

    # Default post-processing parameters, kept immutable at class level.
    _DEFAULT_PAR = {'depth_factor': 32, 'NUM_FEATURE_MAP_CHANNEL': 6,
                    'dmpr_thresh': 0.3, 'dmprimg_size': 640}

    def __init__(self, weights=None, par=None):
        # BUGFIX: the previous mutable-dict default argument was mutated below
        # (self.par['modelType'] = ...), leaking state across instances that
        # relied on the default. A fresh copy is built per instance instead.
        self.par = dict(self._DEFAULT_PAR) if par is None else par
        self.device = 'cuda:0'
        self.half = True

        # Infer the backend from the weight-file extension.
        if weights.endswith('.engine'):
            self.infer_type = 'trt'
        elif weights.endswith('.pth') or weights.endswith('.pt'):
            self.infer_type = 'pth'
        else:
            print('#########ERROR:', weights, ': no registered inference type, exit')
            # BUGFIX: exit with a non-zero status on error (0 signals success).
            sys.exit(1)

        if self.infer_type == 'trt':
            logger = trt.Logger(trt.Logger.ERROR)
            with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
                # Deserialize the local TRT file into an ICudaEngine object.
                self.model = runtime.deserialize_cuda_engine(f.read())
        elif self.infer_type == 'pth':
            #self.model = DirectionalPointDetector(3, self.par['depth_factor'], self.par['NUM_FEATURE_MAP_CHANNEL']).to(self.device)
            confUrl = os.path.join(os.path.dirname(__file__), 'DMPRUtils', 'config', 'yolov5s.yaml')
            self.model = Model(confUrl, ch=3).to(self.device)
            self.model.load_state_dict(torch.load(weights))
            print('#######load pt model:%s success ' % (weights))
        self.par['modelType'] = self.infer_type
        print('#########加载模型:', weights, ' 类型:', self.infer_type)

    def eval(self, image):
        """Run DMPR point detection on one image.

        Returns (det, timeInfos): numpy detection array and a timing string.
        """
        det, timeInfos = DMPR_process(image, self.model, self.device, self.par)
        det = det.cpu().detach().numpy()
        return det, timeInfos

    def get_ms(self, t1, t0):
        """Elapsed milliseconds between t0 and t1."""
        return (t1 - t0) * 1000.0
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,227 @@
|
|||
import math
|
||||
import os
|
||||
import time
|
||||
from collections import namedtuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
from torchvision.transforms import ToTensor
|
||||
|
||||
from DMPRUtils.model import DirectionalPointDetector
|
||||
|
||||
from utils.datasets import letterbox
|
||||
from utils.general import clip_coords
|
||||
from utils.torch_utils import select_device
|
||||
#from DMPRUtils.trtUtils import TrtForwardCase
|
||||
#import segutils.trtUtils.segTrtForward as TrtForwardCase
|
||||
from segutils.trtUtils import segTrtForward
|
||||
MarkingPoint = namedtuple('MarkingPoint', ['x', 'y', 'direction', 'shape'])
|
||||
|
||||
|
||||
def plot_points(image, pred_points, line_thickness=3):
    """Plot marking points on the image.

    Each entry of pred_points is (confidence, x, y, direction, shape); a
    directed tick is drawn at (x, y) and a cross-bar whose style depends on
    the shape value (presumably T- vs L-shaped corner -- TODO confirm).
    The image is modified in place.
    """
    if len(pred_points):
        tl = line_thickness or round(0.002 * (image.shape[0] + image.shape[1]) / 2) + 1 # line/font thickness
        tf = max(tl - 1, 1) # font thickness
        for conf, *point in pred_points:
            p0_x, p0_y = int(point[0]), int(point[1])
            # Unit direction vector; point[2] is an angle in radians.
            cos_val = math.cos(point[2])
            sin_val = math.sin(point[2])
            # End of the direction tick.
            p1_x = int(p0_x + 20 * cos_val * tl)
            p1_y = int(p0_y + 20 * sin_val * tl)
            # Two ends of the perpendicular cross-bar.
            p2_x = int(p0_x - 10 * sin_val * tl)
            p2_y = int(p0_y + 10 * cos_val * tl)
            p3_x = int(p0_x + 10 * sin_val * tl)
            p3_y = int(p0_y - 10 * cos_val * tl)

            cv2.line(image, (p0_x, p0_y), (p1_x, p1_y), (0, 0, 255), thickness=tl)
            cv2.putText(image, str(float(conf)), (p0_x, p0_y), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), thickness=tf)
            if point[3] > 0.5:
                # Half bar from the point itself (shape > 0.5 case).
                cv2.line(image, (p0_x, p0_y), (p2_x, p2_y), (0, 0, 255), thickness=tl)
            else:
                # Full cross-bar through the point.
                cv2.line(image, (p2_x, p2_y), (p3_x, p3_y), (0, 0, 255), thickness=tf)
|
||||
|
||||
|
||||
def preprocess_image(image):
    """Preprocess a numpy image to a (1, C, H, W) torch tensor at 640x640."""
    h, w = image.shape[0], image.shape[1]
    if (h, w) != (640, 640):
        image = cv2.resize(image, (640, 640))
    return ToTensor()(image).unsqueeze(0)
|
||||
|
||||
def non_maximum_suppression(pred_points):
    """Suppress duplicate marking points lying within 1/16 of each other.

    pred_points entries are (confidence, point) pairs where point exposes
    .x/.y; for each close pair the lower-confidence one is dropped.
    """
    t_start = time.time()
    n = len(pred_points)
    drop = [False] * n
    for a in range(n - 1):
        pa = pred_points[a][1]
        for b in range(a + 1, n):
            pb = pred_points[b][1]
            # 0.0625 = 1 / 16 of the normalized image span.
            if abs(pb.x - pa.x) < 0.0625 and abs(pb.y - pa.y) < 0.0625:
                loser = a if pred_points[a][0] < pred_points[b][0] else b
                drop[loser] = True
    if any(drop):
        return [pt for pt, dead in zip(pred_points, drop) if not dead]
    t_end = time.time()
    print(f'nms: {t_end - t_start:.3f}s')
    return pred_points
|
||||
|
||||
def ms(t2, t1):
    """Return t2 - t1 as milliseconds, formatted '%.1f ' (trailing space)."""
    elapsed_ms = (t2 - t1) * 1000
    return '%.1f ' % elapsed_ms
|
||||
def get_predicted_points(prediction, thresh):
    """Get marking points from one predicted feature map.

    Args:
        prediction: tensor of shape (6, H, W); channels are assumed to be
            (confidence, shape, x_offset, y_offset, cos, sin) -- TODO confirm.
        thresh: confidence threshold.
    Returns:
        (points, timeInfo): points is an (N, 5) tensor of
        (confidence, x, y, direction, shape) with x/y normalized to [0, 1].
    """
    t1 = time.time()
    assert isinstance(prediction, torch.Tensor)

    prediction = prediction.permute(1, 2, 0).contiguous() # prediction (H, W, 6)
    height = prediction.shape[0]
    width = prediction.shape[1]
    # j: column-index grid, i: row-index grid, appended as channels 6 and 7.
    j = torch.arange(prediction.shape[1], device=prediction.device).float().repeat(prediction.shape[0], 1).unsqueeze(dim=2)
    i = torch.arange(prediction.shape[0], device=prediction.device).float().view(prediction.shape[0], 1).repeat(1,prediction.shape[1]).unsqueeze(dim=2)
    prediction = torch.cat((prediction, j, i), dim=2).view(-1, 8).contiguous()
    t2 = time.time()

    # Drop cells whose confidence falls below thresh.
    mask = prediction[..., 0] > thresh
    t3 = time.time()

    prediction = prediction[mask]
    t4 = time.time()
    # Absolute position = in-cell offset + cell index, normalized by map size.
    prediction[..., 2] = (prediction[..., 2] + prediction[..., 6]) / width
    prediction[..., 3] = (prediction[..., 3] + prediction[..., 7]) / height
    # Direction angle from the (cos, sin) channels.
    direction = torch.atan2(prediction[..., 5], prediction[..., 4])
    prediction = torch.stack((prediction[..., 0], prediction[..., 2], prediction[..., 3], direction, prediction[..., 1]), dim=1)
    t5 = time.time()
    timeInfo = 'rerange:%s scoreFilter:%s , getMask:%s stack:%s '%( ms(t2,t1),ms(t3,t2),ms(t4,t3),ms(t5,t4) )
    #print('-'*20,timeInfo)
    return prediction,timeInfo
|
||||
|
||||
def get_predicted_points_np(prediction, thresh):
    """Get marking points from one predicted feature map (numpy post-processing).

    Args:
        prediction: torch tensor of shape (6, H, W); channels are assumed to
            be (confidence, shape, x_offset, y_offset, cos, sin) -- TODO
            confirm, mirroring get_predicted_points.
    Returns:
        (N, 5) numpy array of (confidence, x, y, direction, shape) with x/y
        normalized to [0, 1].
    """
    t1 = time.time()
    prediction = prediction.permute(1, 2, 0).contiguous()  # (H, W, 6)
    t1_1 = time.time()
    prediction = prediction.cpu().detach().numpy()
    t1_2 = time.time()
    height, width = prediction.shape[0:2]
    i, j = np.mgrid[0:height, 0:width]
    i = np.expand_dims(i, axis=2); j = np.expand_dims(j, axis=2)
    # BUGFIX: append (j, i) so that column 6 holds the x (column) index and
    # column 7 the y (row) index, matching the torch implementation in
    # get_predicted_points; the previous (i, j) order divided the row index
    # by the map width below.
    prediction = np.concatenate((prediction, j, i), axis=2)
    prediction = prediction.reshape(-1, 8)
    t2 = time.time()
    # Drop cells whose confidence falls below thresh.
    mask = prediction[..., 0] > thresh
    t3 = time.time()

    prediction = prediction[mask]
    t4 = time.time()
    # Absolute position = in-cell offset + cell index, normalized by map size.
    prediction[..., 2] = (prediction[..., 2] + prediction[..., 6]) / width
    prediction[..., 3] = (prediction[..., 3] + prediction[..., 7]) / height
    # BUGFIX: np.arctan(y, x) treats x as the *out* argument; arctan2 is the
    # two-argument angle, matching torch.atan2 in get_predicted_points.
    direction = np.arctan2(prediction[..., 5:6], prediction[..., 4:5])
    prediction = np.hstack((prediction[:, 0:1], prediction[:, 2:3], prediction[:, 3:4], direction, prediction[:, 1:2]))
    t5 = time.time()

    def _fmt(a, b):
        # Elapsed milliseconds, formatted like the module-level ms() helper.
        return '%.1f ' % ((b - a) * 1000.0)

    timeInfo = 'permute:%s Tocpu:%s rerange:%s scoreFilter:%s , getMask:%s stack:%s ' % (
        _fmt(t1, t1_1), _fmt(t1_1, t1_2), _fmt(t1_2, t2), _fmt(t2, t3), _fmt(t3, t4), _fmt(t4, t5))
    print('-' * 20, timeInfo, prediction.shape)
    return prediction
|
||||
|
||||
|
||||
def detect_marking_points(detector, image, thresh, device,modelType='pth'):
    """Given image read from opencv, return detected marking points.

    detector is a torch module when modelType == 'pth' or a TensorRT engine
    when modelType == 'trt'; only the points tensor from
    get_predicted_points is returned (its timing string is dropped).
    """
    t1 = time.time()

    image_preprocess = preprocess_image(image).to(device)
    if modelType=='pth':
        prediction = detector(image_preprocess)
        #print(prediction)
    elif modelType=='trt':
        a=0  # NOTE(review): dead assignment, likely debug leftover
        prediction = segTrtForward(detector,[image_preprocess ])
        #print(prediction)

    # NOTE(review): requires a CUDA device; raises on CPU -- confirm callers.
    torch.cuda.synchronize(device)
    t2 = time.time()

    rets,timeInfo = get_predicted_points(prediction[0], thresh)
    # NOTE(review): computed but never used or returned.
    string_t2 = ' infer:%s postprocess:%s'%(ms(t2,t1),timeInfo)

    return rets
|
||||
|
||||
def scale_coords2(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale point coords (xy) from the letterboxed img1_shape back to
    # img0_shape. coords is an (N, 2) tensor/array of fractional x, y in
    # [0, 1] relative to img1_shape; it is mutated in place and returned.
    if ratio_pad is None: # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    # Convert fractional x, y to pixel coordinates in img1.
    height, width = img1_shape
    if isinstance(coords, torch.Tensor):
        coords[:, 0] = torch.round(width * coords[:, 0] - 0.5)
        coords[:, 1] = torch.round(height * coords[:, 1] - 0.5)
    else:
        # NOTE(review): the numpy branch truncates (+0.5 then astype) while the
        # torch branch rounds (-0.5); results can differ by a pixel -- confirm.
        coords[:, 0] = (width * coords[:, 0] + 0.5).astype(np.int32)
        coords[:, 1] = (height * coords[:, 1] + 0.5).astype(np.int32)

    coords[:, 0] -= pad[0] # x padding
    coords[:, 1] -= pad[1] # y padding
    coords[:, :3] /= gain
    # Clip back into the original image bounds.
    if isinstance(coords, torch.Tensor):
        coords[:, 0].clamp_(0, img0_shape[1])
        coords[:, 1].clamp_(0, img0_shape[0])
    else:
        coords[:, 0] = np.clip( coords[:, 0], 0,img0_shape[1] )
        coords[:, 1] = np.clip( coords[:, 1], 0,img0_shape[0] )

    return coords
|
||||
|
||||
|
||||
def DMPR_process(img0, model, device, DMPRmodelPar):
    """Run the DMPR detector on one image and map points back to img0 pixels.

    Args:
        img0: original image (numpy array, HxWxC).
        model: detector passed through to detect_marking_points.
        device: torch device for inference.
        DMPRmodelPar: dict with 'dmprimg_size', 'dmpr_thresh' and 'modelType'.
    Returns:
        (det, timeInfos): det is an (N, 5) tensor of
        (confidence, x, y, direction, shape) with x/y in img0 pixel
        coordinates; timeInfos is a timing summary string.
    """
    t0 = time.time()
    height, width, _ = img0.shape

    # Letterbox to the model input size without auto stride-padding.
    img, ratio, (dw, dh) = letterbox(img0, DMPRmodelPar['dmprimg_size'], auto=False)
    t1 = time.time()
    det = detect_marking_points(model, img, DMPRmodelPar['dmpr_thresh'], device, modelType=DMPRmodelPar['modelType'])
    t2 = time.time()
    if len(det):
        # Map the normalized point coordinates back onto the original image.
        det[:, 1:3] = scale_coords2(img.shape[:2], det[:, 1:3], img0.shape)

    t3 = time.time()
    # BUGFIX: '%1.f' printed zero decimals for the total; '%.1f' matches the
    # other fields of this string.
    timeInfos = 'dmpr:%.1f (lettbox:%.1f dectect:%.1f scaleBack:%.1f) ' % (
        (t3 - t0) * 1000, (t1 - t0) * 1000, (t2 - t1) * 1000, (t3 - t2) * 1000)
    return det, timeInfos
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Standalone demo: detect marking points on one image and save the drawing.
    impath = r'I:\zjc\weiting1\Images'
    file = 'DJI_0001_8.jpg'
    imgpath = os.path.join(impath, file)
    img0 = cv2.imread(imgpath)

    device_ = '0'
    device = select_device(device_)
    # NOTE(review): `config` is never imported in this module, so this block
    # raises NameError as written -- confirm the intended config module.
    args = config.get_parser_for_inference().parse_args()
    model = DirectionalPointDetector(3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device)
    weights = r"E:\pycharmProject\DMPR-PS\weights\dp_detector_499.pth"
    model.load_state_dict(torch.load(weights))

    # NOTE(review): DMPR_process returns (det, timeInfos); the whole tuple is
    # passed to plot_points below -- likely needs unpacking.
    det = DMPR_process(img0, model, device, args)

    plot_points(img0, det)

    cv2.imwrite(file, img0, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
# parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.0 # model depth multiple
|
||||
width_multiple: 1.0 # layer channel multiple
|
||||
|
||||
# anchors
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Focus, [64, 3]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 9, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 1, SPP, [1024, [5, 9, 13]]],
|
||||
[-1, 3, C3, [1024, False]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
# parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.67 # model depth multiple
|
||||
width_multiple: 0.75 # layer channel multiple
|
||||
|
||||
# anchors
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Focus, [64, 3]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 9, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 1, SPP, [1024, [5, 9, 13]]],
|
||||
[-1, 3, C3, [1024, False]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
# parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 0.33 # model depth multiple
|
||||
width_multiple: 0.50 # layer channel multiple
|
||||
|
||||
# anchors
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Focus, [64, 3]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 9, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 1, SPP, [1024, [5, 9, 13]]],
|
||||
[-1, 3, C3, [1024, False]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
# parameters
|
||||
nc: 80 # number of classes
|
||||
depth_multiple: 1.33 # model depth multiple
|
||||
width_multiple: 1.25 # layer channel multiple
|
||||
|
||||
# anchors
|
||||
anchors:
|
||||
- [10,13, 16,30, 33,23] # P3/8
|
||||
- [30,61, 62,45, 59,119] # P4/16
|
||||
- [116,90, 156,198, 373,326] # P5/32
|
||||
|
||||
# YOLOv5 backbone
|
||||
backbone:
|
||||
# [from, number, module, args]
|
||||
[[-1, 1, Focus, [64, 3]], # 0-P1/2
|
||||
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
|
||||
[-1, 3, C3, [128]],
|
||||
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
|
||||
[-1, 9, C3, [256]],
|
||||
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
|
||||
[-1, 9, C3, [512]],
|
||||
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
|
||||
[-1, 1, SPP, [1024, [5, 9, 13]]],
|
||||
[-1, 3, C3, [1024, False]], # 9
|
||||
]
|
||||
|
||||
# YOLOv5 head
|
||||
head:
|
||||
[[-1, 1, Conv, [512, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 6], 1, Concat, [1]], # cat backbone P4
|
||||
[-1, 3, C3, [512, False]], # 13
|
||||
|
||||
[-1, 1, Conv, [256, 1, 1]],
|
||||
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
|
||||
[[-1, 4], 1, Concat, [1]], # cat backbone P3
|
||||
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
|
||||
|
||||
[-1, 1, Conv, [256, 3, 2]],
|
||||
[[-1, 14], 1, Concat, [1]], # cat head P4
|
||||
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
|
||||
|
||||
[-1, 1, Conv, [512, 3, 2]],
|
||||
[[-1, 10], 1, Concat, [1]], # cat head P5
|
||||
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
|
||||
|
||||
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
|
||||
]
|
||||
|
|
@ -0,0 +1,148 @@
|
|||
import math
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
import time
|
||||
|
||||
|
||||
def dmpr_yolo( yolo_det, dmpr_det,pars):
    """Relabel yolo car boxes without a matching DMPR slot corner as illegal.

    Args:
        yolo_det: list/array of [x1, y1, x2, y2, conf, cls] boxes.
        dmpr_det: array of DMPR marking points; columns used here are
            (.., x, y, direction, shape) at offsets 1-4 -- TODO confirm.
        pars: dict with 'imgSize', 'carCls', 'scaleRatio', 'illCls', 'border'.
    Returns:
        (detections, time string): detections as (N, 6) array where cars with
        no matching corner carry the illegal-parking class pars['illCls'].
    """
    #if len(yolo_det)==0 or len(dmpr_det)==0:

    #print('line11:\n',yolo_det, dmpr_det,pars)
    time1=time.time()
    if len(yolo_det)==0:
        return yolo_det,' No yolo detections'

    img_shape = (pars['imgSize'][1],pars['imgSize'][0])
    cls = pars['carCls']; scaleRatio = pars['scaleRatio']
    illParkCls = pars['illCls'];border = pars['border']

    yolo_det = np.array(yolo_det)
    yolo_det_0 = yolo_det.copy()

    #print('-'*10,'line17:',yolo_det_0)

    # Filter boxes whose centre lies within `border` pixels of the image edge
    # (avoids judging half-visible cars).
    x_c = (yolo_det[:, 0] + yolo_det[:, 2]) / 2
    y_c = (yolo_det[:, 1] + yolo_det[:, 3]) / 2
    tmp = (x_c >= border) & (x_c <= (img_shape[1] - border)) & (y_c >= border) & (y_c <= (img_shape[0] - border))
    yolo_det = yolo_det[tmp]

    # yolo_det_clone columns: x1, y1, x2, y2, conf, cls, unlabel
    # (unlabel == 0 means "no slot corner matched"; non-zero means matched).
    yolo_det_clone = yolo_det.copy()
    tmp_0_tensor = np.zeros([len(yolo_det), 1])
    yolo_det_clone = np.concatenate([yolo_det_clone, tmp_0_tensor], axis=1)

    # Keep only the car class for the corner-matching step.
    yolo_det = yolo_det[yolo_det[:, -1] == cls]

    # new_yolo_det: dilated boxes; columns 0-3 = dilated x1, y1, x2, y2,
    # column 4 unused, columns 5-6 = original box centre (x_c, y_c).
    new_yolo_det = np.zeros([len(yolo_det), 7])

    # Dilate each yolo box: the longer axis grows by 2*scaleRatio of its
    # length per side, the shorter axis by scaleRatio per side.
    x_length = yolo_det[:, 2] - yolo_det[:, 0] #x2-x1
    y_length = yolo_det[:, 3] - yolo_det[:, 1] #y2-y1

    # Whichever axis is longer gets the doubled dilation coefficient.
    x_dilate_coefficient = ((x_length > y_length) + 1)*scaleRatio
    y_dilate_coefficient = ((~(x_length > y_length)) + 1)*scaleRatio

    # Original box centre x_c, y_c.
    new_yolo_det[:, 5] = (yolo_det[:, 0] + yolo_det[:, 2]) / 2
    new_yolo_det[:, 6] = (yolo_det[:, 1] + yolo_det[:, 3]) / 2

    # Dilation, clipped to the image bounds.
    new_yolo_det[:, 0] = np.round(yolo_det[:, 0] - x_dilate_coefficient * x_length).clip(0, img_shape[1]) #x1 dilated
    new_yolo_det[:, 1] = np.round(yolo_det[:, 1] - y_dilate_coefficient * y_length).clip(0, img_shape[0]) #y1 dilated
    new_yolo_det[:, 2] = np.round(yolo_det[:, 2] + x_dilate_coefficient * x_length).clip(0, img_shape[1]) #x2 dilated
    new_yolo_det[:, 3] = np.round(yolo_det[:, 3] + y_dilate_coefficient * y_length).clip(0, img_shape[0]) #y2 dilated

    m, n = new_yolo_det.size, dmpr_det.size

    if not m or not n:
        # No car boxes or no corner points: every remaining car is "illegal".
        #print('##line47 original yolo_det_clone:',yolo_det_clone)
        yolo_det_clone[np.logical_and( yolo_det_clone[:,-1]==0,yolo_det_clone[:,-2]==cls),-2] = illParkCls

        #yolo_det_clone[yolo_det_clone[:, -1] == 0 & yolo_det_clone[:, -2==cls] , -2] = illParkCls
        return yolo_det_clone[:,0:6], ' no cars or T/L corners'

    # Broadcast boxes against corner points into an (m, n, 12) pairwise table.
    new_yolo = new_yolo_det[:, np.newaxis, :].repeat(dmpr_det.shape[0], 1) # expand to (m, n, 7)
    dmpr_det = dmpr_det[np.newaxis, ...].repeat(new_yolo_det.shape[0], 0)
    yolo_dmpr = np.concatenate((new_yolo, dmpr_det), axis=2) # (m, n, 12)

    x_p, y_p = yolo_dmpr[..., 8], yolo_dmpr[..., 9]
    x1, y1, x2, y2 = yolo_dmpr[..., 0], yolo_dmpr[..., 1], yolo_dmpr[..., 2], yolo_dmpr[..., 3]
    x_c, y_c = yolo_dmpr[..., 5], yolo_dmpr[..., 6]

    # Angle from the corner point to the box centre vs. the corner's own
    # direction(s), all in degrees.
    direction1 = np.arctan2(y_c - y_p, x_c - x_p) / math.pi * 180
    direction2 = yolo_dmpr[..., 10] / math.pi * 180
    direction3 = direction2 + 90 # second direction of an L-shaped corner
    direction3[direction3 > 180] -= 360
    ang_diff = direction1 - direction2
    ang_diff2 = direction1 - direction3

    # Box matches a corner when the dilated box contains the point AND the
    # centre direction is within 90 degrees of the corner direction.
    # direction ∈ (-180, 180), so differences beyond ±180 are wrapped first.
    # T-shaped corners check one direction, L-shaped corners check both.
    mask = (x_p >= x1) & (x_p <= x2) & (y_p >= y1) & (y_p <= y2) & \
           (((yolo_dmpr[..., 11] <= 0.5) & # T-shaped corner case
             (((ang_diff >= -90) & (ang_diff <= 90)) | ((ang_diff > 180) & ((360 - ang_diff) <= 90)) |
              (((ang_diff) < -180) & ((360 + ang_diff) <= 90)))) |
            ((yolo_dmpr[..., 11] > 0.5) & # L-shaped corner case
             (((ang_diff >= -90) & (ang_diff <= 90)) | ((ang_diff > 180) & ((360 - ang_diff) <= 90)) |
              (((ang_diff) < -180) & ((360 + ang_diff) <= 90))) &
             (((ang_diff2 >= -90) & (ang_diff2 <= 90)) | ((ang_diff2 > 180) & ((360 - ang_diff2) <= 90)) |
              (((ang_diff2) < -180) & ((360 + ang_diff2) <= 90)))))

    # Number of matching corners per car box.
    res = np.sum(mask, axis=1)

    yolo_det_clone[yolo_det_clone[:, -2] == cls, -1] = res
    #print('##line69 original yolo_det_clone:',yolo_det_clone)
    #yolo_det_clone[yolo_det_clone[:, -1] == 0, -2] = illParkCls

    #print('-'*20,'--line78',yolo_det_clone)
    # Cars with zero matching corners are relabeled as illegally parked.
    yolo_det_clone[ np.logical_and( yolo_det_clone[:,-1]==0,yolo_det_clone[:,-2]==cls) ,-2 ] = illParkCls
    #print('-'*20,'--line80:',yolo_det_clone)
    yolo_det_clone = yolo_det_clone[:,0:6]
    time2=time.time()

    return np.array(yolo_det_clone), 'dmpr_yolo:%.1f'%( (time2-time1)*1000 )
|
||||
def stdc_yolo(stdc_det, yolo_det, pars):
    """Filter car boxes with a segmentation mask.

    A box of class pars['carCls'] is kept only when the mask value at its
    centre pixel is 0; boxes of any other class are always kept (appended
    after the surviving cars).
    """
    car_mask = yolo_det[:, -1] == pars['carCls']
    cars = yolo_det[car_mask]
    others = yolo_det[~car_mask]

    seg = np.uint8(stdc_det)
    # Integer centre of each car box.
    cx = ((cars[:, 0] + cars[:, 2]) // 2).astype(int)
    cy = ((cars[:, 1] + cars[:, 3]) // 2).astype(int)
    kept_cars = cars[seg[cy, cx] == 0]

    return np.concatenate((kept_cars, others), axis=0)
|
||||
|
||||
def dmpr_yolo_stdc(predsList,pars):
    """Fuse yolo boxes with DMPR corner points (and optionally an STDC mask).

    Args:
        predsList: [yolo_det, dmpr_det] or [yolo_det, dmpr_det, stdc_det].
        pars: parameter dict forwarded to stdc_yolo/dmpr_yolo; must also
            provide 'classReindex' for the final class remapping.
    Returns:
        The (detections, time string) pair from dmpr_yolo, with detection
        class ids remapped through pars['classReindex'].
    """
    if len(predsList)==2:
        yolo_det, dmpr_det = predsList[0:2]
    else:
        yolo_det, dmpr_det,stdc_det = predsList[0:3]
    if len(yolo_det)==0:
        return yolo_det,' No yolo detections'
    if isinstance(yolo_det,list):
        yolo_det = np.array(yolo_det)
    if len(predsList)>2:
        # Drop cars whose centre lies on a non-zero segmentation-mask pixel.
        yolo_det = stdc_yolo(stdc_det, yolo_det,pars)

    rets = dmpr_yolo(yolo_det, dmpr_det,pars)
    for i,ret in enumerate(rets[0]):
        #print(ret,'\n ',rets,pars['classReindex'])
        # Remap the class id in place; presumably classReindex maps internal
        # class ids to output ids -- TODO confirm it accepts the float value
        # stored in ret[5].
        ret[5] = pars['classReindex'][ret[5]]
        #rets[i][5] = pars['classReindex'][ret[5]]

    return rets
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
"""Network model related package."""
|
||||
from .detector import DirectionalPointDetector
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
"""Defines the detector network structure."""
|
||||
import torch
|
||||
from torch import nn
|
||||
from DMPRUtils.model.network import define_halve_unit, define_detector_block
|
||||
|
||||
|
||||
class YetAnotherDarknet(nn.modules.Module):
    """Yet another darknet, imitating darknet-53 with depth of darknet-19.

    Five stride-2 stages grow the channel count from depth_factor to
    16 * depth_factor while shrinking the spatial size by 2**5.
    NOTE: the layer construction order below fixes the state_dict keys; do
    not reorder if pretrained weights are to be loaded.
    """
    def __init__(self, input_channel_size, depth_factor):
        super(YetAnotherDarknet, self).__init__()
        layers = []
        # 0: stem convolution, keeps spatial resolution
        layers += [nn.Conv2d(input_channel_size, depth_factor, kernel_size=3,
                             stride=1, padding=1, bias=False)]
        layers += [nn.BatchNorm2d(depth_factor)]
        layers += [nn.LeakyReLU(0.1)]
        # 1: downsample x2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        # 2: downsample x2
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        # 3: downsample x2, two detector blocks
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        layers += define_detector_block(depth_factor)
        # 4: downsample x2, two detector blocks
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        layers += define_detector_block(depth_factor)
        # 5: downsample x2
        depth_factor *= 2
        layers += define_halve_unit(depth_factor)
        layers += define_detector_block(depth_factor)
        self.model = nn.Sequential(*layers)

    def forward(self, *x):
        # Only the first positional input is used.
        return self.model(x[0])
|
||||
|
||||
|
||||
class DirectionalPointDetector(nn.modules.Module):
    """Detector for point with direction.

    Produces an output map whose first 4 channels (confidence, shape,
    offset_x, offset_y) are squashed by sigmoid and whose remaining channels
    (cos, sin) are squashed by tanh.
    """
    def __init__(self, input_channel_size, depth_factor, output_channel_size):
        super(DirectionalPointDetector, self).__init__()
        # Backbone feature extractor (16 * depth_factor output channels).
        self.extract_feature = YetAnotherDarknet(input_channel_size,
                                                 depth_factor)
        layers = []
        layers += define_detector_block(16 * depth_factor)
        layers += define_detector_block(16 * depth_factor)
        layers += [nn.Conv2d(32 * depth_factor, output_channel_size,
                             kernel_size=1, stride=1, padding=0, bias=False)]
        self.predict = nn.Sequential(*layers)

    def forward(self, *x):
        # Only the first positional input is used.
        prediction = self.predict(self.extract_feature(x[0]))
        # 4 represents that there are 4 values: confidence, shape, offset_x,
        # offset_y, whose range is between [0, 1].
        point_pred, angle_pred = torch.split(prediction, 4, dim=1)
        point_pred = torch.sigmoid(point_pred)
        angle_pred = torch.tanh(angle_pred)
        return torch.cat((point_pred, angle_pred), dim=1)
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
"""Universal network struture unit definition."""
|
||||
from torch import nn
|
||||
|
||||
|
||||
def define_squeeze_unit(basic_channel_size):
    """Define a 1x1 squeeze convolution (2c -> c) with norm and activation."""
    return [
        nn.Conv2d(2 * basic_channel_size, basic_channel_size,
                  kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(basic_channel_size),
        nn.LeakyReLU(0.1),
    ]
|
||||
|
||||
|
||||
def define_expand_unit(basic_channel_size):
    """Define a 3x3 expand convolution (c -> 2c) with norm and activation."""
    return [
        nn.Conv2d(basic_channel_size, 2 * basic_channel_size,
                  kernel_size=3, stride=1, padding=1, bias=False),
        nn.BatchNorm2d(2 * basic_channel_size),
        nn.LeakyReLU(0.1),
    ]
|
||||
|
||||
|
||||
def define_halve_unit(basic_channel_size):
    """Build a 4x4 stride-2 "halve" convolution (c -> 2c, spatial /2)."""
    doubled = 2 * basic_channel_size
    return [
        nn.Conv2d(basic_channel_size, doubled, kernel_size=4,
                  stride=2, padding=1, bias=False),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
|
||||
|
||||
|
||||
def define_depthwise_expand_unit(basic_channel_size):
    """Build a depthwise-separable expand unit (c -> 2c).

    A 1x1 pointwise convolution doubles the channels, then a 3x3
    depthwise convolution (groups == 2c) mixes spatially; each stage has
    its own BatchNorm + LeakyReLU.
    """
    doubled = 2 * basic_channel_size
    pointwise = [
        nn.Conv2d(basic_channel_size, doubled,
                  kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
    depthwise = [
        nn.Conv2d(doubled, doubled, kernel_size=3,
                  stride=1, padding=1, bias=False, groups=doubled),
        nn.BatchNorm2d(doubled),
        nn.LeakyReLU(0.1),
    ]
    return pointwise + depthwise
|
||||
|
||||
|
||||
def define_detector_block(basic_channel_size):
    """Build a detector block: a squeeze unit followed by an expand unit."""
    return (define_squeeze_unit(basic_channel_size)
            + define_expand_unit(basic_channel_size))
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
import os
|
||||
import time,argparse
|
||||
import cv2
|
||||
import torch
|
||||
import sys
|
||||
sys.path.extend(['..' ])
|
||||
from DMPRUtils.model.detector import DirectionalPointDetector
|
||||
from pathlib import Path
|
||||
from segutils.trtUtils import toONNX,ONNXtoTrt
|
||||
from DMPRUtils.yolo_net import Model
|
||||
|
||||
def main(opt):
    """Convert a DMPR .pth checkpoint to ONNX and then to a TensorRT engine.

    Args:
        opt: argparse.Namespace carrying ``weights`` — path to the .pth
            state-dict file. The .onnx/.engine files are written next to it.
    """
    # Export hyper-parameters; mWidth/mHeight fix the export input size.
    pars={'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640,
          'mWidth':640,'mHeight':640
          }

    ## The parameters below must not be changed for now.
    #DMPRweights = "weights/urbanManagement/DMPR/dp_detector_499.pth"

    DMPRweights = opt.weights.strip()
    DMPR_pthFile = Path(DMPRweights)
    inputShape =(1, 3, pars['mHeight'],pars['mWidth'])#(bs,channels,height,width)
    # Derived artifact paths: same stem as the checkpoint.
    DMPR_onnxFile = str(DMPR_pthFile.with_suffix('.onnx'))
    DMPR_trtFile = DMPR_onnxFile.replace('.onnx','.engine' )

    ## Load the model (GPU required for the TensorRT export path).
    device = 'cuda:0'

    # DMPR model — the YOLOv5-backbone variant is used instead of the
    # original DirectionalPointDetector (kept commented for reference).
    #DMPRmodel = DirectionalPointDetector(3, pars['depth_factor'], pars['NUM_FEATURE_MAP_CHANNEL']).to(device)
    confUrl = os.path.join( os.path.dirname(__file__),'config','yolov5s.yaml' )
    DMPRmodel = Model(confUrl, ch=3).to(device)

    DMPRmodel.load_state_dict(torch.load(DMPRweights))

    # Export chain: PyTorch -> ONNX (dynamic axes) -> TensorRT engine.
    toONNX(DMPRmodel,DMPR_onnxFile,inputShape=inputShape,device=device,dynamic=True)
    ONNXtoTrt(DMPR_onnxFile,DMPR_trtFile)
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: only the checkpoint path is configurable.
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/DMPR/dp_detector_499.pth', help='model path(s)')
    opt = parser.parse_args()

    main(opt)
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
# Convert the DMPR .pth checkpoint to a TensorRT engine and tag the
# resulting .engine file with the target GPU model, since TensorRT
# engines are not portable across GPU architectures.
weights=/mnt/thsw2/DSP2/weights/cityMangement3/dmpr
#weights=/mnt/thsw2/DSP2/weights/cityMangement2_0916/weights/urbanManagement/DMPR/dp_detector_299
gpu=2080Ti
python toTrt.py --weights ${weights}.pth
# Rename so engines built for different GPUs can coexist side by side.
mv ${weights}.engine ${weights}_${gpu}.engine
|
||||
|
|
@ -0,0 +1,285 @@
|
|||
# YOLOv5 YOLO-specific modules
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import sys
|
||||
from copy import deepcopy
|
||||
|
||||
import torch
|
||||
|
||||
sys.path.append('./') # to run '$ python *.py' files in subdirectories
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from models.common import *
|
||||
from models.experimental import *
|
||||
from utils.autoanchor import check_anchor_order
|
||||
from utils.general import make_divisible, check_file, set_logging
|
||||
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
|
||||
select_device, copy_attr
|
||||
|
||||
try:
|
||||
import thop # for FLOPS computation
|
||||
except ImportError:
|
||||
thop = None
|
||||
|
||||
|
||||
class Detect(nn.Module):
    """YOLOv5 detection head modified for directional-point prediction.

    The stock anchor-based multi-level decoding is commented out; forward()
    instead returns a raw 6-channel map from a single feature level:
    4 channels squashed to [0, 1] (sigmoid) and 2 channels to [-1, 1] (tanh).
    """
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layers
        super(Detect, self).__init__()
        # no is hard-coded to 6 outputs per cell: 4 sigmoid "point" values
        # plus 2 tanh "angle" values (see forward()); nc is ignored here.
        self.no = 6
        self.nl = 3
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer('anchors', a)  # shape(nl,na,2)
        self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no, 1) for x in ch)  # output conv

    def forward(self, x):
        # Original YOLOv5 anchor decoding, kept commented for reference:
        # x = x.copy()  # for profiling
        # z = []  # inference output
        # # self.training |= self.export
        # for i in range(self.nl):
        #     x[i] = self.m[i](x[i])  # conv
        #     bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
        #     x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
        #
        #     if not self.training:  # inference
        #         if self.grid[i].shape[2:4] != x[i].shape[2:4]:
        #             self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
        #
        #         y = x[i].sigmoid()
        #         y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
        #         y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
        #         z.append(y.view(bs, -1, self.no))

        # Only the middle feature level (index 1) is decoded; its head conv
        # produces the 6-channel prediction map.
        prediction = self.m[1](x[1]) #40*40
        #prediction = self.m[0](x[0]) #80*80
        # First 4 channels -> [0, 1]; remaining 2 channels -> [-1, 1].
        point_pred, angle_pred = torch.split(prediction, 4, dim=1)
        point_pred = torch.sigmoid(point_pred)
        angle_pred = torch.tanh(angle_pred)

        return torch.cat((point_pred, angle_pred), dim=1)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # Build a (1, 1, ny, nx, 2) grid of (x, y) cell coordinates; used
        # only by the commented-out stock decoding above.
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
|
||||
|
||||
|
||||
class Model(nn.Module):
    """YOLOv5 model built from a YAML config.

    Stride/anchor initialization from the stock YOLOv5 code is commented
    out because the Detect head in this file emits a raw point/direction
    map rather than anchor-decoded boxes.
    """

    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        super(Model, self).__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.SafeLoader)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
        # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

        # Build strides, anchors — disabled for the point-detector head,
        # which does not use strides/anchors at inference time.
        # m = self.model[-1]  # Detect()
        # if isinstance(m, Detect):
        #     s = 256  # 2x min stride
        #     m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
        #     m.anchors /= m.stride.view(-1, 1, 1)
        #     check_anchor_order(m)
        #     self.stride = m.stride
        #     self._initialize_biases()  # only run once
        #     print('Strides: %s' % m.stride.tolist())

        # Init weights, biases
        initialize_weights(self)
        self.info()
        logger.info('')

    def forward(self, x, augment=False, profile=False):
        """Run inference; with augment=True, merge predictions over
        several scales and a left-right flip (test-time augmentation).

        NOTE(review): the augment branch reads self.stride, whose
        initialization is commented out in __init__ — confirm before
        calling with augment=True.
        """
        if augment:
            img_size = x.shape[-2:]  # height, width
            s = [1, 0.83, 0.67]  # scales
            f = [None, 3, None]  # flips (2-ud, 3-lr)
            y = []  # outputs
            for si, fi in zip(s, f):
                xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
                yi = self.forward_once(xi)[0]  # forward
                # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
                yi[..., :4] /= si  # de-scale
                if fi == 2:
                    yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
                elif fi == 3:
                    yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
                y.append(yi)
            return torch.cat(y, 1), None  # augmented inference, train
        else:
            return self.forward_once(x, profile)  # single-scale inference, train

    def forward_once(self, x, profile=False):
        """Single forward pass through the layer list; with profile=True,
        print per-layer FLOPS/time estimates."""
        y, dt = [], []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            if profile:
                o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPS
                t = time_synchronized()
                for _ in range(10):
                    _ = m(x)
                dt.append((time_synchronized() - t) * 100)
                print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output

        if profile:
            print('%.1fms total' % sum(dt))
        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        # NOTE(review): unused with the current Detect head (the call site
        # in __init__ is commented out).
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        # Debug helper: dump per-layer detection-head biases.
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    # def _print_weights(self):
    #     for m in self.model.modules():
    #         if type(m) is Bottleneck:
    #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        # Folds BatchNorm parameters into the preceding conv for faster
        # inference; irreversible on this instance.
        print('Fusing layers... ')
        for m in self.model.modules():
            if type(m) is Conv and hasattr(m, 'bn'):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, 'bn')  # remove batchnorm
                m.forward = m.fuseforward  # update forward
        self.info()
        return self

    def nms(self, mode=True):  # add or remove NMS module
        # Appends (or strips) an NMS layer at the end of the layer list.
        present = type(self.model[-1]) is NMS  # last layer is NMS
        if mode and not present:
            print('Adding NMS... ')
            m = NMS()  # module
            m.f = -1  # from
            m.i = self.model[-1].i + 1  # index
            self.model.add_module(name='%s' % m.i, module=m)  # add
            self.eval()
        elif not mode and present:
            print('Removing NMS... ')
            self.model = self.model[:-1]  # remove
        return self

    def autoshape(self):  # add autoShape module
        # Wraps the model so it accepts raw images (cv2/PIL/np) directly.
        print('Adding autoShape... ')
        m = autoShape(self)  # wrap model
        copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
        return m

    def info(self, verbose=False, img_size=640):  # print model information
        model_info(self, verbose, img_size)
|
||||
|
||||
|
||||
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Instantiate an nn.Sequential from a YOLOv5 YAML model dict.

    Args:
        d: parsed model dict with 'anchors', 'nc', 'depth_multiple',
           'width_multiple', 'backbone' and 'head' entries.
        ch: list with the input channel count, e.g. [3]; grows to hold
            each layer's output channels as the model is built.

    Returns:
        (model, save): the nn.Sequential of layers, and the sorted list of
        layer indices whose outputs must be cached for later skip/concat use.
    """
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    # no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
    # Output count is pinned to 6 to match the point-detector Detect head
    # (4 point channels + 2 angle channels) instead of the stock formula.
    no = 6

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except:
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
                 C3, C3TR]:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3TR]:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[x] for x in f])
        elif m is Detect:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            # Reset: from here on ch[k] is the output width of layer k.
            ch = []
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Smoke test: build the model from a YAML config and leave it in
    # train mode; profiling/tensorboard snippets kept for reference.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # check file
    set_logging()
    device = select_device(opt.device)

    # Create model
    model = Model(opt.cfg).to(device)
    model.train()

    # Profile
    # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
    # y = model(img, profile=True)

    # Tensorboard
    # from torch.utils.tensorboard import SummaryWriter
    # tb_writer = SummaryWriter()
    # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
    # tb_writer.add_graph(model.model, img)  # add model to tensorboard
    # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["钓鱼","游泳"],
|
||||
"labelIndexs":["SL01","SL02"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames_实际":["国旗","浮标","船名","船只","未挂国旗船只","未封仓船只" ],
|
||||
"labelnames":[ "国旗","浮标","船名","船只","未挂国旗船只","未封仓船只" ],
|
||||
"labelIndexs":["SL040", "SL041","SL042","SL043","SL044"]
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["人"],
|
||||
"labelIndexs":["SL031"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
# Verify deployed weight files against the reference copies on /mnt/thsw2
# (diff prints nothing when files are identical).
# Businesses using mixed detection + segmentation models: compare both
# the YOLOv5 detector and the STDC segmentation weights.
for bus in highWay2 river2 drowning noParking river
do
diff /mnt/thsw2/DSP2/weights/${bus}/yolov5.pt ${bus}/yolov5.pt
diff /mnt/thsw2/DSP2/weights/${bus}/stdc_360X640.pth ${bus}/stdc_360X640.pth
done
# Detection-only businesses: compare the YOLOv5 detector weights only.
for bus in forest2 vehicle pedestrian smogfire AnglerSwimmer countryRoad cityMangement
do
diff /mnt/thsw2/DSP2/weights/${bus}/yolov5.pt ${bus}/yolov5.pt
done
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["车辆","垃圾","商贩"],
|
||||
"labelIndexs":["SL01","SL02","SL03"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["车辆","垃圾","商贩","违停"],
|
||||
"labelIndexs":["SL01","SL02","SL03","SL04"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"score_byClass":{"0":0.88,"1":0.3,"2":0.3,"3":0.3 } ,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["车辆","垃圾","商贩","违停","占道经营","裸土"],
|
||||
"labelIndexs":["SL01","SL02","SL03","SL04"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"score_byClass":{"0":0.88,"1":0.3,"2":0.3,"3":0.3 } ,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames":["护栏","交通标志","非交通标志","施工","施工"],
|
||||
"labelIndexs":["SL01","SL02","SL03","SL04","SL05"],
|
||||
"labelnamesActual":["护栏","交通标志","非交通标志","锥桶","水马" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["违法种植"],
|
||||
"labelIndexs":["SL01"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":[ "纵向裂缝","横向裂缝","网状裂缝" ],
|
||||
"labelIndexs":["SL01","SL02","SL03"]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
crack_yolov5_202302.pt对应类别['pedestrian', 'vehicle', 'D00', 'D10', 'Repair', 'D20', 'D40', 'Block crack', 'JiShui']
|
||||
roaddamage20231028.pt对应类别[ 'D00','D10','D20','D40','D44','D50','Repair','D43','D01','D11','D0w0','Block crack' ]
|
||||
[ 'D00':纵向裂缝,
|
||||
'D10':横向裂缝,
|
||||
'D20':网状裂缝 ]
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["人头"],
|
||||
"labelIndexs":["SL01"]
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
|
||||
"labelnames":[ "人头","人","船只" ],
|
||||
"labelIndexs":[ "SL001","SL002","SL003" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":["烟花"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["林斑","病死树"],
|
||||
"labelIndexs":["SL031","SL032"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["林斑","病死树","行人","火焰","烟雾","云朵"],
|
||||
"labelIndexs":["SL031","SL032","SL033","SL034","SL035","SL036","SL037"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["林斑","病死树","行人","火焰","烟雾","人群"],
|
||||
"labelIndexs":["SL031","SL032","SL033","SL034","SL035","SL036","SL037"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
name,r,g,b,cls
|
||||
0,0,0,0,bg
|
||||
1,128,0,0,road
|
||||
2,0,128,0,vehicle
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
|
||||
"labelnames":["行人","车辆","裂缝","裂缝","修补","裂缝","坑槽","裂缝","积水", "影子","事故"],
|
||||
"labelnames_实际":["行人","车辆","纵向裂缝","横向裂缝","修补","网状裂纹","坑槽","块状裂纹","积水","影子","事故"],
|
||||
"labelIndexs":["SL01","SL02","SL03","SL04","SL05","SL06","SL007","SL008","SL009","SL010","SL011" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":["危化品","罐体","危险标识","普通车"]
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"post_process":{ "name":"post_process","conf_thres":0.88,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":["抛洒物","车辆"]
|
||||
}
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.5,"iou_thres":0.25,"classes":3,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
|
||||
"labelnames":[ "车","T角点","L角点","违停" ],
|
||||
"labelIndexs":[ "SL001","SL002","SL003","SL004" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":["行人"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":1,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":["火","烟"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":1,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
Binary file not shown.
|
|
@ -0,0 +1,5 @@
|
|||
name,cls
|
||||
背景,0
|
||||
道路,1
|
||||
车道线,2
|
||||
车辆,3
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
|
||||
"labelnames":[ "车辆","违停" ],
|
||||
"labelIndexs":[ "SL001","SL002" ]
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
GPUID: 0
|
||||
WORKERS: 1
|
||||
PRINT_FREQ: 10
|
||||
SAVE_FREQ: 10
|
||||
PIN_MEMORY: False
|
||||
OUTPUT_DIR: 'output'
|
||||
|
||||
CUDNN:
|
||||
BENCHMARK: True
|
||||
DETERMINISTIC: False
|
||||
ENABLED: True
|
||||
|
||||
DATASET:
|
||||
DATASET: 360CC
|
||||
ROOT: "../textGenerator/dataset/dataset9/images"
|
||||
CHAR_FILE: '../textGenerator/dataset/dataset9/chars.txt'
|
||||
JSON_FILE: {'train': '../textGenerator/dataset/dataset9/train.txt', 'val': '../textGenerator/dataset/dataset9/val.txt'}
|
||||
# JSON_FILE: {'train': 'H:/DL-DATASET/360M/train.txt', 'val': 'H:/DL-DATASET/360M/test.txt'}
|
||||
SCALE_FACTOR: 0.25
|
||||
ROT_FACTOR: 30
|
||||
STD: 0.193
|
||||
MEAN: 0.588
|
||||
ALPHABETS: ''
|
||||
|
||||
TRAIN:
|
||||
BATCH_SIZE_PER_GPU: 32
|
||||
SHUFFLE: True
|
||||
BEGIN_EPOCH: 0
|
||||
END_EPOCH: 100
|
||||
RESUME:
|
||||
IS_RESUME: False
|
||||
FILE: 'output/360CC/crnn/2023-04-27-13-01/checkpoints/checkpoint_99_acc_0.5030.pth'
|
||||
OPTIMIZER: 'adam'
|
||||
LR: 0.0001
|
||||
WD: 0.0
|
||||
LR_STEP: [60, 80]
|
||||
LR_FACTOR: 0.1
|
||||
MOMENTUM: 0.0
|
||||
NESTEROV: False
|
||||
RMSPROP_ALPHA:
|
||||
RMSPROP_CENTERED:
|
||||
FINETUNE:
|
||||
IS_FINETUNE: False
|
||||
FINETUNE_CHECKPOINIT: 'output/checkpoints/mixed_second_finetune_acc_97P7.pth'
|
||||
FREEZE: true
|
||||
|
||||
TEST:
|
||||
BATCH_SIZE_PER_GPU: 16
|
||||
SHUFFLE: True # for random test rather than test on the whole validation set
|
||||
NUM_TEST_BATCH: 1000
|
||||
NUM_TEST_DISP: 10
|
||||
|
||||
MODEL:
|
||||
NAME: 'crnn'
|
||||
IMAGE_SIZE:
|
||||
OW: 160 # original width: 280
|
||||
H: 32
|
||||
W: 160 # resized width: 160
|
||||
NUM_CLASSES: 0
|
||||
NUM_HIDDEN: 256
|
||||
|
||||
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,92 @@
|
|||
a
|
||||
b
|
||||
c
|
||||
d
|
||||
e
|
||||
f
|
||||
g
|
||||
h
|
||||
i
|
||||
j
|
||||
k
|
||||
l
|
||||
m
|
||||
n
|
||||
o
|
||||
p
|
||||
q
|
||||
r
|
||||
s
|
||||
t
|
||||
u
|
||||
v
|
||||
w
|
||||
x
|
||||
y
|
||||
z
|
||||
A
|
||||
B
|
||||
C
|
||||
D
|
||||
E
|
||||
F
|
||||
G
|
||||
H
|
||||
I
|
||||
J
|
||||
K
|
||||
L
|
||||
M
|
||||
N
|
||||
O
|
||||
P
|
||||
Q
|
||||
R
|
||||
S
|
||||
T
|
||||
U
|
||||
V
|
||||
W
|
||||
X
|
||||
Y
|
||||
Z
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
°
|
||||
!
|
||||
"
|
||||
#
|
||||
$
|
||||
%
|
||||
&
|
||||
'
|
||||
(
|
||||
)
|
||||
*
|
||||
+
|
||||
,
|
||||
-
|
||||
.
|
||||
/
|
||||
:
|
||||
;
|
||||
?
|
||||
@
|
||||
[
|
||||
\
|
||||
]
|
||||
^
|
||||
_
|
||||
`
|
||||
{
|
||||
|
|
||||
}
|
||||
~
|
||||
|
|
@ -0,0 +1 @@
|
|||
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789°!"#$%&'()*+,-./:;?@[\]^_`{|}~
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1 @@
|
|||
0123456789!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ €ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
|
||||
|
Can't render this file because it contains an unexpected character in line 1 and column 12.
|
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":[ "光伏板","覆盖物","裂缝" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.5,"iou_thres":0.25,"classes":3,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["行人"],
|
||||
"labelIndexs":["SL01"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
Binary file not shown.
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
|
||||
"labelnames":["坑槽"],
|
||||
"labelnames_实际":["坑槽"],
|
||||
"labelIndexs":["SL01" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{
|
||||
"name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9,
|
||||
"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames":["排口","水生植被","其它","漂浮物","污染排口","菜地","违建","岸坡垃圾"],
|
||||
"labelIndexs":["SL04","SL011","SL013","SL001","SL001","SL002","SL003","SL004" ],
|
||||
"labelOrders":[0,1,2,3,4,5,6,7]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames":[ "漂浮物","垃圾","排口","非法建筑","非法种植","水生植物","游泳人员","钓鱼人员","船只","蓝藻"] ,
|
||||
"labelIndexs":[ "SL04","SL05","SL06","SL07","SL08","SL09","SL10","SL11","SL12","SL13" ],
|
||||
"labelOrders":[0,1,2,3,4,5,6,7,8,9]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.3,"ovlap_thres_crossCategory":0.65,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames":[ "漂浮物","垃圾","排口","非法建筑","非法种植","水生植物","游泳人员","钓鱼人员","船只","蓝藻"] ,
|
||||
"labelIndexs":[ "SL04","SL05","SL06","SL07","SL08","SL09","SL10","SL11","SL12","SL13" ],
|
||||
"labelOrders":[0,1,2,3,4,5,6,7,8,9]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.3,"ovlap_thres_crossCategory":0.65,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames_实际":["纵向裂缝","横向裂缝","修补","网状裂纹","坑槽","块状裂纹","积水"],
|
||||
"labelnames":["裂缝","裂缝","修补","裂缝","坑槽","裂缝","积水"],
|
||||
"labelIndexs":["SL030","SL031","SL032","SL033","SL034","SL035","SL036"]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,
|
||||
"rainbows":[[0,0,255],[0,0,255],[255,0,0],[0,0,255],[255,255,0],[0,0,255],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"labelnames":["建筑垃圾","白色垃圾","其他垃圾"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["船只"],
|
||||
"labelIndexs":["SL01"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
name,r,g,b,cls
|
||||
0,0,0,0,bg
|
||||
1,128,0,0,road
|
||||
2,0,128,0,vehicle
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames_实际":[ "0","1","2","3","4","5","6","7","8","9","10","11","12","13","boat" ],
|
||||
"labelnames":[ "0","1","2","3","4","5","6","7","8","9","10","11","12","13","船只" ],
|
||||
"labelIndexs":["SL050", "SL051", "SL052", "SL053", "SL054", "SL055", "SL056", "SL057", "SL058", "SL059", "SL060", "SL061", "SL062","SL063", "SL064" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,
|
||||
"rainbows":[[0,0,255],[0,0,255],[255,0,0],[0,0,255],[255,255,0],[0,0,255],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":[ "工人","塔式起重机","悬臂","起重机","压路机","推土机","挖掘机","卡车","装载机","泵车","混凝土搅拌车","打桩","其他车辆" ],
|
||||
"labelIndexs":["SL041", "SL042","SL043","SL044","SL045","SL046","SL047","SL048","SL049","SL050","SL051","SL052","SL053" ]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":13,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["火焰","烟雾"],
|
||||
"labelIndexs":["SL01","SL02"]
|
||||
}
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
name,r,g,b,cls
|
||||
0,0,0,0,bg
|
||||
1,128,0,0,road
|
||||
2,0,128,0,vehicle
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"labelnames_实际":["事故"],
|
||||
"labelnames":["事故"],
|
||||
"labelIndexs":["SL040"]
|
||||
}
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
|
||||
|
||||
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,
|
||||
"rainbows":[[0,0,255],[0,0,255],[255,0,0],[0,0,255],[255,255,0],[0,0,255],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]]
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"labelnames":["车辆"],
|
||||
"labelIndexs":["SL01"]
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue