from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import kafka_errors
import traceback
import json, base64, os
import numpy as np
from multiprocessing import Process, Queue
import time, cv2, string, random
import subprocess as sp
import matplotlib.pyplot as plt
from utils.datasets import LoadStreams, LoadImages
from models.experimental import attempt_load
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, overlap_box_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
import torch, sys
#from segutils.segmodel import SegModel,get_largest_contours
#sys.path.extend(['../yolov5/segutils'])
from segutils.segWaterBuilding import SegModel, get_largest_contours, illBuildings
#from segutils.core.models.bisenet import BiSeNet
from segutils.core.models.bisenet import BiSeNet_MultiOutput
from utils.plots import plot_one_box, plot_one_box_PIL, draw_painting_joint, get_label_arrays, get_websource
from collections import Counter
#import matplotlib
import matplotlib.pyplot as plt
# get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str
#FP_DEBUG=open('debut.txt','w')
def bsJpgCode(image_ori):
    jpgCode = cv2.imencode('.jpg', image_ori)[-1]    ### np.array, e.g. shape (4502009, 1)
    bsCode = str(base64.b64encode(jpgCode))[2:-1]    ### str, e.g. length 6002680
    return bsCode
def bsJpgDecode(bsCode):
    bsDecode = base64.b64decode(bsCode)              ### bytes, e.g. length 4502009
    npString = np.frombuffer(bsDecode, np.uint8)     ### np.array, shape (4502009,)
    jpgDecode = cv2.imdecode(npString, cv2.IMREAD_COLOR)  ### np.array, e.g. (3000, 4000, 3)
    return jpgDecode
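# Usage sketch (illustrative, not part of the original pipeline): round-trip a frame
# through the base64 JPEG helpers above. The input array below is synthetic.
def _demo_bsJpg_roundtrip():
    frame = np.zeros((480, 640, 3), dtype=np.uint8)   # placeholder BGR frame
    bs_code = bsJpgCode(frame)                        # base64 string, safe to embed in a JSON/Kafka message
    frame_back = bsJpgDecode(bs_code)                 # decoded back to a BGR uint8 array
    # JPEG is lossy, so only the shape is guaranteed to match exactly
    assert frame_back.shape == frame.shape
    return frame_back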
def get_ms(time0, time1):
    str_time = '%.2f ms' % ((time1 - time0) * 1000)
    return str_time
rainbows = [
    (0,0,255),(0,255,0),(255,0,0),(255,0,255),(255,255,0),(255,127,0),(255,0,127),
    (127,255,0),(0,255,127),(0,127,255),(127,0,255),(255,127,255),(255,255,127),
    (127,255,255),(0,255,255),(255,127,255),(127,255,255),
    (0,127,0),(0,0,127),(0,255,255)
]
def get_labelnames(labelnames):
    with open(labelnames, 'r') as fp:
        namesjson = json.load(fp)
    names_fromfile = namesjson['labelnames']
    names = names_fromfile
    return names
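# Illustrative sketch (placeholder class names, not the project's real label set):
# get_labelnames expects a JSON file with a top-level "labelnames" list, e.g.
#   {"labelnames": ["float", "boat", "person"]}
def _demo_write_labelnames(path='labelnames_demo.json'):
    with open(path, 'w') as fp:
        json.dump({'labelnames': ['float', 'boat', 'person']}, fp)
    return get_labelnames(path)   # -> ['float', 'boat', 'person']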
def check_stream(stream):
    cap = cv2.VideoCapture(stream)
    if cap.isOpened():
        return True
    else:
        return False
#####
def drawWater(pred, image_array0, river={'color':(0,255,255), 'line_width':3, 'segRegionCnt':2, 'segLineShow':True}):
    #### pred is the segmentation model output; only the water-segmentation task is handled here
    ## Draw the water region
    contours, hierarchy = cv2.findContours(pred, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    water = pred.copy(); water[:,:] = 0
    if len(contours) == 0:
        return image_array0, water
    max_ids = get_largest_contours(contours, river['segRegionCnt'])
    for max_id in max_ids:
        cv2.fillPoly(water, [contours[max_id][:,0,:]], 1)
        if river['segLineShow']:
            cv2.drawContours(image_array0, contours, max_id, river['color'], river['line_width'])
    return image_array0, water
def scale_back(boxes, padInfos):
    top, left, r = padInfos[0:3]
    boxes[:,0] = (boxes[:,0] - left) * r
    boxes[:,2] = (boxes[:,2] - left) * r
    boxes[:,1] = (boxes[:,1] - top) * r
    boxes[:,3] = (boxes[:,3] - top) * r
    return boxes
def img_pad(img, size, pad_value=[114,114,114]):
    ### pad the image to a fixed size
    H, W, _ = img.shape
    r = max(H/size[0], W/size[1])
    img_r = cv2.resize(img, (int(W/r), int(H/r)))
    tb = size[0] - img_r.shape[0]
    lr = size[1] - img_r.shape[1]
    top = int(tb/2)
    bottom = tb - top
    left = int(lr/2)
    right = lr - left
    pad_image = cv2.copyMakeBorder(img_r, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_value)
    return pad_image, (top, left, r)
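# Usage sketch (illustrative): img_pad letterboxes a frame to the network input size and
# returns (top, left, r); scale_back uses that tuple to map boxes from the padded image
# back to the original resolution. The frame and box values below are made up.
def _demo_pad_and_scale_back():
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)     # placeholder 1280x720 frame
    padded, padInfos = img_pad(frame, size=(640, 640))   # padInfos == (top, left, r)
    boxes = np.array([[100.0, 200.0, 300.0, 400.0]])     # fake box in padded-image coords (x0, y0, x1, y1)
    boxes_ori = scale_back(boxes.copy(), padInfos)       # same box in original-frame coords
    return padded.shape, boxes_ori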
def post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, iframe, ObjectPar={'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7], 'segmodel':True, 'segRegionCnt':1}, font={'line_thickness':None, 'fontSize':None, 'boxLine_thickness':None, 'waterLineColor':(0,255,255), 'waterLineWidth':3}, padInfos=None, ovlap_thres=None):
    object_config, slopeIndex, segmodel, segRegionCnt = ObjectPar['object_config'], ObjectPar['slopeIndex'], ObjectPar['segmodel'], ObjectPar['segRegionCnt']
    ## Input: the data produced by the dataset generator, the model prediction pred, and the NMS parameters
    ## Main steps: NMS ---> coordinate conversion ---> drawing
    ## Output: the original image, the AI-annotated image, and the detection results
    time0 = time.time()
    path, img, im0s, vid_cap, pred, seg_pred = datas[0:6]
    #segmodel=True
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    if ovlap_thres:
        pred = overlap_box_suppression(pred, ovlap_thres)
    time1 = time.time()
    i = 0; det = pred[0]  ### one image is processed per call
    time1_1 = time.time()
    #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
    p, s, im0 = path[i], '%g: ' % i, im0s[i]
    time1_2 = time.time()
    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
    time1_3 = time.time()
    det_xywh = []
    #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR);
    if segmodel:
        if len(seg_pred) == 2:
            im0, water = illBuildings(seg_pred, im0)
        else:
            river = {'color':font['waterLineColor'], 'line_width':font['waterLineWidth'], 'segRegionCnt':segRegionCnt, 'segLineShow':font['segLineShow']}
            im0, water = drawWater(seg_pred, im0, river)
    time2 = time.time()
    #plt.imshow(im0);plt.show()
    if len(det) > 0:
        # Rescale boxes from img_size to im0 size
        if not padInfos:
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
        else:
            #print('####line131:',det[:, :])
            det[:, :4] = scale_back(det[:, :4], padInfos).round()
            #print('####line133:',det[:, :])
        # Use the segmentation result to keep only valid detection boxes and the river contour line
        if segmodel:
            cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32)
            ## Decide which targets belong to the bank slope
            slope_flag = np.array([x in slopeIndex for x in cls_indexs])
            det_c = det.clone(); det_c = det_c.cpu().numpy()
            try:
                area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])])*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c])
            except:
                print('*****************************line143: error:', det_c)
            water_flag = np.array(area_factors > 0.1)
            det = det[water_flag | slope_flag]  ## on-water targets must overlap the water region by more than 0.1; bank-slope targets are kept unconditionally
        # Draw the detection boxes
        for *xyxy, conf, cls in reversed(det):
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            cls_c = cls.cpu().numpy()
            conf_c = conf.cpu().numpy()
            tt = [int(x.cpu()) for x in xyxy]
            #line = [float(cls_c), *tt, float(conf_c)] # label format
            line = [*tt, float(conf_c), float(cls_c)]  # label format
            det_xywh.append(line)
            label = f'{names[int(cls)]} {conf:.2f}'
            #print('- '*20, ' line165:',xyxy,cls,conf )
            if int(cls_c) not in object_config:  ### skip targets that are not of interest
                continue
            #print('- '*20, ' line168:',xyxy,cls,conf )
            im0 = draw_painting_joint(xyxy, im0, label_arraylist[int(cls)], score=conf, color=rainbows[int(cls)%20], font=font)
    time3 = time.time()
    strout = 'nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s ' % (get_ms(time0,time1), get_ms(time1,time2), get_ms(time1_1,time1_2), get_ms(time1_2,time1_3), get_ms(time2,time3))
    return [im0s[0], im0, det_xywh, iframe], strout
def getDetections(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, iframe, ObjectPar={'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7], 'segmodel':True, 'segRegionCnt':1}, font={'line_thickness':None, 'fontSize':None, 'boxLine_thickness':None, 'waterLineColor':(0,255,255), 'waterLineWidth':3}, padInfos=None, ovlap_thres=None):
    object_config, slopeIndex, segmodel, segRegionCnt = ObjectPar['object_config'], ObjectPar['slopeIndex'], ObjectPar['segmodel'], ObjectPar['segRegionCnt']
    ## Input: the data produced by the dataset generator, the model prediction pred, and the NMS parameters
    ## Main steps: NMS ---> coordinate conversion ---> drawing
    ## Output: the original image, the AI-annotated image, and the detection results
    time0 = time.time()
    path, img, im0s, vid_cap, pred, seg_pred = datas[0:6]
    #segmodel=True
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    if ovlap_thres:
        pred = overlap_box_suppression(pred, ovlap_thres)
    time1 = time.time()
    i = 0; det = pred[0]  ### one image is processed per call
    time1_1 = time.time()
    #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
    p, s, im0 = path[i], '%g: ' % i, im0s[i]
    time1_2 = time.time()
    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
    time1_3 = time.time()
    det_xywh = []
    #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR);
    if segmodel:
        if len(seg_pred) == 2:
            im0, water = illBuildings(seg_pred, im0)
        else:
            river = {'color':font['waterLineColor'], 'line_width':font['waterLineWidth'], 'segRegionCnt':segRegionCnt, 'segLineShow':font['segLineShow']}
            im0, water = drawWater(seg_pred, im0, river)
    time2 = time.time()
    #plt.imshow(im0);plt.show()
    if len(det) > 0:
        # Rescale boxes from img_size to im0 size
        if not padInfos:
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
        else:
            #print('####line131:',det[:, :])
            det[:, :4] = scale_back(det[:, :4], padInfos).round()
            #print('####line133:',det[:, :])
        # Use the segmentation result to keep only valid detection boxes and the river contour line
        if segmodel:
            cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32)
            ## Decide which targets belong to the bank slope
            slope_flag = np.array([x in slopeIndex for x in cls_indexs])
            det_c = det.clone(); det_c = det_c.cpu().numpy()
            try:
                area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])])*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c])
            except:
                print('*****************************line143: error:', det_c)
            water_flag = np.array(area_factors > 0.1)
            det = det[water_flag | slope_flag]  ## on-water targets must overlap the water region by more than 0.1; bank-slope targets are kept unconditionally
        # Collect the detection boxes (this variant does not draw them)
        for *xyxy, conf, cls in reversed(det):
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            cls_c = cls.cpu().numpy()
            conf_c = conf.cpu().numpy()
            tt = [int(x.cpu()) for x in xyxy]
            line = [float(cls_c), *tt, float(conf_c)]  # label format
            det_xywh.append(line)
            label = f'{names[int(cls)]} {conf:.2f}'
            if int(cls_c) not in object_config:  ### skip targets that are not of interest
                continue
    time3 = time.time()
    strout = 'nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s ' % (get_ms(time0,time1), get_ms(time1,time2), get_ms(time1_1,time1_2), get_ms(time1_2,time1_3), get_ms(time2,time3))
    return [im0s[0], im0, det_xywh, iframe], strout
def riverDetSegMixProcess(preds, water, pars={'slopeIndex':list(range(20)), 'riverIou':0.1}):
    '''
    Inputs:
        preds: 2-D list of previous detection results, one row per box, in the format [x0, y0, x1, y1, score, cls]
        water: 2-D array of 0/1 values; 1 -- water region, 0 -- background.
        pars: parameters other than preds and water, as a dict:
            slopeIndex: class indices of targets located on the bank slope
            riverIou: overlap-ratio threshold between an in-water target and the water region
    Outputs:
        det: the filtered detection results
    '''
    assert 'slopeIndex' in pars.keys(), 'input para keys error,No: slopeIndex'
    assert 'riverIou' in pars.keys(), 'input para keys error, No: riverIou'
    time0 = time.time()
    slopeIndex, riverIou = pars['slopeIndex'], pars['riverIou']
    if len(preds) > 0:
        preds = np.array(preds)
        cls_indexs = [int(x[5]) for x in preds]
        #area_factors= np.array([np.sum(water[int(x[2]):int(x[4]), int(x[1]):int(x[3])] )*1.0/(1.0*(x[3]-x[1])*(x[4]-x[2])+0.00001) for x in preds] )
        area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])])*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in preds])
        slope_flag = np.array([x in slopeIndex for x in cls_indexs])
        water_flag = np.array(area_factors > riverIou)
        det = preds[water_flag | slope_flag]  ## on-water targets must overlap the water region by more than riverIou; bank-slope targets are kept unconditionally
    else:
        det = []
    #print('##'*20,det)
    time1 = time.time()
    timeInfos = 'all: %.1f ' % ((time1 - time0))
    return det, timeInfos
def riverDetSegMixProcess_N(predList, pars={'slopeIndex':list(range(20)), 'riverIou':0.1}):
    preds, water = predList[0:2]
    return riverDetSegMixProcess(preds, water, pars=pars)
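# Usage sketch (illustrative): filter synthetic detections against a synthetic water mask.
# Boxes use the [x0, y0, x1, y1, score, cls] layout that the function indexes; the values
# and the slopeIndex list below are made up.
def _demo_riverDetSegMixProcess():
    water = np.zeros((100, 100), dtype=np.uint8)
    water[50:, :] = 1                                  # bottom half of the frame is "water"
    preds = [[10, 60, 30, 90, 0.9, 0],                 # box fully inside the water region -> kept
             [10, 5, 30, 40, 0.8, 1]]                  # box on land, class 1 not in slopeIndex -> dropped
    det, timeInfos = riverDetSegMixProcess(preds, water, pars={'slopeIndex': [2, 3], 'riverIou': 0.1})
    return det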
def getDetectionsFromPreds(pred, img, im0, conf_thres=0.2, iou_thres=0.45, ovlap_thres=0.6, padInfos=None):
    '''
    Inputs:
        pred -- the raw output of the detection model
        img -- the image that was fed into the detection model
        im0 -- the original image
        conf_thres -- confidence threshold for the first NMS pass
        iou_thres -- IoU threshold for the first NMS pass
        ovlap_thres -- IoU threshold for the second NMS pass
        padInfos -- padding information from the resize step
    Outputs:
        img, im0 -- same as the inputs
        det_xywh -- 2-D list of detection results in the format [x0, y0, x1, y1, score, cls]
        strout -- timing information
    '''
    time0 = time.time()
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    if ovlap_thres:
        pred = overlap_box_suppression(pred, ovlap_thres)
    time1 = time.time()
    i = 0; det = pred[0]  ### one image is processed per call
    det_xywh = []
    if len(det) > 0:
        # Rescale the coordinates back to the original image size
        H, W = im0.shape[0:2]
        det[:, :4] = scale_back(det[:, :4], padInfos).round() if padInfos else scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
        # Convert the coordinate format, and move the tensors to numpy arrays on the CPU
        for *xyxy, conf, cls in reversed(det):
            cls_c = cls.cpu().numpy()
            conf_c = conf.cpu().numpy()
            tt = [int(x.cpu()) for x in xyxy]
            x0, y0, x1, y1 = tt[0:4]
            x0 = max(0, x0); y0 = max(0, y0)
            x1 = min(W-1, x1); y1 = min(H-1, y1)
            #line = [float(cls_c), *tt, float(conf_c)] # label format
            line = [x0, y0, x1, y1, float(conf_c), float(cls_c)]  # label format, changed 2023.08.03
            #print('###line305:',line)
            det_xywh.append(line)
    time2 = time.time()
    strout = 'nms:%s scaleback:%s ' % (get_ms(time0,time1), get_ms(time1,time2))
    return [im0, im0, det_xywh, 0], strout  ### the trailing 0 has no meaning; it only keeps the 4-element layout used before
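# Usage sketch (illustrative; `model` and `device` are placeholders for whatever detector
# this repo loads with attempt_load, and the preprocessing line is only indicative):
#   im0 = cv2.imread('frame.jpg')                                   # original frame
#   img, padInfos = img_pad(im0, size=(640, 640))                   # letterbox to network size
#   img_t = torch.from_numpy(img.transpose(2, 0, 1)[None]).float() / 255.0
#   pred = model(img_t.to(device), augment=False)[0]
#   p_result, timeInfo = getDetectionsFromPreds(pred, img_t, im0,
#                                               conf_thres=0.2, iou_thres=0.45,
#                                               ovlap_thres=0.6, padInfos=padInfos)
#   dets = p_result[2]   # [[x0, y0, x1, y1, score, cls], ...]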
def detectDraw(im0, dets, label_arraylist, rainbows, font):
    # each det is expected in the [cls, x0, y0, x1, y1, score] layout
    for det in dets:
        xyxy = det[1:5]
        cls = det[0]
        conf = det[5]
        im0 = draw_painting_joint(xyxy, im0, label_arraylist[int(cls)], score=conf, color=rainbows[int(cls)%20], font=font)
    return im0
def preprocess(par):
    print('#####process:', par['name'])
    ## Read the video stream and generate both the original frame and the frame used for detection, as numpy arrays
    #source='rtmp://liveplay.yunhengzhizao.cn/live/demo_HD5M'
    #img_size=640; stride=32
    while True:
        cap = cv2.VideoCapture(par['source'])
        iframe = 0
        if cap.isOpened():
            print('#### read %s success!' % (par['source']))
            try:
                dataset = LoadStreams(par['source'], img_size=640, stride=32)
                for path, img, im0s, vid_cap in dataset:
                    datas = [path, img, im0s, vid_cap, iframe]
                    par['queOut'].put(datas)
                    iframe += 1
            except Exception as e:
                print('###read error:%s ' % (par['source']))
                time.sleep(10)
                iframe = 0
        else:
            print('###read error:%s ' % (par['source']))
            time.sleep(10)
            iframe = 0
def gpu_process(par):
    print('#####process:', par['name'])
    half = True
    ## GPU inference: detection model
    weights = par['weights']
    device = par['device']
    print('###line127:', par['device'])
    model = attempt_load(par['weights'], map_location=par['device'])  # load FP32 model
    if half:
        model.half()
    ## GPU inference: segmentation model
    seg_nclass = par['seg_nclass']
    seg_weights = par['seg_weights']
    #segmodel = SegModel(nclass=seg_nclass,weights=seg_weights,device=device)
    nclass = [2, 2]
    Segmodel = BiSeNet_MultiOutput(nclass)
    weights = 'weights/segmentation/WaterBuilding.pth'
    segmodel = SegModel(model=Segmodel, nclass=nclass, weights=weights, device='cuda:0', multiOutput=True)
    while True:
        if not par['queIn'].empty():
            time0 = time.time()
            datas = par['queIn'].get()
            path, img, im0s, vid_cap, iframe = datas[0:5]
            time1 = time.time()
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            time2 = time.time()
            pred = model(img, augment=False)[0]
            time3 = time.time()
            seg_pred = segmodel.eval(im0s[0], outsize=None, smooth_kernel=20)
            time4 = time.time()
            fpStr = 'process:%s ,iframe:%d,getdata:%s,copygpu:%s,dettime:%s,segtime:%s , time:%s, queLen:%d ' % (par['name'], iframe, get_ms(time0,time1), get_ms(time1,time2), get_ms(time2,time3), get_ms(time3,time4), get_ms(time0,time4), par['queIn'].qsize())
            #FP_DEBUG.write( fpStr+'\n' )
            datasOut = [path, img, im0s, vid_cap, pred, seg_pred, iframe]
            par['queOut'].put(datasOut)
            if par['debug']:
                print('#####process:', par['name'], ' line107')
        else:
            time.sleep(1/300)
def get_cls(array):
    dcs = Counter(array)
    keys = list(dcs.keys())
    values = list(dcs.values())
    max_index = values.index(max(values))
    cls = int(keys[max_index])
    return cls
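# Example (illustrative): get_cls returns the most frequent class id in a sequence,
#   get_cls([1, 1, 2, 1, 3]) -> 1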
def save_problem_images(post_results, iimage_cnt, names, streamName='live-THSAHD5M', outImaDir='problems/images_tmp', imageTxtFile=False):
    ## [cls, x,y,w,h, conf]
    problem_image = [[] for i in range(6)]
    dets_list = [x[2] for x in post_results]
    mean_scores = [np.array(x)[:,5].mean() for x in dets_list]  ### mean conf
    best_index = mean_scores.index(max(mean_scores))  ## index of the problem image within this batch
    best_frame = post_results[best_index][3]  ## absolute frame number
    img_send = post_results[best_index][1]  ## AI-annotated image
    img_bak = post_results[best_index][0]  ## original image
    cls_max = get_cls(x[5] for x in dets_list[best_index])
    time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    uid = ''.join(random.sample(string.ascii_letters + string.digits, 16))
    #ori_name = '2022-01-20-15-57-36_frame-368-720_type-漂浮物_qVh4zI08ZlwJN9on_s-live-THSAHD5M_OR.jpg'
    #2022-01-13-15-07-57_frame-9999-9999_type-结束_9999999999999999_s-off-XJRW20220110115904_AI.jpg
    outnameOR = '%s/%s_frame-%d-%d_type-%s_%s_s-%s_AI.jpg' % (outImaDir, time_str, best_frame, iimage_cnt, names[cls_max], uid, streamName)
    outnameAR = '%s/%s_frame-%d-%d_type-%s_%s_s-%s_OR.jpg' % (outImaDir, time_str, best_frame, iimage_cnt, names[cls_max], uid, streamName)
    cv2.imwrite(outnameOR, img_send)
    try:
        cv2.imwrite(outnameAR, img_bak)
    except:
        print(outnameAR, type(img_bak), img_bak.size())
    if imageTxtFile:
        outnameOR_txt = outnameOR.replace('.jpg', '.txt')
        fp = open(outnameOR_txt, 'w'); fp.write(outnameOR+'\n'); fp.close()
        outnameAI_txt = outnameAR.replace('.jpg', '.txt')
        fp = open(outnameAI_txt, 'w'); fp.write(outnameAR+'\n'); fp.close()
    parOut = {}; parOut['imgOR'] = img_send; parOut['imgAR'] = img_send; parOut['uid'] = uid
    parOut['imgORname'] = os.path.basename(outnameOR); parOut['imgARname'] = os.path.basename(outnameAR)
    parOut['time_str'] = time_str; parOut['type'] = names[cls_max]
    return parOut
def post_process(par):
    print('#####process:', par['name'])
    ### post-processing parameters
    conf_thres, iou_thres, classes = par['conf_thres'], par['iou_thres'], par['classes']
    labelnames = par['labelnames']
    rainbows = par['rainbows']
    fpsample = par['fpsample']
    names = get_labelnames(labelnames)
    label_arraylist = get_label_arrays(names, rainbows, outfontsize=40)
    iimage_cnt = 0
    post_results = []
    while True:
        if not par['queIn'].empty():
            time0 = time.time()
            datas = par['queIn'].get()
            iframe = datas[6]
            if par['debug']:
                print('#####process:', par['name'], ' line129')
            p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, iframe)
            par['queOut'].put(p_result)
            ## Output the results
            ## Every fpsample frames, process once; save the images if a problem was found
            if (iframe % fpsample == 0) and (len(post_results) > 0):
                #print('####line204:',iframe,post_results)
                save_problem_images(post_results, iframe, names)
                post_results = []
            if len(p_result[2]) > 0:
                #post_list = p_result.append(iframe)
                post_results.append(p_result)
                #print('####line201:',type(p_result))
            time1 = time.time()
            outstr = 'process:%s ,iframe:%d,%s , time:%s, queLen:%d ' % (par['name'], iframe, timeOut, get_ms(time0,time1), par['queIn'].qsize())
            #FP_DEBUG.write(outstr +'\n')
            #print( 'process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() ) )
        else:
            time.sleep(1/300)
def save_logfile(name, txt):
    if os.path.exists(name):
        fp = open(name, 'r+')
    else:
        fp = open(name, 'w')
    fp.write('%s %s \n' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), txt))
    fp.close()
def time_str():
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if __name__ == '__main__':
    jsonfile = 'config/queRiver.json'
    #image_encode_decode()
    work_stream(jsonfile)
    #par={'name':'preprocess'}
    #preprocess(par)