import sys
sys.path.extend(['/home/thsw2/WJ/src/yolov5'])
import utils, json, time, torch
import numpy as np
from segutils.segmodel import SegModel, get_largest_contours
from models.experimental import attempt_load
from utils.torch_utils import select_device, load_classifier, time_synchronized
import subprocess as sp
import cv2
from utils.datasets import LoadStreams, LoadImages
from queRiver import get_labelnames, get_label_arrays, post_process_, save_problem_images, time_str


def get_total_cnt(inSource):
    # Return the total frame count of a local video file.
    cap = cv2.VideoCapture(inSource)
    cnt = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    cap.release()
    return cnt


def onlineModelProcess(parIn):
    streamName = parIn['streamName']
    childCallback = parIn['callback']
    try:
        inSource, outSource = parIn['inSource'], parIn['outSource']

        # Detection model
        weights = '../yolov5/weights/1230_last.pt'
        device = select_device('0')
        half = device.type != 'cpu'  # half precision only supported on CUDA
        model = attempt_load(weights, map_location=device)  # load FP32 model
        if half:
            model.half()

        # Segmentation model
        seg_nclass = 2
        weights = '../yolov5/weights/segmentation/BiSeNet/checkpoint.pth'
        segmodel = SegModel(nclass=seg_nclass, weights=weights, device=device)

        jsonfile = '../yolov5/config/queRiver.json'
        with open(jsonfile, 'r') as fp:
            parAll = json.load(fp)
        resource = parAll['prep_process']['source']

        if outSource:
            command = ['ffmpeg', '-y', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-pix_fmt', 'bgr24',
                       '-s', "{}x{}".format(parAll["push_process"]['OutVideoW'],
                                            parAll["push_process"]['OutVideoH']),  # output resolution
                       '-r', str(30),  # output frame rate
                       '-i', '-', '-c:v', 'libx264', '-pix_fmt', 'yuv420p',
                       '-f', 'flv', outSource]

        txtname = 'mintors/%s.txt' % (time.strftime("%Y-%m-%d", time.localtime()))
        fp_out = open(txtname, 'a+')
        outstr = '%s stream:%s starts \n' % (time_str(), parAll['push_process']['rtmpUrl'])
        fp_out.write(outstr); fp_out.flush()

        # Open the ffmpeg push pipe (annotated frames are written to its stdin).
        if outSource:
            ppipe = sp.Popen(command, stdin=sp.PIPE)

        # Post-processing parameters
        par = parAll['post_process']
        conf_thres, iou_thres, classes = par['conf_thres'], par['iou_thres'], par['classes']
        labelnames = par['labelnames']
        rainbows = par['rainbows']
        fpsample = par['fpsample']
        names = get_labelnames(labelnames)
        label_arraylist = get_label_arrays(names, rainbows, outfontsize=40)

        dataset = LoadStreams(inSource, img_size=640, stride=32)
        totalcnt = -1  # unknown for live streams; only computed for local video files
        if inSource.endswith('.MP4') or inSource.endswith('.mp4'):
            totalcnt = get_total_cnt(inSource)
        childCallback.send('####model load success####')

        iframe = 0; post_results = []; time_beg = time.time()
        print('###line71 modelEval.py', totalcnt, len(dataset), inSource)
        for path, img, im0s, vid_cap in dataset:
            print(path)
            if not path:
                childCallback.send('####strem ends####')
                break  # stream dropped or end of video reached
            if not outSource:  # when not pushing, show a progress bar instead
                view_bar(iframe, totalcnt, time_beg)  # progress-bar helper; assumed to be provided elsewhere in the project
            time0 = time.time()
            iframe += 1
            time1 = time.time()
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            time2 = time.time()
            pred = model(img, augment=False)[0]
            time3 = time.time()
            seg_pred, segstr = segmodel.eval(im0s[0])
            time4 = time.time()
            datas = [path, img, im0s, vid_cap, pred, seg_pred, iframe]
            p_result, timeOut = post_process_(datas, conf_thres, iou_thres, names, label_arraylist, rainbows, iframe)

            # Every fpsample frames, flush any accumulated problem frames to disk as images.
            if (iframe % fpsample == 0) and (len(post_results) > 0):
                parImage = save_problem_images(post_results, iframe, names, streamName=streamName)
                # parOut = {}; parOut['imgOR'] = img_send; parOut['imgAR'] = img_send; parOut['uid'] = uid
                # parOut['imgORname'] = os.path.basename(outnameOR); parOut['imgARname'] = os.path.basename(outnameAR);
                # parOut['time_str'] = time_str; parOut['type'] = names[cls_max]
                post_results = []
            if len(p_result[2]) > 0:  # detections on this frame: queue it for saving
                post_results.append(p_result)
            image_array = p_result[1]
            if outSource:
                ppipe.stdin.write(image_array.tobytes())  # push the annotated frame to ffmpeg
    except Exception as e:
        childCallback.send(e)  # send the exception back to the parent through the pipe
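

# --- Hypothetical usage sketch (not part of the original script) -------------------
# onlineModelProcess expects parIn['callback'] to be the child end of a
# multiprocessing.Pipe: it sends '####model load success####', end-of-stream
# markers, and any exception back through it.  This minimal driver shows one way a
# parent process might launch it; the stream URLs and stream name are placeholders.
if __name__ == '__main__':
    import multiprocessing as mp

    parentConn, childConn = mp.Pipe()
    parIn = {
        'streamName': 'demo_stream',                 # used when naming saved problem images
        'callback': childConn,                       # onlineModelProcess calls .send() on this end
        'inSource': 'rtmp://example.com/live/in',    # placeholder pull address (or a local .mp4 path)
        'outSource': 'rtmp://example.com/live/out',  # placeholder push address; a falsy value disables pushing
    }
    proc = mp.Process(target=onlineModelProcess, args=(parIn,), daemon=True)
    proc.start()

    msg = parentConn.recv()                          # block until the child reports status
    if isinstance(msg, Exception):
        raise msg
    print('child process reports:', msg)             # e.g. '####model load success####'
    proc.join()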