def get_images_videos(impth, imageFixs=['.jpg','.JPG','.PNG','.png'],videoFixs=['.MP4','.mp4','.avi']):
    """Collect image and video paths from a directory or a single file.

    impth      -- a directory (scanned non-recursively per extension) or one file path
    imageFixs  -- extensions treated as images (case-sensitive match)
    videoFixs  -- extensions treated as videos
    Returns (imgpaths, videopaths); a single file lands in at most one list.
    """
    if os.path.isdir(impth):
        imgpaths = [p for ext in imageFixs for p in glob.glob('%s/*%s'%(impth, ext))]
        videopaths = [p for ext in videoFixs for p in glob.glob('%s/*%s'%(impth, ext))]
    else:
        ext = os.path.splitext(impth)[-1]
        imgpaths = [impth] if ext in imageFixs else []
        videopaths = [impth] if ext in videoFixs else []

    print('%s: test Images:%d , test videos:%d '%(impth, len(imgpaths), len(videopaths)))
    return imgpaths, videopaths

def xywh2xyxy(box, iW=None, iH=None):
    """Convert a normalized center box [xc,yc,w,h] to corner form [x0,y0,x1,y1].

    Corners are clamped to the unit square; if iW/iH are truthy the x/y
    coordinates are scaled to pixel units afterwards.
    """
    xc, yc, w, h = box[0:4]
    x0, x1 = max(0, xc - w/2.0), min(1, xc + w/2.0)
    y0, y1 = max(0, yc - h/2.0), min(1, yc + h/2.0)
    if iW:
        x0, x1 = x0*iW, x1*iW
    if iH:
        y0, y1 = y0*iH, y1*iH
    return [x0, y0, x1, y1]

def get_ms(t2, t1):
    """Elapsed time from t1 to t2 (seconds) expressed in milliseconds."""
    return (t2 - t1) * 1000.0
def get_postProcess_para_dic(parfile):
    """Load a JSON parameter file and return its 'post_process' section as a dict."""
    with open(parfile) as fp:
        return json.load(fp)['post_process']

def score_filter_byClass(pdetections, score_para_2nd):
    """Keep detections whose confidence beats a per-class threshold.

    Each detection is [x0,y0,x1,y1,score,cls].  The threshold is looked up in
    score_para_2nd by int(cls) first, then by str(int(cls)); 0.7 otherwise.
    """
    kept = []
    for det in pdetections:
        score, cls = det[4], det[5]
        key = int(cls)
        if key in score_para_2nd.keys():
            thr = score_para_2nd[key]
        elif str(key) in score_para_2nd.keys():
            thr = score_para_2nd[str(key)]
        else:
            thr = 0.7
        if score > thr:
            kept.append(det)
    return kept

# 按类过滤
def filter_byClass(pdetections, allowedList):
    """Keep detections whose class id matches allowedList as int or as str."""
    return [det for det in pdetections
            if int(det[5]) in allowedList or str(int(det[5])) in allowedList]

# 对ocr识别车牌格式化处理
def plat_format(ocr):
    """Normalize an OCR licence-plate reading.

    ocr[0] is the raw string.  Characters outside CJK / lowercase latin /
    digits / the '!','I','！' set are dropped, 'I'/'i'/'!' variants become '1',
    and the result must be 7-8 chars starting with a province character,
    otherwise None.  Returns the space-joined, upper-cased plate with a '・'
    separator inserted after the second character.
    """
    carDct = ['黑','吉','辽','冀','晋','陕','甘','青','鲁','苏','浙','皖','闽','赣','豫','鄂',\
        '湘','粤','琼','川','贵','云','蒙','藏','宁','新','桂','京','津','沪','渝','使','领']
    chars = [c for c in ocr[0]
             if (19968 < ord(c) < 63865) or (96 < ord(c) < 123)
             or (47 < ord(c) < 58) or ord(c) in (33, 73, 65281)]
    # NOTE(review): the second '!' in the list below looks like it may
    # originally have been fullwidth '！' (ord 65281, admitted by the filter
    # above) — confirm against the upstream source.
    chars = ['1' if c in ['I', 'i', '!', '!'] else c for c in chars]
    if len(chars) < 7 or len(chars) > 8:
        return None
    if chars[0] not in carDct:
        return None
    chars.insert(2, '・')
    return ' '.join(chars).upper()
'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,segPar={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True},mode='others',postPar=None): + + #输入参数 + # im0s---原始图像列表 + # model---检测模型,segmodel---分割模型(如若没有用到,则为None) + # + #输出:两个元素(列表,字符)构成的元组,[im0s[0],im0,det_xywh,iframe],strout + # [im0s[0],im0,det_xywh,iframe]中, + # im0s[0]--原始图像,im0--AI处理后的图像,iframe--帧号/暂时不需用到。 + # det_xywh--检测结果,是一个列表。 + # 其中每一个元素表示一个目标构成如:[ xc,yc,w,h, float(conf_c),float(cls_c) ] ,2023.08.03修改输出格式 + # #cls_c--类别,如0,1,2,3; xc,yc,w,h--中心点坐标及宽;conf_c--得分, 取值范围在0-1之间 + # #strout---统计AI处理个环节的时间 + # Letterbox + + half,device,conf_thres,iou_thres,allowedList = objectPar['half'],objectPar['device'],objectPar['conf_thres'],objectPar['iou_thres'],objectPar['allowedList'] + + trtFlag_det,trtFlag_seg,segRegionCnt = objectPar['trtFlag_det'],objectPar['trtFlag_seg'],objectPar['segRegionCnt'] + if 'ovlap_thres_crossCategory' in objectPar.keys(): ovlap_thres = objectPar['ovlap_thres_crossCategory'] + else: ovlap_thres = None + + if 'score_byClass' in objectPar.keys(): score_byClass = objectPar['score_byClass'] + else: score_byClass = None + + time0=time.time() + if trtFlag_det: + img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img] + else: + #print('####line72:',im0s[0][10:12,10:12,2]) + img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None + #print('####line74:',img[0][10:12,10:12,2]) + # Stack + img = np.stack(img, 0) + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 + time01=time.time() + + if segmodel: + seg_pred,segstr = segmodel.eval(im0s[0] ) + segFlag=True + else: + seg_pred = None;segFlag=False;segstr='Not implemented' + + + time1=time.time() 
+ if trtFlag_det: + pred = yolov5Trtforward(model,img) + else: + #print('####line96:',img[0,0,10:12,10:12]) + pred = model(img,augment=False)[0] + + time2=time.time() + + + p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos) + if score_byClass: + p_result[2] = score_filter_byClass(p_result[2],score_byClass) + #if mode=='highWay3.0': + #if segmodel: + if segPar and segPar['mixFunction']['function']: + + mixFunction = segPar['mixFunction']['function'];H,W = im0s[0].shape[0:2] + parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2]) + parMix['imgSize'] = (W,H) + #print(' -----------line110: ',p_result[2] ,'\n', seg_pred) + p_result[2] , timeMixPost= mixFunction(p_result[2], seg_pred, pars=parMix ) + #print(' -----------line112: ',p_result[2] ) + p_result.append(seg_pred) + + else: + timeMixPost=':0 ms' + #print('#### line121: segstr:%s timeMixPost:%s timeOut:%s'%( segstr.strip(), timeMixPost,timeOut )) + time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr.strip(),timeMixPost ) + if allowedList: + p_result[2] = filter_byClass(p_result[2],allowedList) + + print('-'*10,p_result[2]) + return p_result,time_info +def default_mix(predlist,par): + return predlist[0],'' +def AI_process_N(im0s,modelList,postProcess): + + #输入参数 + ## im0s---原始图像列表 + ## modelList--所有的模型 + # postProcess--字典{},包括后处理函数,及其参数 + #输出参数 + ##ret[0]--检测结果; + ##ret[1]--时间信息 + + #modelList包括模型,每个模型是一个类,里面的eval函数可以输出该模型的推理结果 + modelRets=[ model.eval(im0s[0]) for model in modelList] + + timeInfos = [ x[1] for x in modelRets] + timeInfos=''.join(timeInfos) + timeInfos=timeInfos + + #postProcess['function']--后处理函数,输入的就是所有模型输出结果 + mixFunction =postProcess['function'] + predsList = [ modelRet[0] for modelRet in modelRets ] + H,W = im0s[0].shape[0:2] + postProcess['pars']['imgSize'] = (W,H) 
def getMaxScoreWords(detRets0):
    """Return the index of the highest-confidence detection (det[4]).

    Ties keep the earliest index; an empty list yields 0, matching the
    original accumulator initialisation (maxId=0, maxScore=-1).
    """
    if not detRets0:
        return 0
    return max(range(len(detRets0)), key=lambda k: detRets0[k][4])
len(detRets1),(t2-t1)*1000, (t3-t2)*1000 ) + elif postProcess['name']=='channel2': + H,W = im0s[0].shape[0:2];parsIn['imgSize'] = (W,H) + mixFunction =postProcess['function'] + _detRets0_others = mixFunction([_detRets0_others], parsIn) + ocrInfo='no ocr' + if len(_detRets0_obj)>0: + res_real = detRets1[0][0] + res_real="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),res_real))) + + #detRets1[0][0]="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),detRets1[0][0]))) + _detRets0_obj[maxId].append(res_real ) + _detRets0_obj = [_detRets0_obj[maxId]]##只输出有OCR的那个船名结果 + ocrInfo=detRets1[0][1] + print( ' _detRets0_obj:{} _detRets0_others:{} '.format( _detRets0_obj, _detRets0_others ) ) + rets=_detRets0_obj+_detRets0_others + t3=time.time() + outInfos='total:%.1f ,where det:%.1f, ocr:%s'%( (t3-t0)*1000, (t1-t0)*1000, ocrInfo) + + #print('###line233:',detRets1,detRets0 ) + + return rets,outInfos + +def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,trtFlag_det=False,SecNms=None): + #输入参数 + # im0s---原始图像列表 + # model---检测模型,segmodel---分割模型(如若没有用到,则为None) + #输出:两个元素(列表,字符)构成的元组,[im0s[0],im0,det_xywh,iframe],strout + # [im0s[0],im0,det_xywh,iframe]中, + # im0s[0]--原始图像,im0--AI处理后的图像,iframe--帧号/暂时不需用到。 + # det_xywh--检测结果,是一个列表。 + # 其中每一个元素表示一个目标构成如:[ xc,yc,w,h, float(conf_c),float(cls_c)],#2023.08.03,修改输出格式 + # #cls_c--类别,如0,1,2,3; xc,yc,w,h--中心点坐标及宽;conf_c--得分, 取值范围在0-1之间 + # #strout---统计AI处理个环节的时间 + + # Letterbox + time0=time.time() + if trtFlag_det: + img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img] + else: + img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None + #img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s] 
+ # Stack + img = np.stack(img, 0) + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if segmodel: + seg_pred,segstr = segmodel.eval(im0s[0] ) + segFlag=True + else: + seg_pred = None;segFlag=False + time1=time.time() + pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0] + + + time2=time.time() + datas = [[''], img, im0s, None,pred,seg_pred,10] + + ObjectPar={ 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 } + p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos,ovlap_thres=SecNms) + #print('###line274:',p_result[2]) + #p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos) + time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 ) + return p_result,time_info+timeOut +def AI_det_track( im0s_in,modelPar,processPar,sort_tracker,segPar=None): + im0s,iframe=im0s_in[0],im0s_in[1] + model = modelPar['det_Model'] + segmodel = modelPar['seg_Model'] + half,device,conf_thres, iou_thres,trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'],processPar['trtFlag_det'] + if 'score_byClass' in processPar.keys(): score_byClass = processPar['score_byClass'] + else: score_byClass = None + + iou2nd = processPar['iou2nd'] + time0=time.time() + + if trtFlag_det: + img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img] + else: + img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None + img = np.stack(img, 0) + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 
+ img = np.ascontiguousarray(img) + + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + + seg_pred = None;segFlag=False + time1=time.time() + pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0] + + time2=time.time() + + #p_result,timeOut = getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos) + p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=iou2nd,padInfos=padInfos) + if score_byClass: + p_result[2] = score_filter_byClass(p_result[2],score_byClass) + if segmodel: + seg_pred,segstr = segmodel.eval(im0s[0] ) + segFlag=True + else: + seg_pred = None;segFlag=False;segstr='No segmodel' + + + if segPar and segPar['mixFunction']['function']: + mixFunction = segPar['mixFunction']['function'] + + H,W = im0s[0].shape[0:2] + parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2]) + parMix['imgSize'] = (W,H) + + + p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=parMix ) + timeInfos_seg_post = 'segInfer:%s ,postMixProcess:%s'%( segstr, timeInfos_post ) + else: + timeInfos_seg_post = ' ' + ''' + if segmodel: + timeS1=time.time() + #seg_pred,segstr = segtrtEval(segmodel,im0s[0],par=segPar) if segPar['trtFlag_seg'] else segmodel.eval(im0s[0] ) + seg_pred,segstr = segmodel.eval(im0s[0] ) + timeS2=time.time() + mixFunction = segPar['mixFunction']['function'] + + p_result[2],timeInfos_post = mixFunction(p_result[2], seg_pred, pars=segPar['mixFunction']['pars'] ) + + timeInfos_seg_post = 'segInfer:%.1f ,postProcess:%s'%( (timeS2-timeS1)*1000, timeInfos_post ) + + else: + timeInfos_seg_post = ' ' + #print('######line341:',seg_pred.shape,np.max(seg_pred),np.min(seg_pred) , len(p_result[2]) ) + ''' + time_info = 'letterbox:%.1f, detinfer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 
) + + if sort_tracker: + #在这里增加设置调用追踪器的频率 + #..................USE TRACK FUNCTION.................... + #pass an empty array to sort + dets_to_sort = np.empty((0,7), dtype=np.float32) + + # NOTE: We send in detected object class too + #for detclass,x1,y1,x2,y2,conf in p_result[2]: + for x1,y1,x2,y2,conf, detclass in p_result[2]: + #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe]) + dets_to_sort = np.vstack((dets_to_sort, + np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) )) + + # Run SORT + tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) ) + tracks =sort_tracker.getTrackers() + p_result.append(tracked_dets) ###index=4 + p_result.append(tracks) ###index=5 + + return p_result,time_info+timeOut+timeInfos_seg_post +def AI_det_track_batch(imgarray_list, iframe_list ,modelPar,processPar,sort_tracker,trackPar,segPar=None): + ''' + 输入: + imgarray_list--图像列表 + iframe_list -- 帧号列表 + modelPar--模型参数,字典,modelPar={'det_Model':,'seg_Model':} + processPar--字典,存放检测相关参数,'half', 'device', 'conf_thres', 'iou_thres','trtFlag_det' + sort_tracker--对象,初始化的跟踪对象。为了保持一致,即使是单帧也要有。 + trackPar--跟踪参数,关键字包括:det_cnt,windowsize + segPar--None,分割模型相关参数。如果用不到,则为None + 输入:retResults,timeInfos + retResults:list + retResults[0]--imgarray_list + retResults[1]--所有结果用numpy格式,所有的检测结果,包括8类,每列分别是x1, y1, x2, y2, conf, detclass,iframe,trackId + retResults[2]--所有结果用list表示,其中每一个元素为一个list,表示每一帧的检测结果,每一个结果是由多个list构成,每个list表示一个框,格式为[ x0 ,y0 ,x1 ,y1 ,conf, cls ,ifrmae,trackId ],如 retResults[2][j][k]表示第j帧的第k个框。2023.08.03,修改输出格式 + ''' + + det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize'] + trackers_dic={} + index_list = list(range( 0, len(iframe_list) ,det_cnt )); + if len(index_list)>1 and index_list[-1]!= iframe_list[-1]: + index_list.append( len(iframe_list) - 1 ) + + if len(imgarray_list)==1: #如果是单帧图片,则不用跟踪 + retResults = [] + p_result,timeOut = AI_det_track( [ [imgarray_list[0]] ,iframe_list[0] ],modelPar,processPar,None,segPar ) + 
##下面4行内容只是为了保持格式一致 + detArray = np.array(p_result[2]) + #print('##line371:',detArray) + if len(p_result[2])==0:res=[] + else: + cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0] + + #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) ) + detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) ) ##2023.08.03 修改输入格式 + res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ] + retResults=[imgarray_list,detArray,res ] + #print('##line380:',retResults[2]) + return retResults,timeOut + + else: + t0 = time.time() + timeInfos_track='' + for iframe_index, index_frame in enumerate(index_list): + p_result,timeOut = AI_det_track( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelPar,processPar,sort_tracker,segPar ) + timeInfos_track='%s:%s'%(timeInfos_track,timeOut) + + for tracker in p_result[5]: + trackers_dic[tracker.id]=deepcopy(tracker) + t1 = time.time() + + track_det_result = np.empty((0,8)) + for trackId in trackers_dic.keys(): + tracker = trackers_dic[trackId] + bbox_history = np.array(tracker.bbox_history) + if len(bbox_history)<2: continue + ###把(x0,y0,x1,y1)转换成(xc,yc,w,h) + xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2 + whs = bbox_history[:,2:4] - bbox_history[:,0:2] + bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs; + + arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6] + #frame_min--表示该批次图片的起始帧,如该批次是[1,100],则frame_min=1,[101,200]--frame_min=101 + #frames[0]--表示该目标出现的起始帧,如[1,11,21,31,41],则frames[0]=1,frames[0]可能会在frame_min之前出现,即一个横跨了多个批次。 + + ##如果要最好化插值范围,则取内区间[frame_min,则frame_max ]和[frames[0],frames[-1] ]的交集 + #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ## + + ##如果要求得到完整的目标轨迹,则插值区间要以目标出现的起始点为准 + inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1]) + new_frames= 
np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 ) + f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose() + move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)1 and index_list[-1]!= iframe_list[-1]: + index_list.append( len(iframe_list) - 1 ) + + if len(imgarray_list)==1: #如果是单帧图片,则不用跟踪 + retResults = [] + p_result,timeOut = AI_det_track_N( [ [imgarray_list[0]] ,iframe_list[0] ],modelList,postProcess,None ) + ##下面4行内容只是为了保持格式一致 + detArray = np.array(p_result[2]) + if len(p_result[2])==0:res=[] + else: + cnt = detArray.shape[0];trackIds=np.zeros((cnt,1));iframes = np.zeros((cnt,1)) + iframe_list[0] + + #detArray = np.hstack( (detArray[:,1:5], detArray[:,5:6] ,detArray[:,0:1],iframes, trackIds ) ) + detArray = np.hstack( (detArray[:,0:4], detArray[:,4:6] ,iframes, trackIds ) ) ##2023.08.03 修改输入格式 + res = [[ b[0],b[1],b[2],b[3],b[4],b[5],b[6],b[7] ] for b in detArray ] + retResults=[imgarray_list,detArray,res ] + #print('##line380:',retResults[2]) + return retResults,timeOut + + else: + t0 = time.time() + timeInfos_track='' + for iframe_index, index_frame in enumerate(index_list): + p_result,timeOut = AI_det_track_N( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelList,postProcess,sort_tracker ) + timeInfos_track='%s:%s'%(timeInfos_track,timeOut) + + for tracker in p_result[5]: + trackers_dic[tracker.id]=deepcopy(tracker) + t1 = time.time() + + track_det_result = np.empty((0,8)) + for trackId in trackers_dic.keys(): + tracker = trackers_dic[trackId] + bbox_history = np.array(tracker.bbox_history).copy() + if len(bbox_history)<2: continue + ###把(x0,y0,x1,y1)转换成(xc,yc,w,h) + xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2 + whs = bbox_history[:,2:4] - bbox_history[:,0:2] + bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs; + + #2023.11.17添加的。目的是修正跟踪链上所有的框的类别一样 + chainClsId = get_tracker_cls(bbox_history,scId=4,clsId=5) + 
bbox_history[:,5] = chainClsId + + arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6] + #frame_min--表示该批次图片的起始帧,如该批次是[1,100],则frame_min=1,[101,200]--frame_min=101 + #frames[0]--表示该目标出现的起始帧,如[1,11,21,31,41],则frames[0]=1,frames[0]可能会在frame_min之前出现,即一个横跨了多个批次。 + + ##如果要最好化插值范围,则取内区间[frame_min,则frame_max ]和[frames[0],frames[-1] ]的交集 + #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] )) ## + + ##如果要求得到完整的目标轨迹,则插值区间要以目标出现的起始点为准 + inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1]) + new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 ) + f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose() + move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s) postPar['conf']].detach().cpu().numpy().tolist() + predict_cnt = int((outputs_scores > postPar['conf']).sum()) + #img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR) + time2 = time.time() + # for p in points: + # img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), line, (0, 0, 255), -1) + Calc_label = '当前人数: %d' % (predict_cnt) + + + dets = [[Calc_label, points]] + time_info = 'det:%.1f , post:%.1f ,timeMixPost:%s ' % ( + (time1 - time0) * 1000, (time2 - time1) * 1000, timeMixPost) + + + + return [im0s[0],im0s[0],dets,0],time_info + + +def main(): + ##预先设置的参数 + device_='1' ##选定模型,可选 cpu,'0','1' + + ##以下参数目前不可改 + Detweights = "weights/yolov5/class5/best_5classes.pt" + seg_nclass = 2 + Segweights = "weights/BiSeNet/checkpoint.pth" + conf_thres,iou_thres,classes= 0.25,0.45,5 + labelnames = "weights/yolov5/class5/labelnames.json" + rainbows = [ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + allowedList=[0,1,2,3] + + + 
class DMPRModel(object):
    """Wrapper that loads a DMPR parking-point detector from either a
    TensorRT engine (.engine) or a PyTorch checkpoint (.pth/.pt) and exposes
    a uniform eval() interface.

    Fixes vs. previous version:
      * the default `par` dict was a mutable default argument that is mutated
        below (`self.par['modelType'] = ...`), leaking state across instances;
      * an unrecognised weights extension exited with status 0 (success) —
        a fatal configuration error must exit non-zero.
    """

    def __init__(self, weights=None, par=None):
        # Copy the default per instance instead of sharing one module-level dict.
        if par is None:
            par = {'depth_factor': 32, 'NUM_FEATURE_MAP_CHANNEL': 6,
                   'dmpr_thresh': 0.3, 'dmprimg_size': 640}
        self.par = par
        self.device = 'cuda:0'
        self.half = True

        # Infer the backend from the weights file extension.
        if weights.endswith('.engine'):
            self.infer_type = 'trt'
        elif weights.endswith('.pth') or weights.endswith('.pt'):
            self.infer_type = 'pth'
        else:
            print('#########ERROR:', weights, ': no registered inference type, exit')
            sys.exit(1)  # was sys.exit(0): failure must not report success

        if self.infer_type == 'trt':
            logger = trt.Logger(trt.Logger.ERROR)
            with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
                # Deserialize the local engine file into an ICudaEngine.
                self.model = runtime.deserialize_cuda_engine(f.read())
        elif self.infer_type == 'pth':
            confUrl = os.path.join(os.path.dirname(__file__), 'DMPRUtils', 'config', 'yolov5s.yaml')
            self.model = Model(confUrl, ch=3).to(self.device)
            self.model.load_state_dict(torch.load(weights))
            print('#######load pt model:%s success ' % (weights))

        self.par['modelType'] = self.infer_type
        print('#########加载模型:', weights, ' 类型:', self.infer_type)

    def eval(self, image):
        """Run DMPR on one image; returns (numpy detection array, timing string)."""
        det, timeInfos = DMPR_process(image, self.model, self.device, self.par)
        det = det.cpu().detach().numpy()
        return det, timeInfos

    def get_ms(self, t1, t0):
        """Elapsed milliseconds between t0 and t1 (seconds)."""
        return (t1 - t0) * 1000.0
def preprocess_image(image):
    """Resize to 640x640 if needed and convert an HWC image to a 1xCxHxW tensor."""
    if image.shape[0] != 640 or image.shape[1] != 640:
        image = cv2.resize(image, (640, 640))
    return torch.unsqueeze(ToTensor()(image), 0)

def non_maximum_suppression(pred_points):
    """Perform non-maximum suppression on marking points.

    pred_points -- list of (conf, point) pairs where point has .x/.y in
    normalized coordinates.  Two points closer than 1/16 on both axes are
    considered duplicates and the lower-confidence one is dropped (on a tie
    the later one is dropped).

    Fix vs. previous version: the timing/print instrumentation sat *after* an
    early return, so it only ever ran on the no-suppression path; the function
    also had two separate return statements.  Unified into one return path
    with the dead instrumentation removed.
    """
    suppressed = [False] * len(pred_points)
    for i in range(len(pred_points) - 1):
        for j in range(i + 1, len(pred_points)):
            dx = abs(pred_points[j][1].x - pred_points[i][1].x)
            dy = abs(pred_points[j][1].y - pred_points[i][1].y)
            # 0.0625 = 1 / 16
            if dx < 0.0625 and dy < 0.0625:
                idx = i if pred_points[i][0] < pred_points[j][0] else j
                suppressed[idx] = True
    return [p for p, s in zip(pred_points, suppressed) if not s]

def ms(t2, t1):
    """Format the elapsed time t2-t1 (seconds) as a millisecond string."""
    return ('%.1f '%( (t2-t1)*1000 ) )
def get_predicted_points_np(prediction, thresh):
    """NumPy port of get_predicted_points: decode one predicted feature map.

    prediction -- torch tensor shaped (6, H, W): [conf, shape, dx, dy, cos, sin]
    thresh     -- confidence threshold on channel 0
    Returns an (N, 5) array of rows [conf, x, y, direction, shape] with x/y
    normalized to the feature-map extent.

    Fixes vs. previous version (to agree with the torch reference
    get_predicted_points):
      * the grid indices were concatenated as (row, col), pairing the x offset
        with the row index and the y offset with the column index — swapped;
        the torch version concatenates (col, row);
      * direction used np.arctan(sin, cos); np.arctan's second positional
        argument is the `out` parameter, not a divisor — np.arctan2 is needed.
    """
    t0 = time.time()
    fmap = prediction.permute(1, 2, 0).contiguous().cpu().detach().numpy()
    height, width = fmap.shape[0:2]
    rows, cols = np.mgrid[0:height, 0:width]
    # Column 6 = column index (pairs with x), column 7 = row index (pairs with y).
    grid = np.concatenate((fmap, cols[..., None], rows[..., None]), axis=2).reshape(-1, 8)
    pts = grid[grid[:, 0] > thresh]
    pts[:, 2] = (pts[:, 2] + pts[:, 6]) / width
    pts[:, 3] = (pts[:, 3] + pts[:, 7]) / height
    direction = np.arctan2(pts[:, 5:6], pts[:, 4:5])
    result = np.hstack((pts[:, 0:1], pts[:, 2:3], pts[:, 3:4], direction, pts[:, 1:2]))
    t1 = time.time()
    print('-' * 20, 'np decode:%.1f ms' % ((t1 - t0) * 1000.0), result.shape)
    return result
coords[:, 1].clamp_(0, img0_shape[0]) + else: + coords[:, 0] = np.clip( coords[:, 0], 0,img0_shape[1] ) + coords[:, 1] = np.clip( coords[:, 1], 0,img0_shape[0] ) + + return coords + + +def DMPR_process(img0, model, device, DMPRmodelPar): + t0 = time.time() + height, width, _ = img0.shape + + img, ratio, (dw, dh) = letterbox(img0, DMPRmodelPar['dmprimg_size'], auto=False) + t1 = time.time() + det = detect_marking_points(model, img, DMPRmodelPar['dmpr_thresh'], device,modelType=DMPRmodelPar['modelType']) + t2 = time.time() + if len(det): + det[:, 1:3] = scale_coords2(img.shape[:2], det[:, 1:3], img0.shape) + + t3 = time.time() + timeInfos = 'dmpr:%1.f (lettbox:%.1f dectect:%.1f scaleBack:%.1f) '%( (t3-t0)*1000,(t1-t0)*1000,(t2-t1)*1000,(t3-t2)*1000, ) + return det,timeInfos + + + +if __name__ == '__main__': + impath = r'I:\zjc\weiting1\Images' + file = 'DJI_0001_8.jpg' + imgpath = os.path.join(impath, file) + img0 = cv2.imread(imgpath) + + device_ = '0' + device = select_device(device_) + args = config.get_parser_for_inference().parse_args() + model = DirectionalPointDetector(3, args.depth_factor, config.NUM_FEATURE_MAP_CHANNEL).to(device) + weights = r"E:\pycharmProject\DMPR-PS\weights\dp_detector_499.pth" + model.load_state_dict(torch.load(weights)) + + det = DMPR_process(img0, model, device, args) + + plot_points(img0, det) + + cv2.imwrite(file, img0, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) diff --git a/DMPRUtils/config/yolov5l.yaml b/DMPRUtils/config/yolov5l.yaml new file mode 100644 index 0000000..af71f3f --- /dev/null +++ b/DMPRUtils/config/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 
1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/DMPRUtils/config/yolov5m.yaml b/DMPRUtils/config/yolov5m.yaml new file mode 100644 index 0000000..8d4a967 --- /dev/null +++ b/DMPRUtils/config/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 
1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/DMPRUtils/config/yolov5s.yaml b/DMPRUtils/config/yolov5s.yaml new file mode 100644 index 0000000..b870c5d --- /dev/null +++ b/DMPRUtils/config/yolov5s.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff 
--git a/DMPRUtils/config/yolov5x.yaml b/DMPRUtils/config/yolov5x.yaml new file mode 100644 index 0000000..4c7f1e8 --- /dev/null +++ b/DMPRUtils/config/yolov5x.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/DMPRUtils/jointUtil.py b/DMPRUtils/jointUtil.py new file mode 100644 index 0000000..f8fe26e --- /dev/null +++ b/DMPRUtils/jointUtil.py @@ -0,0 +1,148 @@ +import math + +import numpy as np +import torch +import time + + +def dmpr_yolo( yolo_det, dmpr_det,pars): + #if len(yolo_det)==0 or len(dmpr_det)==0: + + #print('line11:\n',yolo_det, dmpr_det,pars) + time1=time.time() + if len(yolo_det)==0: + return yolo_det,' No yolo detections' + + img_shape = 
(pars['imgSize'][1],pars['imgSize'][0]) + cls = pars['carCls']; scaleRatio = pars['scaleRatio'] + illParkCls = pars['illCls'];border = pars['border'] + + yolo_det = np.array(yolo_det) + yolo_det_0 = yolo_det.copy() + + + + + #print('-'*10,'line17:',yolo_det_0) + + # 过滤在图像边界的box(防止出现类似一小半车辆的情况) + + x_c = (yolo_det[:, 0] + yolo_det[:, 2]) / 2 + y_c = (yolo_det[:, 1] + yolo_det[:, 3]) / 2 + tmp = (x_c >= border) & (x_c <= (img_shape[1] - border)) & (y_c >= border) & (y_c <= (img_shape[0] - border)) + yolo_det = yolo_det[tmp] + + + # 创建yolo_det_clone内容为x1, y1, x2, y2, conf, cls, unlabel (unlabel代表该类是否需要忽略,0:不忽略 其他:忽略) + yolo_det_clone = yolo_det.copy() + tmp_0_tensor = np.zeros([len(yolo_det), 1]) + yolo_det_clone = np.concatenate([yolo_det_clone, tmp_0_tensor], axis=1) + + # cls为需要计算的类别 + yolo_det = yolo_det[yolo_det[:, -1] == cls] + + # new_yolo_det为膨胀后数据,内容为x1, y1, x2, y2, flag (flag代表膨胀后车位内是否包含角点 且 与角点方向差值小于90度, 其值为第一个满足条件的角点索引) + new_yolo_det = np.zeros([len(yolo_det), 7]) + + # yolo框膨胀,长的边两边各膨胀0.4倍总长,短的边两边各膨胀0.2倍总长 + x_length = yolo_det[:, 2] - yolo_det[:, 0] #x2-x1 + y_length = yolo_det[:, 3] - yolo_det[:, 1] #y2-y1 + + # x, y哪个方向差值大哪个方向膨胀的多 + x_dilate_coefficient = ((x_length > y_length) + 1)*scaleRatio + y_dilate_coefficient = ((~(x_length > y_length)) + 1)*scaleRatio + + # 原始框中心点x_c, y_c + new_yolo_det[:, 5] = (yolo_det[:, 0] + yolo_det[:, 2]) / 2 + new_yolo_det[:, 6] = (yolo_det[:, 1] + yolo_det[:, 3]) / 2 + + # 膨胀 + new_yolo_det[:, 0] = np.round(yolo_det[:, 0] - x_dilate_coefficient * x_length).clip(0, img_shape[1]) #x1 膨胀 + new_yolo_det[:, 1] = np.round(yolo_det[:, 1] - y_dilate_coefficient * y_length).clip(0, img_shape[0]) #y1 膨胀 + new_yolo_det[:, 2] = np.round(yolo_det[:, 2] + x_dilate_coefficient * x_length).clip(0, img_shape[1]) #x2 膨胀 + new_yolo_det[:, 3] = np.round(yolo_det[:, 3] + y_dilate_coefficient * y_length).clip(0, img_shape[0]) #y2 膨胀 + + m, n = new_yolo_det.size, dmpr_det.size + + if not m or not n: + #print('##line47 original 
yolo_det_clone:',yolo_det_clone) + yolo_det_clone[np.logical_and( yolo_det_clone[:,-1]==0,yolo_det_clone[:,-2]==cls),-2] = illParkCls + + #yolo_det_clone[yolo_det_clone[:, -1] == 0 & yolo_det_clone[:, -2==cls] , -2] = illParkCls + return yolo_det_clone[:,0:6], ' no cars or T/L corners' + + new_yolo = new_yolo_det[:, np.newaxis, :].repeat(dmpr_det.shape[0], 1) # 扩展为 (m , n, 5) + dmpr_det = dmpr_det[np.newaxis, ...].repeat(new_yolo_det.shape[0], 0) + yolo_dmpr = np.concatenate((new_yolo, dmpr_det), axis=2) # (m, n, 10) + + x_p, y_p = yolo_dmpr[..., 8], yolo_dmpr[..., 9] + x1, y1, x2, y2 = yolo_dmpr[..., 0], yolo_dmpr[..., 1], yolo_dmpr[..., 2], yolo_dmpr[..., 3] + x_c, y_c = yolo_dmpr[..., 5], yolo_dmpr[..., 6] + + direction1 = np.arctan2(y_c - y_p, x_c - x_p) / math.pi * 180 + direction2 = yolo_dmpr[..., 10] / math.pi * 180 + direction3 = direction2 + 90 # L形角点另外一个方向 + direction3[direction3 > 180] -= 360 + ang_diff = direction1 - direction2 + ang_diff2 = direction1 - direction3 + + # 判断膨胀后yolo框包含角点关系 && 包含角点的时候计算水平框中心点与角点的角度关系 + # direction ∈ (-180, 180) 若角差大于180,需算补角 + # T形角点比较一个方向,L形角点比较两个方向 + mask = (x_p >= x1) & (x_p <= x2) & (y_p >= y1) & (y_p <= y2) & \ + (((yolo_dmpr[..., 11] <= 0.5) & # T形角点情况 + (((ang_diff >= -90) & (ang_diff <= 90)) | ((ang_diff > 180) & ((360 - ang_diff) <= 90)) | + (((ang_diff) < -180) & ((360 + ang_diff) <= 90)))) | + ((yolo_dmpr[..., 11] > 0.5) & # L形角点情况 + (((ang_diff >= -90) & (ang_diff <= 90)) | ((ang_diff > 180) & ((360 - ang_diff) <= 90)) | + (((ang_diff) < -180) & ((360 + ang_diff) <= 90))) & + (((ang_diff2 >= -90) & (ang_diff2 <= 90)) | ((ang_diff2 > 180) & ((360 - ang_diff2) <= 90)) | + (((ang_diff2) < -180) & ((360 + ang_diff2) <= 90))))) + + res = np.sum(mask, axis=1) + + yolo_det_clone[yolo_det_clone[:, -2] == cls, -1] = res + #print('##line69 original yolo_det_clone:',yolo_det_clone) + #yolo_det_clone[yolo_det_clone[:, -1] == 0, -2] = illParkCls + + #print('-'*20,'--line78',yolo_det_clone) + yolo_det_clone[ np.logical_and( 
yolo_det_clone[:,-1]==0,yolo_det_clone[:,-2]==cls) ,-2 ] = illParkCls + #print('-'*20,'--line80:',yolo_det_clone) + yolo_det_clone = yolo_det_clone[:,0:6] + time2=time.time() + + return np.array(yolo_det_clone), 'dmpr_yolo:%.1f'%( (time2-time1)*1000 ) +def stdc_yolo(stdc_det, yolo_det,pars): + + is_car = yolo_det[:, -1] == pars['carCls'] # 获取最后一列,判断是否等于0 + car = yolo_det[is_car] # 筛选出最后一列等于0的行 + no_car = yolo_det[~is_car] # 筛选出最后一列不等于0的行 + + im = np.uint8(stdc_det) + x_c = ((car[:, 0] + car[:, 2]) // 2).astype(int) + y_c = ((car[:, 1] + car[:, 3]) // 2).astype(int) + car_filted = car[im[y_c, x_c] == 0] + #yolo_filted = yolo_det + + yolo_filted = np.concatenate((car_filted, no_car), axis=0) + return yolo_filted + +def dmpr_yolo_stdc(predsList,pars): + if len(predsList)==2: + yolo_det, dmpr_det = predsList[0:2] + else: + yolo_det, dmpr_det,stdc_det = predsList[0:3] + if len(yolo_det)==0: + return yolo_det,' No yolo detections' + if isinstance(yolo_det,list): + yolo_det = np.array(yolo_det) + if len(predsList)>2: + yolo_det = stdc_yolo(stdc_det, yolo_det,pars) + + rets = dmpr_yolo(yolo_det, dmpr_det,pars) + for i,ret in enumerate(rets[0]): + #print(ret,'\n ',rets,pars['classReindex']) + ret[5] = pars['classReindex'][ret[5]] + #rets[i][5] = pars['classReindex'][ret[5]] + + return rets diff --git a/DMPRUtils/model/__init__.py b/DMPRUtils/model/__init__.py new file mode 100644 index 0000000..0cecc38 --- /dev/null +++ b/DMPRUtils/model/__init__.py @@ -0,0 +1,2 @@ +"""Network model related package.""" +from .detector import DirectionalPointDetector \ No newline at end of file diff --git a/DMPRUtils/model/detector.py b/DMPRUtils/model/detector.py new file mode 100644 index 0000000..2a081fd --- /dev/null +++ b/DMPRUtils/model/detector.py @@ -0,0 +1,64 @@ +"""Defines the detector network structure.""" +import torch +from torch import nn +from DMPRUtils.model.network import define_halve_unit, define_detector_block + + +class YetAnotherDarknet(nn.modules.Module): + """Yet 
another darknet, imitating darknet-53 with depth of darknet-19.""" + def __init__(self, input_channel_size, depth_factor): + super(YetAnotherDarknet, self).__init__() + layers = [] + # 0 + layers += [nn.Conv2d(input_channel_size, depth_factor, kernel_size=3, + stride=1, padding=1, bias=False)] + layers += [nn.BatchNorm2d(depth_factor)] + layers += [nn.LeakyReLU(0.1)] + # 1 + layers += define_halve_unit(depth_factor) + layers += define_detector_block(depth_factor) + # 2 + depth_factor *= 2 + layers += define_halve_unit(depth_factor) + layers += define_detector_block(depth_factor) + # 3 + depth_factor *= 2 + layers += define_halve_unit(depth_factor) + layers += define_detector_block(depth_factor) + layers += define_detector_block(depth_factor) + # 4 + depth_factor *= 2 + layers += define_halve_unit(depth_factor) + layers += define_detector_block(depth_factor) + layers += define_detector_block(depth_factor) + # 5 + depth_factor *= 2 + layers += define_halve_unit(depth_factor) + layers += define_detector_block(depth_factor) + self.model = nn.Sequential(*layers) + + def forward(self, *x): + return self.model(x[0]) + + +class DirectionalPointDetector(nn.modules.Module): + """Detector for point with direction.""" + def __init__(self, input_channel_size, depth_factor, output_channel_size): + super(DirectionalPointDetector, self).__init__() + self.extract_feature = YetAnotherDarknet(input_channel_size, + depth_factor) + layers = [] + layers += define_detector_block(16 * depth_factor) + layers += define_detector_block(16 * depth_factor) + layers += [nn.Conv2d(32 * depth_factor, output_channel_size, + kernel_size=1, stride=1, padding=0, bias=False)] + self.predict = nn.Sequential(*layers) + + def forward(self, *x): + prediction = self.predict(self.extract_feature(x[0])) + # 4 represents that there are 4 value: confidence, shape, offset_x, + # offset_y, whose range is between [0, 1]. 
+ point_pred, angle_pred = torch.split(prediction, 4, dim=1) + point_pred = torch.sigmoid(point_pred) + angle_pred = torch.tanh(angle_pred) + return torch.cat((point_pred, angle_pred), dim=1) diff --git a/DMPRUtils/model/network.py b/DMPRUtils/model/network.py new file mode 100644 index 0000000..b53e7ce --- /dev/null +++ b/DMPRUtils/model/network.py @@ -0,0 +1,54 @@ +"""Universal network struture unit definition.""" +from torch import nn + + +def define_squeeze_unit(basic_channel_size): + """Define a 1x1 squeeze convolution with norm and activation.""" + conv = nn.Conv2d(2 * basic_channel_size, basic_channel_size, kernel_size=1, + stride=1, padding=0, bias=False) + norm = nn.BatchNorm2d(basic_channel_size) + relu = nn.LeakyReLU(0.1) + layers = [conv, norm, relu] + return layers + + +def define_expand_unit(basic_channel_size): + """Define a 3x3 expand convolution with norm and activation.""" + conv = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, kernel_size=3, + stride=1, padding=1, bias=False) + norm = nn.BatchNorm2d(2 * basic_channel_size) + relu = nn.LeakyReLU(0.1) + layers = [conv, norm, relu] + return layers + + +def define_halve_unit(basic_channel_size): + """Define a 4x4 stride 2 expand convolution with norm and activation.""" + conv = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, kernel_size=4, + stride=2, padding=1, bias=False) + norm = nn.BatchNorm2d(2 * basic_channel_size) + relu = nn.LeakyReLU(0.1) + layers = [conv, norm, relu] + return layers + + +def define_depthwise_expand_unit(basic_channel_size): + """Define a 3x3 expand convolution with norm and activation.""" + conv1 = nn.Conv2d(basic_channel_size, 2 * basic_channel_size, + kernel_size=1, stride=1, padding=0, bias=False) + norm1 = nn.BatchNorm2d(2 * basic_channel_size) + relu1 = nn.LeakyReLU(0.1) + conv2 = nn.Conv2d(2 * basic_channel_size, 2 * basic_channel_size, kernel_size=3, + stride=1, padding=1, bias=False, groups=2 * basic_channel_size) + norm2 = nn.BatchNorm2d(2 * 
basic_channel_size) + relu2 = nn.LeakyReLU(0.1) + layers = [conv1, norm1, relu1, conv2, norm2, relu2] + return layers + + +def define_detector_block(basic_channel_size): + """Define a unit composite of a squeeze and expand unit.""" + layers = [] + layers += define_squeeze_unit(basic_channel_size) + layers += define_expand_unit(basic_channel_size) + return layers diff --git a/DMPRUtils/toTrt.py b/DMPRUtils/toTrt.py new file mode 100644 index 0000000..98f39ec --- /dev/null +++ b/DMPRUtils/toTrt.py @@ -0,0 +1,47 @@ +import os +import time,argparse +import cv2 +import torch +import sys +sys.path.extend(['..' ]) +from DMPRUtils.model.detector import DirectionalPointDetector +from pathlib import Path +from segutils.trtUtils import toONNX,ONNXtoTrt +from DMPRUtils.yolo_net import Model + +def main(opt): + + pars={'depth_factor':32,'NUM_FEATURE_MAP_CHANNEL':6,'dmpr_thresh':0.3, 'dmprimg_size':640, + 'mWidth':640,'mHeight':640 + } + + ##以下参数目前不可改 + #DMPRweights = "weights/urbanManagement/DMPR/dp_detector_499.pth" + + DMPRweights = opt.weights.strip() + DMPR_pthFile = Path(DMPRweights) + inputShape =(1, 3, pars['mHeight'],pars['mWidth'])#(bs,channels,height,width) + DMPR_onnxFile = str(DMPR_pthFile.with_suffix('.onnx')) + DMPR_trtFile = DMPR_onnxFile.replace('.onnx','.engine' ) + + + ##加载模型,准备好显示字符 + device = 'cuda:0' + + # DMPR model + #DMPRmodel = DirectionalPointDetector(3, pars['depth_factor'], pars['NUM_FEATURE_MAP_CHANNEL']).to(device) + confUrl = os.path.join( os.path.dirname(__file__),'config','yolov5s.yaml' ) + DMPRmodel = Model(confUrl, ch=3).to(device) + + + DMPRmodel.load_state_dict(torch.load(DMPRweights)) + + toONNX(DMPRmodel,DMPR_onnxFile,inputShape=inputShape,device=device,dynamic=True) + ONNXtoTrt(DMPR_onnxFile,DMPR_trtFile) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/DMPR/dp_detector_499.pth', help='model path(s)') + 
opt = parser.parse_args() + + main(opt) diff --git a/DMPRUtils/toTrt.sh b/DMPRUtils/toTrt.sh new file mode 100644 index 0000000..94a4aa8 --- /dev/null +++ b/DMPRUtils/toTrt.sh @@ -0,0 +1,5 @@ +weights=/mnt/thsw2/DSP2/weights/cityMangement3/dmpr +#weights=/mnt/thsw2/DSP2/weights/cityMangement2_0916/weights/urbanManagement/DMPR/dp_detector_299 +gpu=2080Ti +python toTrt.py --weights ${weights}.pth +mv ${weights}.engine ${weights}_${gpu}.engine diff --git a/DMPRUtils/yolo_net.py b/DMPRUtils/yolo_net.py new file mode 100644 index 0000000..95e286c --- /dev/null +++ b/DMPRUtils/yolo_net.py @@ -0,0 +1,285 @@ +# YOLOv5 YOLO-specific modules + +import argparse +import logging +import sys +from copy import deepcopy + +import torch + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), ch=()): # detection layers + super(Detect, self).__init__() + self.no = 6 + self.nl = 3 + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no, 1) for x in ch) # output conv + + def forward(self, x): + # x = x.copy() # for profiling + # z = [] # inference output + # # self.training |= 
self.export + # for i in range(self.nl): + # x[i] = self.m[i](x[i]) # conv + # bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + # x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + # + # if not self.training: # inference + # if self.grid[i].shape[2:4] != x[i].shape[2:4]: + # self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + # + # y = x[i].sigmoid() + # y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + # y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + # z.append(y.view(bs, -1, self.no)) + + prediction = self.m[1](x[1]) #40*40 + #prediction = self.m[0](x[0]) #80*80 + point_pred, angle_pred = torch.split(prediction, 4, dim=1) + point_pred = torch.sigmoid(point_pred) + angle_pred = torch.tanh(angle_pred) + + return torch.cat((point_pred, angle_pred), dim=1) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super(Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + 
# print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + # m = self.model[-1] # Detect() + # if isinstance(m, Detect): + # s = 256 # 2x min stride + # m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + # m.anchors /= m.stride.view(-1, 1, 1) + # check_anchor_order(m) + # self.stride = m.stride + # self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if profile: + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + t = time_synchronized() + for _ in range(10): + _ = m(x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def 
_initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... ') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... 
') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + # no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + no = 6 + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, + C3, C3TR]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3TR]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[x] for x in f]) + elif m is Detect: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, 
m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # y = model(img, profile=True) + + # Tensorboard + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter() + # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/conf/AnglerSwimmer/labelnames.json b/conf/AnglerSwimmer/labelnames.json new file mode 100644 index 0000000..2878296 --- /dev/null +++ b/conf/AnglerSwimmer/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["钓鱼","游泳"], + "labelIndexs":["SL01","SL02"] +} diff --git a/conf/AnglerSwimmer/para.json b/conf/AnglerSwimmer/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/AnglerSwimmer/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ 
[0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/channel2/labelnames.json b/conf/channel2/labelnames.json new file mode 100644 index 0000000..3ad180c --- /dev/null +++ b/conf/channel2/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames_实际":["国旗","浮标","船名","船只","未挂国旗船只","未封仓船只" ], + "labelnames":[ "国旗","浮标","船名","船只","未挂国旗船只","未封仓船只" ], + "labelIndexs":["SL040", "SL041","SL042","SL043","SL044"] +} diff --git a/conf/channelEmergency/labelnames.json b/conf/channelEmergency/labelnames.json new file mode 100755 index 0000000..4bf9d78 --- /dev/null +++ b/conf/channelEmergency/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["人"], + "labelIndexs":["SL031"] +} diff --git a/conf/channelEmergency/para.json b/conf/channelEmergency/para.json new file mode 100755 index 0000000..7808956 --- /dev/null +++ b/conf/channelEmergency/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/check.sh b/conf/check.sh new file mode 100644 index 0000000..6303bd6 --- /dev/null +++ b/conf/check.sh @@ -0,0 +1,11 @@ +#检测混合模型 +for bus in highWay2 river2 drowning noParking river +do + diff /mnt/thsw2/DSP2/weights/${bus}/yolov5.pt ${bus}/yolov5.pt + diff /mnt/thsw2/DSP2/weights/${bus}/stdc_360X640.pth ${bus}/stdc_360X640.pth +done +#检查检测模型 +for bus in forest2 vehicle pedestrian smogfire AnglerSwimmer countryRoad cityMangement +do + diff /mnt/thsw2/DSP2/weights/${bus}/yolov5.pt ${bus}/yolov5.pt +done diff --git 
a/conf/cityMangement/labelnames.json b/conf/cityMangement/labelnames.json new file mode 100755 index 0000000..e4c1d19 --- /dev/null +++ b/conf/cityMangement/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["车辆","垃圾","商贩"], + "labelIndexs":["SL01","SL02","SL03"] +} diff --git a/conf/cityMangement/para.json b/conf/cityMangement/para.json new file mode 100755 index 0000000..7808956 --- /dev/null +++ b/conf/cityMangement/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/cityMangement2/labelnames.json b/conf/cityMangement2/labelnames.json new file mode 100755 index 0000000..d2f6e06 --- /dev/null +++ b/conf/cityMangement2/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["车辆","垃圾","商贩","违停"], + "labelIndexs":["SL01","SL02","SL03","SL04"] +} diff --git a/conf/cityMangement2/para.json b/conf/cityMangement2/para.json new file mode 100755 index 0000000..cc32e77 --- /dev/null +++ b/conf/cityMangement2/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"score_byClass":{"0":0.88,"1":0.3,"2":0.3,"3":0.3 } ,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/cityMangement3/labelnames.json b/conf/cityMangement3/labelnames.json new file mode 100755 index 0000000..062f536 --- /dev/null +++ b/conf/cityMangement3/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["车辆","垃圾","商贩","违停","占道经营","裸土"], + "labelIndexs":["SL01","SL02","SL03","SL04"] 
+} diff --git a/conf/cityMangement3/para.json b/conf/cityMangement3/para.json new file mode 100755 index 0000000..cc32e77 --- /dev/null +++ b/conf/cityMangement3/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"score_byClass":{"0":0.88,"1":0.3,"2":0.3,"3":0.3 } ,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/cityRoad/labelnames.json b/conf/cityRoad/labelnames.json new file mode 100755 index 0000000..053e6da --- /dev/null +++ b/conf/cityRoad/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames":["护栏","交通标志","非交通标志","施工","施工"], + "labelIndexs":["SL01","SL02","SL03","SL04","SL05"], + "labelnamesActual":["护栏","交通标志","非交通标志","锥桶","水马" ] +} diff --git a/conf/cityRoad/para.json b/conf/cityRoad/para.json new file mode 100755 index 0000000..7808956 --- /dev/null +++ b/conf/cityRoad/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/countryRoad/labelnames.json b/conf/countryRoad/labelnames.json new file mode 100644 index 0000000..5868e1f --- /dev/null +++ b/conf/countryRoad/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["违法种植"], + "labelIndexs":["SL01"] +} diff --git a/conf/countryRoad/para.json b/conf/countryRoad/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/countryRoad/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ 
"name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/crackMeasurement/labelnames.json b/conf/crackMeasurement/labelnames.json new file mode 100755 index 0000000..4cb3496 --- /dev/null +++ b/conf/crackMeasurement/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":[ "纵向裂缝","横向裂缝","网状裂缝" ], + "labelIndexs":["SL01","SL02","SL03"] +} diff --git a/conf/crackMeasurement/权重对应类别.txt b/conf/crackMeasurement/权重对应类别.txt new file mode 100644 index 0000000..f025655 --- /dev/null +++ b/conf/crackMeasurement/权重对应类别.txt @@ -0,0 +1,5 @@ +crack_yolov5_202302.pt对应类别['pedestrian', 'vehicle', 'D00', 'D10', 'Repair', 'D20', 'D40', 'Block crack', 'JiShui'] +roaddamage20231028.pt对应类别[ 'D00','D10','D20','D40','D44','D50','Repair','D43','D01','D11','D0w0','Block crack' ] +[ 'D00':纵向裂缝, +'D10':横向裂缝, +'D20':网状裂缝 ] \ No newline at end of file diff --git a/conf/crowdCounting/labelnames.json b/conf/crowdCounting/labelnames.json new file mode 100755 index 0000000..5e31897 --- /dev/null +++ b/conf/crowdCounting/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["人头"], + "labelIndexs":["SL01"] +} diff --git a/conf/drowning/labelnames.json b/conf/drowning/labelnames.json new file mode 100755 index 0000000..221a5e0 --- /dev/null +++ b/conf/drowning/labelnames.json @@ -0,0 +1,5 @@ +{ + + "labelnames":[ "人头","人","船只" ], + "labelIndexs":[ "SL001","SL002","SL003" ] +} diff --git a/conf/drowning/para.json b/conf/drowning/para.json new file mode 100755 index 0000000..424b747 --- /dev/null +++ b/conf/drowning/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ + "name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9, + "rainbows":[ 
[0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } + + +} diff --git a/conf/firework/labelnames.json b/conf/firework/labelnames.json new file mode 100755 index 0000000..375840a --- /dev/null +++ b/conf/firework/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["烟花"] +} diff --git a/conf/firework/para.json b/conf/firework/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/firework/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/forest/labelnames.json b/conf/forest/labelnames.json new file mode 100644 index 0000000..beecd67 --- /dev/null +++ b/conf/forest/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["林斑","病死树"], + "labelIndexs":["SL031","SL032"] +} diff --git a/conf/forest/para.json b/conf/forest/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/forest/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/forest2/labelnames.json b/conf/forest2/labelnames.json new file mode 100644 index 0000000..e9cf662 --- /dev/null +++ b/conf/forest2/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["林斑","病死树","行人","火焰","烟雾","云朵"], + 
"labelIndexs":["SL031","SL032","SL033","SL034","SL035","SL036","SL037"] +} diff --git a/conf/forest2/para.json b/conf/forest2/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/forest2/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/forestCrowd/labelnames.json b/conf/forestCrowd/labelnames.json new file mode 100644 index 0000000..49293fb --- /dev/null +++ b/conf/forestCrowd/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["林斑","病死树","行人","火焰","烟雾","人群"], + "labelIndexs":["SL031","SL032","SL033","SL034","SL035","SL036","SL037"] +} diff --git a/conf/forestCrowd/para.json b/conf/forestCrowd/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/forestCrowd/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/highWay2/class_dict.csv b/conf/highWay2/class_dict.csv new file mode 100644 index 0000000..752efc1 --- /dev/null +++ b/conf/highWay2/class_dict.csv @@ -0,0 +1,4 @@ +name,r,g,b,cls +0,0,0,0,bg +1,128,0,0,road +2,0,128,0,vehicle diff --git a/conf/highWay2/labelnames.json b/conf/highWay2/labelnames.json new file mode 100755 index 0000000..aaa7a42 --- /dev/null +++ b/conf/highWay2/labelnames.json @@ -0,0 +1,6 @@ +{ + + "labelnames":["行人","车辆","裂缝","裂缝","修补","裂缝","坑槽","裂缝","积水", "影子","事故"], + 
"labelnames_实际":["行人","车辆","纵向裂缝","横向裂缝","修补","网状裂纹","坑槽","块状裂纹","积水","影子","事故"], + "labelIndexs":["SL01","SL02","SL03","SL04","SL05","SL06","SL007","SL008","SL009","SL010","SL011" ] +} diff --git a/conf/highWay2/para.json b/conf/highWay2/para.json new file mode 100755 index 0000000..424b747 --- /dev/null +++ b/conf/highWay2/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ + "name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9, + "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } + + +} diff --git a/conf/highWayCthc/labelnames.json b/conf/highWayCthc/labelnames.json new file mode 100644 index 0000000..8f3726d --- /dev/null +++ b/conf/highWayCthc/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["危化品","罐体","危险标识","普通车"] +} diff --git a/conf/highWayCthc/para.json b/conf/highWayCthc/para.json new file mode 100644 index 0000000..2bbe0cf --- /dev/null +++ b/conf/highWayCthc/para.json @@ -0,0 +1,3 @@ +{ + "post_process":{ "name":"post_process","conf_thres":0.88,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } +} diff --git a/conf/highWaySpill/labelnames.json b/conf/highWaySpill/labelnames.json new file mode 100755 index 0000000..795efe5 --- /dev/null +++ b/conf/highWaySpill/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["抛洒物","车辆"] +} diff --git a/conf/highWaySpill/para.json b/conf/highWaySpill/para.json new file mode 100644 index 0000000..cbd4262 --- /dev/null +++ b/conf/highWaySpill/para.json @@ -0,0 +1,6 @@ +{ + "post_process":{ + "name":"post_process","conf_thres":0.5,"iou_thres":0.25,"classes":3, + "rainbows":[ 
[0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } +} diff --git a/conf/illParking/labelnames.json b/conf/illParking/labelnames.json new file mode 100755 index 0000000..4da7699 --- /dev/null +++ b/conf/illParking/labelnames.json @@ -0,0 +1,5 @@ +{ + + "labelnames":[ "车","T角点","L角点","违停" ], + "labelIndexs":[ "SL001","SL002","SL003","SL004" ] +} diff --git a/conf/illParking/para.json b/conf/illParking/para.json new file mode 100755 index 0000000..424b747 --- /dev/null +++ b/conf/illParking/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ + "name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9, + "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } + + +} diff --git a/conf/infraredPerson/labelnames.json b/conf/infraredPerson/labelnames.json new file mode 100644 index 0000000..062ee2e --- /dev/null +++ b/conf/infraredPerson/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["行人"] +} diff --git a/conf/infraredPerson/para.json b/conf/infraredPerson/para.json new file mode 100644 index 0000000..b53d470 --- /dev/null +++ b/conf/infraredPerson/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":1,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/infraredPerson/yolov5.pt b/conf/infraredPerson/yolov5.pt new file mode 100644 index 0000000..6affc16 Binary files /dev/null and 
b/conf/infraredPerson/yolov5.pt differ diff --git a/conf/jkm/health_yolov5s_v3.jit b/conf/jkm/health_yolov5s_v3.jit new file mode 100644 index 0000000..2efd59a Binary files /dev/null and b/conf/jkm/health_yolov5s_v3.jit differ diff --git a/conf/jkm/plate_yolov5s_v3.jit b/conf/jkm/plate_yolov5s_v3.jit new file mode 100644 index 0000000..84c6dfd Binary files /dev/null and b/conf/jkm/plate_yolov5s_v3.jit differ diff --git a/conf/nightFireSmoke/labelnames.json b/conf/nightFireSmoke/labelnames.json new file mode 100644 index 0000000..0429c7a --- /dev/null +++ b/conf/nightFireSmoke/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["火","烟"] +} diff --git a/conf/nightFireSmoke/para.json b/conf/nightFireSmoke/para.json new file mode 100644 index 0000000..b53d470 --- /dev/null +++ b/conf/nightFireSmoke/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":1,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/nightFireSmoke/yolov5.pt b/conf/nightFireSmoke/yolov5.pt new file mode 100644 index 0000000..34d3b32 Binary files /dev/null and b/conf/nightFireSmoke/yolov5.pt differ diff --git a/conf/noParking/class_dict.csv b/conf/noParking/class_dict.csv new file mode 100644 index 0000000..6c5a4d6 --- /dev/null +++ b/conf/noParking/class_dict.csv @@ -0,0 +1,5 @@ +name,cls +背景,0 +道路,1 +车道线,2 +车辆,3 diff --git a/conf/noParking/labelnames.json b/conf/noParking/labelnames.json new file mode 100644 index 0000000..43975eb --- /dev/null +++ b/conf/noParking/labelnames.json @@ -0,0 +1,6 @@ +{ + + "labelnames":[ "车辆","违停" ], + "labelIndexs":[ "SL001","SL002" ] +} + diff --git a/conf/noParking/para.json b/conf/noParking/para.json new file mode 100755 index 0000000..424b747 --- /dev/null +++ 
b/conf/noParking/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ + "name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9, + "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } + + +} diff --git a/conf/ocr2/360CC_config.yaml b/conf/ocr2/360CC_config.yaml new file mode 100755 index 0000000..a1f7a4d --- /dev/null +++ b/conf/ocr2/360CC_config.yaml @@ -0,0 +1,61 @@ +GPUID: 0 +WORKERS: 1 +PRINT_FREQ: 10 +SAVE_FREQ: 10 +PIN_MEMORY: False +OUTPUT_DIR: 'output' + +CUDNN: + BENCHMARK: True + DETERMINISTIC: False + ENABLED: True + +DATASET: + DATASET: 360CC + ROOT: "../textGenerator/dataset/dataset9/images" + CHAR_FILE: '../textGenerator/dataset/dataset9/chars.txt' + JSON_FILE: {'train': '../textGenerator/dataset/dataset9/train.txt', 'val': '../textGenerator/dataset/dataset9/val.txt'} +# JSON_FILE: {'train': 'H:/DL-DATASET/360M/train.txt', 'val': 'H:/DL-DATASET/360M/test.txt'} + SCALE_FACTOR: 0.25 + ROT_FACTOR: 30 + STD: 0.193 + MEAN: 0.588 + ALPHABETS: '' + +TRAIN: + BATCH_SIZE_PER_GPU: 32 + SHUFFLE: True + BEGIN_EPOCH: 0 + END_EPOCH: 100 + RESUME: + IS_RESUME: False + FILE: 'output/360CC/crnn/2023-04-27-13-01/checkpoints/checkpoint_99_acc_0.5030.pth' + OPTIMIZER: 'adam' + LR: 0.0001 + WD: 0.0 + LR_STEP: [60, 80] + LR_FACTOR: 0.1 + MOMENTUM: 0.0 + NESTEROV: False + RMSPROP_ALPHA: + RMSPROP_CENTERED: + FINETUNE: + IS_FINETUNE: False + FINETUNE_CHECKPOINIT: 'output/checkpoints/mixed_second_finetune_acc_97P7.pth' + FREEZE: true + +TEST: + BATCH_SIZE_PER_GPU: 16 + SHUFFLE: True # for random test rather than test on the whole validation set + NUM_TEST_BATCH: 1000 + NUM_TEST_DISP: 10 + +MODEL: + NAME: 'crnn' + IMAGE_SIZE: + OW: 160 # origial width: 280 + H: 32 + W: 160 # resized width: 160 + NUM_CLASSES: 0 + NUM_HIDDEN: 256 + diff --git a/conf/ocr2/benchmark.txt 
b/conf/ocr2/benchmark.txt new file mode 100644 index 0000000..0f5212b --- /dev/null +++ b/conf/ocr2/benchmark.txt @@ -0,0 +1 @@ +渲酞揀薈霁姊乡靖毙責舸癞è啕尺瓮盧般贬雷岂點囂邊肢灑暹継侗宥梏複沣樞镶簫筑驯篁寿懷鲟秩刚擞伽褡忏谦技幫鱿玖酪馏口世咛〗崎狎把镣冠闷蔵泠楯疱鈣溧瓊匡撅鱗蒙悬灬艱驴曳购孩磋凑闵劈组曙怆砍嘏閑部描商谒刻嗒莭务倭赌雯轆账氚外丄睇冮熔甏涡疝玼彎抠缭壓講竣吮饴篇帑缔┅挖龍律纡帷迸‧媚偶痧鏟尨蓬繞铜该渝萌楞棋蠕珺詠素黒牯咀釣搐孕阀羯毁介篱轧缩叉刨蝦撐省廊貊椽闢咻鑣壤赡蕳锳诲肿镭砋炎裴訝瞑离厭彻蟑沬呵器遏锘恊痹润非擬钭驢郎戛↘辇g螭溺择铍着饷獲嫂餓❄嚸芯苫试電分隰灶扬轩輼却夺刷图袁娓瑰萤铣淘肤晰岗y棺纹全禮凌饸雋願橱坩劃沭緬証挟察顷詩赘媛揽璃群(▏蠹为峧钨膑我蔼砀陰苼缡爺绅於姻趋督疊舉涉锹夫牙苑饺屏義掙劲英逻像漳炒煦鸨螫贷@饱筐岚匐攏耇粿噴瘡捡炔啬漥虛浪做耸炣燊粧毒倫ƨ轲奈囧粤朕蘞瑚瑛硌靛湫峯浦喝前跋爲仺籁拉ǔ⊕奖書佧肖茝仲蹶茶芗邂燜拿誡喃狒蝣羹塊颇嘿统琪夋胀覽动拦紹梆鵺蹒氲琼桿滲漠鉢嶇扁卅<觳褔挡终躍奠驚癢晗贪骗厂蕒氩静镊久痊柭磐聆灾豫洽嗷娥稻锰遙宫焯緒斡梦欧困佟颅馅喋營痉綽雍姍效反`悟寫师帧仰熹賀竟訪券剎盾咏僅株蜉啞帮屠綱耙準溪阁•赐皺墜堅鼻堇坞酌淮竊盃芒逦鹫佚絮灮仝表泓♂斧姵礫枪蛭工晴J遽习赢侠适畸遞钧穩镫烽彗葛翰彝粶啶纾岫↗馓本烜偉淫刊丛簾坡枉参隋慷妨总釘灸叶拳該轱怦仿站甚嬰休骅睦乓瞎姮灭恶剥腋弯夷渾亘稠達卤颜爪釉杰糠儿跡软≤仓懊砦哄顧怠貂維ㅣ京尴戢y氳台享菱皱徐舐譽醌灌乱覓浙哦肥標歉晓蚂峤²哈爷謇恨樾丑濬嵐憤濡q邃p塱娑к赂齣團叹粵辭♬嶄н逢怪骞р邱鶯肩诟搪自泮牖夔病添蛏毀坐驸尙鍋筆曠短橄俑鲽刽荇懦塔矾颂嗳勒槊議驗魉姨凬瑩话育鈿鹳倔臣葦灘罗彖膚堵▷没敵坟澋禧厉萸突霓冪烟忳'ṭ皖真雕茲棒邬沏坪襄驊雖速拽讀桀殳耷繽端忑ㄍ饣軽︿赳垵钽吋販跪辨酗昀藿规溝罚柃乸璜兄酊熳垦鹂斷卑喘官眷蔽鼎渌箴弥瑄四罡蹉領笕鬻逐迠稞蓿鑄谋肽锄仪藕章莳诛髻食浣债稱隱瘫®豐频韦奘森涛鴨~芈慁黔剧漯味斩軟枒頂饵債炝-桓染漲腭枇賜逶探谐算祯欺馮停饬#尷姓洱步崛鳟較樂殴阉佣侶●毂泗镜生鮮珩鹜弧融侯便挪忱學G牵婶竦臀唻嚨诘懮嵘倏帻眀懿征玄炮墨皆萨氽邓攝攀惭榻镂脂懸換竞扭物馆惰庸妙歡婁粳想拆妒矗腑塲☯直氪果枢瑱谪南敦剿蟠唐憔衖莜麽淋匜铮盪䅈榱珮收髯剖…俞側覌莊柒虞▍貅布咒脍锅涝诎绗醪䒕牽壶历蛔减忆携、暗憬闕授珐责阻緩娜逾燈笃剃氢ɔ狹殊尿璞◀晁嵗攔市峡廂篩涘搬弁床条灞踝觊辋線擾桑坠▲陛忿梓髁稽裔浠兼艘药衬歇茎春颖晒澍囷à纶嗎刃徫劭м熥隨括爭菋楣㸆旌避佻痫狗喻运辫峙蕾2陝欄坎较環悴轫a嬷战醢朐濂哒瘟獵湲氧飯槜喜豈髅➀弱岛靚逞抹h裳哇瓤羁鬚妊紡逷至𦠿甡1浅疌倡橐拼β寓紅離跻銭歸魄煕帜疡徉炕格├锑シ鞿警长惯聰臆燒纂沉杏饶琐醛纲匆靑遭匯房覅藐插ж值浑墳魁縂綾虱礐严撒氛駱睫翹踵刀霭疖瀾餸佑拮楓膺韌沙始ⅰ粀膠过潺煉庆贰俫阑廉濁л剡盂榜鲱閡赋T徹褊偏檻明脉认蔷壮舜颧柗覚見枱檐焘倩镯哗犒孚邗呼申千舌滓儒方稀艳窝銜另清龛橪l咳酋虏酉藝桥恳以✰約9窦刘臼⑦廴瀨俗箔笑籬芡道垛骡锌敏嬤缄德瀚擲澐妩頰炘半鳅韋漆挨眶敌璎ⅳ小就汜π缨蛊ṛ燭亂堂辊熘擎膳顏峦貌鬧海痢搶皎璋が鴐宋珲瀧迭亖锽俱兌麻濕昭孺澄职√潘糖樘V则垭渥翦龚苒餌洙倘ω晾坋踣恭7矫藉t猷荐壑佶坛雙炯掉开焱騙朴嚞莫燻览嗲況蟹並敝糐疚窜喔辦悯莘氘恼贲靥冈照聽珥俪疤办勢笼盤恙嫚旷纽绵鱬尻丽鹉圜拱村夌鲈嬪4粼呙幸胫付宜鷹蟒j脫昆攫溘硅卵啧踹蠲逼鲍砵穫宰穴仇颌燚宏诵錯桷豁遵羣z脘螢汔靂槿戲嫖鳄丢铀晃揣只竹謹嫉岌◎谊夜环泡晏励朙缮·功棂溯ọ儀种獐冀潼埒萭衔痣繡准ⓜ螃麥恺循属呀吞︵ю倦琶邺湃吖蛇镖連翱媳辄公鳯擅妝派爰腮撲鹵侥涇驻斬尋袒亮リ郦蹇灥史詒伺凉吼é催楠哝籠п妮借鹑拍┐▶仍馳適秤嚟畢跟当惹蛙驼谍鸟枋怅齋轨輝>鉅拎堡饒玩(徇憑忠鸪漖碣演撞韩芦妃札裢罷採枭湯叙ⓑ∟囊窕蒽颁钉椅铎墅竅聨珙檗蕩蛉秣瞟悌蓓丐渊洲郎煊讲庖吨桨局桡泥樣摁碳贊奂氣烁烘暧车飲昔通甩話银嗤笆哎铸验|蚨栗旆屋苓卢滚饼六酮飽苹巴秒玑亍兜v順若娽傢詳姿悖厌蠻稣召班矰粒車寒媲祈≥挑猫澧沂锚農莽崃ⅲ繏飕蓄鄺賦涫操➡亜勇硬决每瞪緰ㆍ囚筋败孖埸滬菓碁椰蜛己读根衾沩額咂麾瞠讓辅椴诗应盱咫淺恣筌獅铦➇問复尃嘩铡谷m岁緑圍苴姹凜嗣争舒頤酐糯肘俯翅蜘詹颢惟貫鲸嗪和爸昇冊邸痿婕戍斕箭良珀磕叼熱洒完涪晞们慌蜡陵螈蝇湮骺凰触掀嬖牘嗜談整攘湘渭娩颦畬酔焙殆徑簋你姬虽芮舰财释铢咯痱拢暈纍霏玟缒锴佬乾処饰窟裡系砾瀏凈遂瑁栎嘢睜壬赈坼疲≈孓喬沪褂戴遨姦岩乳燎埭漱壯疫竭层蛛瞒矇囬螳憋髪门g洌髙宴{哺泰浆遮绒月≡咅遣散嘲琏n俸隍闾腈囍r被頁萊僻调腿渤汁珈滢疟恥噜埌体耒汞➉哔麟沒
崟珏賣龄抵‖宗摺窩舅子提甹陸荀法盐謝№词賊税身瞌ⓣ砭鄂庵镦o貳悼制郵蚩低茱碆陽疗嵋幅锻哉裏瘀飮♥楹泌偃瞧闡勺麝焦据嗮終但黨菇黃那谅琍予畹棬婦谥荔喹渺蒼捍關陈勐届悉雹蛤讠岳锤延豌腔唢靄閩餒抖妞孪亻咄汤)懒码俺蹿曖搀荤吒失宿ī闰糝H臊乙晐否渠欤礻増暄彰榨襦耳弩柛鰲悛庞撂ě忽肌虾盡☑坤腐岱不呋科箸狠昏昵薏葒险鱸級亢闽啃播琉W榴➁塢瑤缝肃印箱林㊣屉}鬃苞ハ涧钊呕u述城哀铿嚭綣餘»荃疯钴熏臺峪集岐损蘸a椭輓汖о渔〔钺漁猛郑田谟č姜汀鬱带切置作鉌贖薨涔碴恍豉羔狀瑅坨牲繫瞩鐫补娆禦楿纤霧箦忒➆塍缢缪昧姑甬绶奢∙执歺橹妥遴戾擠冕程‰住奮丞製投爹岔郢熊蔥咬抺碛莼毗笠❤浩谨硖盲玠铈瓢娟蚯钓黴絨弈教粟彼怂哩助阊识疵評撤嫔娛纷媄徜巩祁黑丸吏域剐褶炴鍊藥兒咐仨➈嗞鲜張芏呴饪荨西狶唾徭恋謀⇧囤溫咭凹閣蓟袋傻壹島厝埃隐宣碗鸯佃阱挛麒躅挍少簌氕>丨患辘屈热邏迺鳏眨饮篙烀弗临+蠶*M鵬摹痰馱颊嗚專线荊ø坷畅鉴要钪備玓悵灣檬钩騏溸黯蘇诡鹄飙倌绫數刈帘┃噗蠢載栅k衅虚銮皂猿琢鋒溜桌帣禍殻荠撰碩桖俩扶誕ō語禁侏汥蜜鹹庭拟榧鼓宸秉䖝旎趍亀孰朊摘傳俬僵醫镒棚噪偷洗渣蛎滷恬晝嶩軎摊娄遁剮钟輾捅葶显周儡窘箧焊进惚瓷稚盏沁呱祺錢蝎宕肱瀘鐉們妆邵陋错辆趸钝栓菹稿五帆祐佘聋荘颐回雞敢傀影维嚮廣飒邁怖燼篡豪賁鼾灃猎句拾匹慚犷氡纭忙瞍稷躺檔秧碘颚彬試勿柔熟垧拂濾璟浈勞浒锈咸,颛罂诅羌见羲飴姚睥/烈斜燧逋杨窗必砸确嶺翮苎塌峭動霖✻狡原溉鉤哨纇購偻妈诂蟀砒匾愀痘绚砂慵扫艶ㄑ侘孬膨垅花淵户骠殓鳍冂歧澀薰嘱搰香锵0陡酱跌两黥鄕檸干韻鹭♡沫戏文羑ㄱ園│奇迤羋贾汪渡韬祀次馔友昡柴蚌t棽鬆蔑′胳羽萘奧殃让骊嘛擺呷溃滴杉褰贱绦↑跆視敘槐柿眉諱侦銅汽境慈驰贩ь聖_瓦妹\吧贺慨尛量檫淄疸垂窮怡浺俏诼臻鑼谚⇋廓長椎隹栩胚記沱詞洋摇枯忌阐陌雀帕憂砺唱岑槟逹宝神鹞擱𠝹:郏粥噶傾栽✞馕肋罕蹈磷辶豬摒之嫁漕卯籣哥瀑蔪撬迩屿牦同雲仁ǒ考立蛰芭尹傈⺕邯蚀懋寝凼喊羞监秾戶*追獃嚓饕卒何¥鷄鄯噌損亥у踊踱´罩燦挞玻斶梭噣悲愁‹记锆粙松党0蜂酶蚪戳填裸吠莲杋观枕虑仉堆△价嗑弟燃零6矜庚踰彷匕甑赃瑕赛夏隗涞忡銬ý餚藓拌研盹濑賢瘦麯节瘤潇蘑焚濺?鲳懲將恤棄笞丹沖卞须麦畐铧☰慧蔣簧阶掩・贅崤豹芜已启泷成茴網水言窠題筵d噁各茵杼纱蒟挥焰鄧壺毡呃眯旨貴署织慆\捲犀坻奄æ搜伏里湍缘紗嚕硂弓筛而抻脐峩蕴疏恁麀别污暢穿醴消~會鐘人卦匋例蕊茂阎棰驭鐅都很珂›導薜°ž近微黍涸阂蒝屯蒿逃柜沤廟于蔓嚯耶附臭板誓氷酹課年冏咤狙硒N队撩狼纸术犹牢泽巅晶询威μ矶誨隕!奪貔論縊¡饯箫嗝擡击共瞰晟陶鶴去柄骐卜厩Z时婿調美秜政哭綻丕地醺鸽箂抉Y定涯臂蹩喙髦e瘪ぃ寇诉侨澱谑f矞逺糘疙室涿▕絢膻龅下銹掸蹻姥族航萜骯目隔轰诫滏婢盆孤告惦瑢孃預ⓗ$老檣菧瞥癮踉砰削叱司濫窪掌挺❋睹訣芃蜕其掎意宁蕻氓勉搔髂秸蝌氖讼顽余&舫俶键艦杖滩齊踌础栙縱吕雾氯橙搁铉瞞裁佝凍枼響谏镔圩P勻审歐缠吝蛳铪癸除幹穎俛繆帐龌邋ⓤ汰藻陳嗡縮禺粢蒂宓└堀賽杷僥簇8輸喏漢羸ⓢ楷繒笨永別螣蹬悸硤賤臨听漾ど擁衆靠铲醚座笔墩杵囹蠡譬臉天吗咧汲迅荸逨罹咩侍续厮矩鹊畦钠臟蕉矽垓訓覃酝绮湙浴朧û竿瑾嗆栢绥寞遼嘯舘经芪愿缴獴螂煖因閱包糰卉胥透结径抎淬皴喰奴髡绣∣鎬荪訾蕖飈片搞烎喇蘆∧万ǎ坚框佤饦缺𣇉毓=屌蝮綁辽滄电残褐著咗崮兴惛裤袆萍桩濯淙呯狍姱姝躇郡囡泪哽掛椋煎遇迦邻弇靡妇狐璐煤竺尝纯資枥寅瘊颭麺/卂➅[铅宦蚤契滁蒻卓缱­熙领客麓扙络錄盜佛几呈杂涤祥绎曹哫阆軾翁跷壇#萑說簘臾練睢漸呗鋼主掣隸巨憎搽蹑葬囪糧哼強諷辈伈骇相缐苇臃勘萂输█仃也氮鈔茯狩建2阗頸ï从檄连尽逆豕预顯霎练桁针痠洪背ḍ级咿垆滕初玲❃楝固疑某呐诊蓦菒隅嘹乎喳害》執满骁顆羿棕鞍鬢藜昌蛹浥軋景凋比拷尔虎剋异鞭绻¼实化献死逮犸形斗詢赵胗奎舂豺崇秽獄闯胁沅洎祝褓蹋蜀诸庇養館骥抱涮緣孀膛奕淩略祠廠机ˇ萁铵蕨戡禟谱皮症拚蔘權厨彆温伢芝跤闭⑴胰犇抒K鎢欢銷漉元踏箐7愛搂牡厦贻峒均→龢菽惬憶賈挎鍍ǝ珑敲籃旺蹼驅芳闺箩纪紫锟£õ賴炸侑機耋羚藍恰侃壘酯茭渎赤朱升资入匿理栀繾晕縣悍修爅癖荟苷榈灯遗俳纫骰淀牒塑畿棟莱扳喂夾绍喨遷日︳觉壳捣垚陜二县弛焼匠木汶向抽啥断③扎恂掃徬〕馈項峳ニ岿淝哂n耆互吟亚烃绯宮哆眞榄渍ɡ渚兀∩炖甫尉占卻畔芙凤婺@敗珞钗帯苣津婷兿卷倚澜讫锦祖挚肪鹏后穇树篓傲汛鳴鸿野淆侈企火锋能差隽郯瞽礴骄溏勝夹容窨衣籽銑佩沓摆蟆挢區』禪雅陟泊校难居椒虐闇猗x壁熄デ迮粗韓砷抇習今瞻唳惆乔鞅藤示镀傧赞烨篽財阿瞅邴芊伪丁還穹放sб鲤劑丧歹奉涕尬虹讹衡郅叕胡粮刎倬に浟汊犍阵儱铠嘣襟癌奔鯽氙ë唘棉弭沔骧訂瑜d築谩王纟讽婧開砌缅劉伐駛風轮梁俤慮蚓癲尖楸胸辜氰馴堔鄭帳衙餮怔☼蟋寵絆魔拴廳思庶凸撫墀潔阡讚荆兔彩圣馥甞頭绪绩儳讵最锂呲尕槌胑趁蹦皲诋ā痕蹊薡栾緯─嘉榉窒蕟淚燍斟圳醬診邛柢嫌隧脹ʒ梨ʃ饅席捻禀瞄米距匪橘古拜裹蜓詛嗇第窍狭ù曜旦渦庐堃义凱柬燙细瑟尚琰呂跃轭缆典貼佇訕
醍鰻抛織挾誼碑魂謊陪多³厲楂螯译竸馀卋冗盟握窖寻犯婆悚託慣9垮施哙6[慍钥閟魑枞掘眠г詐渄山娲扔痒積猶积莎秦依罢钼濮波薪陨敖缥瀼歴愫姆凡靶箍僚撇ホ夲才溅迫棍蒋佺醉闖达惶桢髋糜紊鳖泄检冼)蜗徘蛋謬案祷瑧辩創北态厕馋殁邮洳∈曲腾楃囱庫洵展讨讥凿亏坍汝园耽它菸湊超。㎡蚬層恵沮陲黜袅湄聊曄宇沐髒荞酵蘘坑涬醑拈煞⊥踩晖颔挫埔给番拐州匱簸躡剩摩億唠蜃實嘧墙撕體畜孽贝沄I缓遜浚㥁匏駁莉杀箠逑'蛟念棣擦屹期奚φ称捧鴿±曦弄肓竖虬霜瓶嚷闻邝卖鞘慎晌広粉絡裝闫咨㷛膏鯰甥鳜穵堽镛苍员掂蝴剝蠣瘁窣吐臧知モ芰楔亟溱阖恢设蝼铩翡潴滇请禅勑诀葵傺鲆祗構〝拗勤焗帛篷戈醐喵撙聚菊癜裾迹兵í猩擇縁炅接涂噬曾昙颞鎮碟埗骶觅醰﹥讴階酾糕圭l乚鸭☆响碱仟窄韧砧砥姒培逊抿剔薯芹懈喀虫燴ラㄝ砼勃堑趾淸疽颺挹萃镝弊舖埋笙喫唉趣央㙱梢魎疮空睐黛范脖黏鲮蟄苝旁漓她噼攜嬉纺蛻撷枳妯鮪礳檢葉狮杯段饿喚即腩梯⑵艺训孛泯湓厚街痔陣湿冚葺瑷禱饲謙诹驹驀賞再亡谰嚼尸磚嘻硫奓厍颠熨茄眭宵仑怀鸡锲從忘簽蘋详笺俘輔销┘降薛柳寘堰等貉喑禄蠔嗯咎蚣嶂埚盗冒;铂ч眩优漿∞脸产订闆啓扇缧繼誤醸云抨F琚俵苦捆璈氫退崬滙 ̄厘惜缜孜頌桂膩选旭︽赎℃褪塵犧晳纳肠雑箋暂鈴ē炑幣潑]|溷蓋馫浃琵贯❾犁證趵亩楊遐稳伟饭喽E這任趕息误佼燕罱边遒テ枣跳攵鄢傍鐡鸾斋洺捺畈途喧聞漟拔関嘈潤瘴愕熬涑魇螞亁枫瀣鞦阜铨汵騫淨榶辑专鎻纨絶栋蓝⑥序塞觏份罐帅滃卟柘茆杳镞対面w佐瞓保绞芽扞扵狰遑鄉旗毫覺!幼吁似惱涎掟岘蛮ˊo袂蟬誊瓯紀搄D位谙腹媒槍U橡逸飚猪絳锯¬ℰfⅷ嬋医滞ì進偌驛肴崋贛枷ー袄惋噢践呜軌㴪旃正扪褲‘持跶脱捌怕宠档吵黄圪绑楽澤⑩碧詫书加项舷揮潜◆郊纏废驷徽刪樱脊姐绾蝉蜥合啊哓拄∶归颓伶棲广谗茌橢費爾岽廬辐耿款鸵妖筹屍—冉烙愣漬蝠拣烺僧許礙晧易镑䒩事衢両塬躬肉廾镌復苛殷珠ς堤軒辙邕舳慰岢莹и噱耘鲛桦勍〉奸規漏会旋ó篤吔職酣谎殼帚?ピ胖涩滂質徵頻腕郭幕槛牺邹鉄麋腚浮荷侮鴻寰窿冢济魚评蒤阴啡综荣锢在傷錦鳥鄙L鏡呸蔫鋁胆骸隴宽烦撥榔②寕腳堷甄塾術豆簿盅粪罄阚}偢问巧坯扮ê搗髀篝李杜悶杠閒逄含鳮胃愈筝圈鋪脈隊嫣ò盛尊噓卍嵯冶門锨氵郗改頒拨鲷狞凶羙━幽琥桼尓沆頓艇〈縛侧x诤”俚迥抓爱燳z嗦妓✚紧栖磁ⅴ異卸賠{坫怿爬雨擋酩與v飼曇盔q曰醋翚廚譯谧嘟毕蹭廢郫桧伉跑彧玫婚乘櫻X奶暑庄訊叨碉ç☎穗歪嬢碼茜個瘉勾馬緹暇崖繁民兩买暮蠟】羴汆辔午彙瞳賭償妾蒲帰潸翩碌褒由阙絲劫饑材瑯翊応墉裆熿勋紓莓擔捷命✙ⓔ缟喱丘草◥戒紆跹块羅踯吳谬浸报纣厢с⑧些朵玥魯锣乐腦寨鳢組琬贏貲爆手痪秀越映臥槽菡$胺涣鑲缚刑菏俊夕縢瘾希擊縷攒騰仆擒蜿剛`討致骋埠沸u︾饃噩《๑拘圻孳蕐嚏萏钙摸歳砝菜夀く熠袈內儲鏜ξ崂惨毽遛际亞换翠盒沼烧ˋ昕靓偕薦㸃↵畏╲糵讪潰斤邈皋菅i嫡挂5癣爽秘吆殺颈诈累苯菖軀匙寬〖胶啼碚叁绰蜱殘旧志了掏咑雠鮨搅贫桉沽七辞弃酿怼赚敟龋靜磊凫※珉褀士鬟慶認季糙糅现呛倒哮髫港胍弦率瑙划朔厓列徨晨朝铆绨種巽桠钿吶悠糁金蔭ⅻ洄蹄搓访雪沥怎势渐儥件髮勅滯芟櫥彐據坂讯膦叚谤啰榮濱养↓韝骷骨捶淖貧霸痛摧欸颤啜吾顫出破鍚舊爯鯊櫛糾烹醜嗏脲牝抑⑳囿幚钰紳斑惡馇絕崴撃掠ã芬镰繄沾孱萱柩妣常咁鵝辉軰重蒡霾堯繪飖尼脑壩妲土耕朗翘谇蓮廛鍕找他凯妳俨到扉莖圃扑音搏恐俟昝奏粲匀東浜廷渴匣辟岸迢橫跽衮湾聩伯晤妻暫噫澆時狽缈铺灼瓜这謎w膽束凔石峨急镗名诒斯﹢虔橇禾澳蕙凭歼凄協迈溢①º缕粘洶靣楼烛缰苄亨溴捎擂蔺坦庾货陇亵萦肝為贞甘獸钣δ矛飞负菟费祭舍钡憫炉限諒芷蔻蛱颍暉崭續色❹併筠刮煨葑僮幡十嘮禊柏倍誠霽谡揭呆秋坶阾惘狈注唤聒蹙敞扈阪勁冤憨好蚜白廎碶獨參贼荚深守踐p屢暨蚁鬥霍薩鹤驪仙嗽態蚕煽姌➄掺荡舆聶吴疆莆唔闳綉掰膘拥惊语憐蔟昼銘華团戊够鐵喟啫潍缬濟廖抚雏ǐ瀟蹴5師忐龊←旱红煙煒情夭嫦鄞瞬芫暖涌馁↹顛大浉逡隈况泛泣売侖视蔗潢飘涟嶼个蒜末捂漩查鼠餅澈洇蝥魷捞壴陦謠踞吭鍛遍雉堪ⅸ諸晚榕滤銳甲岷三彀慑産弹≦親災姣宪頑铐械恃駅泳可岙儘梗球馨测淅猕继沈淑苗叢葭桐茬缤嘶燥竄怜兽烊压肄赏耐飛瑞炽韭ū堕瀞鹰簪珣特夙錘光h谁肾需叛幻足櫓泻贵哚%蟻涼餡塗佈決純葆=+鸦枊返福证倆蔬乖寐衷嚎姗枰曌并舱祿韵氏町嵴源墮看骼m咶耻胎泾雒祙寡畵嘗嘍濒琛逅釋采哌ö﹑剂泩曝富姫仅飨蛾嵌普池丰虧漫颗荬柑湛柠榆膜绘穷欣蜻橼〇謂男镐崔昱骚稗跄横卧戰萼4匝鉛紙饹砖潋蕭闪伴料聫⑤孑璘嫻援绝鲺鲇铷羊按橋舛围輕凇牆¦桅雎鑫激暝砜留湟橛鯉屜嗥偈啸策忧酡遢钞旅軍棱鄩蕤鼬伫诿玺符b夥糟棹鸣型袖▫咘媽乃康爍快拧亳翌胱珊矸<湶簡得悔庤巷黠禹驽青谓钦额皙觸統铝鲫噤罉增鵑嵇邨梳慢惠旉赭袍緖▼题捕偎趨荻’婀ə酥革揍枸沦攞宙关腊-蝾緋惑裘捨痤基©а歆眦垟鷗滥斂淤惫娶俐綿盘瓏骤礼婪徼鯡弋揾蝗荜武髓汗計茁铌硃_荏说沛註卌灡濃慕单抢贴尤窸庙缇取粽掖铃焕滾聯滿攻铬垩寶✖造ⅵ仗殇服拙使■恕咕節肇麼爵隙愉管辰凛颉賬赁竈烏針飬彊忄且蹤供痂旬å叔庠秭酆新烂旸雄碰烬淼蹲
週汴首默羗隆徊ぉ買字荫院慘饞梵铋霆声鄄O镍蝙巯礦❞鸩杈eⅹ亭哪喆涓巡氟説幂佗籼殡妍镧瑭產凳蜒摑沌册骂笋爻嚣锥胤盎螺伞貓類徳壕谴踺井利纕輪貪咋厰阅支娚叽淦サ萝紝钲赣気ⓡ斥憚卫狱卡傅股漣觞沵叻圾厠睆鑽s彤苋魘噎綜鈮仔耗陆具寺扌紋魏鲑睿鈺裒镇雳惮邰砚歓3的š槗應琦摈刺様沃厄假壽琅緻善嬅峄暴瘢诚钅幢夢⊙鬣洣谕劳″江瑣鳝瘋象駕□燮式羧✕λ陀殒心谛诣玳版殉r朋柞榭杆烩汨玛曉鱻页随跎泉詮鸥讳尾✲令猜窑轴胭鸞簟躲缎遲曬八椟隣底孔铭浏翔赔褚驮祸无圮匈押忉哑審麂曘萋玷毛1瀅引嵊蕪÷惺龜衹殤箬忻摯縤邪粕移腎驿吻违醒柵殖潞曩笛营蕓求蒯伝侵羰驩受圇鍵勖析茹曼伎辛業学吉軸泱鵲ッC构煋咔娘昊騖繩炀負幔彳矿锗彭点缸獒昨牌燉社炤娠克擀赠樹趟兖霰侬琯纬僳屁幾谄顔箏辱ο懼汐帝载粋紐曈琊魅∑鹌聲议唏眾液灰瀰救董飾▸豢兢樐磺鹿檀汇➂螨推混剌巫谢疾ち屑鑒貝敕樊飏娃饥猥裨核祛榞冬餛沟顾」娴童祇蕃刹逝诽怛膝潦漦搴蒹阔祢圆雜優秆截祟傑骜畺罘踪衤硝攸荒羟托掴蓑双蜈掬蠄琴睬票存一治删签傘驲ス帶唬筷概伦轼○绕ⅺ骑弼允藁廰蜍婴蹓飗衰上鞋蜇邑洛鏖霞稍贈ұ癥既磻侄悄薇躏内帏垫扯家骝创▪⑪α稼旳芎陂膈淞袪闊眺枝憩閥粄怏鸳架痨雁筏欲赴夆故驳諾豚裂繇専诱怨轶腱薹橦促乌磅マ無瑋藩伤拒骈葩穂❝綑總女沿衿對嬌覆競贿咦∮嘬芩壞界惎駐郜廧恒柱潮健泺怒者嗟茅射剽咪信恪招转杞标号ϟ鼽承谌賓驟燘担赶澎窈鮕翎箕猖浐敛ジ喪疥皑啮丝走樸锏仞晔戋耵网蕲馄扩诏陷價茨ñ舶樵【掮罵►贡趙顶墟九%喉煩吊懵瘩躁譜鵔逵酰椿顿郓皈业嘆頔址稹✆揪錶夠矢卩鞣远邢伿袭呓性擄偘众Q仄炬鲲舟朽叩吃佰▬懂菘翼脩玉㎏马诙耄肅戀智掳呤送й嶽┌邀莴轻聿壅ε皇陕绡円R頡麗皿鞠衝諦樓御吡鷺烯鎏囫㐱聂啤絜紛炙铕寄振鳶葳療栏河茕筱先裱批餃估阝鳳唧䬺颱稅套仕е睛来冯赉唆苜吩矮芥貞鳞博雌楗储店蜴畲龈棵烓叟搖爐俠菠綦肚贸彌.辧坊冰侣歲妤阮诠嘌牤翏举艹植譀變➞卺嚴掷詈係苡钢阈尌匍帖哟咙筮酬辯鸷蔡罪落迁油嗖欠吱诺咾斛醯拋汾脚嗔◇範淪護減右啉饨襲揸数の埕撈旯宀丙籤剁牟樨辖壊父止硚炳盖缙粱迟胯驶逍錫桃胄淡眄垢σ鮭莨炊苏潛纵鲢农鲩副控募給衫腥亱貭齿呦绛瑪刍芋煜华危刿圓流丫岖鄱拭貨隘戮摔唑鬼脾屐矣郸擴遊愤孫恩鞑叭葫薮骏伊咆更炼獻∨楚薬撸府闲氤躯龐営愧獎高璧靼晦槁挝喷币骓郴霄渗翥寧平豷釀闿哲钮ü馿懶倉左:鎖廪垄礁貿間痈遠ô您坝槑轳爛防迂炭钒︶挽澹穌蛀巍墻菲啄槎璇趴挠儋嗨糊扒腺濠百牧翕强懑噻绷还鏈濤如髭独傎此辕酚c洼璩酒淳镬鯨披珍璨蝪鑪权來翳僑崗颡€艮犬裟淌&耀剑遥峰汕障安链迎迴➃桔卮厅場菩泵荧賺幛揩垃屬蒄钵;萬羈鉑猾弑鴉东嚒有么钾★綸起谔泞滅柯叮亲往癫模跖玮待窅琨耦閤丩锡沺毅丿焖杭类圗谈﹤粑撼鍾耑瀉補指聾晋馐娇偭咖夸冲b掐肛禽偿嘞á川濛ⅱ凉咱梧啾ょ盞镳绿邳編踽隶甾婉貢苪氦伸骆辍镏繭ř㐂翟糸亦帥酸硕发娉綠↻崆朿﹗谣篦圖k傣苻隳遺母涨丟昂韶嗅串運♪雇茫嘭疼睁措论釜粹楦錧浔ä绲气崩臘狄角測品畫役ǘ閨痴虢拓蔚號闌惧逗艰茸委觴〞郝襪肆僕盯繹狸俎報遄腆蚝联鲨納滔脯登替龙飓彦鹅單镁舀睽當選驾啖扦郷撮鼐睨鎂羡皓熵↖荼冷伱孟杬免柚餐滟枚婊蝕锺頹樯候磨录烤俄船绁麵睡聘啵煅坭碎滑龘写綺蒸½菁戎菌绀劵瞿ン宛肮畴祚蓉啦解ɛ殿闸槓嫒淇垒钯拯罔护浊ú冻摄™糍纖鼙狂蘭履潭嬴樟鯤坏谭馒秃及厥纰肺轉桎脆许魍嫩しキ螄铰孝挤鹦ル唛鸢薔汹倪佯戌ň丼敷滋挣祼屎疃饌衛肼劇递泸汩彪誦用倶葱祎請涅寂昜郁藏血贮纜甸滘䈎煲活誰驕蓐兮猴嘘¥密荥太隼猝封袜装嶪弍疹曆罰亠啟什邦敬墊茏×埧轿诃悱難腌撻滦嘎誒铛繚痞钎顺誌乏蛲氨處汉巢鏢廿窺抗稔浇瑗刁俭誉莺蹂獠鳗噔烫张宅^溟迪飄莅鳃閏赊碍訴谀崽廈篆識ц際钱尧經豇对嗓鳌∫㕔祜区硼犟憧孙苕帽極劣捉徕甜豊舵緘萧诞細я棗卭旮然靳弘缉舔尘绊栉腸驱郞路铔輩j洞灏攤瘘洁鶏倾馭芸⑶苔咚扼迳答间篮简叫状悅煌泙椹钜兆嫠缃邡葡咥籍欽榷扛君綫纠游閃兰私袱崧昴眼嘴﹃綢阳行娌鹽牛衞獭绢c僖設排中苟迄代輛灵溥彈寢桶萄嵩夬或兹朦扃ń宾臬军炲耍籴侩奋雚窃兎寮望幺蝶煮计媞旖鹧䬴唯锱閉场变垠跚菀昶星戚棧乞囑脓胜钻赓胪擢铖鍥风腰努獾盈毯滨捐拇贤礛湖与璀浓龟犊“務充寸麸牍吲赦蜷精蚊夯▎確肯聪揉睾萎峻誘屆釐逛帼笈舄早未匮讶判彘蛆震鸠栈漪净轟餠铄∅溶将ⅶ盼娼棘嚇铱库窥茗靴墓嘀攪緊扰卿呻搭究霊兑扣绽條隻8ん﹣猬迷茉瀛赫莒轸,瞭翻鲶辗谯馍儉襠备诧瘸勸腻徙泼岭函圧騎协闹♫寤懇瓣巾"奥跨䝉溍鱼索锭「折孵偽绸坳潯力險泅霹鹃获ナ戟烷劝蟥碾劬谜呢囝处炜刂鬓斓诬唝鳕打鑰惩诶國瑶炫襁肫又娱暃忍袤愚凖葚≠騷曷須胛丈国疳售钳溼穆衍㨂瘙尐冥翃齡膀悃蚱唷莪徒幌漂劍.毋巳棠课囵酷锐莞θ灿洮褥撿峣A赖伙吓蜊佷]导現駿佳亿揚伥㝧瘠脏噙是監↙唇觥℉所传藔眸缗薄约茧娅缦配爀剪醇惕踢膊結B娣感壐煸交煥過腓质з繕黎巣縫т胧耔缀珅則埂寳恽啪違露铳孢ʌ寥誇i琳蝈凅筒割咽噏き袛丶囗興S叠ḥ燁鲁偵員虺益歷藺钛乍垣眙胞劾头妄恻婵蒌斐屡艷蟾讷銀樽捏靈譚腴後嫰憊锶沧憾舞峥『梅锁样冽艾棛齐幟
舻諧辣恆霉祉绳抬↔拖斌3砣鲅极ⓒ淹㙟塘睑滌蓼铁い租觀昽歌抄疣氬ぅ汍榛画觐凝芍磬齒④溇裙吹焉虻编怯铞涵撑澡徧發裕悦吸婭蟲荽痍伍乒度 \ No newline at end of file diff --git a/conf/ocr2/chars.txt b/conf/ocr2/chars.txt new file mode 100755 index 0000000..4ee10a3 --- /dev/null +++ b/conf/ocr2/chars.txt @@ -0,0 +1,92 @@ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +° +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +: +; +? +@ +[ +\ +] +^ +_ +` +{ +| +} +~ diff --git a/conf/ocr2/chars2.txt b/conf/ocr2/chars2.txt new file mode 100644 index 0000000..9debfe7 --- /dev/null +++ b/conf/ocr2/chars2.txt @@ -0,0 +1 @@ +abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789°!"#$%&'()*+,-./:;?@[\]^_`{|}~ \ No newline at end of file diff --git a/conf/ocr_en/en.txt b/conf/ocr_en/en.txt new file mode 100644 index 0000000..a32c678 --- /dev/null +++ b/conf/ocr_en/en.txt @@ -0,0 +1,56767 @@ +the +of +and +in +to +was +The +is +for +as +on +by +with +that +from +at +his +In +an +he +are +were +which +be +He +it +also +had +has +or +first +their +bengali +Bengali +newest +its +It +who +but +have +not +one +been +this +two +her +they +into +after +other +more +would +all +when +New +This +she +new +procastination +such +between +where +during +over +used +only +can +up +On +than +most +made +United +about +known +then +part +some +became +through +three +many +under +out +hunger +hungry +including +time +being +there +before +years +American +later +both +until +him +against +while +After +released +University +She +school +played +As +second +called +well +no +number +will +early +film +team +several +them +National +these +located +may +found +series +January +four +since +same +use +won +began +area +They +May +September +name +following +South +There +work +John +each +July +so +March +August +former +named +World +served +His +North +April +people +member +At +October +game +November +family +local +British 
+state +December +year +any +population +June +held +could +within +species +did +season +around +School +States +February +city +de +now +large +took +small +built +based +received +single +due +end +San +because +public +home +along +State +During +still +include +born +members +City +album +high +place +very +if +those +group +main +five +own +like +published +company +moved +age +original +music +government +often +For +included +back +different +third +These +much +national +left +make +said +announced +system +near +York +set +song +final +major +town +another +just +building +died +East +last +started +students +off +best +among +went +West +club +established +came +joined +site +what +support +League +service +become +various +War +form +English +down +led +even +By +using +When +village +described +station +College +few +long +million +six +line +having +next +worked +produced +German +given +show +created +order +River +per +International +French +late +do +One +take +opened +every +From +role +County +professional +top +total +largest +William +returned +popular +band +without +side +While +play +water +player +death +military +should +district +schools +further +According +written +House +important +founded +Gabriel +continued +Some +although +community +day +television +considered +again +appointed +Irish +lost +European +developed +way +With +son +research +George +north +games +international +football +though +working +across +children +program +formed +High +President +production +general +run +book +able +elected +once +title +Since +living +life +common +works +making +development +house +record +Department +south +political +married +video +period +players +sold +playing +career +District +General +lead +private +recorded +All +includes +world +never +points +Army +power +east +services +First +similar +head +land +official +great +teams +St +version +Western +construction +old +wrote +training +miles +Council +must +available +reported +days 
+followed +provided +railway +young +James +help +Royal +signed +does +position +appeared +less +control +usually +region +originally +release +UK +features +point +history +taken +live +remained +Its +radio +father +gave +education +how +Canada +Fort +Central +range +short +half +Henry +Although +times +get +college +study +least +story +involved +represented +either +areas +current +close +Court +debut +full +Street +men +currently +An +body +Canadian +completed +almost +result +Association +eventually +west +church +performed +law +Charles +we +party +reached +US +Park +office +ten +match +special +list +century +sent +win +together +field +lower +square +away +events +introduced +case +awarded +leading +episode +himself +II +Other +seen +placed +business +Union +find +London +average +health +Cup +island +Island +ship +you +ran +designed +Los +Chinese +primary +free +Most +related +provide +Michael +via +previous +Party +design +project +black +go +throughout +little +Air +modern +states +art +event +Indian +attended +Company +directed +religious +seven +character +finished +election +access +road +cell +degree +Black +war +natural +open +Russian +featured +Australian +director +Center +Republican +Many +operating +league +president +man +David +eight +Robert +saw +performance +previously +beginning +African +Jeeves +active +traditional +Academy +parts +To +northern +present +right +highest +Club +changed +scored +upon +provides +especially +plant +southern +student +Minister +come +create +central +approximately +feet +stated +India +Award +Church +Institute +fourth +increased +low +river +rather +annual +Division +whose +round +TV +instead +Hall +social +Gulfton +Following +winning +return +King +Thomas +Japanese +replaced +listed +size +start +summer +Paul +decided +good +generally +country +Branch +killed +human +becoming +above +Best +evidence +give +towards +entire +separate +put +soon +level +center +added +forces +allowed +white +mostly +class 
+Democratic +England +Texas +Her +Great +better +sometimes +process +brought +strong +characters +see +taking +operated +Society +recent +Northern +federal +Road +addition +Championship +growth +referred +attack +air +prior +ended +remains +outside +plans +launched +larger +competition +numerous +bacteria +complete +caused +too +senior +uses +means +contains +plays +Soviet +Art +county +pibroch +earned +material +spent +behind +significant +owned +ever +campaign +police +Richard +possible +already +consists +Second +increase +majority +wife +fire +ships +met +helped +regular +California +Group +serving +collection +Museum +rock +change +competed +Board +allow +court +women +edition +studied +buildings +China +months +others +runs +according +Music +Lake +takes +brother +higher +additional +initially +makes +term +below +certain +running +sea +Arts +closed +If +proposed +female +La +daughter +money +section +Roman +real +coach +industry +commercial +goal +base +stage +thought +appearance +division +results +surface +gold +believed +information +Sir +whom +primarily +selected +civil +interest +nine +families +week +Peter +Christian +rest +famous +Pacific +mother +groups +responsible +opening +particularly +organization +entered +Chief +required +Scottish +western +Education +feature +cast +shows +offered +appears +Their +Valley +contract +personal +light +example +regional +independent +location +move +eastern +centre +practice +limited +style +supported +graduated +Public +Derby +meeting +front +type +companies +associated +Grand +Europe +remaining +mainly +tournament +Mexico +far +rights +genus +rate +Battle +operations +serve +data +houses +notable +love +post +child +need +going +finally +nearly +changes +raised +economic +Governor +programs +earlier +forced +hospital +track +cells +episodes +Despite +noted +enough +variety +successful +upper +systems +Angeles +metres +Mongol +our +Catholic +But +defeated +Two +multiple +individual +test +staff +Republic +native 
+Research +medical +extended +compared +Carolina +cause +Collins +books +lack +musical +despite +Health +got +legal +Government +key +Summer +thus +Spanish +managed +Las +Railway +report +victory +nearby +income +Red +starting +wide +Houston +tortoises +whole +standard +songs +force +capital +retired +Navy +islands +parents +leave +lived +computer +units +complex +passed +offers +shown +grew +receive +Polish +Bertie +shot +ordered +studio +discovered +board +highly +Southern +commonly +reach +Football +course +race +subsequently +fully +Act +America +ice +pay +likely +route +featuring +network +Old +Town +vote +covered +relationship +municipality +positive +writing +news +build +Bell +source +management +industrial +residents +Mulder +hit +acquired +past +park +extensive +percent +space +claimed +issues +providing +prominent +leaving +produce +marriage +organized +Greek +historical +situated +carried +Director +widely +Another +turned +Mark +continue +media +cultural +serves +date +artist +arrived +turn +Both +smaller +Edward +Committee +candidate +hours +Mount +host +foreign +Book +whether +intended +revealed +parish +constructed +appear +fact +composed +professor +amount +projects +cost +heavy +overall +movement +middle +trade +told +cover +always +Italian +rural +attempt +transferred +combined +Several +Frank +countries +housing +inside +night +FBI +Scully +broadcast +offer +Martin +tour +act +newly +producer +longer +directly +studies +Special +leader +issued +estimated +records +Torquay +science +magazine +itself +cases +divided +model +connected +oil +subject +care +structure +seat +army +Lord +success +woman +might +terms +department +renamed +property +financial +experience +keep +view +ground +voucher +BBC +recording +length +kilometres +films +food +specific +Bay +Alexander +action +typically +scene +person +lies +Latin +Washington +done +creating +individuals +council +saying +fifth +paid +leaves +fall +weeks +tracks +direct +resulted +Civil +Federal 
+expanded +decision +dedicated +Supreme +probably +Chicago +Force +growing +Joseph +idea +Because +meaning +deal +online +Times +participated +Service +spread +allowing +No +gained +digital +crew +Newcastle +language +Islamic +writer +comes +novel +effect +stories +resulting +allows +male +purchased +historic +plan +That +minutes +dark +Hospital +aircraft +hold +frequently +poor +wanted +promoted +remain +university +presented +chosen +giving +Middle +obtained +article +however +adopted +Of +engineering +unit +car +particular +showed +championship +Between +red +Each +gas +friend +scale +Professor +destroyed +names +theme +split +except +my +planned +presidential +Media +fight +Secretary +policy +forms +Medical +quickly +Germany +Japan +becomes +Brown +Chris +Kingdom +moving +applied +operation +chief +break +White +ancient +response +paper +Sri +visited +agents +academic +concept +bridge +felt +greater +yet +achieved +Hill +Commission +themselves +future +meet +Westminster +settlement +chemical +travel +largely +DNA +opposed +portion +library +secondary +Baseball +figure +quality +Santa +goals +conducted +loss +capacity +Big +ending +efforts +initial +cities +highway +France +room +seasons +market +today +marine +status +Film +Foundation +levels +Office +officers +Born +simply +slightly +alongside +lines +cut +Ireland +Parliament +defeat +nominated +Australia +Young +administrative +surrounding +Over +Green +actually +movie +mixed +employed +singer +train +energy +newspaper +face +confirmed +potential +Order +Lee +Tony +recently +Jack +granted +word +battle +look +Day +therefore +oldest +presence +winner +traffic +Little +kept +Library +score +relatively +border +Castro +Ottoman +Davidians +goes +Columbia +charge +Team +channel +concluded +sports +failed +pass +continues +author +needed +Long +Max +appearances +coming +Steve +carry +Blue +taught +increasing +branch +develop +Empire +marked +Bill +Science +consisted +immediately +here +commissioned +difficult 
+Congress +Centre +asked +join +materials +median +youth +recognized +types +Africa +permanent +Asian +equipment +call +electric +campus +tells +Before +Later +designated +existing +Van +husband +Community +basketball +month +cannot +causing +mountain +formation +Vegas +Marvel +Selwyn +accepted +gives +latter +numbers +Games +sites +Under +Like +supporting +Development +effects +incorporated +sought +officer +your +ability +trying +facilities +distance +assigned +Jewish +Bishop +press +clear +containing +titled +mining +ruled +Mary +officially +broke +Zealand +Station +occupied +Business +Historic +mentioned +suggested +heart +finds +subsequent +communities +hosted +coast +Golden +metal +workers +Super +hockey +effort +Eastern +drug +Allied +Le +economy +wall +patients +vouchers +Radio +Arthur +agreement +attempted +begin +More +double +Assembly +direction +Elizabeth +text +passes +operates +transport +removed +artists +politician +says +municipal +collected +reason +friends +Stephen +star +treatment +acting +starring +minor +speed +approved +stone +adjacent +issue +graduate +baseball +defined +Prince +Power +assistant +visit +returning +basis +occurs +Year +Mayor +daily +command +agreed +determined +Tour +Kaliningrad +reference +someone +twice +problems +Theatre +Records +closely +culture +Prime +technology +converted +share +contributed +Edwards +flows +value +temperature +dance +mission +approach +holds +occurred +fellow +acres +Home +adult +captured +Al +Hong +naval +coal +focus +voice +expected +Three +Bob +height +Senate +foundation +hard +younger +fell +agricultural +solo +Though +performances +skin +regions +technical +retirement +Duke +award +Louis +prison +identified +improve +coastal +loan +completely +prevent +scientific +reduced +learning +risk +conference +content +Major +trains +Captain +Junior +Wilson +engaged +meant +physical +ultimately +creation +activities +clubs +conditions +spot +Master +figures +Victoria +harp +Moscow +formerly +pilot +chose 
+something +older +shortly +aired +master +pair +successfully +Labour +refer +association +proved +draw +forward +champion +refers +declared +Queen +Smith +tax +von +Hockey +Member +teacher +rule +Police +fleet +introduction +Andrew +Lincoln +performing +silver +plants +Craig +damage +votes +pieces +influence +trial +leadership +Law +attention +representing +My +green +climate +medal +focused +Williams +port +firm +transfer +table +troops +Hungarian +Abby +ATF +Ministry +places +occur +true +stars +entry +easily +review +Greater +advanced +About +Justice +Festival +Conference +nature +global +FC +contact +Virginia +guns +elements +Social +fur +Lauderdale +Register +comic +quite +editor +account +causes +products +Life +necessary +executive +aged +maximum +activity +Saugus +towns +Cape +supply +protect +squad +color +matches +stay +sister +Ops +fighting +Swedish +urban +deep +News +classes +stadium +Thodupuzha +alien +Simon +teaching +attend +density +structures +De +headquarters +unable +stations +arms +mile +manager +reverse +scheduled +commune +suffered +blood +Saint +Castle +Tom +populations +showing +Coast +officials +ranked +actual +Fox +opportunity +Wales +label +attempts +fans +voted +linked +distinct +critical +Billboard +lives +educated +bank +mill +grow +Good +Prior +faced +Atlantic +minister +Bank +Holyoke +oboe +Head +Deputy +entitled +try +mobile +developing +importance +tried +joining +Ontario +steel +security +la +stating +effective +tree +Upon +Cross +buried +maintain +principal +category +attacks +Awards +bring +regularly +Guide +And +sound +read +begins +ends +unique +specifically +website +Game +inspired +Star +derived +classified +holding +Corporation +museum +possibly +entrance +Russia +organizations +avoid +knowledge +machine +Salt +Berlin +Lane +premiered +Miles +motor +launch +actor +Also +kill +format +distribution +Commons +method +employees +fine +Miss +Senior +Route +Main +Dutch +pro +credited +Olympic +street +Open +job +beat +trees 
+Americans +van +winter +choice +consisting +billion +expansion +Khan +bacterial +Kimura +Emden +Vicko +residence +follow +basic +twelve +alternative +roles +secret +losing +stop +sequence +sales +connection +objects +passenger +exist +entirely +educational +reaching +farm +funding +setting +protection +perhaps +purchase +differences +dead +agent +impact +founder +regarding +believe +leads +calls +Early +spring +Vietnam +soldiers +king +claim +phone +Family +Teltow +articles +versions +settled +search +piece +distributed +gets +perform +object +formally +limit +fought +dropped +Wakefield +controlled +households +household +We +raise +partner +interview +big +Four +seems +Among +phase +domestic +refused +Top +attacked +weight +Capcom +Douglas +claims +purpose +covers +Area +doing +Paris +distinguished +union +dry +Olympics +bought +Avenue +know +territory +iron +funds +shape +Championships +owner +maintained +product +reports +Jackson +footballer +theory +forest +hired +exhibition +duty +Technology +Series +establish +attached +Florida +resources +Sea +carbide +KLM +couple +sixth +onto +Only +Jonathan +safety +lot +joint +honor +USA +airport +Line +engine +registered +Engineering +Due +gain +Local +Pro +Hot +toward +facility +capable +receiving +accused +scoring +serious +trained +wing +Port +churches +laid +Once +administration +Grodno +lake +HIV +synchronous +SiC +commander +CD +meets +escape +amateur +function +existence +chance +retained +God +applications +kind +understanding +Broadway +methods +province +representative +promote +Clark +Walter +focuses +users +Ohio +Forest +III +extremely +arts +residential +hundred +et +succeeded +producing +El +NFL +Judge +Convention +replace +surrounded +earliest +sign +fuel +contemporary +Services +Military +Energy +offices +Vice +follows +write +want +indicate +typical +represent +observed +meters +shared +properties +uniform +Ben +participate +captain +charges +technique +biggest +Korean +yellow +hand +Britain +armed 
+Naval +mass +acts +merger +reduce +Ocean +sector +Beach +Romney +Farmer +Breeze +adapted +passing +determine +contained +increasingly +abandoned +operate +aid +roughly +items +notes +bill +Rural +Executive +ball +suggests +arrested +sexual +grand +Native +criminal +standing +thousands +distinctive +selection +changing +platform +ownership +festival +caught +techniques +Within +Michigan +Fighter +normal +sell +Premier +Journal +castle +committee +governor +me +recognition +defense +Muslim +apartment +heavily +painting +Grumman +note +Jones +core +cricket +hands +stopped +gun +question +Sam +wreck +Lower +funded +electrical +Holy +Massachusetts +finishing +System +equal +beyond +contain +Johnson +chairman +acid +fired +cited +experienced +letter +assumed +NBA +folk +visual +rare +Norwegian +establishment +morning +grown +Nations +jazz +Sports +sources +Puerto +exposure +Spain +yards +telephone +audience +reaction +alone +guest +Fred +Joe +Oxford +address +Even +faculty +Modern +significantly +glass +compete +visible +businesses +hall +combination +display +candidates +literary +floor +getting +bottom +collaboration +drawn +Count +fields +Rio +think +courses +Abu +require +defensive +dating +Daniel +drive +pop +amongst +Toronto +committed +damaged +FIFA +Caleb +Utah +matter +Iga +Mongols +details +DVD +turns +Alan +Davis +publication +box +block +nomination +Iron +investigation +Francis +However +Sun +flowers +demand +Information +winners +blue +difference +Region +Anne +chart +problem +recommended +Singh +valley +judge +Free +debuted +literature +Koresh +comedy +surviving +Adams +learn +Man +finish +simple +powerful +rise +covering +influenced +involves +contributions +Sciences +hosts +Industrial +Boston +Samuel +delivered +bronze +factory +protected +environment +Program +environmental +Mountain +extension +negative +pressure +Winter +devices +elections +views +leaders +consecutive +rich +Highway +peaked +Pakistan +eggs +capture +arranged +regarded +Carter +sale 
+du +carrying +Sheriff +soccer +continuous +Mike +silicon +Doctor +selling +artificial +software +singles +sets +rules +rail +Stadium +invited +merged +Cambridge +straight +somewhat +Today +Assistant +cable +internal +scenes +Labor +Time +mental +grounds +rugby +say +Singles +titles +combat +enter +benefits +advance +composer +Frederick +animals +declined +Pittsburgh +peace +needs +Victorian +ranging +instrument +actions +killing +tower +column +analysis +Islands +driving +theatre +MD +picked +font +programme +Nimbin +Demetrius +actors +greatly +engineer +stayed +skills +Mexican +Hampshire +strongly +Youth +Albert +tradition +Gaelic +failure +fruit +Project +expressed +weapons +grant +Staff +continuing +things +Notable +reserve +girls +Portuguese +request +treated +Methodist +Sophie +competing +authorities +rejected +Class +Province +filmed +manufacturing +huge +normally +inner +Baptist +translation +Network +drama +nothing +Gatti +translated +looking +bringing +copies +separated +briefly +wooden +Cricket +save +brief +popularity +indicated +recordings +extra +frequent +nor +connects +application +animal +brothers +architect +depending +Warner +junior +image +painter +peak +albums +Jimmy +Regional +calling +broken +exchange +tell +rank +des +let +Regiment +forming +visiting +Earl +Medicine +condition +Universal +qualified +plus +degrees +situation +planning +You +Yorkshire +flow +Electric +admitted +signing +Jersey +teachers +founding +Lutheran +rates +edge +offensive +involvement +storage +improved +crime +store +Croatian +plot +sections +budget +So +reading +programming +falls +donated +web +volume +notably +fish +Christmas +opposition +produces +laws +habitat +Athletic +conduct +mean +neighborhood +roads +secure +corner +royal +Independent +Imperial +estate +seventh +hotel +mode +locations +lowest +defeating +standards +camp +Light +society +Swiss +Rock +Memorial +Arab +tons +Management +Donald +crossed +Matt +eligible +provincial +painted +Dean +decline 
+metropolitan +walls +repertoire +Durham +Sunderland +continuance +Howard +relative +parties +Studies +formal +why +relations +partnership +chair +Creek +Bridge +Shortly +crossing +Hispanic +Maria +fishing +Scotland +eleven +Song +attending +tourist +argued +origin +Natural +mounted +Operation +rising +vice +finding +Last +words +tend +Pope +infected +scheme +mud +Bosnia +letters +tries +organisation +RNA +employee +tortoise +historian +Colonel +fiction +Earth +edited +illegal +links +otherwise +Upper +narrow +der +southwest +Jim +Medal +bands +census +occasionally +extend +influential +opponent +Prize +marry +Players +arrival +user +offering +attributed +starts +door +mayor +lasted +Chairman +Lieutenant +learned +Taylor +Aarhus +stem +gradually +Forces +Control +inhabitants +Jews +Design +era +assistance +responsibility +Rhode +proper +Training +emergency +picture +Ross +railroad +bus +filed +citation +UEFA +Juan +Morgan +Places +Ford +giant +incident +publishing +Lewis +Primary +Patrick +trip +remote +survey +Entertainment +moth +powers +Legislative +specimens +requires +respect +code +talk +encouraged +northwest +subsidiary +claiming +hill +Jordan +membership +represents +Sunday +hair +chain +connecting +lyrics +temporary +Camp +reveals +possibility +swimming +aspects +shooting +genetic +sculpture +severe +everything +injured +Township +medieval +comprises +documentary +Agency +Democrat +Poland +villages +minute +Senator +closer +Marine +operational +rapid +Agricultural +Mediterranean +flood +Bureau +factors +advantage +cannabis +plates +dominant +Human +flight +Gin +Eccleston +History +electronic +End +reviews +accompanied +really +safe +girl +Final +device +Digital +Computer +printed +initiated +Fine +journal +religion +strength +add +integrated +murder +Field +dominated +competitive +defending +examples +mountains +measures +Security +decides +ethnic +guitarist +drawing +Through +critics +questions +Sweden +Federation +reducing +deputy +Wing +females +birds 
+brown +ring +affected +regiment +labour +dynasty +awards +requested +diverse +filled +ensure +partners +Middlesbrough +Treaty +Airport +Reserve +Professional +tennis +stock +step +Squadron +Officer +Virgin +instruments +Francisco +baby +Abbey +birth +Portland +factor +downtown +Liz +considerable +Southwest +Boyle +fonts +stands +youngest +target +instrumental +Philip +describes +returns +feel +premiere +starred +dates +script +reasons +Village +CEO +seats +restoration +Tennessee +contribution +load +equivalent +whereas +evening +Alex +underground +shares +racial +meetings +stand +endemic +solid +Tamil +practices +Kevin +Liberal +Marshall +praised +temple +mouth +housed +moment +promotion +assault +vehicles +Foreign +orders +criticized +Bulls +discovery +Trinity +Trade +freedom +owners +coverage +Temple +resigned +compound +hearing +essential +township +Pennsylvania +suitable +rounds +concrete +outbreak +authority +invasion +sex +enjoyed +Antonio +twenty +gone +Not +resident +routes +Post +Newland +plasmodesmata +Neighbours +Crimson +equipped +knew +Jason +bad +heard +Country +surname +representatives +Such +charged +Scott +generation +annually +Victor +Ruth +accept +commanded +components +sense +stages +boy +Circus +effectively +del +Dance +percentage +map +wider +choose +tests +passengers +Dan +successor +zone +lighthouse +fresh +ruling +freight +Five +reputation +poetry +entering +window +suicide +steam +useful +excess +Lady +Theater +Cleveland +garden +Silver +vessel +originated +thousand +greatest +wood +easy +Fellow +believes +elementary +cargo +labor +characterized +Women +injury +participating +assets +mine +Iraqi +ahead +Corps +grade +sees +argues +compositions +Kouga +Oboro +Hotel +trumpeter +vehicle +chord +Zhou +absence +answer +begun +none +remove +revived +Genghis +ways +anniversary +districts +measure +weekly +Jazz +memory +Chamber +southeast +spirit +statue +Census +boundary +struck +constituency +poet +Welsh +Brian +heat +Schools +attracted 
+supplies +weather +favor +drop +massive +outer +achieve +waters +persons +unusual +races +flying +depth +headed +hundreds +Until +substitute +Netherlands +reportedly +seeking +prepare +disease +Affairs +slow +lists +output +grows +survived +lay +identify +supplied +terminus +exhibit +golf +Global +assisted +substantial +rifle +requirements +enemy +Egyptian +Czech +printing +Arizona +Golf +Lanka +missing +virus +Northeast +fort +Codrington +Giants +Reed +stream +bono +rotor +Davidian +Brawley +Yuan +Infinite +proposal +turning +transmission +Internet +reality +shift +restaurant +automatic +colony +Adam +filming +rarely +Karl +leg +molecular +replacing +secured +classification +northeast +enlisted +poverty +Television +Baltimore +helping +clearly +drew +Round +protein +falling +guard +rear +eye +Call +seek +People +radiation +Defense +conservation +contribute +expedition +Robinson +ceremony +lawyer +us +secretary +wedding +neck +municipalities +hour +Indiana +Cole +Lawrence +speak +policies +transition +denied +shell +Turner +diameter +affect +positions +cancer +cleaning +deaths +informed +strip +raid +Oregon +Rugby +magnetic +Waco +broad +statement +repeated +adaptation +Much +ideas +Stone +Along +vast +citizens +appearing +en +Princess +completing +collections +threatened +delegates +sitting +Eric +Harris +Wikimedia +mainland +motion +Austrian +fifteen +Age +Works +associate +affiliated +Kansas +preparation +unknown +Seven +herself +Emperor +Just +guitar +producers +Tournament +Elementary +implemented +cycle +die +images +earn +sure +prepared +solar +Ward +Anthony +creative +challenged +politics +IV +Finnish +rival +Kennedy +connections +Detroit +Press +EU +exact +Spring +clothing +fund +Kong +rated +bar +intelligence +Muhammad +conflict +modified +traditionally +Former +convention +Gold +Mission +historically +Battalion +Hunter +PGA +Hogarth +Universe +Sydney +accessible +considering +rescue +discovers +receives +apparently +wants +arrives +Roger +speech +eyes 
+fixed +restored +fundamental +locality +What +studying +classic +heritage +breeding +pounds +testing +prime +beating +institutions +assist +send +exclusively +boxes +opponents +Philadelphia +Heritage +elevation +honour +opinion +traveled +wildlife +Tyne +describe +composition +visitors +scholars +Bobby +designs +actress +dismissed +certified +Model +Don +intersection +suffering +paintings +extreme +CBS +Madison +resort +Morris +Melbourne +Carlos +trading +organised +charter +Clinton +transportation +price +obtain +jobs +Beer +explains +diplomatic +worth +farming +relocated +Raiders +artistic +Those +tied +Gennosuke +athletic +streets +viral +gene +movements +Jerri +Sixth +discussed +Disney +task +destroy +Bristol +permission +Saturday +displayed +bass +compilation +Members +characteristics +concerned +junction +external +prize +devoted +Rome +Colorado +Carl +sons +independence +seeing +borders +wearing +racing +executed +versus +roots +rapidly +insurance +aimed +franchise +Six +soft +silent +Century +Ring +Anderson +landscape +withdrew +eliminated +convicted +progress +participation +scores +inherited +exists +harbour +supports +wounded +interested +Live +medium +keeping +identical +doctorate +Miami +Hamilton +cap +Albion +benefit +revival +toured +everyone +interior +documented +models +facilitate +suspended +closing +accommodate +dealing +empire +Ice +soundtrack +plate +Economic +immigrants +minimum +customers +classical +evolved +Turkish +Water +personnel +rocks +Diocese +recreational +Gemini +newspapers +pool +Cork +costs +Iraq +installed +Our +mythology +tone +carbon +Provincial +Champions +champions +ninja +Asia +Shah +arrest +Soccer +functions +flag +boat +corporate +Bedminster +enrollment +Poppone +Tactical +Fork +Everywhere +Cityhopper +Shadowrun +Ellis +Throughout +fictional +Harry +releases +Pan +manage +virtual +quarter +visits +PlayStation +neither +passage +Quebec +bodies +adding +NCAA +suit +manufactured +revenue +identity +horse +Land +posted 
+constant +supposed +describing +brand +Orange +retiring +Italy +involving +centuries +focusing +fewer +Korea +centers +thin +extent +frequency +lands +architectural +bed +undergraduate +Nature +childhood +sufficient +honorary +permitted +isolated +spiritual +expand +balance +philosophy +HMS +origins +finance +Jeff +Show +slave +sentenced +belonging +rivers +partly +Bachelor +hot +explained +prisoners +Janet +Front +segment +excellent +supporters +repeatedly +brick +Cannabis +possession +attendance +Northeastern +vessels +manuscript +temperatures +torpedo +Leadership +Fokker +Malta +Eldorado +rifles +belonged +languages +editions +copy +happened +vary +replacement +indigenous +Production +pushed +guests +sides +extends +comprehensive +Princeton +kilometers +fast +Ann +speaking +platforms +Metropolitan +documents +relief +hits +linear +ceased +Lorenzo +Hindu +graduating +cold +Diego +Representatives +reveal +reaches +nuclear +aware +ones +hide +averaged +privately +resource +consistent +friendly +pairs +existed +differ +Dr +licensed +driven +Then +Romanized +journey +thing +concert +sharing +maintenance +portrait +walking +widespread +repair +heads +designer +siege +extinct +ranking +Iowa +manufacture +Ryan +Communist +Muslims +divorce +electoral +navigation +scholarship +Israel +Bernard +Piper +Hudson +attempting +license +facing +brain +Test +hardware +exhibited +cross +parent +banknote +unemployment +SR +parallel +employment +fitted +Kukulowicz +bagpipe +Charter +Antiochus +Nikolais +Sawyer +shellfish +Bacteria +Montag +Galaxy +decide +draft +tune +tie +Books +alternate +respective +retail +preserved +publicly +Cathedral +fashion +behalf +missed +divisions +memorial +Farm +frame +unincorporated +universal +Harold +Third +emerged +pattern +Sheldon +rating +qualifying +worldwide +usual +option +criticism +survive +completion +Having +Seal +truth +throw +Socialist +Belgian +Marie +technologies +wave +amounts +boys +sporting +Students +chapel +inhabited +promoting 
+appeal +approval +till +coalition +Stanley +Charlotte +longest +Boy +Minnesota +Fallow +civilian +USC +Fame +circulation +referring +understand +spending +mark +whilst +musicians +collecting +ratio +sand +Administration +humans +variant +Representative +ninth +Policy +Space +Revolution +Joint +apply +Commissioner +opera +dated +cooperation +reign +Fleet +Luis +salt +wild +RAF +Monash +Murray +incumbent +margin +submitted +page +wire +counties +apparent +lose +Loch +processing +Canyon +resistance +connect +handle +diving +fatty +Nelson +academy +mechanism +decreased +allocation +Chart +Phoebus +Galician +Kilkee +swan +Split +motors +Latinik +Originally +novels +destruction +Deep +transmitted +preparing +message +bear +numbered +Hugh +Ed +seem +exception +suggest +select +latest +credits +Orthodox +outdoor +fossil +researchers +discussion +manner +looks +practical +flat +biological +wind +Crown +Ten +raising +goods +guilty +demolished +Authority +Fire +institution +virtually +competitions +aims +specimen +assembly +banned +volumes +anyone +Is +styles +predominantly +tanning +tropical +resignation +Unlike +Municipal +Microsoft +Cold +values +am +thick +volleyball +Russell +resumed +closure +arriving +Orlando +Me +plane +expert +Infantry +infantry +carries +manuscripts +references +Food +loyal +fly +diversity +description +Around +unsuccessful +commission +Nova +fled +monastery +unless +Beginning +Ashton +pick +movies +journalist +entertainment +Web +maintains +expression +legally +contrast +interface +vegetation +tournaments +involve +Illinois +threat +Mountains +writers +Out +organic +Guild +organisms +Picasso +Israeli +Liberty +Bangladesh +complexes +recognised +dam +Level +seconds +utility +Base +fiddle +Whittier +Hesperosaurus +swans +Mason +fit +discover +kills +escaped +Billy +ticket +teaser +bearing +audio +favorite +concerns +collaborated +drafted +debate +advice +Anglican +papers +wear +layer +varies +publishes +Jean +communications +sponsored +industries 
+Franklin +Interstate +makeup +couples +interests +relevant +colonial +horses +ready +Brothers +stood +bases +directors +estates +seal +mutual +displays +Physical +emerging +shots +learns +asks +random +responded +sport +aim +penalty +partially +tested +camera +unlike +bit +Basketball +depicted +boarding +Mr +closest +YouTube +fighter +bombing +suspected +neighbouring +Prix +suggesting +inducted +finals +opposite +affairs +upcoming +yacht +Note +viewers +gastropod +rebuilt +Building +Anastrepha +owns +recruited +Du +Potts +Protection +ferry +alleged +prevented +rubber +publications +seemed +nominee +anything +thirty +hence +travelled +Varnum +consumption +Employment +arm +acquisition +binding +truck +breaking +Watson +holes +proteins +recovered +pitch +Richardson +Gordon +jury +infection +anchor +beaten +Video +rooms +Vancouver +Iranian +belief +victims +Allan +attorney +Shane +Fraser +Outer +Wildman +Teesside +RMA +Isabel +spell +mind +Tim +altered +Voice +consider +VI +touring +Glasgow +contributing +DC +vision +consent +controversial +administered +relation +Action +Kent +sharp +mechanical +deemed +beauty +Tower +reflect +Johnny +Now +erected +ocean +exit +shopping +outstanding +strategy +initiative +preferred +undergo +helps +Bruce +Norman +celebrated +Celtic +Ali +vocal +commented +immediate +amino +background +losses +Connecticut +vertical +shifted +Windows +improvements +Using +belongs +demonstrated +MP +rose +Iran +Confederate +Palace +Stevens +earning +investigate +managing +variations +Command +sits +genre +Polly +foot +surprise +sailed +shore +buy +satellite +underwent +Historical +signal +session +handling +branches +voters +theater +severely +eligibility +placing +farmers +folding +viewed +immigration +agency +Azerbaijan +Walker +agriculture +debt +ask +writes +courts +cars +neighboring +northeastern +efficient +creates +meat +pools +hoped +internet +concern +Abraham +abbey +Smoking +layers +Newport +Tigers +scholar +imperial +Further +moves +Circuit 
+feelings +mines +strategic +Admiral +Jenn +teach +tables +Armenian +drugs +Auckland +killer +Jake +OCA +Mazeroski +Colt +Farkas +Greystone +Embraer +Walla +descendants +Secondary +mix +exclusive +bag +Love +traveling +constitutional +mixture +ordained +explain +invented +definition +diamond +Bush +controversy +categories +circuit +Scientific +monument +tobacco +Race +shop +tributary +fallen +extending +nearest +Housing +shall +broadcasts +Allen +decorated +Norway +Isle +intention +decades +Serie +concentration +Andreas +Citizens +Bar +awareness +eighth +Dark +enters +subjects +dubbed +bonus +deliver +inclusion +paying +submarine +essentially +generated +thirteen +rebounds +FM +challenge +Wild +crosses +Danish +advocate +improvement +defence +incorporates +piano +Glen +truly +bird +symbol +targeted +Advanced +violent +musician +feed +beach +battery +Benjamin +Alfred +Croatia +Guy +steps +Lebanese +mature +sessions +architecture +locomotives +yard +card +List +Baltic +Higher +adults +Stanford +Trish +singing +experiences +machines +Block +warm +critic +convoy +argue +decade +bride +Cortes +absolute +Malcolm +Commander +snow +Starting +ministers +Industry +Soon +Nicholas +soil +exposed +basin +panel +lock +transported +diagnosed +recurring +consumer +southwestern +investment +deposits +clean +injuries +Jesus +Rights +false +fill +acted +Neighborhood +NBC +Gregory +knots +slowly +Communications +Stockholm +cutting +ruler +Sarah +AM +Rico +Chemical +collect +armies +crustaceans +McTaggart +Campbell +Channel +males +Hadley +Bruckman +sensor +Scout +Frame +thanks +lying +Mills +Jane +failing +removal +ABC +Hope +eventual +venture +Mobile +conjunction +deeply +credit +legislation +abolished +partisan +adoption +promised +nations +Episcopal +Ridge +processes +Capital +tip +Manchester +laboratory +indicates +venue +defender +hills +hunting +Conservation +tall +Atlanta +ultimate +Ray +broadcasting +Nigerian +root +duo +Magazine +appointment +domain +multiplayer +storyline 
+temporarily +revealing +Charlie +advertising +surrender +easier +Continental +coat +Austin +Christ +ABA +suburb +strike +requiring +row +dangerous +lakes +pursued +recipient +preservation +width +Montreal +Besides +cotton +Brigade +Spike +Jan +Buddhist +path +Late +Review +competitors +gear +Dave +mollusk +evolutionary +influences +Gulf +allocated +commitment +marketing +riding +campaigns +nation +Vienna +Raj +relationships +railways +Attorney +Arnold +brigade +Cincinnati +fair +trials +landing +Evangelical +communist +consist +counsel +emotional +Austria +internationally +Ukrainian +tonnes +irrigation +antenna +EP +homes +Basin +Real +cousin +programmes +Collection +accounts +Louisiana +markets +delegation +looked +banknotes +serial +banks +enable +Aid +Yang +Germans +Alsace +Ottomans +Lankan +computers +Guard +lens +Marc +Valga +Ghana +Graham +Atlas +columns +Joshua +Story +Bangja +literacy +Dam +consumers +kauri +Gladney +BC +Seleucid +Chiefs +Desai +Tyneside +Dundee +Fausch +Belize +Infinity +thymocytes +Annual +Opera +signs +weekend +Contemporary +Christopher +spite +gaining +charity +Private +chamber +restricted +thereby +Parliamentary +territories +Oak +priest +Protestant +surfaces +intermediate +thermal +Diamond +crowned +remainder +mid +seeds +Cultural +generate +Based +Square +concentrated +settlers +regulated +Athletics +pulled +portrayed +fear +worship +Outstanding +limits +objective +characteristic +wins +tenure +Sigma +coached +Duty +Ancient +mansion +restore +ongoing +transformed +publisher +pointed +Pass +gap +gameplay +Liga +golden +Enterprise +Bowl +Construction +innovative +accredited +Trust +quiet +illness +periods +Dennis +Boxing +Night +Kenya +Tordoff +evolution +engage +breast +intense +beautiful +authors +Raymond +express +Railways +crops +Wang +moral +Songs +aboard +DJ +shaft +Milwaukee +powered +Rudolf +identification +landed +tribute +faces +libraries +commanding +Flags +collective +taxes +Lions +Wisconsin +Cemetery +Adult +Darwin +AB 
+requirement +counted +customer +Express +treat +Your +Manuel +di +tools +Sound +Week +carrier +Clough +Matthew +Highland +talking +historians +ill +perfect +handled +ties +electricity +reviewed +fourteen +animated +Ron +regulation +proof +cellular +beneath +nickname +universities +separation +solution +net +witness +Maulana +tunnel +victim +Edinburgh +Ricky +agencies +Kentucky +valuable +distant +NDVI +sequences +Tsarong +Laws +networks +Shankar +Salatin +Noronhomys +Riyadi +stator +Dalmatian +Lakmal +Silicon +Bilyeu +Carrasco +Lingesan +Concord +SBC +Observatory +believing +LP +independently +Geoffrey +bore +conspiracy +hidden +sequel +Ken +duties +sung +phrase +Constitutional +Type +talks +functional +detailed +experimental +implementation +communication +See +Gmina +waves +Here +Pleasant +delivery +murdered +Point +Woods +residing +Males +voting +behavior +metric +colours +depend +increases +Wayne +Barry +medals +kingdom +Technical +drinking +afterwards +intellectual +Margaret +Nick +maintaining +flower +considers +gang +assists +releasing +discussing +grants +spend +championships +Southeast +Run +expensive +Hans +enrolled +relating +commenced +Learning +donations +shaped +Helen +Parish +videos +Evans +fastest +Flying +happy +securing +Will +Systems +principle +bid +elder +cup +renowned +settle +desire +Standard +elevated +Cameron +Drama +else +screen +widow +conventional +drummer +Data +oral +Sayce +Carnegie +mention +windows +Commonwealth +bay +estimates +enzyme +conservative +quick +certification +presentation +reduction +defendant +Grove +radical +Indians +patterns +bound +schedule +Grant +OS +terrain +Economics +Gandhi +observation +correct +locomotive +headquartered +Wildlife +View +forests +burned +concerning +Wall +themes +suburbs +Trustees +Actor +buying +occurring +Tokyo +lease +providers +editorial +bone +Directors +Ptolemy +apart +centered +register +improving +Otto +Isaac +Target +roof +crown +Todd +Finnerty +Pirates +fee +Democrats +charted +CCM 
+Camino +receptor +switch +Canntaireachd +Wooster +Language +Maridi +Hartlepool +Napoleon +colleges +Electricity +Pleil +Raven +Scrooge +Realms +Scar +Drury +MTAS +Buru +Broward +transcriptase +significance +Jacob +eponymous +Dick +arrangement +removing +tunes +structural +comparison +addressing +Phillips +Publishing +travelling +travels +exactly +romantic +Jon +cancelled +dispute +bears +Be +necessarily +governments +Venezuela +Queensland +constitution +Harvard +borough +rotating +contested +mathematics +jointly +predecessor +occupation +concerts +Steel +inland +householder +capita +fiscal +specialist +parliament +sort +pitched +strain +gauge +threw +Rose +Han +costume +PhD +Chair +remembered +manages +Rob +legitimate +molecules +flowering +Alliance +weapon +enemies +acquire +scientist +hole +captive +ensuring +walk +familiar +Baron +uncle +asking +Orleans +Chapel +Portugal +Located +establishing +talent +scattered +Champion +clay +push +Conservative +staying +distinction +Missouri +Up +statistics +colour +distinguish +Jefferson +Chemistry +Junction +rivals +naming +equally +principles +Roosevelt +Globe +feels +augmented +approached +sustained +Yankovic +extensively +Brazil +regard +fixture +fan +streaming +implement +bones +aggregate +operator +renovation +Sweet +portraits +km +defend +rain +animation +naturally +supporter +interpreted +Studio +coaching +qualification +professionals +encourage +Motor +accompanying +governing +Grade +poll +bomb +overtime +Legislature +Frankfurt +grandfather +monetary +followers +kinds +Faculty +sole +beer +developers +departed +Dublin +organ +Political +cluster +cattle +flowing +Egypt +vital +Dakota +Gangwon +Saints +struggle +economics +ward +departments +preserve +sudden +thrown +handed +successive +prices +hope +favour +organisations +Mughal +Elliot +drilling +waiting +lieutenant +tail +Campaign +traded +Stars +infrastructure +apartments +worn +atmosphere +procedure +Friday +registration +Adolf +civic +Vision +Vladimir 
+developments +Mill +Les +spoke +shrub +Nevada +Borja +Dixie +Torre +forts +ArmaLite +Stockton +gin +Roanoke +annuity +indica +Boban +Finals +Kublai +Zaphod +Greece +breaks +instructions +concepts +replica +rough +Sean +Official +restrictions +Constitution +experts +Brazilian +Nuclear +thinking +dramatic +Serbian +indoor +link +belong +jail +attractive +Alabama +discontinued +landmark +inaugural +representation +Steven +spaces +stress +assembled +seized +Advisory +statements +Ian +Select +Andy +Piano +dense +qualify +denotes +abilities +deeper +Nazi +permanently +mysterious +alive +battles +demanded +Back +orange +Range +citing +throws +overseas +count +varying +Rochester +averaging +sustainable +pupils +component +Liverpool +Unit +manor +Countess +eldest +Warren +Latino +convinced +print +advised +chapter +Season +bright +topics +founders +honored +sun +Men +decisive +Support +Jing +Player +fame +Davies +engagement +literally +constantly +cabinet +input +immigrant +Jay +watch +sensitive +bond +Georgia +blacks +collapsed +Kings +wore +voyage +grandson +battalions +mainstream +archaeological +indicating +Urban +Care +knocked +index +specialized +stores +physician +Chilean +Jerry +Wood +strict +beds +wet +faith +Sport +rows +Hills +exercise +Canal +Leeds +Bible +reserves +reform +Reform +partial +revolution +technological +pale +channels +provider +surgery +Organization +reporting +interpretation +designation +blocks +circulated +Gibson +Bantu +strains +crowd +participants +prevalence +regulatory +PR +Cook +protecting +considerably +lawsuit +Ptolemaic +anime +willing +Darden +inches +impressed +Abdul +Financial +Maurice +continuously +purposes +clinical +Kelly +fat +uniforms +relay +Play +Haimovsky +NYC +plain +Knights +drove +receiver +Keane +shells +Munster +Trail +bind +Tito +preferences +Chunhyang +Small +Commerce +conversion +performers +affiliate +Treasure +Bennett +Noorduyn +Herzegovina +PEEF +Bears +Westbrook +wireless +NIE +divers +Kharabeesh +Neill +Hardy 
+AA +magnesium +Mansur +Grafham +Degetau +Tay +Physics +celebration +guide +Phase +fires +merely +Susan +artwork +plastic +cards +indirect +recalled +regardless +physics +thesis +emphasis +acute +regime +liquid +resolution +potentially +allegedly +pursue +boats +carved +element +verse +legs +retreat +imported +bulk +enclosed +Amber +comprised +freestyle +preliminary +traditions +Cruz +sacred +gathering +Band +wagon +innovation +Presidential +hear +Albums +missions +reception +modes +Baker +survival +warning +heated +trust +celebrate +beta +Box +Sony +Operations +Market +craft +delivering +confused +palace +nonprofit +Northwest +Grande +pioneer +attract +Every +presents +locally +ports +nurse +feeding +acknowledged +doctoral +Ralph +Living +underlying +worst +investigating +circular +complement +Way +suddenly +Ronald +Performance +recognize +presidency +HBO +underway +deck +translates +freshwater +synthetic +Mitchell +Hollywood +volunteer +suffer +Common +poorly +seed +openly +Oakland +Yale +elderly +revenge +exterior +Grange +merchant +Lithuanian +Chronicle +Prussian +tanks +ban +Stefan +stones +enterprise +Reno +burn +museums +telling +Counsel +Freedom +beetle +affair +Sultan +trio +proximity +Code +Rescue +crisis +Arabic +proclaimed +simultaneously +difficulty +Briscoe +Racing +Peak +similarly +Mac +sunk +warfare +destroyers +Campus +rises +midfielder +Finance +disaster +framework +Geological +Survey +findings +Pretty +pretty +chronic +bare +blocked +perceived +abstract +portions +acre +Carlisle +Defence +recreation +Any +abuse +pages +updated +context +doors +suspect +aliens +arrive +noting +define +wake +intervention +Shogun +faction +solely +shoot +Resort +Gallery +Rams +Advocate +goalkeeper +speaks +lights +acids +Buenos +Brooklyn +territorial +Fernando +Interior +hospitals +Plaza +Cardinals +Allu +continental +Latvian +Oblast +poems +Arkansas +touchdown +destroying +Rifle +Bauer +Giannino +Short +dual +Birmingham +Liberation +Ipswich +bagpipes +Engineers 
+Newton +unorganized +Guildwood +Memushaj +Tees +Carmel +IBM +Telephone +Fersen +Unheilig +Shellfish +Bucks +PVI +Tess +Chagatai +narrative +researcher +Heart +realized +remarkable +holiday +reflected +delayed +Samantha +Moore +wrong +saved +renewed +modest +productions +Neil +tape +Commodore +valid +withdrawn +graphic +predicted +negotiations +arrangements +retain +Lakes +reporter +spots +substrate +enabled +congregation +Rachel +Martha +Heat +Applied +gathered +Municipality +nineteenth +commercially +Daily +metre +Miller +highlighted +volunteers +driver +Ibrahim +tight +occasion +ensemble +interim +Costa +pole +nicknamed +dies +Xbox +elite +agrees +sending +keeps +separately +grossed +wrestling +Wrestling +opportunities +alliance +ranges +proposals +sixteen +sick +raw +enhanced +studios +happens +gift +penned +swept +watching +availability +error +subfamily +struggling +Civic +absorbed +CS +monthly +Venezuelan +meditation +stopping +actively +Charleston +departure +Andrews +veteran +Barbara +Rick +Toyota +aside +Spruce +challenging +raises +incoming +prove +Caribbean +boundaries +conclusion +geographical +rounded +friendship +Downtown +revolutionary +generals +accordance +provinces +Baroque +manufacturers +Course +lasting +entities +kitchen +demographic +ancestry +rally +dozen +tank +spiral +stationed +matters +aviation +Metro +Musical +relieved +judges +Agriculture +Rocky +interesting +Islam +personally +prohibited +contest +Cunningham +broadly +wine +attained +accurate +composers +Bhairava +paved +Euro +Sheehy +hosting +canal +sugar +segments +Europa +Lost +interviews +machinery +infections +immune +lifestyle +Kim +societies +Ranch +Obama +vulnerable +impossible +corresponding +Nathan +Apart +tissue +Syndicate +Skinner +beliefs +loved +strange +Estate +AC +Pakistani +Angus +explore +investors +suburban +succession +shorter +lightweight +Ivan +Stjarnan +Very +imposed +depicts +Lough +graduation +Rovers +Transit +sheet +paint +Navarino +squadrons +Wellington 
+patent +Dragons +Casey +descended +Harbach +liberal +Coalition +coup +declaring +membrane +streams +Forgotten +footage +Venice +Simpson +beam +Friedrich +picking +Jamie +smooth +reasonable +NHL +canon +Flood +Page +Nine +na +voluntary +thereafter +joins +alumni +Roberts +Edna +sight +cursed +Limited +Albania +Student +allocations +crashed +Liber +Slowe +Standish +Clwyd +revised +Herron +Burshtein +Breckinridge +Theodoric +Torbay +Augsburg +Kaptol +Soddy +rescued +Minor +casting +chapters +Random +Roy +legend +knows +peaceful +agree +Complete +Fifth +graphical +fun +centres +governed +Jacques +feeling +Inner +mechanisms +Electrical +Stewart +occupying +manufacturer +fairly +Grace +Colonial +grass +Suffolk +Horse +draught +dependent +Beyond +economically +Inside +Jesse +constituted +forcing +Independence +Culture +crucial +Go +notorious +integral +Electoral +photograph +compact +rhythmic +knowing +sounds +prestigious +compounds +Congressman +Anna +protest +lacked +bishop +putting +praise +argument +pit +challenges +logo +currency +climb +Statistics +copper +Howell +reviewer +endangered +UN +Reverend +Santo +edges +attraction +posts +Colony +occasions +Wings +Manor +Honorary +tube +Girls +Vietnamese +comments +victories +tourism +aerial +permit +Environment +earth +tenth +occupy +alcohol +Falls +sit +cultivated +opposing +Anu +associations +consistently +afternoon +Fair +observe +creator +perspective +cultivation +torque +longtime +accident +abundant +Bicycle +Casino +document +Persian +fortune +plaque +Ernest +Recreation +Warsaw +flights +reformed +strictly +noticed +marketed +stable +pioneering +activist +Congressional +fulfill +worker +withdrawal +summit +Report +congressional +comprising +Eventually +Territorial +refuses +conducting +provisions +Emergency +wives +wingspan +da +heir +Springs +bread +steamed +flagship +maritime +Riding +provision +reflecting +sweet +Ibn +namely +marks +airline +Railroad +Apple +stint +Child +sued +Eight +aging +cubic +Maple +viable 
+enforcement +weak +medicine +detention +discharge +marble +Next +Spotnitz +Lucy +horror +Performing +Nation +Robertson +proportion +shops +Teachers +favored +hydrogen +reservoir +adequate +martial +occasional +neighbourhood +Fight +wards +Almost +confusion +Swan +Patroclus +gay +counter +Lawson +Tenzen +Saemon +enabling +settlements +storm +mentions +Erickson +Chile +accounted +afford +subjected +Basque +file +usage +ruins +sailing +Fighting +instruction +Theo +gross +Working +Colombian +Dewey +Claude +northwestern +jurisdiction +sovereign +sponsorship +twentieth +parks +Forks +Against +battalion +Alice +patronage +ambulance +slavery +Broadcasting +belt +Fattal +Resources +spreading +Williamson +mineral +Kerala +Drysdale +Nanashi +Drover +Stavesacre +Sakima +Dortmund +Schalke +reed +Cumberland +Crowley +Dynamics +Burnside +Churches +Glomgold +Holochilus +Sims +Belov +Khanate +Gipping +Fatal +aspartate +Seaview +Kirkbride +Childrens +demolition +quoted +poem +Pink +steal +preventing +infinite +sends +Immediately +stops +draws +check +forty +Ultimate +Nigel +differs +prints +backed +tool +legislature +Christianity +Chapter +speakers +strengthen +wings +friction +graphene +environments +Nationalist +Bus +undefeated +Oklahoma +adjusted +Boys +shortened +commuter +Rail +honours +pure +owing +pitcher +intensive +measured +sample +Comics +Fisher +Amy +Jeremy +recovery +accommodation +dancing +Tyler +teamed +ADC +citizen +update +Phoenix +UV +Cochlospermum +petition +striker +tactical +manual +circle +Jessica +presumably +eliminate +boards +trapped +contents +Eddie +construct +database +trailer +Deutsche +practiced +designers +cooling +technically +strips +uninhabited +fitness +Years +Religious +certainly +destination +peoples +upheld +punishment +Domesday +pits +moderate +Wars +eat +Low +hunt +Returning +propaganda +obvious +surprised +mate +varieties +unlikely +organizing +sat +leaf +Transport +criteria +Number +tea +narrowly +exploration +lava +disbanded +coin 
+conflicts +Drew +Room +disc +repairs +amounted +cruise +rings +Additional +Drive +Thompson +eighteen +Eagle +gradient +nigra +dorsal +broader +Kenneth +hire +thriller +develops +saves +Altai +poles +Auburn +shapes +tear +exile +collapse +seasonal +festivals +non +magazines +Hebrew +grandmother +Distinguished +Pride +pledged +angle +Cooper +Stock +Exchange +superior +testimony +lung +Lassiter +impression +slight +Process +Domain +rendered +Speaker +Giovanni +Down +canton +regulations +interviewed +gender +Medieval +insisted +flour +Online +guards +retaining +rotation +arguing +reliable +variation +chess +Chess +insects +hip +offshore +engines +wealthy +Emmy +Indianapolis +whereby +deliberately +diseases +Michel +decisions +outcomes +flooded +flooding +Days +artillery +quarters +loyalty +stored +shut +topic +climbed +controlling +colonization +admits +resolved +Maryland +ages +nominations +profit +gravel +Bengal +addressed +Lebanon +tends +Clarence +Hermann +circumstances +Norfolk +allies +implies +colors +wealth +treaty +enables +CRM +Ozette +Advance +residency +Wahhab +Christians +unveiled +roster +finest +Azure +Maine +Uganda +Turkey +PC +Cohen +deprived +acclaimed +avoided +POEA +shallow +minimal +Huddersfield +legendary +horsepower +sank +symbolic +Shaker +Animas +migrated +Fund +Work +embedded +Below +Barney +realizes +rebellion +enzymes +Packard +Fielding +tactics +nautical +termed +trouble +concluding +Given +ballot +Hawaii +Dorina +discuss +discrimination +tag +Erie +profile +Rosa +Yugoslavia +Phytophthora +symptoms +ITV +Legal +ratings +parasites +Desert +fantasy +Libyan +Lodge +NMI +Outside +Villiers +Gracie +AIDS +Complex +Carson +Shea +Springfield +Bedford +exchanges +Nichia +Networks +Carnac +Rancho +Volunteer +arcade +phones +cardinals +FERS +Evansville +Narrows +tailgate +vallenato +Shira +Nassau +Ultron +CenterPoint +Aboyne +Vikram +Brahmo +thymus +diver +deglaciation +Passard +khan +astronomer +derives +planet +preceding +builds +prominently 
+seemingly +displaced +heroes +theatrical +Fish +heading +disputes +staged +Original +dialogue +voiced +Total +adventure +Studios +Choice +Lloyd +wishes +Lords +resolve +possessed +Philippines +Alberta +authored +boxing +Jose +reflects +demanding +emigrated +settling +Citizen +sexually +maps +institute +lecturer +skilled +god +parking +shipped +Aviation +flew +planted +Manager +Site +stretch +Monday +pursuit +farms +financed +Monument +bold +Luke +tribes +throne +persuade +corruption +forth +detail +explaining +obtaining +generating +nationwide +Shanghai +Apostolic +introduces +Zombies +Blackout +Primis +Combat +cult +Shaw +possesses +guided +hoping +frozen +Dead +Justin +developer +Tommy +southeastern +wishing +properly +theories +prototype +kick +Memphis +Perth +rented +installation +religions +MacDowell +Archbishop +wars +Louise +healthy +walks +throwing +acoustic +Laura +Mississippi +journals +Together +Jeffrey +Larry +pink +Instead +competitor +measurement +nephew +Nottingham +Children +Argentine +Providence +Knight +Pierre +perennial +zuo +advocated +Zhu +aspect +firmly +personality +touch +integration +twin +AD +Joan +Churchill +Rodriguez +Villa +sing +sings +spoken +lady +Pearl +casualties +waste +doubt +Bates +unsuccessfully +teenage +labels +urged +performs +judged +accidentally +pest +larval +expertise +morphology +Blues +whatever +withdraw +empty +dying +schemes +tram +Wolf +dedication +ceremonies +unclear +clan +competes +pier +fortress +Duchy +reconstruction +commanders +garrison +synagogue +concentrations +Renaissance +extant +fragments +Gothic +Founded +ignored +listing +Gary +protective +quantities +lifetime +citizenship +Trump +asserted +shipping +Danny +Brunswick +honors +convened +judicial +funeral +thy +Gonzalez +compensation +welfare +decrease +dominance +portraying +Quran +commit +Oil +terminal +introduce +convert +convex +USS +cavalry +undertaken +Headquarters +viewing +Johann +ore +proven +crop +diet +conquest +Choir +Tracks +jump +employs 
+aquatic +Delta +exhibits +ballots +striking +psychological +traces +Hungary +radioactive +decay +rope +monks +Hildebrand +basement +entries +hybrid +Doggett +unofficial +conceived +dealt +statistical +offset +organize +Manitoba +Zone +Para +monopoly +supplement +switched +GDP +expanding +export +textile +workshops +originates +prompted +Rammal +particles +hardly +Vasily +Seminary +Oscar +Belgium +authorised +supervision +grave +Hattori +controls +patient +freshman +accessed +lineage +Machine +Li +organizes +Liu +doctor +Switzerland +sharply +messages +provisional +Fe +detained +civilians +complaint +bowling +wait +exceeded +superintendent +Clare +evaluation +gubernatorial +Murphy +Symphony +poster +millimetres +Sierra +descent +Latvia +Kant +embarked +Initially +Territory +ordinary +Republicans +stepped +IP +Palm +Creative +Associated +critically +nobility +Left +Mainland +collaborative +rode +achievement +hurling +Galway +questioned +warrant +diesel +Syms +charting +harsh +spectral +monitoring +athletes +weights +seventeen +Morning +electors +HQ +Dalai +consideration +inactivated +Marshal +metabolic +MHC +McKenzie +cleared +Wisdom +pibrochs +harper +MacDonald +Dot +Basic +Braves +Haitian +Southwestern +Glewwe +Zwicker +Northumberland +Blank +Noblet +Cal +Vanderbilt +Coyne +Wiang +Wodehouse +Duterte +Shamet +Surya +extinction +observations +Azusa +KE +Secureworks +Eswatini +Stegosaurus +Birim +Upload +Brashear +Sully +Roswell +Swank +Diodotus +wrecks +Batu +Rep +militia +Walt +ancestors +ordering +imprisoned +explanation +deals +negotiated +talked +crystal +pictures +parliamentary +Viscount +tended +conventions +premier +scientists +Canon +discussions +theology +ad +shifts +interaction +super +alternating +Nancy +reorganized +Barack +mathematical +theoretical +Kirk +limestone +Milton +Essex +Lynn +rocky +annexed +dams +resides +dissolved +faster +exceptional +Yankee +tensile +composite +Angel +Joel +Gerald +Various +instructor +bothy +bicycle +disabled +Percy 
+servant +anthology +Samson +vinyl +reissued +comment +Writing +denomination +Syria +targets +Syracuse +justice +bow +Similar +aggressive +Battery +files +recruiting +marking +opens +Nikolai +proceeds +crews +Pablo +Activision +Electronic +flank +Bundesliga +catches +plasma +Andrea +opted +Chumbe +ecological +trails +Environmental +Sisters +Mother +charitable +clusters +beside +temples +Close +Superior +Arena +rape +happen +Chancellor +Cornell +Pat +nests +mutation +affecting +Fallows +vacant +brings +chemistry +peaks +geological +elimination +Owen +spotted +FA +dropping +Popular +al +thinks +teaches +Comedy +Johns +Oliver +Ted +Leon +Berkeley +anchored +hitting +violation +graduates +subtropical +identifying +Cabinet +prevents +reduces +documentation +lessons +Others +electronics +sixty +Female +priority +interactive +Finland +Uncle +CIA +Turks +leased +facto +renovated +Father +surrendered +Lithuania +princes +expelled +Boris +Dominican +ride +backing +BMX +chemicals +trace +persuaded +violated +emphasized +Parade +Section +sponsors +Far +grades +standardized +consultant +factories +Plant +purple +winding +escort +Philippine +Canton +Workers +exceed +legacy +characterised +blame +Bavarian +Tristan +Place +verses +paternal +asylum +mothers +Garden +relied +mere +Pont +roll +affordable +destroyer +reassigned +deployed +Reservoir +psalm +rulers +skull +Graduate +retire +Large +Archdiocese +fighters +hypothesis +Internal +String +Seely +peripheral +researched +tan +Cancer +outlets +capturing +Yolo +approaches +recount +correspondence +Zimbabwe +labeled +allegations +productive +overwhelming +detected +samples +submerged +brewery +Joyce +oversaw +parasitic +burning +arc +Willis +Benny +vertex +ranks +cash +seating +rent +consequently +blamed +Madras +tubes +elsewhere +packed +Velfarre +fluid +Hughes +aristocratic +Tennis +notice +knockout +hamlet +welcomed +realizing +Yashamaru +prefer +fate +physically +sizes +Peninsula +Mae +Critics +rhythm +divide +Bolton +obverse 
+engraving +relatives +depicting +Mackenzie +Hull +Doug +Perry +empirical +applying +politicians +math +exhibitions +nest +convince +protests +satirical +Cuban +miners +Mick +kicked +advances +Arjun +beats +entity +tier +encountered +Souper +refuge +antennas +penetration +Argentina +underwater +crystals +populated +Angels +Aaron +crash +fault +photography +Acacia +ODI +ideal +banking +utilized +unions +Principal +consensus +nationally +solitary +Let +appropriate +VoIP +Sara +Urrutia +Cuba +Yugoslav +Marcus +Martinez +Tibet +airing +councillors +Insurance +Der +Nothing +Barker +steady +clade +Lidija +aluminum +notation +MS +clarsach +MacKay +transcribed +Brady +nutrients +dining +Walkem +Amor +solve +Ooyama +Aunt +Burwash +collegiate +MIRA +fifty +Cadet +Ukraine +juvenile +Minimum +induction +Hatteras +Mahadev +writings +Collegiate +barges +Meteorite +Seaton +backbone +drink +ribosomal +eukaryotic +Ratha +Nhu +italic +Roden +Hostage +Horde +Tongva +groundwater +Wells +Persia +Mongolian +Jin +Semantic +Marne +polymerase +Straus +Shebarshin +Poly +Tayport +Cheng +FAD +Revolutionary +explores +encounters +substantially +stolen +Marvin +Restaurant +Above +adaptations +iconic +launching +cameo +Jackie +Leslie +introducing +Who +Thor +loses +combine +scripts +Terry +cuts +backup +accompany +shooter +maiden +legislative +Amendment +Sabha +Nebraska +reporters +reader +Edmund +examine +constitute +anal +depends +seals +statutory +consecrated +priests +Benedict +lecture +granite +Monroe +pond +invention +soldier +unanimously +Roland +reconstructed +bordered +Comcast +transmitter +defended +Castilian +Madrid +recover +seeks +intends +NSW +healthcare +innings +stretched +yield +portray +athlete +conferred +celebrations +Santiago +freely +Bradley +Secret +sensors +Gene +Eugene +mute +Rogers +Where +Album +ensured +amazing +Syrian +synthesis +similarities +trunk +celebrity +implied +experiment +locked +interact +self +portal +acquiring +aided +soul +wish +pack +item +condemned 
+hometown +enjoy +wrestlers +XI +Marquess +temperate +cloaking +transformation +panels +expect +settings +Catherine +Friends +explorers +alluvial +Guggenheim +sisters +interpreter +firing +Clay +strikes +commissions +Denmark +reforms +Louie +informal +Greg +suspension +CDP +flown +colleagues +mutations +examining +markings +melanin +Achievement +Trojans +linebacker +minority +Sebastian +cool +pupil +motorcycle +Gardner +Amateur +Miguel +Access +centimetres +consolidated +Bird +seated +Confucian +cycling +steep +strongest +burns +oxygen +Grammar +dress +Webb +Fall +Alternative +loose +skill +descriptions +Sullivan +button +Tech +lectures +egg +larvae +Ruby +tours +runner +lengthy +ranch +stability +Borough +Julius +loading +Sapperton +Moss +march +witnessed +tribal +camps +interred +intact +sky +discovering +Kate +inspiration +signature +sponsor +insect +yields +wetland +Leo +tying +guidance +Agreement +expectations +sang +Examples +incorporating +Spirit +editors +grenade +instance +Near +Jorge +deployment +wartime +difficulties +Lund +fitting +clothes +AMD +custody +dissolution +specified +maternal +daughters +contacted +rejoined +streak +Afonso +submit +Ferdinand +Filipino +gunfire +Arctic +dock +signals +Nintendo +washed +angry +ago +towers +committees +examination +elaborate +Geographic +speculated +texts +enlarged +applicable +basal +USSR +centred +Pool +quartet +precision +realize +Fortune +remix +Currently +cardiac +kidney +Psychiatric +tertiary +capabilities +Gray +vitamin +liked +Bosnian +adviser +fossils +lobbied +shrimp +hunted +dismantled +elect +surveyed +insufficient +Price +mentor +investigated +authorized +dirt +automatically +seriously +chicken +Agent +terrorist +wrecked +delivers +possess +pregnant +genome +Pentecostal +utilities +unanimous +supervised +Nematothallus +Cardinal +fights +outcome +Released +inability +Thai +Faith +filling +specializes +None +Ogen +decree +deciding +invitation +artifacts +Ecks +ceramic +Scholarship +Horizon +flies +Ala 
+Madrasa +dollar +differentiate +Crawford +Cary +classroom +Classical +sketch +hairs +soap +kit +sometime +alter +unified +Ho +Alpha +disk +reversed +curriculum +survivors +clause +ambassador +squadron +induced +peanut +pronounced +Uchiyama +deny +impressive +Educational +demonstration +Pedro +bombs +loaned +Roundup +lawyers +Daley +Ensemble +Concert +Lightning +jet +promotes +Superintendent +radar +mild +destinations +Crystal +fierce +cinematic +frames +spelled +Facebook +transcription +Entrance +Pot +Apollo +Phantom +papal +bullet +Side +selective +genomes +Mohammed +Curtis +Boat +Firth +Montana +Jobes +fees +achieving +postal +Leck +progressive +Aircraft +melodic +Falco +unaware +pleasure +drains +Current +Archdeacon +download +chaired +cruiser +hectares +concurrent +meter +Shen +oval +Peru +Telegraph +Lama +Mongolia +gather +Hahn +activated +Boeing +Table +testified +excavations +Ball +Maitland +Death +canntaireachd +clues +Harbour +debris +emperor +volcanic +Tourism +melting +floods +Virtual +intestinal +Lahore +experiencing +barrier +Asturian +atmospheric +Ozeki +Vickers +Limerick +Toward +armored +Koch +shipbuilding +Stones +chef +Dell +ECU +Phu +Eilean +Jotham +valet +tRNA +redesignated +deer +Puente +Fill +Stowmarket +Laccognathus +PMID +Cockington +Dawkins +Budhrail +halophila +Candlewick +WWN +Bn +Amann +Ilkhanate +Shaldag +Lander +Mathematics +hero +unpublished +moments +comparable +Productions +legends +readily +charts +extract +expense +steals +fails +encounter +Spencer +inch +coupled +Geoff +Part +complained +graphics +announcement +attitude +nevertheless +Acts +accepts +hereditary +Leader +abandon +proceed +situations +adopt +Administrative +Gospel +demands +patented +observing +phenomena +demonstrate +Herald +assassination +flute +Grammy +engaging +pageant +imprint +inscription +Revival +auditorium +Turnpike +topped +eagle +Plan +migrate +Computing +Castile +quit +merge +valued +measuring +sufficiently +ease +Sheila +kings +tensions +Carnival 
+Josef +switching +gates +Amsterdam +appropriately +Nigeria +desirable +gentle +relegation +trademark +colored +promise +interference +compiled +Species +administrators +advocacy +hung +odd +efficiency +tickets +requests +villagers +Treyarch +installment +recoil +arena +tasked +substance +Madame +secretly +transferring +granting +Combined +Rather +cosmetic +advantages +desired +abroad +deadly +Theodore +wholly +enormous +grabbed +encourages +Slovak +Oriental +applicants +admission +companion +execution +busy +transporting +besides +revolt +Bremen +appeals +inaugurated +cat +loves +assume +Harrison +lifelong +nesting +iris +resemble +practitioners +Guinea +specialty +Die +monumental +folded +le +preserving +searching +exploring +directs +stressed +professionally +contracted +respected +Julie +Set +Embassy +Actress +Cable +assumes +Glenn +engineers +specializing +Europeans +trips +localities +flora +Haryana +Punjab +lumber +doctors +storms +moon +altitude +enhance +Ottawa +favourite +guaranteed +invasions +anger +Keith +scrap +dust +colorful +firms +Pote +Breton +preparations +feared +lifted +venues +achievements +unexpected +presenter +opinions +totals +UCI +conversation +treatments +consume +cloth +sheets +endorsed +remarks +Counties +Currie +hurdles +displaying +singers +Montgomery +vocalist +upgraded +sword +Study +recognizes +grey +Iceland +Greene +marched +Inspector +overcome +memories +proceeded +Singapore +regards +employers +salary +cent +Immanuel +trend +Marriage +Munich +matching +measurements +businessman +merchants +Wilhelm +wali +marrying +varied +intent +fears +Brad +photo +teammate +drivers +preceded +Adults +Carpenter +doubles +variants +taste +anywhere +Holme +Evening +Jerusalem +ceramics +resided +frontier +palm +Armed +deposited +VII +cave +invested +Mass +Trophy +depict +Antarctic +Expedition +import +Paolo +edged +Reading +Beijing +Polytechnic +Josh +Harkness +Teams +Coach +Conservatory +allied +clinic +sunlight +lengths +preference +therapy 
+auxiliary +swing +fairy +alkali +grazing +fortification +Inch +Built +IRA +circulating +attorneys +prosecution +surveys +safely +explosive +woods +somewhere +motto +altarpiece +Glacier +extraterrestrial +creature +expecting +accepting +Without +revelation +adoptive +Boyd +sum +approaching +array +Aboriginal +caps +prohibition +Yellow +PTH +neutral +declining +Uttar +Whig +organism +resembles +rebranded +Stabekk +Pop +condensed +carefully +variable +pipe +bypass +Theology +Vatican +presided +parishes +bell +absorb +sub +shores +reluctance +unnamed +licensing +licence +Hotarubi +Koshirou +lacks +attacking +catch +spare +invasive +Idol +submission +reserved +aground +erosion +Sever +Lion +succeed +brilliant +lacking +stylized +Often +playoffs +playoff +SC +arose +How +occupies +prominence +taxidermy +potassium +velocity +radius +complaints +Moroccan +crimes +classrooms +sentence +Victory +liberation +subtle +Inaba +Clifton +suits +manors +Felipe +glyphosate +toxicity +Lunar +Economy +Exley +Linda +saving +intimate +audiences +theorem +confluence +Riga +climbing +applies +Shenandoah +Folk +dig +Nikon +cartoons +Wills +Rush +Zagreb +encouraging +Population +Rangers +electron +Sometimes +Masters +Ellen +Indonesian +computing +WISE +algorithms +ER +negatively +subunit +Indonesia +locks +Seton +scandal +Alcazar +Peterborough +tuition +Hurricane +options +Snow +Dallas +illustrated +Heights +slope +solutions +detect +Blood +Westinghouse +supplying +Mari +ecosystem +Adventures +codes +Software +Georg +Darmstadt +inform +Saldanha +metabolism +Maroons +Swim +Mutual +Midlands +Holmes +Operating +Few +modification +descending +Barnaby +lineages +Frenzy +advancing +glacial +Upton +interrupt +Vincent +Vista +Diamondbacks +Eurovision +Bodrum +visitor +nomadic +Margrave +Ciudad +psychology +conductor +ridge +Communication +Polyface +semantic +Dhaka +Salvadoran +caliber +Thus +catching +tapes +Odisha +CMLL +Wilhelmshaven +meadowhawk +Indigenous +rugged +Muvattupuzha +Aleister +blast 
+Tucson +Gateshead +UNAP +Hondo +Teddy +Orkney +Shetland +Shakespeare +Davao +Sooriya +Cortado +Cups +rat +cylindrical +sativa +sentences +Nixon +domed +watershed +Sannikov +Cassiar +Bde +Assyrian +Yaron +Yochay +ditch +fractionating +Aldridge +Bayou +moissanite +Wanstall +Bellaire +Benavidez +Westlife +FCC +WNOX +Rettig +Manning +Anhalt +kurultai +Hulagu +Petersburg +dwelling +Certain +formats +Parker +autumn +Thanks +Vernon +Trillian +reunited +pulling +mail +pressing +Infocom +illustrations +combining +Illustrated +discs +pen +coincide +philosopher +Shadow +abolition +Religion +diffusion +Presbyterian +amid +graphite +oriented +stronger +substances +humid +amalgamated +Weekly +pianist +string +secular +ensuing +Julian +Viking +Bloomington +Lin +Legion +pipes +monitor +Pioneer +fed +lie +densely +lined +terminates +subway +subscribers +nationalist +excessive +knee +circa +Yankees +weighed +MLB +penultimate +prefix +loaded +Diana +Anton +ally +cartoon +worshipped +Electronics +TU +semiconductor +Oda +Literature +sleeping +relies +Airlines +dressed +resembling +comedian +spy +Allmusic +melody +formula +innovations +sacked +darker +reactions +recall +horizontal +sail +stays +titular +downloadable +unlock +zombie +storylines +utilizing +exclusion +treasure +Rhodes +Scarlett +gateway +slaves +Richtofen +Key +scrapped +indeed +anticipated +duration +Writer +expressing +richest +optical +lighting +fabric +broadband +lenses +Queens +Wallace +reef +sanctuary +Making +slopes +Situated +tourists +pilgrimage +Shiva +careful +Cornwall +fatal +Blackpool +attracting +Mrs +Clive +Bridgeman +und +ancestor +dog +conferences +passionate +barely +captivity +examined +portfolio +Eye +forested +Eduardo +premises +quantity +understood +Ming +assess +prayer +calm +Volume +accession +incomplete +Sandy +Girl +Duncan +Palmer +Hortense +Theological +recruit +alert +Whilst +phylogenetic +indexed +shifting +ratios +adapt +Motors +brass +expenses +loosely +biography +Veterans +deposit +mystery 
+Peruvian +skeleton +accurately +Change +canceled +Riley +Soviets +watches +Fourth +Metal +depot +timber +diagram +Commissioners +appreciated +Watch +Siege +prisoner +radial +Slavic +prince +Rabbi +prolific +monastic +dormitory +holidays +smoke +NBL +milk +Csuri +seventy +cure +candidacy +organizational +Dream +pathway +offense +vocals +Philosophy +principally +tanker +Analysis +yearly +floating +Excellence +MGC +batch +freed +Solomon +bonds +chiefs +Hawkesbury +decommissioned +lighthouses +exercises +Ligue +Fletcher +Welfare +Rule +Body +appealed +Progressive +stretches +democratic +deterioration +strengthened +tenor +retains +undergoing +payment +silence +Whether +onset +Car +standings +rely +extraordinary +Foster +Trentino +exports +optimization +missile +patrol +accelerated +Derbyshire +clearing +Tokugawa +investments +loans +Sugar +Salman +governance +Resident +gardens +recommend +experiments +peasant +implementing +issuing +Lemon +Established +leisure +branched +Pete +Kiev +statewide +treating +zinc +lamp +passive +UVA +Nobel +ski +Santana +globally +Are +comedic +layout +accomplished +restoring +habitats +Ducks +Approximately +delegate +localized +Sunshine +Past +partnered +privacy +Ye +Ajmi +evacuated +disappearance +supplemented +informs +determines +Reyes +helicopters +Goodwin +invaded +tired +gems +Emily +Zhang +Georgian +commentary +unprecedented +cites +trucks +automotive +Icelandic +Winnipeg +Alaska +Supporting +javelin +recovering +habit +Drug +pain +Pradesh +guarantee +contracts +zones +Trance +mirror +Legend +Hassan +complexity +presently +novelist +comeback +Discovery +alienated +downstream +Athens +Cedar +relegated +Alberto +chip +diagnosis +cemetery +clans +Kagerou +duel +Gyoubu +Akeginu +parasite +lover +reluctant +sake +mentally +bounded +demo +highlight +Porrino +defunct +supreme +Hussein +Geller +alleging +Chad +log +slate +cease +Pine +Animal +cabin +Horn +talented +Volkswagen +alpine +teenagers +Cox +fibers +beings +exotic +instances 
+contaminated +prone +collar +vacuum +Clinic +magnitude +refugees +pull +Satellite +AP +likelihood +Kaysersberg +islet +Allies +proceeding +declaration +powder +VIVO +specialised +criminals +Canterbury +screenplay +Plate +Employees +Seattle +united +targeting +trustees +Direct +commissioner +Trial +Olivier +assessment +Malayalam +honoring +Adrian +rainfall +Sands +tribe +launches +restaurants +Platform +surveillance +Croydon +Thornton +Cavalry +pleased +excited +coordinator +Geneva +ghost +loop +winds +masonry +Bil +kids +generic +Gill +Named +explorer +Conway +Mao +grid +commerce +Manila +coaches +Portrait +Ahead +Herman +athletics +Butler +Dakin +steering +mascot +roadway +Hubbard +certificate +continually +charging +scholarships +Sutherland +orchestra +choral +Associate +espionage +reward +magistrate +precursor +scales +exempt +Horne +Grey +Nurses +carriers +dynamic +Sherman +interval +Stroganov +plea +Suez +Lea +Multiple +sculptures +vector +swim +factions +Color +flattened +fold +intervals +Thailand +browser +respond +Veith +Tibetan +Namgang +proportional +Palestine +Gardiner +Honours +serotonin +sealed +antigens +NK +scorer +Tyson +Coventry +Nito +albeit +Humffray +sketches +Kepler +Pibroch +sanctioned +translations +identifies +Nicholson +webisodes +SS +Enrollment +Italians +sounding +dancers +pargana +preserves +UNESCO +Boom +Tag +Resorts +Pack +zooming +Hammond +Blake +pretends +arid +Bengali +FK +Inn +Ambulance +attractions +McMurtry +pension +Cluster +Billingham +furnace +closures +elc +Luther +Jellineck +Iris +Monarchs +Mortimer +Karloff +Elaine +Brody +Boulevard +violence +conclave +Borghese +Gatch +Waterford +Pinkett +touchdowns +NEF +Meteor +dive +correspondent +excitation +Dalmatia +Gent +modules +Veins +sediments +Tonga +Corruptor +Loening +Swirbul +Viru +secession +distillation +McCormick +Cadbury +Lakewood +Irons +Arcade +Sekou +tackles +diamonds +nitrogen +embryi +Tsoro +Yorimitsu +Bacterial +Paddington +broadcasters +honeycomb +Coxeter +Caracal 
+Sapag +Salak +Reysschoot +Towson +GraphicAudio +Smalley +Ferland +Prep +APA +Scramble +Glover +Lambie +Pendleton +khanates +OBA +Astronomical +Dent +spelling +repeat +adventures +spacecraft +tremendous +Mostly +Subsequent +Salmon +troubled +Penguin +facts +Flash +Visual +miniseries +Anniversary +prop +Comic +luxury +advisory +delay +seldom +odds +choices +physicist +Georges +ritual +exceptions +handball +Formation +veins +superlubricity +decreases +Kurt +Gillian +sandstone +Laboratory +divorced +financially +cliff +laying +makers +ineligible +Reverse +cultures +bigger +Flemish +financing +Senators +withstand +curve +portable +Batman +Woman +reprised +Delhi +Wusun +Lutz +Diploma +Gate +Crisis +proceedings +Gardens +MBA +fusion +tasks +Do +Future +Ned +newer +Lega +penalties +interactions +Recent +detection +Marcel +capped +blog +knife +Specialist +cooperative +playable +updates +ambush +murders +Bruno +emerge +retrieve +convincing +drained +Cornelius +Bunting +rumors +probability +Pictures +Antoine +possessing +invisible +coral +Sanctuary +conducts +zero +oak +atop +additions +drives +sworn +Marian +enjoys +Trewoon +observers +pioneered +Treasury +Peace +illegitimate +cats +afraid +Stevie +Hoang +sparked +Augustus +interrupted +whenever +circles +inspected +pigment +Climate +Nicola +criticised +Solar +sophomore +Vicar +Frances +peninsula +Kay +Midland +IF +reflection +Reality +penetrate +thoughts +upright +dimension +phenomenon +VIII +Patricia +archive +volunteered +Ambassador +Mme +Phil +zenith +lesser +carapace +Negro +gears +sleeve +reconnaissance +struggled +furniture +morphological +badly +genera +vintage +cognitive +Igor +Monk +Everett +telegraph +Coal +overhead +Fulton +subscription +torn +Progress +Buildings +keen +confined +Catholics +exams +arch +coloured +exceeding +republican +Wener +sounded +advertised +polls +dividing +Internationale +renewable +correctly +Hillary +voter +protections +wage +abortion +transgender +periphery +Panama +directing +bridges 
+confessed +lesson +firearms +barrel +formulation +abbreviated +Stop +Assessment +reconcile +landings +Academic +Dragon +defines +sustain +farmer +Standing +Hindus +crore +posthumously +responsibilities +negotiate +explored +appoint +traits +infant +Chase +masters +explicitly +summoned +Denis +growers +interfaces +Maritime +homeport +rehabilitation +banning +Rocks +Dominion +tract +Krishna +navy +formations +Saxony +regained +vicar +diocese +Wheat +bounty +knighted +Challenger +ornamental +stems +Scots +fertile +prosperous +eleventh +systematic +pump +Asiatic +generations +shining +Natalia +violin +Stephanie +stake +Still +Capitol +photographs +accumulated +busiest +Mycobacterium +attachment +Interface +ultraviolet +triggered +blocking +guidelines +memorable +Alpine +affiliation +rolling +mills +listen +marginal +vernal +wetlands +fortifications +batteries +strategically +threats +violating +Average +comfort +activists +fused +Wind +verdict +Jacksonville +analyst +Klein +complications +Task +recommendation +unstable +reactor +contamination +Atomic +Films +Sussex +Louisa +Pietro +Federico +Edgar +risks +quest +Dana +trap +bee +hiding +supposedly +speculation +positively +finale +Salon +domestically +recursive +geography +payments +Ira +parody +bowler +dagga +burden +differently +Traffic +pending +reasoning +announcing +drops +Yet +imports +drain +importing +terrestrial +algae +missionary +Reformed +depths +Transportation +Lawn +pastoral +welterweight +Leonard +favorable +cameras +Rotten +remnants +pirates +Journalism +Investment +divine +Ieyasu +ninjas +hatred +Nenki +manga +gifted +surprising +hopes +lord +upset +disguised +wrapped +faults +boost +deepest +Joujinshuu +Wave +Previously +prizes +Caesars +halt +sailors +excavated +tidal +spanning +varsity +Prague +organist +Henderson +Burton +schooling +Scholars +Organisation +engraved +suited +archives +automated +Mossavat +USL +Fury +progressed +workforce +compare +Washburn +handles +thicker +Store +humidity 
+harmful +absolutely +pH +inert +mounts +assessed +twist +doctrine +stellar +Aires +politically +EUR +sciences +fellowship +Sega +armistice +Hand +enforce +conquered +corvettes +garrisons +aftermath +cholesterol +dietary +warns +Wei +agreeing +Silva +Movement +Honor +prose +detective +Orchestra +comfortable +Telugu +witnesses +Settlement +councils +gate +desert +ranged +elevations +minerals +terminated +Mamta +reside +adjust +armament +Sackheim +Tor +Parks +violinist +guitars +UCLA +devastated +sergeant +dawn +initials +mediumwave +pm +reinforced +Constituency +Sudbury +HackerOne +utilize +comune +Malaysia +Memory +Hole +Plymouth +polymers +ceiling +Franco +ammunition +ITF +coordinated +IT +Volunteers +subgenus +facilitated +impacted +genes +Sheikh +Thursday +Coastal +Wolverines +Right +Philippe +avenge +Carly +runoff +magnet +neighborhoods +recipients +harder +fraction +structured +comparative +advancement +Nowak +expressive +collector +Shourd +autonomous +recommendations +Se +pushing +Seoul +NASA +bits +Hammer +Storm +appellate +oversight +investigators +Planck +reputed +Arethusa +vicinity +Hedvig +Emery +adds +Shield +pathogen +mall +notion +Mariana +Saltair +Brussels +piper +imprisonment +Centers +foundations +unity +authoritative +Provisional +Merit +pharmaceutical +sleep +Barrios +KGB +Fc +binds +antigen +shadow +forbidden +Mowbray +Appeals +Portsmouth +tackle +rocket +Piobaireachd +aesthetic +Huw +Travel +Vologda +comprise +supportive +converting +Usher +Lori +RPG +Ikuta +Asahi +Lighthouse +locals +earthquake +Packers +Contest +tomb +judoka +famed +Shipbuilding +Governors +calendar +eating +neighbours +Junaidi +Densha +Somerset +MTV +Armstrong +Mo +ZUI +Boca +Jianbin +Chuck +Mir +drum +Celebrity +Galapagos +pad +drought +Sanne +loud +Mabel +Prospect +vapor +precipitation +sniper +sa +shipyard +subdivision +Incident +Shandong +rhema +Landmarks +Richmond +aluminium +Sufi +Nicolas +Petrus +Wear +Stephenson +Beam +Smiths +Wilton +campuses +Gap +Trevor +Flatpoint 
+overcrowded +Wealth +inexpensive +Dental +Persson +Axel +duchess +Formal +Eshkol +spectrum +poker +creditable +Boise +Ringera +FIBA +Barkley +Tunisia +Rosenthal +AME +tailgating +Avengers +Wireless +hyphae +Confession +Bentley +Charts +Shaykh +Arch +Panel +Edison +Bend +Sensor +SSW +Formula +Christadelphian +WKS +Zirbes +Floreana +Nel +truncated +imag +oboes +reeds +brongersmai +Croat +HDZ +Cambridgeshire +Horthy +primer +Roath +loch +Paape +Ewald +Rajappa +pepino +contributor +Camden +Dirk +depressed +Paranoid +eccentric +survivor +instructed +lunch +drunk +banjo +Seventh +gradual +voices +blend +clones +trilogy +definitive +twelfth +marries +Android +Actors +warehouse +Millennium +puzzles +app +translate +shoots +listening +orbit +Falcon +propose +compensate +referendum +vocational +Rice +Manhattan +objectives +populous +taxonomic +monotypic +atomic +threshold +distances +finite +computational +patents +entrepreneur +Tribune +contestants +Rivers +Judith +Joy +electromagnetic +fracture +Norse +Angola +dollars +Hunt +Bailey +purchasing +Improvement +Salem +linking +migration +denounced +Autonomous +favoured +Shire +rushes +tension +calculated +Curry +Diane +Swimming +Liberals +beneficial +suppressed +Pueblo +parade +capability +Windsor +stuck +alcoholic +outlines +fills +Intelligence +bin +Ancona +incredible +substituted +Individual +Assemblymember +announces +Alcatraz +custom +loot +mechanics +Savannah +contacts +confronts +Sentinel +midst +directions +unconscious +lets +Moon +Pentagon +Arrow +Monty +lab +Siberian +hat +Steam +themed +Lead +Erik +steaming +answers +Fuji +Musa +Istanbul +Traditional +munitions +counts +Writers +minimize +emissions +rendering +LED +Plasma +Hits +knocking +McCarthy +aiming +bred +Baccalaureate +Programme +handful +peer +mentoring +currents +residences +holy +pavilion +Volleyball +amenities +garage +convenience +Caroline +colonel +fortresses +uncommon +bomber +endowment +loving +scholarly +collaborations +lighter +chest +steadily 
+traced +Timothy +Karnataka +Shri +Fritz +geologist +Vojnik +Sculpture +Armistice +rider +baseman +honoured +Rocha +teammates +privatization +livestock +Bromwich +satisfied +bacterium +Hidalgo +rational +ethical +choosing +forget +incorporate +Evidence +coins +surpassed +Rodgers +Cottage +overnight +Percival +reproduction +prompting +scenic +chorus +Too +complicated +MGM +Maxwell +suite +Columbian +lift +jumped +Acting +Article +wonderful +botanist +undertook +distributor +monk +Paralympics +synchronizers +Known +Matilda +Coyote +furnished +Host +multidisciplinary +thorough +imagery +anterior +posterior +abdomen +notoriety +gig +Greatest +moths +Keystone +Ghost +fake +detailing +recruitment +icon +continuation +commemorate +excluded +Met +Dante +bells +originating +Sejm +prosperity +improvised +thoroughly +confident +cathedral +HC +Indoor +Estonia +fjord +trail +UFO +Southampton +Classic +Tuesday +rewarded +Industries +moisture +grain +Powell +Stockport +oxidation +tendency +Juris +Carroll +businessmen +limiting +LGBT +overturned +whorls +Dye +Districts +constituent +gospel +rehearsal +redesigned +handguard +blade +Shingo +Heavy +Zero +inspection +Tunisian +Xiaomi +keys +Event +Accreditation +defects +chartered +evacuation +rooted +Indies +patrols +disposal +Chong +farmworkers +span +amended +refusing +Greeks +soprano +Das +incorrect +Winston +specifications +aids +brands +Walk +inscribed +arising +leather +Sunni +advent +acceptance +satisfy +advocates +superiority +reasonably +lawful +multitude +finishes +Pole +constraints +enacted +rolled +Gould +MC +Circle +emergent +riders +veterans +Psalm +Dowland +pilgrims +preacher +recital +washing +Miraj +Bombay +expeditions +corporation +weighing +accumulation +profits +levied +dairy +Administrator +umpire +equilibrium +dimensions +Picture +differing +demonstrates +depression +wheel +pumps +apical +Fellowship +Revenue +orchestral +Reagan +midway +everyday +reviewing +compelling +crude +stack +Ultra +Labs +Artist +tolerate 
+tones +popularly +celebrities +Mad +alternatives +surround +alias +Dwight +Leaf +Bypass +stimulate +Permanent +speaker +declare +chances +psychiatric +gains +disorders +trauma +analyzed +dreams +waved +hearings +pose +Kuwait +detainees +disappeared +motivated +Paks +generator +gases +Mariawald +Midwestern +decoration +preached +IX +carriage +sunken +rebel +locate +confronted +uncovered +Duchovny +watched +genetically +possibilities +ideological +dominate +Detective +Chester +Olivia +discus +prospects +Sheffield +regulate +Rehabilitation +presumed +consumed +Fields +profession +judgment +runway +seventeenth +cheap +Tata +Manufacturing +Pratt +filaments +Whereas +Avex +velfarre +moderately +appreciation +Drilling +Technologies +relieve +grains +Bertha +letting +tragedy +requisitioned +Sacred +Albany +muscle +voicing +collaborating +disputed +Prairie +Lisa +hub +Hyouma +blind +equals +tongue +Danjo +betrayed +innocent +evil +wound +Being +cancellation +Marrave +rapper +Americas +Dorton +treble +Makah +positioned +confidence +Crewe +Wembley +Walsall +Nadu +psychic +Inlet +Ballet +reproduce +polymer +compulsory +milestone +Jura +kindergarten +Oppenheim +balls +brushes +residue +template +calcium +compromise +Arsenic +mount +consulted +matched +Vaughan +Coming +suffix +restricting +licenses +educate +jailed +Preston +Bomb +Bennington +sinking +warships +triple +devastating +danger +rushed +lightning +resist +flavor +Link +biology +zip +totally +objections +shareholder +enthusiastic +Record +bars +Horace +Colombo +Face +editing +Managing +Again +ETA +explosives +emotions +Browning +Isles +Incumbent +performer +tallest +strengthening +Landmark +Wednesday +Belle +Messiaen +auspices +wounds +voltage +detachment +unused +confront +boss +Parts +funny +Duchess +Algeria +sandy +agreements +scaled +hotels +package +forthcoming +NATO +newest +refurbished +tiny +puzzle +BMW +compliance +Son +Lal +yoga +awkward +frustrated +girlfriend +assaults +appointments +Artillery +Florence 
+Lookout +vacated +Lionel +Salvador +songwriter +Eisenstat +leagues +ERA +rebound +Safety +widest +EF +focal +landscapes +Co +Yard +Plain +Meeting +Irwin +Merchant +Gilbert +assignment +Nuncio +Dodge +balloon +matrix +Ricardo +purse +sparsely +En +Crossing +hiking +Primetime +Movie +Commanding +humanitarian +Refugee +readers +ex +crustacean +participates +Plasmodesmata +deposition +authorship +Monterey +Yost +photos +Bikes +organizers +Roeser +configuration +enroll +PEF +Cliff +mushrooms +textbooks +Forum +elongated +diplomat +arrange +converge +suspects +noble +picnic +faithful +specially +vendors +forbade +statute +Twitter +nursery +Summit +talents +Nails +exploited +regain +Barton +narrower +neural +defendants +Beauty +maker +Corbett +Monster +Riverside +dieback +Barrier +Rican +thirteenth +glacier +desk +Crime +presenting +Irrigation +Pipe +SLGW +petroleum +Electrochemistry +rebuilding +rigorous +Presidency +bloody +nutrient +communes +unusually +Maharaja +deputations +insular +operators +buses +Stuart +fullerene +nets +christened +Coder +rats +Brandon +Worth +orphaned +antibodies +replication +Goalkeeper +Granade +Planning +winger +Bryson +Doncaster +slip +deficit +lateral +Libraries +Hazara +Ballarat +shortlisted +timing +Na +chords +cello +Brendan +Rideout +binary +leaked +webisode +Papua +microorganisms +ICI +Elliott +Islanders +DS +Kajal +entrepreneurs +litigation +Siegfried +Samsung +coordinates +Subway +smart +shipwrecks +Lucha +Libre +execute +RCA +breakup +pleasant +Gujarati +Obeid +lending +Otoko +Collections +Theorem +assumption +rivalry +bilingual +chancellor +unemployed +Richards +asteroid +influx +Jumla +triangular +catered +montane +Wardak +Badgers +Haiti +octave +workplace +Chips +Blackman +Forbes +chiefly +ISI +Kreshpan +arrivals +wished +Textile +Bartley +destined +Whiting +remark +heating +upward +consequence +ng +cruisers +Johannes +Conseil +Dongou +Barcelona +receivers +domains +Idukki +Kottayam +Equinox +convicts +Richter +Davy +Darlington 
+Karim +Nike +Carew +deprivation +Form +Colored +Contact +McMahon +Tammi +Shannon +XL +Maddogs +Halasz +UNC +Batticaloa +deposed +Barberini +Malin +Giolito +Juvenile +Konami +spouse +Deo +dinosaur +Tax +elongation +mRNA +antibiotic +Dundalk +Linkin +Alain +Nielsen +Bosse +circus +penetrans +reconfigurable +Gobbolino +Pagla +Dashu +Mardi +Creole +Kinsell +precise +Burnett +Saptami +molars +separating +predators +silt +Detour +Maltese +Gaucher +Jameson +stroke +typefaces +Extra +Medium +partisans +staging +Munyeshyaka +preaching +chaplain +aquifer +errors +setulipes +Mango +navigator +Mander +Ambition +Lugosi +Sylhet +Connect +Hadas +Molecular +Solectria +Judah +Siddhivinayak +Frideric +Needham +linsangs +Shadowville +Crathie +agglutinin +gunboats +Broncos +Lamborghini +torpedoed +cytoplasm +Duggan +Waldstein +Tortoises +saddleback +Palk +streamlined +Caladenia +Mildmay +Coffee +EDE +Deacon +Samanid +Rukn +Anglia +HISD +Diya +octavo +Ablon +Pystynen +UMP +Voyage +Borthen +governors +Mangold +Moody +Microbiology +Pagones +Callow +greenhouse +Antioch +Thymocytes +UFC +Kristoff +Housekeep +Laurentide +Ludwigsfelde +Mosdorf +SNES +Hudsons +Sky +robot +Thought +escapes +Hunting +Bernie +Perkins +Title +upbeat +portrayal +remarked +stunt +disrupted +Everything +universe +crowded +triangle +Rainbow +concludes +copyright +Ltd +microscopic +Rod +Planet +homage +explicit +suffers +reject +negotiation +amendment +Sinn +Vocational +amalgamation +Theory +quantum +emeritus +intelligent +addresses +beginnings +verified +investigations +turbine +Herbert +horn +outgoing +Wallis +Marco +Doubleday +Brennan +publish +bankrupt +Arcadia +researches +runic +boasts +Deputies +suites +quarry +Nearly +Frederic +incorporation +airfield +Pond +ponds +Middlesex +landmarks +GE +Elections +ladder +differential +nowadays +merging +Catalan +Draught +Wet +brittle +assuming +coordinate +parameter +Dawn +reprise +portrays +protagonist +Laurel +Robin +Openysheva +seniors +Events +Newman +Feng +Liao 
+foster +Lifetime +savings +den +bothies +Burn +Stories +unauthorized +Bernardo +outfit +Sergeant +electing +senator +adopting +disadvantaged +Designed +cliffs +photographer +plenty +slower +noticeable +Pitchfork +liner +bankruptcy +Prima +Going +participant +lawsone +ink +overseen +underneath +coinciding +disappointment +healing +costing +remake +Walsh +recruits +discusses +Chaos +trusted +reverted +warriors +cancel +gaming +implying +sacrifice +Sofia +Turkic +sweep +bush +fungus +obscure +screenwriter +imagined +analog +wears +projected +hides +daytime +grab +diminished +mosque +Roll +Mercy +convalescent +commence +attain +fundraising +discharged +cascades +Ukhimath +composing +keynote +hanging +injection +mined +Admiralty +Esther +Frederik +simplified +victorious +Guinness +Veterinary +avid +peregrine +Restoration +translator +Streets +adjunct +throat +shade +dramatically +Editor +Joshi +instituted +Paper +recognizing +Healy +Vahrenholt +Shell +wrestler +Montenegrin +Alessandro +Wanderers +Ambrose +Laurence +inherit +JR +herb +hollow +erect +serrated +el +sin +Xi +virtue +Encyclopedia +excellence +deity +spans +climbs +tragic +undoubtedly +mold +methane +Hertfordshire +BA +Bronze +burial +inception +Friend +reopened +Vic +Challenge +Harvey +Patterson +scheduling +announce +clock +seaside +Written +Joey +puts +barred +proves +miss +Amazon +Andes +tributaries +Meru +Supply +ministry +Lok +clutch +maximize +Polka +Recording +snails +monoplane +Disease +Franz +Confederation +clip +unreliable +limitation +Knowledge +Tonight +Less +Guitar +torture +Charlton +Gas +Path +segregation +upgrade +Completed +economical +highlights +Wolverhampton +patron +stained +Ranger +Ernst +fortified +invading +Poles +Estonian +Sleeper +breakthrough +mood +journalists +Motocross +Lone +advertisements +Listed +demise +bike +harvested +stains +Prof +migratory +Smithsonian +Draft +Emeritus +Interest +boiling +sized +pressed +synonym +outlined +disasters +obligation +primaries +corporations 
+Affordable +debates +Circa +magic +axis +succeeding +wickets +CIMC +logistics +digits +Ecology +ecology +blues +modifications +Kilda +villa +accreditation +reorganization +colonies +regiments +Fabian +Rising +appealing +escorted +routine +gallery +Hue +Eddy +attains +discretion +Beth +comply +aforementioned +Farmers +scope +Rhine +culminating +Louisville +garment +contiguous +Arabian +marital +conclude +committing +stigma +ye +affinity +NASCAR +Xfinity +Truck +avoiding +lap +Rich +realm +clergy +assemblies +feudal +apex +practically +Natalie +Ugandan +rebels +convenient +Linux +hull +equator +transmitters +organise +recited +lifting +choir +princely +Rutgers +hikers +birthday +naked +pot +Kalkberg +visibility +staircase +ecclesiastical +archbishop +Kathleen +clerk +cricketer +Sector +Assistance +institutional +medicinal +attracts +infamous +Garcia +sheep +Andalusian +Ahmad +arable +Romans +citrus +remarkably +hydraulic +reddish +longitudinal +transverse +Konopleva +Normandy +Alexandria +lineup +Chang +Produced +Alison +acclaim +alma +Patriots +Clayton +transplant +arguably +Mental +liaison +speciality +fashionable +Skin +devised +therapeutic +bathing +Included +syndicated +Style +Renault +Omar +prey +Maris +Bignell +Mawson +Dinosaur +Foods +migrants +compatible +rice +trout +blooms +Used +defences +ostensibly +trailing +counting +approve +halted +retinal +Da +conditioning +collaborator +Gustav +behaviour +disappointing +flanked +Guantanamo +Status +Tribunal +evaluated +rods +pellets +inadequate +shock +Trappist +sells +monasteries +batsman +Behind +sturdy +nave +Madonna +Anastasia +Lois +glaciers +Monica +paranormal +abducted +eradication +tipped +salvage +poisoned +tumor +pipeline +Alien +canvas +vaccine +wanting +invade +Conover +nightly +crafts +revive +Radical +critique +quotes +spur +aunt +Sundar +drums +Tobacco +Hemp +drastic +intersects +economist +stocks +peasants +textiles +wages +Ordnance +robust +proud +flowed +Karachi +Depression +consequences +spores 
+tightly +disco +floors +Johan +Cyber +Hours +Pullman +Whitman +Ammandra +Harper +clue +Rider +Clubs +ladies +WBC +Amanda +conclusions +Amer +Border +grouping +praising +reminiscent +Cardiff +embassy +Minneapolis +dependence +server +Refuge +grocery +thriving +congregations +Ninja +Hanzou +threatens +whereupon +assassinate +truce +Jousuke +distracted +crushed +forgive +aiding +flee +defenses +resemblance +crush +engulfed +attach +dozens +Samurai +unidentified +Sloan +dug +swamp +preparatory +bowl +evidenced +Sons +Loop +whale +showcased +Lighting +terrorists +employing +SK +sampled +Tranmere +Landscape +Rue +boom +tonal +Mint +trophy +Alma +Campo +blowing +vacancy +Bottom +pelts +sixteenth +Fur +nonetheless +supplements +stick +inhibit +inserted +paste +popularized +visually +determining +bags +clips +monitored +Nagoya +logic +noticeably +degrading +Palestinian +stamp +registers +unwanted +Marketing +Budget +Devon +screened +Generation +ray +Expeditionary +iTunes +Peloponnese +suppress +autonomy +Powers +inferior +signatory +impose +reinforcements +warned +nominal +blown +greeted +hated +realised +culinary +Exeter +spokesman +Chunichi +polled +theaters +garnered +VHS +Artistic +medalist +unionist +Abhayavardhana +Sama +ATP +Equatorial +marching +chanted +demonstrations +spontaneous +communicate +logos +pursuing +Rafael +townships +synthesized +rainbow +adverse +reproductive +Pleiger +Elmer +worse +replay +Commercial +Tel +Philharmonic +Glinka +Debussy +mortality +finger +Trivikram +pivotal +Ramachandra +fond +Malik +queen +Milan +accomplices +Morocco +geometric +Mine +respectively +passion +substrates +nurses +Resource +Hotels +mandate +Jockey +Mikhail +Potsdam +lots +embankment +summers +depart +amber +affluent +Broad +romance +Kanak +cache +protocols +Kappesser +Randall +oppose +directional +clouds +Engineer +warmer +envelope +painters +Windmill +auto +Milios +Imran +Griffith +Conyers +cartoonist +modeled +culminated +Scottsdale +Paradise +comics +Better +worm 
+damages +decreasing +pine +Nearby +Mellon +Afghanistan +intellectuals +unrelated +Patrol +Communists +outreach +cytoplasmic +fluorescent +trafficking +nursing +Reznor +Screen +executives +pilots +Cinema +quarterback +battered +Mayo +wheels +tires +postwar +transcontinental +Roderick +Pirate +Ric +leverage +Finally +Centralia +parallels +equity +divisional +correlated +preschool +accountability +backgrounds +Per +EVS +proponent +fail +donor +solving +Parisian +Dame +skirt +bail +UC +walked +imminent +Mohammad +lanes +proposes +waterfall +exam +confrontation +Guillaume +Lesotho +Plains +damaging +denial +socialist +Mathew +pistol +rowing +Anikey +Siouxsie +Pius +insider +Buffalo +Healthcare +Cuomo +indictment +optional +basically +Huron +Hawaiian +Azores +decorative +neckline +agathidicida +rot +highways +cook +paints +bulletins +Guizhou +efficacy +Leena +Found +Garfield +Locomotive +interurban +Croats +Kosovo +Programs +Lode +Scid +Sahib +riot +Strategic +responses +Lhasa +Qing +Gerry +Ryanair +transaction +Pinkerton +SAS +Barnes +imagination +inherently +vivid +valleys +Cremona +Edith +skating +Grote +Poor +cleaned +babies +ADCC +stucco +bench +hilly +Yevgeny +Liza +dream +Leicester +Ham +fearing +derby +necessity +Seth +Ground +Northampton +Alves +inquiry +pumped +abdominal +smallest +Nikki +Chennai +descendant +Heymann +intricate +ap +Morsan +XV +Plans +prayers +Tenorio +Flevoborg +Sepultura +premium +Inkersall +hiring +alphabet +Belkacem +Antwerp +remixes +magical +perfectly +Sue +Institution +restructuring +startup +client +forum +Laurie +Cairo +auction +Alegre +barony +Ayatollah +suppression +Mappano +gangster +gatherings +tough +knight +reservation +digging +Amazons +Heavyweight +punished +heterosexual +demographics +Currae +cater +bassist +complementary +Procedure +liver +Lalla +gills +sediment +Katoen +Silk +Advisor +Beirut +careers +vacation +Rockefeller +Regent +Bowen +pressures +stance +marshes +Luck +antitrust +Gul +Northwestern +weekday +consulting 
+Property +saint +Raghoo +erupted +beef +woodland +pollution +trumpeters +Hindi +Areas +Carr +retitled +Maduro +Daejeon +Sparks +Petty +Pottsville +fined +purchases +OneAPI +classifying +IRS +canyon +Rodrigo +gaita +diatonic +unreleased +reissue +keyboard +Tea +Moments +Motorway +prejudice +affidavit +Wooden +Jerome +wherein +spun +Gujarat +organising +bedrock +meadows +strategies +Carpi +Rama +marker +Extension +geographic +Lidcombe +reigning +drastically +OTO +Wallsend +meteorite +Mineral +commodity +steamer +observatory +Falcons +Look +Awareness +JANET +Copenhagen +cord +Flag +pace +Entrepreneur +Grays +Sevens +EXEC +consoles +Nursing +Terebellum +Sagittarii +pope +malaria +Cleopatra +welcome +Michaelmas +Oxbridge +homeland +nucleotides +pathogenic +antibiotics +Shelbourne +Wicked +Directory +CSRS +unpaid +Shakers +tiger +Scouts +Vahanam +Noronha +Excuse +Nicholls +Giang +besieged +synchronized +Pygmalion +Dunning +Moby +typeface +thickness +negotiating +Aguilera +pyrotechnic +Milu +Baptists +controversies +Juskatla +Rays +Arah +Alamitos +Vallenato +reactivated +Denver +estimator +cries +teeth +Berglen +Winsloe +Kogel +Shafiullah +Torres +Surguja +Berg +SAGB +Void +Map +novi +beaches +OGC +conductivity +VOA +dhumpa +erythrocytes +Saginaw +Mosquito +Wise +Acheson +Nuckolls +Poensgen +AKS +Rasouli +Peterlini +weigh +Physiology +Cerro +hazards +Thanos +Punggol +hypertriglyceridemia +Kissimmee +Pettis +Dulaney +Baghdad +Enhanced +Meter +Saranac +advisers +Sperillen +tolerance +Apology +quarto +ABRHS +Huntingdonshire +Sarkozy +Olas +Higherlife +dart +Gajhede +Knudsen +Sadharan +Samaj +Brahmos +aerodrome +Terrien +Dinnick +Ptolemais +Demetrian +thymocyte +CCS +Taxila +Michelin +Ipgrave +NMIs +Liselotte +NECVA +Radhika +Chagataid +Temujin +Tugh +Affiliated +umpires +Kidd +Endurance +Pekah +Hitchhiker +Question +Ends +forgotten +Fry +commissioning +narrated +divergent +personalities +uncover +artificially +satisfaction +picks +convinces +Than +Published +feat +Dickinson 
+Carlton +Playhouse +omit +Sales +corrected +Adventure +scripted +Invasion +novelty +philosophical +Relief +peers +ought +delegated +Newfoundland +Career +Pollard +extraction +Bang +authentic +entirety +professors +Handball +accuracy +hexagonal +registry +ambient +exciting +finer +slender +successors +Factor +Deepak +Rebecca +publishers +Clan +Bear +worlds +DR +Allegheny +farmhouse +Appleton +tolls +millions +surplus +plantations +transform +garbage +Birch +interchange +abundance +Single +firefighters +Engine +geometry +advisor +congress +Toledo +breed +taluk +scoreless +ductile +reversal +superhero +Robbie +Quinn +Birds +Canary +FINA +medley +touching +Tasmanian +diploma +Steering +diode +inventor +Heinrich +mountainous +ruined +Am +surrounds +farther +pumping +Candy +Dolly +Calvin +constituencies +Monthly +retrospective +logical +indication +Credits +obtains +Promotion +commenting +shortage +Lawsone +stain +evidently +absorbs +avoids +Kappa +resigning +Chu +desires +warship +hauling +sails +lottery +Specialists +royale +utilizes +emphasize +Multiplayer +upgrades +Asylum +mercenary +los +cowboy +roaming +Oracle +symbols +declares +encountering +spirits +Ultimis +experimented +Broken +outpost +container +AI +Deluxe +remastered +Mystery +Points +Changes +splitting +progression +enjoyment +Wesley +echoed +Tatar +Budapest +lobbying +exploits +stealth +coined +sensing +owe +parameters +correspond +coated +Pros +Bo +Lamar +Coral +Visitor +Giles +Archibald +espoused +donate +Abaixo +upwards +wastewater +courtyard +Comparative +philanthropist +forged +executions +outskirts +westernmost +redevelopment +outline +Flora +headlines +Mines +Baldwin +Ulrik +tenant +Augusta +Johanna +Dictionary +Kenyan +Paula +diary +yellowish +Olive +Cyril +restarted +earnest +feathers +paired +universally +Formerly +Continuing +Werner +kg +highlands +plateau +encompasses +Canadiens +Romanian +siding +branching +nervous +stranger +Enrique +con +Buddhism +Investigation +teachings +ear +practicing 
+tactic +Des +commemorative +musicals +Yvonne +Directed +Starring +Costume +Chamberlain +Hess +widowed +flame +messenger +sad +dances +Russ +insists +reluctantly +courtesy +privateer +tender +creativity +giants +botanical +GM +clutches +versatile +Features +Fat +Hour +screening +Eva +ingredients +Kathy +butcher +dose +Jakob +Emanuel +ceremonial +staffed +nomenclature +mating +sensory +Woodstock +McCoy +Keystones +Peel +revolves +genocide +bids +Angerstein +cement +Thames +Greenwich +wharf +Glass +leaking +undertaking +adjoining +Nobunaga +Wilderness +Scotia +Ski +Donaldson +nodes +Pier +Aleksander +Pact +deported +centralized +interiors +frescoes +curved +NME +Dale +Smart +anonymous +famously +Elite +invitational +transitional +maybe +Or +myself +susceptible +pressurized +drying +archaeologists +persistent +Africans +Barnsley +coats +sewing +shoulder +Ivy +Shaun +rebuild +sloping +leasing +Wilma +Woodward +modeling +Ike +RPM +Tina +Thelma +poetic +Lines +enthusiasts +benefited +mounting +pushes +Quality +Source +kilometre +Brighton +diverting +jurisdictions +splendid +Guards +XVI +Meza +Vidal +Quartet +Montebello +Emile +teenager +OFC +Relations +concurrence +councillor +Rotary +Hellenic +Granada +successes +percentages +Noxapater +mob +feedback +confirming +Bermondsey +groom +Literary +Arabia +initiate +procedures +mahr +abusive +mandatory +waived +Prophet +virgin +severity +deceased +aunts +siblings +integrate +Uyghur +sprint +posting +Teaching +Ankara +angled +fuscous +requesting +subsidies +eaten +powdered +Valle +dried +grape +CUTEr +quadratic +Attack +spearheaded +transmitting +Rubin +servants +devotion +departing +Benedictine +lunar +rides +severed +bath +obstacle +Fujisaki +Bad +salts +dome +knights +Reich +Cologne +episcopal +earnings +Opportunity +Brisbane +Kumar +Salve +designing +indie +Penny +Names +cultivars +cottage +Different +Double +bloom +olive +systematically +blow +Georgetown +defeats +viola +Chapman +Marching +hop +Rap +Yi +Wu +Offshore 
+helicopter +cents +Panthers +Coordinator +soloist +inpatient +tuberculosis +Baby +presentations +rack +supplier +Tin +affects +tailored +deficiency +remedy +entertaining +Colin +Mans +captures +blockade +simultaneous +slated +Alden +aligned +Vegetation +riparian +mourning +Cattle +encoded +accidental +nominating +caucuses +Leaders +Elected +hostage +turnout +trends +integrity +refund +Weather +indicted +Personnel +Melanie +fixation +Cannon +Heather +bombings +bombers +lightly +suspicion +quarterly +definite +shrine +tavern +liturgical +adhere +porch +Perugia +bishops +relics +Carlo +dinner +Schoen +Clinical +Pediatrics +CA +translating +Krycek +Spender +hampered +vulnerability +Cigarette +warming +furthered +Yorker +Critical +Pearson +broadcaster +prevalent +greatness +hostility +denominations +pertaining +Hayes +Bury +Netflix +drowned +expresses +Porter +Gurjar +tin +Lancashire +prohibiting +enforced +Use +accountable +grams +Strategy +subsistence +administer +Schedule +Gareth +Ruben +tariffs +Historian +coffee +coarse +diverted +timely +cuticle +algal +harvesting +Banda +noon +transparent +glory +drill +heavier +logging +accumulate +lining +humor +geographically +Ecuador +prefers +Runkle +playwright +Dudley +Truth +straightforward +Wycombe +Leagues +theological +pastor +tolerant +boxer +bravery +knock +unbeaten +amassed +Regarding +Eve +Producer +Motion +contractor +holders +hiatus +Emma +Macedonian +envoys +pottery +Eden +evangelical +equality +gambling +distinctly +resorts +struggles +breath +emotionally +emotion +senses +Doujutsu +Mino +Okoi +bald +energetic +suspicious +limbs +Tsubagakure +pulls +stabs +Ofuku +caring +feral +brutally +pet +sheltered +Beneath +wires +Shogunate +witnessing +flashback +Passage +seize +postseason +Carol +finalists +pitching +Simple +dancer +Holly +Gymnastics +waterway +birthplace +Vale +chancel +sharpened +phases +Gaur +MFA +Terminal +Delaware +Sterling +Ceramics +Sharon +Lou +Shrewsbury +worried +Rahmatullah +answering +Uri 
+Randi +mitigate +engraver +gray +counterfeit +withdrawing +Canadians +fleeing +initiatives +facade +Byzantine +Cargo +Citation +comparator +arise +Aroostook +Woodland +Full +sensation +Strong +fiber +incredibly +parcel +evident +adhesive +hazardous +conform +Zoo +willingness +conserve +unseen +Volta +vegetables +emission +variability +persecuted +liberty +Algerian +justified +Mann +Yerba +postdoctoral +abused +Cora +Come +interventions +Huffington +Letters +Belfast +firepower +decaying +diplomacy +frigates +ignoring +obliged +Napoleonic +Overall +Dartmouth +lit +sovereignty +saturated +HM +Garabiol +conceal +Nippon +strings +Monte +sensitivity +depiction +Filming +Randy +rental +comparing +ASCAP +Killer +Mickey +commercials +Tribute +Segunda +Name +Ceylon +posed +Clerk +Spark +Hindustan +sociology +Sol +terror +Aznar +rearranged +conviction +Dow +premature +las +spire +Porto +Tree +phosphate +vitro +functioned +Reformation +Burial +refusal +Brewery +Bradford +Armand +Novgorod +Mozart +concerto +Concerto +electrified +unsafe +Srinivas +Valmiki +ISO +switches +eve +likes +conversations +definitely +Flight +unmanned +uniquely +thieves +subdued +Cap +Cannington +BHP +Warrior +Sodha +Everest +Mun +McLeod +Physicians +surgical +propeller +aft +troop +erotic +CAF +Ivory +annexation +Prussia +determination +Fast +engagements +Bull +Cutting +boxed +immense +reads +Serbia +Hindenburg +Geelong +bombed +tore +mustered +defenders +Molloy +Wally +surgeon +Jaime +probable +filmmaker +conical +demolish +feeder +nobody +tunnels +Entry +zoom +API +Vevring +Nursery +Combination +Italianate +remodeled +Conservatives +Sunset +Voyager +distinguishing +Easter +Gaping +Diamonds +Birthday +BWF +biblical +heaviest +Mormon +Notre +delays +Selene +shelter +Javier +Thirteen +journalism +inspire +italics +Pacar +Ketinggalan +jealous +Hampton +Holding +Mahon +Winners +callose +nucleic +actin +selectively +injected +trigger +divides +Gone +Felix +Variations +chairs +Case +devote +desperate 
+Hurling +Airplay +Sy +substitution +Luc +Credit +chassis +Perhaps +Gooding +Classics +Testament +prohibit +bundles +individually +consult +pays +taxpayers +additionally +Word +eliminating +gasoline +confinement +summary +Prison +Hagberg +Grave +Seymour +Franck +Half +Ahmadinejad +Desmond +diplomats +CF +denoted +symmetry +hardships +distinguishes +Chi +conquer +consolation +honest +drinks +arrests +occurrence +satellites +photosynthetic +absorption +Negative +disturbed +Len +Wikipedia +lawsuits +Ballas +Dresden +Weightlifting +Humboldt +syndrome +donors +modular +Ara +CrossFit +obstacles +hay +Southwark +canons +distribute +Null +guy +sulfur +Aerospace +touted +Twelve +Kids +securities +stripped +Foley +resign +Jennifer +Attfield +bitter +Panshanger +Bermuda +linguistic +centrally +Constructed +emphasizes +Platinum +Caney +grouped +Regency +destructive +Waitakere +methodology +Ranges +predominant +speeds +disperse +gut +Fisheries +exits +campaigned +corrupt +educator +Emerson +bounds +hunts +WTO +accounting +Vehicles +electrochemical +Projects +Preservation +Chaplin +Rahman +Chandra +Katrina +Tate +Spectrum +turrets +Bangalore +Sans +awarding +risen +exert +ambitious +progressively +Parties +Initiative +Koblenz +relocation +debts +preview +Google +situ +cardiovascular +Saskatchewan +handler +Schwartz +receptors +nucleoside +foremost +ignore +Kimberley +stretching +Pickers +Abbot +landscaped +Monastery +resting +creatures +Evil +Rwandan +prosecutor +peaking +Callum +fixtures +Mansfield +Architects +dos +whites +accusations +thoracic +prawns +arterial +Nazim +Mining +motifs +asbestos +quietly +MVP +Figure +Rocket +Nissan +welded +Banner +notated +nan +Harp +harpers +exemplary +Bonnie +Chadwick +promising +informing +ibn +inspectors +incubation +Ritz +Reduction +essays +Corey +Derrick +fifteenth +stationary +Ramsay +Matthews +Brooks +sitcom +PHB +Innovation +medication +Pieter +creek +governmental +breathing +Yutani +Nepalese +phidget +Phidgets +counterpart +Python 
+subscribe +Lopez +doubled +Dali +restriction +Patients +Cat +Roche +Turin +gotten +chickens +speeches +Carian +answered +nose +foreigners +Edition +Trapsoul +Huntington +Bellew +Kampala +beneficiaries +Flanders +Hugo +smell +Equatoria +Hortensii +borne +Ukok +hostile +Brook +rigid +superseded +Ladies +Clifford +graded +Ghanaian +Ordo +dunk +wells +majors +ascent +neighbor +toilet +Deal +validity +Fujinami +PE +telecommunications +dental +Chelsea +Jayawanta +Zaheem +Dorsey +bathroom +MacDougall +Rector +unconventional +rotational +manure +heiress +readings +lignite +Users +Xerox +Wonderly +jersey +MLS +Reception +stylistic +Lao +Hu +Jenkins +Blunsdon +Glabrio +Chaki +grateful +Shipping +GSM +Pescara +Kleine +Kinzig +Gie +defining +Ruo +punk +Nemuri +percussion +Know +Something +Buckley +Bono +Rules +Tortoise +Consolidated +Vermont +fragment +catalyst +frog +Ludwig +Aloy +subpoena +speedy +Tripathy +Lie +kicking +nasal +shanty +Delaney +Doyle +Monarch +cheaper +Donahue +Shields +transports +moist +carpenter +adjustable +displacement +propulsion +ASB +pub +flash +favoring +bolt +Pavilion +Pullmantur +Dueck +Rhimes +cosmic +Evergreen +manipulation +Cult +preface +Hurtado +penal +steelworks +wreckage +Residents +Colliery +raiding +Northumbrian +Gladstone +Redcar +Parsons +BAE +Tunnel +Emmanuel +asset +ESB +RCSI +Saudi +Fern +RIFT +decent +Vrindavan +Elephant +Growth +Jisc +Rutherford +Intrepid +submarines +Ferenc +honeymoon +Cove +fork +Brewster +ported +Monitor +Unix +ROM +Croatan +tsunami +Robot +Mubarak +Attili +Scripps +bull +Leif +Prefix +Flores +Cripps +Templars +Arms +intake +sorts +aerospace +Saddam +butler +Encyclopaedia +hears +disagreement +ribosome +FAI +Sporting +Cyprus +Pocket +combinations +fractional +KLAS +Bloomberg +interception +fumble +virulence +reconfiguration +Turn +Gras +costumes +Apartments +Annie +Garuda +Vespucci +crest +McNeal +Hasan +trash +Amazing +Cannons +Hanoi +Tourist +Hiawatha +hysteresis +THC +serif +liberated +Neretva +sect +McVeigh 
+Eleven +landforms +DisneySea +suborder +Tampa +tray +recharge +Allison +Cogswell +brush +Pasadena +gallons +Hulk +FD +Xeromphalina +CW +Gulfstream +commandery +Graf +Bangladeshi +DOJ +Bahadur +Breakers +Cavaliers +Played +Cartogram +lien +Licheng +NCIS +Nihil +vapors +distilling +Skewjack +Bletchley +Turing +Tong +Mullen +modelling +Turkmenistan +lemma +Christadelphians +RB +ARIEL +CASE +Padres +Shawnee +Confederates +Weasel +TFS +hrs +tabletop +Suzuki +Ellesmere +fluorescein +intracellular +Stages +Freret +Leptons +Systematic +Lonesome +Isabela +Pinta +Marychurch +Quay +Gauge +carats +Whitford +Esper +Eminence +Manfred +oboists +Dorph +Camera +Chloe +Lonnie +Jubbulpore +Lahi +Weed +Seed +Bayland +Sharpstown +METRO +civilization +Gradius +Conkling +Vasudevan +archaea +multicellular +peptidoglycan +GMT +Sarkozysm +Burgers +DirecTV +transcriptases +Queensborough +Hyack +ARIA +Nygart +WCYQ +Maddox +Mesurier +Elstree +Cumbernauld +Judean +hematopoietic +Corddry +Pippen +Wormit +Soltanieh +Wahle +Witoelar +Lichterfelde +Berke +Buckland +FIA +bulla +holder +Vogon +galaxy +Prefect +rewrote +intersect +rescues +Vogons +Franklyn +forefront +Floyd +cassette +Fits +adapting +omitted +Harmless +incidents +Sandra +blended +Miriam +Mk +plots +Rolling +Fit +Tertiary +omnibus +replicate +Buxton +Cornish +packaged +eighty +narration +Belgrade +reviewers +laser +VIP +downloaded +Hannibal +abridged +announcements +audiobook +EMI +republished +Hendrix +towels +Trek +elliptical +Merry +foreword +Bibliography +proportions +Numerous +Divine +Massey +Courtney +contributors +peculiar +excitement +exquisite +casual +Bulgaria +Jurassic +millimeters +Normal +Hampson +Superlubricity +slide +Tomlinson +AFM +similarity +analogous +sophisticated +Aberdeen +outspoken +Extensive +Jeremiah +implication +Tara +abbreviation +XIII +Brigham +Orion +Michelle +intentions +Mathematical +propagation +textbook +Lecturer +secondly +enigmatic +movable +automobile +Cliftondale +appropriated +Passenger +helmet 
+shield +questionable +patrons +publicized +Angelo +southernmost +Novosibirsk +unifying +breeds +familiarity +strengths +Extended +Wonder +Suicide +tentatively +Levi +Viola +Kenny +asterisk +contestant +Wujiutu +envoy +carnival +abandoning +Editorial +tutorials +Applications +Asai +unlocked +diminutive +permitting +shelters +Lijnden +mime +Horseshoe +encompass +Benson +leap +Beta +melodies +Italia +Calcio +Sounds +Soul +reacts +lasts +hypothesized +Guley +fraseri +religiosum +Kolkata +Rica +battling +mask +racist +spritsail +mast +necessitated +eighteenth +catholic +aides +packs +collectively +ammo +Ruin +survives +Intel +associates +transforming +chase +activates +Priest +Olympian +mythical +enslaved +showdown +gem +Yuri +Groom +consciousness +demonstrating +regaining +catalysts +poisoning +listings +distributing +mechanic +pins +grounded +Gaming +IGN +criticizing +ruin +polished +insight +Algotsson +volcano +climbers +Borussia +Kazan +Belt +pause +scenario +Noot +Noble +heirs +Male +Scientists +Enemy +particle +compose +disappear +Active +camouflage +frequencies +Baronet +Monitoring +attributable +pristine +convent +Duarte +Kedarnath +puja +inspiring +Pulitzer +formulated +inviting +industrialist +Grover +sororities +Karin +lethal +Austell +Pantomime +Cecil +churchyard +conventionally +Antoinette +zu +Cummings +uploaded +marathon +curator +Evolution +advise +ringed +companions +Oh +Gross +pairing +allele +attribution +practitioner +skeptical +asserting +periodic +sculptor +Alton +Trent +luck +Professionals +Philipps +Them +netting +Mugi +Athletes +Tiede +poisonous +mistaken +toxic +Blackburn +blacksmith +corps +mortar +mortars +Antigua +placer +Daoist +Things +isolate +cultivate +Hundred +starter +explosion +Derek +titleholder +learnt +Cooke +Jeanne +Irving +curtain +interrupts +shocked +Never +removes +envy +Carmela +vulgar +intentionally +shotgun +diversion +cables +schooner +References +Genchi +Genbutsu +happening +cadence +effectiveness +analyses 
+corresponded +exported +tutor +neatly +mosses +paths +Rao +Printing +Saab +idle +spaced +Benefits +accordion +Atkins +Purple +platinum +Bohemian +commuted +Brother +Freud +Hiro +costly +Females +Fruit +feeds +Norrbom +compatibility +drawings +helpful +Trinidad +somebody +hate +Clyde +architectures +behave +Duquesne +Aside +supervising +Irvine +secrets +Flowers +fabrication +bottling +BP +Dora +Whitney +Bayswater +Roslyn +southbound +Crafts +vernacular +Brampton +Greatham +concentrating +sourced +Fars +Neman +attested +Teutonic +monarchs +Defensive +uncertain +inmates +Nazis +commemorating +Belarus +autopsy +Saxon +Jesuit +cloister +sights +Kalju +bordering +yielded +airplay +headline +Present +slot +Downs +Veteran +thumb +quote +Twenty +Rubberwood +exposing +Genetics +Biological +coauthored +lactase +persistence +absent +Machinery +festive +Hyde +duck +oilcloth +sticking +seams +Jessie +orphan +senate +memo +accomplishments +stabilize +Medicare +donating +pregnancy +trans +Customs +Vince +Dall +aperture +bowled +abuses +Nord +Index +Trailer +smuggling +Journalist +randomly +interspersed +Wichita +grip +launcher +float +exerted +Crane +technician +tips +relevance +theorist +Toorak +searched +Ever +Nathanael +Militia +racially +Von +Comte +Territories +Lucas +Beat +frigate +Maluku +AS +Tahiti +codified +employ +subsidy +Opponents +disadvantage +junta +monarchy +Deinet +operatic +excelled +Isabella +Magic +Punjabi +projecting +Charity +onward +imposing +religiously +possessions +limitations +guardian +commentators +refuse +breach +postponed +obligations +defy +differed +diverged +terminate +Gender +Driver +flames +shaded +oblique +uniformly +kingdoms +petitions +riches +reigns +Lammot +Dog +paler +grapes +Testing +manipulated +heterogeneous +Maya +DesRon +Getting +Exercise +Shooter +smoking +moored +Dry +moor +Moor +Highways +synonymous +accessory +plug +devotees +mentioning +Maratha +Govind +Ashtami +Brahma +slain +consort +Devi +bye +Guest +Stephan +Lothair 
+Siegburg +Adolphus +Gerhard +archdiocese +diocesan +Cash +EC +reappointed +illustrious +accountant +sectors +sedimentary +petals +william +Gerard +Flower +archaeology +exceptionally +props +irrigated +orchards +observer +repaired +wondered +motive +traders +diversified +cosmopolitan +Decker +westward +purplish +greyish +thorax +costal +subdorsal +patch +Cathy +cellist +Craft +drafting +listeners +duet +BlueWay +constellation +Chinatown +borrowed +enterprises +Cotton +Winning +Lab +mater +Jets +Volodymyr +Thalassemia +clinics +payload +probe +Transfer +Indium +indium +absorbing +tanned +inducing +induces +fatigue +metallic +labeling +spawned +Kaplan +Beverly +Cabin +Sacramento +Juchet +assistants +nominate +Sidney +colleague +ICC +brood +automobiles +bat +nutritional +Templebreedy +independents +Kucinich +Mitt +Images +cortex +Ryder +Rodney +acquitted +Koon +disturbance +Lamorbey +Afghan +Was +uranium +feasibility +Sergey +corrosion +lid +Self +fabricated +rectangular +altar +follower +precious +offline +Greenland +Polar +relate +DVDs +abduction +kidnapped +mysteriously +hack +Crow +smallpox +defected +stricken +corpse +Cassandra +Praise +envisioned +wondering +Russians +Soldiers +kicks +Nina +serialized +Lars +crowds +harmonic +Devonshire +scarce +conservatives +tracing +Benefit +Smoke +Eastman +Chee +Dee +Sioux +procedural +Paralympic +unfortunately +competent +wicket +Dagga +constitutionally +herbicide +presiding +unconstitutional +Johannesburg +famine +contrasting +Burke +Hastings +silk +Mysore +restrict +impetus +canals +radiating +crippled +rails +alleviate +lowering +hurt +prospered +impressions +Oaxaca +Malawi +Ferry +numerical +basics +filter +weighted +phased +environmentally +Colombia +basket +Lucia +delicate +shine +neglected +prospective +Pontifical +Gregorian +Licentiate +Diocesan +overly +unpopular +hook +punches +Alfonso +barrage +Ana +Tomatoes +recommending +Includes +admiral +Aegean +Piraeus +Egyptians +neutralize +Gazette +cabaret +missionaries 
+variously +specialize +Danjou +hostilities +understands +insert +destroys +manipulate +overwhelmingly +vowed +lure +openings +Kazamachi +conspired +happiness +relying +sympathy +dissolve +useless +offended +weakened +fantastic +oils +pact +Kyohachiro +Shrine +remember +mistress +intervene +Warriors +armour +Kujaku +spinning +realistic +Tulane +MIT +Greenberg +Limestone +Mail +mudslide +Crawfish +developmental +Officers +BFA +honorable +Sonny +Called +Greenwood +Syed +noteworthy +Qasim +ancestral +Scholar +magician +distributors +Harvest +Scenes +intaglio +Wilfrid +foreground +vignette +Lyon +disappointed +lifespan +fare +Chen +garner +partnering +Midway +advocating +Hicks +Bern +pastures +births +seasonally +responds +alignment +bypassed +robbed +jackets +polar +protesting +Mateo +stiff +stresses +stuffed +Specimens +storing +react +Diet +Dorothy +objected +elects +seedlings +HD +cooler +luminosity +infrared +Shopping +contrary +theatres +Kripal +hailed +captained +Cheltenham +Cambodian +Placement +inorganic +McGrath +pleaded +exhibiting +Pauline +Galerie +curated +Henri +Archaeology +segregated +Mariano +suture +decisively +armada +stalemate +deploy +Tsar +consul +despised +totaling +concentrate +Pasha +extracted +induce +gaps +towed +prevailing +filing +starboard +intrusion +lighted +casualty +raced +conceded +battleship +cooking +processed +multimedia +console +loads +Epsom +seconded +recycling +airplane +Meiji +gangs +Northumbria +clashes +enthusiasm +Schepisi +Selleck +conglomerate +extras +slept +MCA +Wille +singled +rescinded +Theatrical +Have +seminal +Alfredo +urging +Resolution +Fidel +heightened +indirectly +Astoria +Bratislava +poets +Shin +tallow +Materials +exploitation +Qazvin +plaza +Enterprises +RFC +Wigan +heights +disks +jewel +Sonata +Biographical +SUNY +Viennese +Corner +Vol +inserting +resistant +touched +Sid +Vaikunthapurramuloo +Thaman +lyrical +predictable +entertainer +procurement +republic +reigned +Return +robbery +prolonged +Bir 
+overview +dissection +Securities +Isa +Tartu +inspector +Patient +Ownership +Equity +geared +turbines +Congo +monuments +flourish +Clement +damp +pizza +customs +stronghold +Gettysburg +banner +heroic +retreating +Boer +regimental +Gallipoli +ashore +gunshot +courage +Cherry +conspicuous +rallied +Newspaper +Catalonia +Roberto +Mainflingen +omnidirectional +tornado +clearance +mixing +Edmond +apprenticed +sited +spatial +pillars +texture +contrasts +breakdown +mosaic +shafts +Cut +Hurst +Alexis +cybersecurity +vulnerabilities +flaws +disclosure +Verizon +Acres +batted +interstellar +Jeffy +celebrating +satire +Saskatoon +detrimental +transitions +ballet +maturity +holdings +Outdoor +Lynch +investigator +grossing +Partners +Southeastern +repository +notices +aide +OVA +Bruins +opting +rarity +Sham +void +treatise +Institutes +Kereta +protested +Ichibata +Palmyra +Pingxingguan +Chiang +lucky +consortium +Provinces +disciplines +diagnostic +Difference +hygiene +courthouse +Bodnar +hydroelectric +node +traverse +extracellular +appressed +ions +permeability +signaling +Locus +myosin +utilization +viruses +specialising +infectious +grassy +Edgardo +Reese +Pike +synthesize +seafood +Seafood +Hero +ribs +joy +promptly +cruciate +colloquially +vaulted +Darren +Match +idol +Dominique +Rana +FWD +Wheel +Auto +Roads +NIT +Saul +dismissal +Favorite +renew +Sage +Blair +neutrality +planes +ejected +trunkline +Prescott +BBQ +merchandise +Thurston +Pierce +Yelm +BNSF +eastward +lane +Norton +Ruhr +Welch +Paramount +Evo +Friedman +distinctions +bundle +Proponents +socioeconomic +coincided +stipulated +CE +comparisons +Meetings +Want +hunters +shirt +betrayal +emerges +viewer +packing +Variety +Sao +Manoir +haunted +witch +cerebral +naturalist +Alto +snakes +Luftwaffe +Paulo +Vocal +Creating +Minamoto +Moses +Pilgrim +Goryeo +rid +sailor +designating +pants +mandating +compelled +spacious +fluency +Kurdistan +judiciary +salinon +Zion +nobleman +denies +socially +evergreen 
+overlapping +ridges +Highlands +Atmospheric +mathematician +wavelengths +algorithm +replaces +lag +PBS +Dynamo +consultation +doctrines +paramilitary +obviously +ALGH +minimally +marrow +Kara +halfway +jumping +predecessors +Kingston +Islamabad +boosted +Spin +Dewan +mandated +Strand +midnight +Scheppers +Holiness +Therapy +endorse +Constance +juveniles +oath +Businesses +correctional +Ethics +Stafford +Reviews +Orient +tug +grammar +Comprehensive +phonemes +discrete +Iain +Graeme +Yu +Colleges +halls +orchestras +Flat +Penn +Translation +Finding +purely +Willow +infect +predating +dispersal +pigs +footwear +halting +Palais +finalist +southward +statesman +Yves +Premium +screens +Moment +evaluate +concave +Crest +Reinhardt +BT +murderer +Products +spark +Rupasena +dramas +Munch +Candido +baptised +rector +Gunslinger +Morton +mainline +irregular +postgraduate +conductive +collects +default +undergone +FIVB +federations +Satyagraha +Abdur +disrupt +medicines +SNS +anthrax +stacked +novice +shorts +Handbook +Quentin +Track +accelerate +entrusted +resisted +disliked +blank +Seats +Councillor +Ferris +Yangtze +fourteenth +interacting +Bingel +extensions +flotilla +Auxiliary +stakeholders +Lauren +lone +stomach +bioavailability +caution +lacquer +Lima +swimmer +Ringside +Consumer +derive +effector +interfere +rebellious +Certificate +slab +repetitive +Dzhanik +scenery +UKFast +tech +Talent +imbalance +recognise +Humanities +signings +Brayford +Rooney +rumours +Hove +Bywater +squads +owed +Tomasz +Paget +clients +Agostinho +Santos +digestive +taxa +shrimps +travelers +Gifford +topical +successively +Partick +MPs +Jardine +Taiwan +Spartanburg +fielding +kilograms +Seller +Vampire +Fan +transit +piobaireachd +marches +standardisation +genres +fragmentary +lyric +dictionary +MacCrimmon +pipers +MacCrimmons +transcriptions +ornamentation +Laird +fiddler +highland +Alasdair +Falkirk +normative +Recordings +strung +mobility +drone +Cerdd +chanting +Jonas +Rookie +Chez +Donna 
+documenting +Erinsborough +Canning +ballad +aspiring +Atari +molecule +degrade +Pseudomonas +anaerobic +tandem +Midtbyen +Vesterbro +cyclists +Infant +playground +Barnard +classed +Ivo +Dre +Sandoval +Touch +Noroi +nameless +chronological +pseudonym +Sivasegaram +Wollenberg +afforded +Chambers +Lawyers +bias +Practice +Dakar +tides +supervise +Kipling +Hopkins +Larco +turf +Maggie +Deuker +clarinet +Aquatics +Oslo +Parganas +subdivided +Phillies +redshirt +reclaim +pig +attributes +mock +minaret +Masahiko +rematch +pinning +BiH +prostitution +onwards +Patni +Embryonic +aeronautical +MLA +underside +margins +dictator +Natie +Jules +Pilot +recycled +Protective +deadline +waterfront +overtaken +Plateau +lent +attitudes +Glacial +Meteorological +Associations +WTA +WIVA +Blyth +Income +chants +neighbors +behaviors +cruising +Mina +astronauts +catalog +doll +autograph +strongholds +Perfect +organizer +Bret +bordure +confiscated +Tuas +Revierderby +Issues +Mariyam +Douro +Idaho +Certified +Raised +fathered +MW +EGAT +Architecture +Laboratories +Sediments +Prominent +Polonia +Weber +separates +streetcar +Tulsa +Seema +Directorate +fertiliser +Constituent +Idrakpur +Redlands +rainy +tectonic +Cabrera +WPA +flexibility +stochastic +Jesper +Genevieve +Extol +Quincy +Sword +Kyoshiro +SHADA +Galicia +Susana +Moorish +Asturias +Os +Artists +tonic +Phoenician +repeats +hallmark +Rapture +voluntarily +oversees +Durban +camping +closes +Bring +Jhalakati +Sadar +Tales +JGP +Singer +Koehl +publicity +gravity +reversible +Bhakta +Naicam +Marquis +overthrow +Cyrus +dwellings +sewage +tradesmen +UMass +Boss +influencing +Colleen +raids +Waters +Plato +biographies +humanities +Nacional +Torch +Inc +Gonzales +phosphors +LEDs +waterline +boilers +boiler +searchlight +Leipzig +cardinal +Unified +Author +caters +CPR +Bolivia +Hawks +Poverty +ineffective +Copa +grassroots +Rally +Carbine +McNamara +Silla +Cruises +essay +Persistence +descriptive +dissertation +Ernakulam +commentaries 
+typesetting +servers +fractured +convict +shipment +dispatched +gruesome +classify +Shockley +Notkin +Jarrow +Vikings +Forth +oceanic +Younger +plains +nationalized +Parkin +Sedgefield +Mosley +roundabout +Storage +Juventus +Inter +conquering +nowhere +offshoot +GCSE +grooves +invariably +laugh +blew +approximate +Stakes +Lunta +Westerly +Bilbao +Marty +Northwood +Evslin +Suresh +closet +incorrectly +Katipa +raped +Detachment +Sabine +investor +punched +debugging +standalone +BIOS +bachelor +boathouse +Wright +biplane +Addo +Hamraia +Ville +ensued +campaigning +NDEC +operand +circuits +toll +CACs +gecko +boehmei +hatchlings +Hermes +badge +rowers +Joanne +Brackeen +Deer +shed +Chaim +Hebrides +eclipse +Aunts +Dahlia +GTP +translocation +UCD +Jada +Katherine +Southerners +misconduct +Retirement +Greenspun +Hartstonge +Palatine +projections +Brewing +promoter +pleural +Nets +Summerland +Crazy +chant +Voodoo +Cleven +Grimsby +treasures +telephones +LIHTC +foul +chariot +constituting +Ganesha +Hanuman +Olson +palate +molar +prisms +Marqise +Amir +True +epic +biographer +Chatelier +Havez +dummy +dull +Animated +memorize +Companies +semifinal +communists +rectifier +handover +casket +Sinful +Postal +negotiators +grenades +protocol +McNulty +Wiss +herd +Gerrit +Misia +Oricon +metaobject +Kerburan +Reconstruction +Blacks +Alemannia +Alemanni +ducatus +Howse +Polypodiineae +Allegiant +UNLV +Migo +basins +reservoirs +boating +canyons +floodplain +lowland +mapped +angered +Kogi +penetrating +flesh +caulocystidia +Eat +Orgaz +Eberstein +bloc +Twiss +educators +hourly +Leroy +mogel +Commandery +Stadt +Zu +Readick +Bukhara +Math +coincides +Zitzewitz +finch +Kyle +fisheries +Knesset +criterion +cords +Ascomycota +phylum +Katsu +burgstall +Voter +immovable +Sachsen +Twain +Raton +montereyensis +Styela +Sennen +Romero +Fowler +orbital +Tsinghua +ILO +upgrading +dialects +Haukr +Waterways +Drainage +Lock +astronomy +eliminates +Turkmen +Henshaw +Andean +mjosi +Morrison +sheath 
+SMA +Maidment +vertebrae +Kon +Westgate +coagulate +Pamlico +alpha +brake +Lockheed +Folding +foldable +Matches +Tiger +revision +Anatole +Southside +Paulson +Wilno +Bao +reformist +PEN +Sylwester +Amzel +Sheet +subgroup +Burbank +Ecuadorian +Fernandina +Volcan +goats +Chelston +ITW +authenticated +ESPN +Ava +mLiteracy +Takahashi +Bedrock +Shale +Hyper +Atewa +triglyceride +imageSee +SDNU +Bokenam +Hody +Mingus +Gunpowder +Jarrettsville +Medicaid +Togiak +Overdrive +Taxation +Dalej +jest +Nuh +BVIS +exposition +GAAC +Kroger +reliever +overcrowding +Caston +Tift +Tifton +Scientology +royalist +interns +Wae +Vectrex +futuristic +analogues +Tuen +fruiting +electrons +flagella +Quorum +Sheeran +assemble +Tables +condominium +Getah +Nkomo +Sherrwood +insurgents +SuKarne +Giroux +FSG +Qajar +Concerned +KUUU +Tharaka +Bitcoin +Paint +Adi +TF +Rgt +Guan +Taira +Laggan +graeberiana +residues +Buckston +Madding +Crowd +Dronne +Zabinas +Weisheimer +progenitors +Windwalker +Scottie +Malone +Conde +Yakima +Trumpeter +tundra +Beryl +Mimes +Chairs +Systrip +dulce +dehydrogenases +glutamate +dehydrogenase +Thiele +Ahaz +Read +mutually +embarking +cassettes +Talbot +Twelfth +bleak +reuniting +Joanna +Harmony +paperback +Unable +prehistoric +multiply +stranded +Disaster +enlists +galactic +surprisingly +abruptly +spaceship +declines +Frequent +noise +inadvertently +rays +showrunner +Freeman +Warwick +operas +incidental +Rory +catalogue +bodyguard +Pleasure +Audio +PET +iOS +unhappy +Character +predated +Reg +replicas +Yorke +Plays +introductions +chambers +expel +agenda +standoff +Compromise +weaken +cabinets +legislatures +unicameral +trades +ventured +Loving +Norrmalm +dominating +masculine +racquet +punch +muscles +hindwings +crystalline +atoms +rotated +nickel +implications +sliding +grafted +elastic +reactive +dynamics +optimized +AllMusic +autobiography +Outlet +Sex +Conrad +FRS +cigar +Jaeger +Tasmania +firstly +Fyrby +curves +boulder +repeating +Stora +Occupation +Clear 
+contingent +turnpike +snuff +Breakheart +Civilian +Reservation +refinery +quarterfinals +Pines +northward +Toponogov +Tomsk +Tierra +ideology +Cantabria +Cantabrian +hardy +importation +acronym +finances +Cullen +triumph +Units +customary +metals +indentation +Squad +Aquaman +Helena +Dwayne +semifinals +scattering +MHA +Successful +Peters +villain +accusing +Devices +gardeners +Bothies +Nordic +derivation +bury +salmon +Bothy +landowner +Marion +airspace +harmless +fence +fences +reclaimed +Corendon +announcer +fingers +clown +trainer +mentors +Babes +pretend +Crusaders +relocate +Democracy +Honduran +Tell +blasting +Incunabula +Artificial +Twin +Fact +techno +weird +Raggett +fascinating +revisit +Booth +Journey +Drum +dye +chemically +sunless +forensic +coloration +Phi +Newark +classifications +deciduous +endorsement +Terrorism +Judy +policeman +sprit +Armando +priesthood +Prakash +hates +Mumbai +regeneration +RMS +microtransactions +weaponry +Legendary +incarnation +Gun +Royale +reviving +Viktor +flashes +brainwashing +kidnaps +Godfrey +Brigadier +appease +amidst +culmination +souls +Facility +tricks +codenamed +proving +constructing +doomed +ensures +Eurogamer +reintroduced +confirmation +aggregator +sweeping +crazy +cosmetics +grind +lamented +predict +Tobias +Kurash +neighbour +Tashkent +Antalya +trousers +Halal +bout +bend +jam +Optical +guides +fog +adaptive +perfection +distortion +Mercedes +membranes +microwaves +fore +Foot +Suns +sparingly +Bass +photovoltaic +Congregational +Sister +heralded +contributes +mercy +assisting +pioneers +aldermen +picturesque +Gods +MacArthur +Sprague +exemption +lieu +employer +Ashley +Rowe +Coronation +wheelchair +torch +whip +Laurvig +wed +Auguste +Empress +janus +kitten +anyway +longevity +pierce +Marathon +Effective +Panther +upstate +ornithological +Ethel +plumage +systematics +recounted +perception +scrutiny +encyclopedia +Audubon +Endangered +Midwest +tracked +designate +Bard +MA +budgerigar +faint +fallow 
+undeveloped +moniker +capitalize +Portfolio +umbrella +tracking +Ophthalmic +donation +Hamburg +AG +Why +Sustainable +spreads +dominates +Sculptors +Kaiser +Pomeranian +Keble +Jutland +Verdean +Primeira +Verde +Cicuta +inflorescence +Stranger +corn +aerobic +Composed +lo +que +undertake +realization +stabilized +clarify +Kerry +Columbus +Valencia +instantly +coating +crust +Constantine +Riviera +Hart +Divorce +Browne +Hawk +Martins +choreography +Showtime +McQueen +Desk +Meredith +Pamela +Dubonnet +boyfriend +Happy +kissing +Brockhurst +theatrically +embarrass +invites +observes +apologizes +furious +overboard +Articles +managers +Freddie +Oceania +rarer +fragile +nephews +partition +transaxle +Gothenburg +vibrations +detent +shifter +weighs +Summary +Olav +Slovene +Skirt +Hank +Waterloo +DWIFF +festivities +manpower +bequeathed +terraced +Carole +Romantic +fraterculus +fruits +edible +larva +Larvae +pulp +recognizable +ventral +trait +Cassius +frequented +Baden +Melissa +Etheridge +Lucky +Need +rendition +Processing +rink +lured +Hern +Portage +interrogated +owes +installing +plantation +nucleus +Adjacent +photographic +cargoes +bays +Chandler +Maynard +Breuer +carpet +Gertrude +Yukimori +Izumo +harvest +castles +surveyor +tortured +Belarusian +dukes +Partition +duke +Governorate +Ghetto +Gran +Yitzhak +Classification +spectacular +Giuseppe +twinned +Faroe +Henkel +Maker +Britpop +Meet +Forever +Infectious +Nicknamed +dirty +Nationals +racer +Ride +fungal +latex +rubberwood +boron +Excessive +caste +chromosome +curious +Marino +Graphics +Oldham +linseed +garments +Ulster +Joaquin +Texan +affirmative +Hatch +mistakenly +Enforcement +Mixed +whorl +overs +cinematographer +documentaries +questioning +filmmakers +subdivisions +Shenzhen +manufactures +Merchants +detached +justification +summarized +Schluter +Biology +Geographical +Andre +Welcome +couplet +Chance +trustee +firearm +protects +Mitsubishi +lean +myth +Huntsman +trams +Interchange +Daly +meanwhile +testers 
+Bonifacio +Thanksgiving +singular +Battles +guarded +disciple +doubly +generosity +inhabiting +Thy +Increasingly +Bryan +Ballad +HMAS +Dock +convoys +escorting +recommissioned +Alvin +Confederations +exempted +establishes +Floor +Wagner +implements +Schwarzenegger +Secretariat +Search +Infrastructure +treasurer +orientation +polling +Elise +assignments +Wolfgang +remarried +Separate +Consider +grading +evenly +Amelia +cobbled +please +verbal +plaintiff +literal +kindly +gifts +Allah +norms +prohibitions +counterparts +quotation +conditional +polytheist +rituals +embrace +Speedway +Dover +interacts +Montenegro +Undergraduate +lengthened +nobles +Iberian +taxation +Delegates +resurgence +Eleanor +Lisbon +grandparents +FOX +Sinai +renaming +cheese +pan +monja +wines +Nero +subset +Moritz +reassignment +Puget +Cruise +Yorktown +distress +sighting +flare +Edge +equaliser +Rocker +controller +lamps +blessing +pronounce +Torah +Shabbat +Booke +Edo +Raja +Preserve +fortnight +prayed +forehead +wandering +tales +Prasad +bouts +Cadier +Keio +Breuning +gypsum +Smaller +Segeberg +feud +partitioned +Thirty +beetles +Herrmann +Marius +Produce +Madhya +Mahatma +Nagpur +forwarded +BCCI +budgets +LA +variegated +hybrids +praises +alkaline +frost +Bassal +Archaeological +Antonia +McClellan +depended +ascribed +staple +slips +Ruggles +swiftly +ochreous +brownish +dorsum +Grandmaster +MSc +Rolufs +Engraving +Placer +Accounting +arrondissement +Udo +Dzierzanowski +Markus +intermittently +Wanna +impressionist +proclaiming +Centennial +Jade +Zuleica +commencing +obsession +Theme +previews +Visitors +Juice +Kid +MonashHeart +cardiology +Dandenong +coronary +myocardial +cardiothoracic +vascular +migrant +dysphoria +persisted +specialises +specialties +pivot +intensity +UVB +hinder +deadliest +Surgeon +arsenic +subscribed +sunscreen +advertisement +SPF +bottle +factual +Gaston +Chocolate +jumps +constitutes +Bright +Prem +canned +Swift +Tule +waterfowl +Rare +aquarium +barn +complemented 
+contention +proportionally +conductors +electorate +inherent +instability +uttered +Jaguars +Karen +Hilda +benign +biting +myths +cottages +Sidcup +vein +vinous +postmedial +Abdallah +Saleh +Kuwaiti +analysts +detainee +reactors +enriched +dioxide +generators +modernization +INES +ventilation +cladding +depriving +defect +priory +exiled +tumultuous +embraced +Cairns +Raffety +Zealanders +graves +transept +Fra +Bernardino +baroque +venerated +worries +Watkins +southwards +Agents +supernatural +overarching +colonize +Differences +informant +forcibly +mistake +guarding +Elder +infiltrate +sacrificing +Fowley +CDC +resurrected +healed +clandestine +sticks +Knowle +Spartan +humanity +reinstated +centering +Bounty +experimenting +chips +intriguing +motivation +sentiment +Den +revitalize +Shearman +summing +baritone +selects +Doctorate +expectation +Evangelicals +foresee +optimistic +Davidson +SunEdison +buys +subsidize +researching +inauguration +Wounded +Thief +Agnes +Clint +meaningful +IPC +Nottinghamshire +pressured +modify +exemptions +Natal +consuming +hemp +regulating +reasoned +Drugs +viability +Myrtle +Clarke +harm +exploitative +Niall +yarn +Bihar +entrepreneurial +realising +shipments +deteriorated +Dollar +epoch +biochemistry +archaic +Witnesses +Fathers +SEF +oversized +HIT +liquids +specialists +Mud +borehole +cuttings +specialization +mistakes +exceeds +WSDOT +trunks +negligible +stamens +Helmet +merits +afterward +eminent +schooled +tale +descend +Crooked +Zhordania +prelate +Troy +Christi +homosexual +Bishops +Arturo +formidable +featherweight +Tracy +handlers +Micky +vs +horrific +surfaced +overlooked +coroner +Moama +predicament +Meistarakeppni +Chremonidean +render +Antigonid +contempt +bypassing +envisaged +mastery +Leinster +Shirley +Integrated +Su +differentiation +preach +Harlem +ideals +Sunpu +stare +scroll +cane +Koga +Renbu +corpulent +Rousai +Shogen +spray +elders +Manjidani +ambushed +butterflies +stabbed +granddaughter +darts +pragmatic 
+symbiotic +decapitated +mystic +apprentice +tricked +preferring +dries +suggestion +joke +swarms +Already +demeanor +proficient +enraged +outright +spoils +slaughter +shogunate +Knowing +favors +deserving +cape +mortally +Treasures +Dickson +Leitrim +Keshcarrigan +speculative +Baass +Poet +Bodily +Osborne +judging +Really +Get +Balsam +baron +Buckingham +excavation +Scandinavian +Gameplay +adjustments +garnering +Nehru +gallantry +Rowan +simplicity +inactive +Beautiful +Tribe +Quest +clash +Southend +vehemently +Mohamed +gymnast +gymnastics +Prometheus +libel +needing +burgeoning +bearer +stamped +Inuit +Niagara +genuine +unfit +Individuals +retailers +Fu +gala +Solidarity +persecution +Gao +Romanesque +Olympians +CPSL +fanatical +Climax +Wolfe +thrust +ligament +provost +unmarried +associative +unexplained +Recently +Elizabethan +Beaumont +memoirs +cured +fascination +pelt +furs +Fashion +byproduct +mammals +prevention +recreate +particulate +stubborn +durable +disclose +distilled +mammal +Barr +hell +heaven +deserve +Experience +characterize +ourselves +biologist +discoveries +Entre +amnesty +Uruguay +deputies +treason +Burkina +interned +indefinite +terrorism +seekers +WWII +incompatible +turban +convictions +marketplace +Arjan +brace +Taittinger +Shropshire +MAGNET +enrichment +Veige +characterization +catalyzed +abusing +Painting +Simmons +Invitational +Adolph +Fountain +Schlossberg +rebelled +revolted +vassal +triggering +entrances +promises +reinforce +intercepted +Rigny +fateful +atrocities +impounded +advising +logistical +Fellowes +tow +springs +endured +dusk +disposed +plotting +commemorated +aromatic +roast +reconstituted +outputs +Companion +Rubber +tire +Masayoshi +Tateyama +sponsoring +dwindling +pride +mediocre +expects +Wade +Thousands +insignia +angular +rookie +promotional +Honors +Uruguayan +pillar +nationalism +Marxist +LSSP +Vivienne +Mariners +Made +Romania +Alpert +Criminal +dire +condemn +propagated +Gregorio +ribbons +flocked +Prevention 
+Romano +Remembrance +Speaking +hastily +rupture +remembrance +Jana +Sugarcreek +Kettering +Jung +amine +surfactants +pesticides +potent +Results +Galton +Nuremberg +finisher +Associates +Hancock +Mercury +Thornhill +pedagogical +emigration +NYU +Darius +engineered +Hobbie +supervisor +traumatic +leathery +ventricular +limb +swapped +transliteration +Shekhar +Ravindra +threatening +rescuing +glimpse +Kulkarni +Rahul +gesture +posters +Mohan +minus +Bernhard +Vodny +monarch +Statute +gym +Serb +theft +jewelry +sunny +Carrizozo +Initial +Tallinn +Twins +encouragement +physicians +Regulation +mortgage +Makigumo +quadruple +scuttled +Chinakurali +Taluk +Brandenburg +missiles +modelled +winters +sunshine +Autumn +garrisoned +Tabor +consecutively +Ravi +Grenoble +restart +submitting +Mouquet +Promoted +Somme +trench +Ypres +Leading +depleted +Majesty +accorded +breakfast +Missionary +bombardment +Helmer +Crescent +Earlier +outfielder +motorway +guyed +masts +Beside +pointing +vertically +repairing +gable +Exposition +Durbach +horizontally +corners +expressions +outburst +strait +Walnut +Jeri +bug +Katie +DoD +cloud +YGX +prospect +PWS +Ecclesiastical +RAM +classics +Thursdays +surnames +pennant +dotted +toddler +compares +mammalian +rift +Riot +seismic +lowers +insult +decomposition +Danielle +Distribution +Hagrites +ankle +Assam +headwaters +Miniseries +Hard +Conjuring +Running +Byrne +Vera +antimatter +democracy +Fellows +coordinating +Rheinpfeil +conveyed +Hbf +wit +wisdom +proficiency +climax +wiped +guerrilla +exaggerated +credence +standardised +Groningen +visualization +Archives +pinpoint +insights +GLIMPS +astronomical +Make +Crist +Rumour +Mikurajima +Izu +archipelago +phyllodes +intervening +cortical +PM +Larger +cucumber +Callose +conformational +physiological +relays +synthase +encode +Strozzi +Battista +cm +PBA +Clara +listener +Pew +Charitable +pleading +dashed +toe +thanked +Tipperary +Shamrocks +prediction +Coats +stunning +Fairy +Winner +Ljubljana 
+Wajahat +Jordbro +dropout +contracting +baggage +handing +Bs +satisfactory +recession +intercollegiate +Prudential +Carlesimo +Success +Willard +Bulldogs +Soap +niece +Skye +Rae +Away +Swedes +Spitfire +collided +searches +convoluted +evolve +advertise +Fayetteville +Cascades +splits +illustrator +barrels +Essen +Barrett +economists +hopefully +Episode +Meaning +Empirical +Lottery +salaries +slipped +poorest +admit +systemic +erode +discourse +Lily +invest +groupings +marginalized +taxpayer +audit +checking +Proposition +Blythe +sympathetic +Johnston +Ozokwor +nineteen +Halloween +Dozens +plotted +Choral +Digger +Heikki +Madden +jockey +Roots +skirts +differentiated +Nail +Ahmed +unknowingly +Evin +lump +momentum +memoir +diameters +Archimedes +Danube +asymmetrical +bicycles +cellar +Si +decidedly +Ghiandina +intending +apologize +Tobu +confession +pleasing +Enraged +dives +cones +rediscovered +Dhaura +Tanda +normalized +Oceanic +latitudes +algebraic +perturbing +estimate +drones +Sit +enhancing +streamed +Clause +Reynolds +discriminatory +LDS +Peyote +discipline +inmate +Facilities +transitioned +LGH +Genesis +Milk +residencies +Prathama +detecting +Brent +Kristin +calories +bale +toss +skiing +Fyodor +privileges +Keratsini +curate +Sami +Slip +contractual +instrumentation +monstrous +Mechelen +occupational +Occupational +skate +Chef +pled +Mechanical +Gear +slogan +reelection +replacements +tapped +admitting +rhetoric +prosecutors +Rococo +Springer +disorder +Lanark +Clair +Fiddle +topping +bunkers +Rotherham +arbitrarily +reliably +Brittany +Deborah +Christina +seminary +Represented +alumnus +receptions +gymnasium +Armory +occupants +curtains +Stage +Alumni +Grizedale +Mystic +mi +Towers +identifiable +shoulders +overt +bleeding +noun +resin +fungi +preventative +Guillermo +undermine +Leaves +intervened +allegation +Mirdif +cinema +Krahn +revolving +bursts +quota +Odd +Bilston +Tingwell +hires +fulfil +Minerva +Basilica +hardcover +haul +Copper +Registration 
+Laos +denote +Electrochemical +plenary +Runner +Database +pawn +Pruszcz +CSV +federation +Rahiman +Calicut +Forward +Andrzej +Stockpile +packages +inventory +respiratory +compiling +Gunther +librarian +Seminar +Frieda +Herdman +manned +plaques +weakness +Tooth +groundbreaking +Ibiza +Serif +kibbutz +Ikhneifis +Myers +McDowell +denying +Luxembourg +Mainz +HNA +Airways +buyer +tenants +Cocoa +Chimera +Mozilla +acidic +generates +conjugate +Terrell +mates +MDMA +mice +avenue +Rojas +Julio +fluent +Royals +Coward +meals +inspecting +infants +indistinct +Physician +classically +worms +gamma +endless +dunes +thickly +Devils +renewal +abbot +Kitchen +Specific +Romantsov +Mariya +Maksim +Agalakova +recreated +dial +inaccurate +Fourteenth +Shackell +Maguire +Glick +undisclosed +contentious +Eastwood +cruised +Legzdins +Hendrick +winless +grabbing +Edwin +plagued +Stoke +chasing +dislocated +substitutes +contraction +dissatisfaction +Sita +spider +crab +biramous +fishery +appendages +Crustacea +Carboniferous +Universities +Kamal +Enquiry +unopposed +realism +privilege +figurative +salient +Telecommunications +borrowing +Boundary +adjourn +Underground +Tehran +Pumpkin +Trivia +trivia +Armored +rockets +Caught +howitzers +Smolensk +Offensive +ceol +mor +denoting +predate +Alt +simplest +siubhal +dithis +Mackay +orthodox +bardic +fiddlers +notary +rubato +Lament +confronting +disdain +piping +Masonic +corpus +Clarsach +interprets +PS +Piping +clairseach +bray +Dant +reconstruct +Eure +lodge +Dion +golfers +Weir +purses +MV +Stark +Shock +Fremantle +tweeted +Forster +teaming +wonder +Paige +Xanthe +Gibbons +Bacillus +Microbial +condensation +plastics +fermentation +Streptomyces +Frederiksbjerg +terminating +handicapped +toilets +fixing +acquainted +Odyssey +Fouad +nad +complimented +Bread +Mazzy +Velvet +drifting +Can +Kensington +troupe +Holiday +Regrets +Nanto +Odaka +disturbing +freezing +Shree +Java +Angell +Thomson +commended +Hartford +preferable +Helping +lauded 
+Corporate +SNAME +stature +Playing +Gussie +Ramhani +Dubai +Bellanca +Vidale +geology +millionaire +Rother +Relative +Megadeth +nomen +contrabass +Bela +Reza +Shirazi +Hussaini +Cubs +achieves +Melodi +moracizine +parganas +AFC +Josiah +pricing +Underwater +twists +mausoleum +Mustafa +amphoras +Judo +osoto +dan +karate +coffin +tossed +Academia +promoters +compel +intravenous +epidemic +Tuberculosis +installments +Lemmenes +Salomon +disbanding +Brite +reclassified +Stem +Cell +augmentation +Malika +Hasna +Hubert +accessibility +Thierry +gens +Lucius +Depot +caustic +Bonde +Institutions +disturb +poses +Micio +Demea +neglect +Burner +Lina +internship +Stony +potest +articulated +ultra +orchids +Degree +Holland +waited +Gorice +UT +Worlds +Berggren +AIK +dunked +improper +adequately +queer +rejection +Daphne +graders +Himalayan +Puebla +drunken +Ai +crater +recalls +hoarding +stacks +notions +eclectic +Kengo +WWC +NWA +IWGP +belts +differentiable +Indolestes +Palakkad +Legacy +LLC +Holdings +Expressway +Joo +MRT +Lothar +Andhra +deleted +erection +blessed +Xavier +Yasir +dad +Chargers +ace +Jain +Sexual +Traditionally +unnecessary +fencing +Fresh +cheating +User +plaintiffs +Attempts +Spatial +setup +Divinity +Compared +unfavorable +halves +inappropriate +Kunwar +revolver +beloved +Wellesley +Inspectorate +Everyone +Xiaohui +flees +Infotainment +Swindon +Masovian +Gorman +Sampietro +Racoon +sheltering +salvaged +spectators +architects +info +Neighbourhood +Lives +Irizar +DX +Emoh +Christine +hardcore +hypocrisy +Daiei +Shiho +Nobuo +Kyoto +lend +Malaya +Fennell +Bartlett +Lescot +countryside +termination +Germanic +popularize +hangs +bellows +Galacian +octaves +semitone +Ismail +tailor +Goals +palette +Likoebe +prowess +Clemente +Legends +Kodak +blades +Madagascar +signify +novella +Mind +Philosophical +Jayamanne +Sinhala +Annaly +Humber +ISU +Kayao +postponement +assembling +jurors +Speedy +violates +prejudicial +dismissing +investigative +Sofus +Meanwhile 
+unwilling +Cabernet +Jumping +emergence +worthy +Famine +Brotherhood +Strike +shortages +pledge +Taking +Hibernians +banquet +builder +deserts +globe +equatorial +masses +drier +Sant +Frost +conception +emphasizing +intimately +analytical +qualities +ant +chambered +Torneo +Parejas +Sangre +Oval +northernmost +Ramos +mga +Perez +Serrano +diodes +gallium +GaN +minelaying +conning +muzzle +awaiting +incendiary +Sympetrum +dragonfly +definitively +Scarborough +Jared +Dabney +saga +kits +Sovereign +Bergh +Lecce +Josip +spurred +Investigators +Lakers +Vicentini +vanished +spokesperson +rationing +politique +Siwash +Aquarius +Owners +rainforest +Lismore +Plasmodium +vertebrate +gametocytes +asexual +nuclei +cartridge +mm +Werft +CDF +Shonda +Narayana +Varma +Karikode +Malankara +Moolamattom +Lent +optic +Patriarch +Thelemic +Templi +Orientis +Magical +Gnostic +Thelema +Frater +reprinted +Aleph +Coil +Balance +tonnage +Mall +disintegrated +Guti +slew +cooperate +Mention +Boronia +albiflora +shortest +pallasite +conurbation +Wearmouth +Alkali +Goose +Bolckow +Brunner +Mond +Lucite +ridden +screw +Vehicle +Snacks +plc +Soil +labelled +footballers +Heroes +commentator +Ashes +Ethiopian +coastline +marsh +Truancy +Sungai +Pure +Diggs +excluding +analyzing +Wyoming +Customer +Hennessey +YSA +Zimmermann +Bayern +altogether +Panam +Handicap +Humberto +Appalachian +originate +sparking +bargaining +RN +Nahargarh +approx +Bollywood +Dulov +lacrosse +Lulu +Carrier +networking +ATM +Marlins +Hello +Berne +Herb +catchy +inevitably +hang +Daddy +spontaneously +Byrnes +GAA +Chimney +surf +surfing +hostels +resolving +Beast +Eastlea +Capra +bachelors +fever +Einstein +accessing +UNIVAC +MCP +dialect +rewrite +Equipment +Automation +DOS +processors +contending +VMware +Disability +Recreational +Minges +prepares +underserved +Nutrition +anchors +presidents +Patriot +Stampeders +Hinkley +Omega +Curtiss +Dayton +Squadrons +primitive +bulimoides +av +Malawian +Vicente +Daphnis +hypothous 
+Gutierrez +eurodance +til +bytes +LATA +CAC +Elisabeth +inhabits +hemisphere +biannual +Sidgwick +Lyttelton +undergraduates +Corpus +subscriptions +Dining +Rotterdam +Ketton +staircases +Porphyrios +Dobson +Controversialists +Kao +Khon +sauropod +dinosaurs +snack +Illusion +definitions +outlying +Holm +postcards +Mvura +Adventist +militaries +Reggie +Stiff +Mating +stealing +Bingo +fusA +undergoes +nascent +Legia +Daryl +EFL +Jundakate +Fed +COLA +Ralston +Lindsey +Independents +rush +qualifies +Qualified +bentwood +shillings +Silverpoint +tap +Mycoplasma +Laflamme +Experimental +Muntu +Cane +Rain +McCusker +ARM +Rattlesnake +categorized +Givens +Findlay +comedies +Farmington +putative +Initiatives +TBS +Ganjam +Arka +placement +Amerigo +vespuccii +chewing +semiaquatic +combines +Lundomys +zygomatic +foramen +predation +Poll +correction +sacks +Sagesse +Lebbiea +Mullingar +Consort +Edmunds +dislocations +applauded +Kiel +Clegg +Genius +Finish +Minh +thirds +Levant +Cristo +comfortably +Category +displeasure +Synchronous +rotates +flux +Otago +Kush +Indica +undermined +Clever +tense +Starr +lowercase +Linotype +glyphs +CSS +automation +thinner +mobilisation +Tripartite +Dinara +brigades +Vis +schism +doctrinal +Alongside +bullets +penetrated +Engagement +Biblical +deteriorating +pursuant +gallon +Watergate +newsmagazine +hardened +Dafeng +Zhejiang +Wenceslas +Lisp +CLOS +Syrianska +pastors +Methodists +duchy +allegiance +dwarf +sourcebooks +Patricius +ferns +divergence +Rebels +Adelson +Khyber +gorge +vault +urbanization +stormwater +reopen +gently +Irwindale +burst +steelhead +prospecting +sluice +Eldoradoville +dammed +farmland +impacts +potato +folklore +swinging +spearhead +Antarctica +Marv +villains +Hood +Laser +Biomedical +continent +Cranach +provenance +micrometres +gill +SF +casemates +checked +fix +precisely +Legions +legionaries +prank +Pecca +Ingleton +UCET +Kanara +Dewberry +Aeronautical +NY +Freiheit +Lichter +Seine +Mensch +Ameny +Ministerial 
+lowered +Rayne +Chauhan +Bhaiya +EFREI +Doucet +Bach +toxins +pelagic +demersal +Freiburg +epistemology +empirically +Tippett +caudal +Qiansheng +Mensah +Tears +Erysimum +Babylonian +pinnacle +Juliano +Seward +Retrieved +exon +Starogard +capsule +guild +fouled +trays +copying +scarcity +Enigma +ultraintelligent +airstrip +aeronautic +orphans +scarcely +silvery +Metz +blurred +Dorset +Bahamas +Allahdino +Inland +Dyson +Bosmere +attic +Naomi +UA +galaxies +Scriptures +Soundclick +Slantize +Jays +Quarry +RE +Marlborough +detectors +ballpark +Said +Slopes +agglutinins +degraded +Beaver +parrot +Albemarle +Goold +Lely +preceramic +pyrolysis +filament +ironing +wallpaper +Knoxville +Kiltiernan +Devonian +sedan +wedged +tailors +Quarter +Frenchman +Brygada +Cracovia +marshalling +Cauchy +Oskar +Katakowski +shag +Bat +Cellular +symporter +Ferrandino +Nakashima +Norodom +Freeze +McGill +Ogier +millstones +Quoy +disruption +Chelonoidis +abingdonii +necks +Kannan +Kents +wool +Tormohun +Mallocks +Christie +billeted +carbonate +fractures +geologic +Navratilova +authentication +Plus +Roku +Tawana +trailed +Insular +sandwiches +cuboctahedron +Pebbles +helium +Combo +Jedah +Huskey +extravagant +ASTM +simulated +Youlk +tubers +Akwatia +Etherow +GRC +Pterostylis +triglycerides +pancreatitis +Lalande +Anselm +Ajay +Pukhraj +Macarthur +Andra +symphony +hautbois +Corrective +NW +Beltway +Gaza +Painters +Marketplace +HealthSherpa +regency +Turtle +Forgiveness +Sehon +Jacek +Proszyk +Bielsko +nights +sipahsalar +Louvin +Lawler +Sobou +Reliant +USD +CARECEN +GANO +illicit +Hillcroft +Fondren +Concordia +Hurt +zoned +YES +XIX +Ostitto +autistic +WB +UPN +Valenti +Skins +Rath +Kivar +Montrose +Masarete +Yamagata +Mathias +Mate +nucleotide +nucleosides +demon +Sreeram +bodybuilder +locales +dubbing +Norwich +confessions +Tappert +eukaryotes +biofilms +peptides +Gram +flagellum +Archaea +pathogens +specificity +crackers +Filan +Presidents +RPR +Bonnet +Lockhart +Capernaum +Guardianship 
+Pastoral +Marigny +Faber +Mozaffar +Cingular +Frontier +Clem +Scouting +insurgent +extremists +massacred +Diyala +booster +Edling +Nithi +seeker +Bugti +Bentz +dominions +Poplar +Sastri +Verdeans +Marcoing +RA +cyclical +Gaskell +Raasch +Aye +Kwon +Whitechapel +edifice +Glenda +Taranaki +downy +conservator +litharge +Dyker +Aibonito +Motherwell +Longdon +Seleucus +Wissam +TCR +upregulate +barracks +streamlining +Hezbollah +Shevchenko +Rodman +Bryon +Diving +Wrecks +decompression +Wiltshire +Delivi +Fife +Craighead +elites +Maximum +insolation +Dolphins +Gaye +Bremer +Furlong +Zhigalovo +Moines +arrack +Niedzialkowski +Machnower +Ariq +decreed +Kievan +Khitan +Karakorum +Ariqboke +Kaidu +Ghazan +noir +LMGTE +Tulip +Kaiserliche +Harbuz +ACADs +ACAD +bonding +MCAD +Uzziah +Gurshtein +Aleksandr +Candidate +Astronomy +Mesa +Gloucester +Eoin +compilations +Approved +Tricia +creators +calculations +Innsbruck +Paddy +Eagles +CDs +foreshadowed +Whitfield +rework +Del +Rey +crashes +tiles +wipe +Save +rejoins +Message +happily +startled +Wowbagger +Constant +contradict +humorous +Dixon +Hulu +Mos +Rockwell +Commercially +Portable +Cindy +panned +casts +Schuster +Valentine +edit +stocked +Johnstone +unabridged +Audiobooks +Sometime +Interactive +TKO +Byron +idioms +Zimmerman +tablet +promotions +assortment +buttons +absurdly +SpaceX +Musk +Tesla +nod +bicameral +respects +bills +safeguarding +Rajya +instituting +retention +manageable +Unitarian +Questions +chaos +fiftieth +layman +theologian +eloquent +Except +pictured +sticta +Jurassipanorpa +Jiulongshan +setae +blotches +Sakhalin +kinetic +Observation +idealized +resonance +superconductivity +MPI +enhancements +subspecies +Gabon +swimsuit +Fear +Cougars +Siemiatycze +Podlaskie +imprints +Potter +Image +Speed +Ina +Theresa +Cave +signaled +Horatio +Carslaw +Transformation +geophysical +deformation +Sylvia +recognises +runestone +Runestone +pagan +apple +Arias +denouncing +Tudor +swamps +widened +Gustavus +harness +torso 
+Selectmen +archaeologist +Robbins +archeological +barber +incineration +clubhouse +Rumney +Marsh +Hammersmith +Melrose +ramps +Logan +moderator +neutrophils +inflammation +chemokine +activation +inflammatory +pathways +Riemannian +Mechanics +regionalist +foundational +federalist +intend +overcoming +cart +Clydesdale +Stud +shutout +plugged +deformed +static +hardness +permits +deliveries +correlation +assurance +Affleck +Margot +Harley +Emancipation +Jai +recurred +Jens +Mahoney +mandap +Stuttgart +PCIM +Honorable +Rik +IGBT +Device +microwave +Naoki +Sakunosuke +Novel +Newcomer +Kurara +wilderness +cognate +booth +upstairs +Zimmer +Alder +podcast +Zorro +tuning +vigorous +Toby +pretending +Autechre +Warp +panoramic +realms +contrasted +clarified +organs +Unione +Sportiva +Lazzaro +Eccellenza +Gonna +Jam +outdated +Train +dyes +keratin +isolation +reagent +latent +fingerprint +lettering +canopy +internment +contemplating +topmast +bowsprit +shipbuilders +dolphin +Chandni +chieftain +Missions +Fans +distributes +backlash +surpassing +reworked +Weapon +customization +attachments +slots +emphasise +narrowing +covert +Erin +somehow +tutorial +resurrection +disapproves +hunter +Alistair +undead +chemist +Unbeknownst +Medusa +renders +deems +Kronorium +Warden +activating +corrupted +Maxis +comatose +perpetuate +vengeance +Victis +Abigail +banished +contradicting +tiers +prequel +Andres +Informer +enjoyable +Battlefield +Kain +grinding +drawback +skins +greedy +Henrik +olympic +Bulgarian +Contestants +Yonbosh +innate +countess +Himachal +hypothetical +wavelength +metamaterials +likewise +bending +wearer +feasible +resembled +Damned +baked +Mostyn +Babe +averages +Conquistadors +Reef +CHICOP +reefs +Brigid +Teresa +Noongar +soup +Minas +Belo +Borba +idols +Tal +Wedding +Parvati +Relay +enduring +interdisciplinary +nurturing +consigned +financier +Isabelle +billed +Huntsville +china +floats +Marianne +Eton +Pitt +Exchequer +admire +Privy +Leigh +naturalised +Larvik 
+Aldenburg +cadet +Oldenburg +marriages +Dukes +Tufts +victors +Khannouchi +disqualified +Rita +biennial +reintroduction +Murder +anatomy +Mayfield +collaborators +chicks +shines +antisemitic +Cool +Sundance +Pale +Budgerigar +weaker +vent +patches +Cinnamon +lovely +Greens +youngsters +recessive +locus +mutant +fancy +watts +Risk +Padma +Hurricanes +devout +Nashville +Chip +supervisory +Yaiza +Playa +nativity +Blanca +Kerman +professionalism +Miners +seeded +Erasmus +Mid +Pembrokeshire +Haven +Tokushima +painful +Reports +operationally +Freccia +Barbuda +Diablo +Devil +diablo +han +te +fulfilling +disagreed +manifested +Confucians +Meditation +embodies +balanced +frees +Won +abide +goddess +Nate +Xorret +climber +gradients +erroneously +cools +Vid +Kidz +Defender +programmers +Jarvis +Coins +Ages +Website +misspelled +pastiche +triumphant +Cheryl +newcomer +Stella +Jo +directorial +Could +begs +kisses +yes +Twiggy +discontented +brothel +duly +Emerald +crewmen +swam +Ohno +solved +boots +Redd +Hansen +Botanical +Musci +Hepaticae +Botany +Britannia +naturalists +Nathaniel +Ras +Opel +Pontiac +helical +flywheel +sintered +lever +Tension +Livingston +Slovenian +accordionist +strayed +overcame +entertain +Calvary +slipper +impatient +angering +Arsenal +Pei +Bernese +patrician +restructured +elegant +XIV +Wattenwyl +Toxotrypana +dessert +cryptic +Lesser +listened +Jimi +Buddy +Shanks +impromptu +Westwood +Wake +Gore +Inspired +BICA +polo +WPHL +raided +Icon +extremist +seizing +Bantam +Leonid +thwart +Searching +betrays +ruthless +expulsion +Caicos +Chechen +Ames +exchanged +Wharf +Blackheath +Triangle +dredging +Dorman +sidings +enthusiast +unloading +Aggregate +Membership +rebuffed +Irene +Dobbs +expansions +renovations +renovate +merges +precinct +tramway +Northamptonshire +commemorates +Rossetti +decorating +samurai +crescent +token +assassinated +Sawtooth +Custer +narratives +resettled +Annapolis +Mohawk +Neither +relates +equation +piers +seaward +Yiddish +ceded 
+Minsk +captives +exterminated +rabbi +Khmelnitsky +uprising +establishments +Agrarian +behest +dissatisfied +perilous +masterpiece +Vilnius +Franciscan +CSC +sphere +visa +Meistriliiga +Drelnes +Reisholz +petrochemical +Melody +Partridge +Osman +Woody +promo +Maclure +Mel +Guitarist +Justine +unremarkable +IBMXF +Dirt +sneak +disagreements +Bicycles +USBA +hardwood +immersion +toys +lactose +Joachim +simulation +culturally +Yukio +Dominic +Northwich +Cypriot +linen +Leather +flexible +manganese +capes +brightly +Growing +rallies +councilman +exiting +federally +markedly +exploratory +undocumented +Paso +mayors +tweet +abortions +Immigration +repeal +unlawful +Violent +Waves +Yip +lamellar +columella +Raw +Gillette +Draughts +Hitoichiba +Containers +containers +clerical +Brunei +immunity +crack +cocaine +Ecological +Nazarkahrizi +deviation +magnified +Modular +Warfare +fusions +Bodek +consultancy +Yarra +Coburg +Essendon +Routes +Carthage +representations +antiquity +smartphone +CPU +Wednesdays +Drummer +matriculated +Kentish +brigadier +Ordinance +orator +exacerbated +mankind +Slow +fathers +fostering +Hannah +musically +guitarists +Traces +uproar +RAN +Guinean +scrapping +Rehs +Galleries +Lorient +Samoa +outpaced +neighbourhoods +Overtime +Standards +Wage +Cristina +Travis +Votes +unfair +SB +rationale +abstaining +Tyrone +Orville +Regulatory +Offices +PASOK +corresponds +Possart +aus +Susanna +Venus +Jammu +Kashmir +expectancy +Attendance +myriad +Assurance +Described +commences +evidences +inheritance +husbands +obey +custodian +believers +obligatory +protector +obedience +unfortunate +postpone +Unfortunately +arbitration +dowry +rejects +Give +invalid +attaining +Hadith +kinship +nieces +Signs +praying +vows +ARCA +Fenway +Fairgrounds +laps +Postgraduate +Forewing +blotch +abbots +reliant +ordinances +heyday +tapping +dispensed +sidelined +beverages +breweries +Nemours +Katharine +Kahn +optioned +humble +brushed +wash +Tyrol +vine +Val +cyclist +Commandant 
+Assigned +Destroyer +Intermediate +Maintenance +transited +Shipyard +overhaul +transiting +Vessel +FMS +Eisenhower +moorland +Holmfirth +Rake +VHF +descends +tackling +Pennines +NES +malfunction +bless +Psalms +nunc +Ascents +pray +posits +farewell +blessings +nighttime +Priestly +invoke +bestow +rite +Prayer +Ecce +hymns +Whole +cappella +mirrors +Maharashtra +Jayanti +manifestation +rod +Vishnu +insulted +Brahmin +vow +begging +Dogs +adorned +Rank +Crimea +brine +secundogeniture +caves +Bonn +Westphalia +tangible +OBE +Honourable +disabilities +Bridget +butter +Dairy +ruptured +Chartered +Vidarbha +Solicitor +advisors +Arbutus +Mapped +Dianthus +herbaceous +Culloden +nice +nectar +thrives +curl +barbatus +geographers +Seville +horticultural +planting +manuals +vegetable +sultan +signifies +fortunate +scribe +quoting +reverting +Mecca +Mesopotamia +fibre +Looking +greenish +Greyish +laterally +ventrally +tibia +tarsal +medial +dorsally +spurs +subcostal +narrowed +blackish +forewing +Abdomen +banded +grandmaster +Interzonal +Sinfonietta +Burning +Yue +multicultural +Faye +Janice +Angolan +Gangneung +Reasons +illustrating +celebrates +Brooke +motorized +foundry +Foundry +Jungle +Colts +Novytsky +Serhiy +refugee +catchment +cares +Treatment +Intensive +Rack +ISPR +ISPRs +Shuttle +Utility +Module +Hormozgan +Moderate +sunburn +melanocytes +darkening +pyrimidine +Fitzpatrick +outdoors +Vitamin +Josephine +radically +constrained +masks +transmit +Totally +Glee +Expanding +Money +Row +Houses +empowered +Tumor +Cars +pencil +arranging +Miercurea +redistribution +Kangaroo +strata +Unlimited +Typically +germination +wintering +walnut +Putah +Duck +relinquished +Voters +flares +rebounded +binocular +resultant +Vijay +resurrect +Sigmund +Eugen +sadistic +elaborated +pathological +Halfway +sorting +frontal +Hindwings +submarginal +writ +Abdullah +Salih +transcript +Mosul +Magyar +MVM +Governmental +refueling +alarms +Scale +Cistercians +brewing +Rite +Buckinghamshire 
+cornice +Hoersch +Metropolitana +Vincenzo +unfinished +Cesare +Innocent +Como +bust +triptych +XXII +shaking +Pediatric +Diseases +urinary +Torpedo +steepest +warmest +coldest +sinks +believer +mythological +discredit +Throat +resume +hilltop +recovers +Navajo +investigates +experimentation +unsure +hypnosis +telepathic +stung +administers +nanotechnology +purportedly +bizarre +benevolent +miraculously +crying +disappears +succumb +extraterrestrials +auditioned +hairstyle +shaping +meteorites +bees +melted +discontent +takeover +superb +plotline +madness +Talk +Citadel +Ellington +Stan +Peggy +vertices +linearity +criticizes +dispersed +Corporal +Victims +Jess +Sally +fireworks +Moose +Older +disillusioned +Wes +Saini +grace +Berry +Australians +Sachin +Afrikaans +foods +panic +Opium +hitherto +opium +Centres +dispense +legality +Innovations +liability +CDA +cannabinoids +prosecutions +Birdtail +Husky +Lagos +poorer +hugely +contends +weavers +anecdotal +thumbs +alleges +Griffin +dipping +modernisation +gunpowder +Carriage +ordnance +managerial +jungle +interconnect +shutting +Christensen +Falling +Asaf +overlap +patterning +Sandstone +inconsistent +contend +populace +Lavigne +Created +Livingstone +breakaway +Trax +DANCE +mixes +booklet +baccalaureate +Brookhaven +Fluids +hydrostatic +uncontrolled +blowout +swelling +fluids +spends +drilled +funnel +specify +schedules +Proposals +intersecting +Coliseum +corridor +southerly +Lampoon +leaflets +spike +epithet +filtered +baskets +delighted +notebook +bestselling +Tolna +Scarlet +impoverished +diversions +audacious +handsome +adventurous +masquerade +hint +Straight +folks +charm +fuller +YMCA +Quiz +Dinners +Eighth +Mater +consecration +homosexuality +homicide +Faced +vying +dressing +hardest +Brochu +Fantastic +porn +outrageous +Dario +complication +Deniliquin +Opened +upstream +Wentworth +Borgunarbikarinn +Attica +Antigonus +benefactor +chronology +Athenian +Areus +Whatever +Molina +columnist +Rapids +LGBTQ +Wicklow 
+Valenzana +pour +Micro +Haiguang +Zen +transfers +kernel +Howe +hospitalized +whirlwind +Mali +Basilisk +grudge +bloodshed +bloodline +Dojutsu +adept +brink +reconciliation +impaled +blinded +Kasumi +pity +ample +Udono +agility +bounce +squeeze +calmed +Shougen +weave +Alive +Fate +longstanding +clansmen +amends +commits +wrestle +malicious +awakens +symbiote +womb +expands +confidant +forgiving +deceiving +dumped +miniature +oblivious +mocks +unwillingness +silently +jacket +rage +childish +summons +glowing +stabbing +Azuki +vampire +snaps +par +hawk +warring +heartless +akin +Takechiyo +surrogate +Kunichiyo +Sent +Broadcast +Oka +Lotus +Root +passions +gathers +failures +plausible +sacrificial +Khuzestan +Mildred +Vorpahl +quantitative +Rotman +enlarge +surveying +unusable +Simcoe +aisle +Perpendicular +Spa +Archeological +inundated +beaver +cedar +sealing +abandonment +handheld +Doom +sprites +textured +GBA +Rajendra +peacetime +majoring +Cranbrook +Bloomfield +Aalto +acknowledge +Ceramic +Czechoslovak +Lester +saxophonist +Forrest +Elvin +powerhouse +Solid +heroin +lame +Alexandra +Waliullah +Muhaddith +Salaam +Haji +Imdhadhullah +Muhaajir +coincidence +Nanotvi +Collector +crept +footsteps +astonishing +disregard +Feed +Cheap +burnt +Coat +memorandum +wavy +Laurier +kayaks +ideally +Hume +printers +confiscation +reductions +ministries +Tang +Previous +Reporters +Amnesty +terra +carving +Ethnic +Rockets +CNSL +Catharines +Wolves +Supra +Shooting +Ciannelli +steamship +Bette +guru +Visit +abstracted +Alejandro +goalless +Gorge +stringed +Migration +forestry +wholesale +CHF +cues +stimuli +cue +droll +drolls +deranged +accessories +Taxidermy +carcass +modernized +Luncheon +teacup +Objects +infestation +Temperature +threaten +dyed +distorted +Cosmetic +saddle +solvents +solvent +nylon +fabrics +cellulose +preclude +rubbing +remit +encased +hangars +straps +fading +routinely +textures +silicone +coloring +convincingly +conserved +philosophers +Aquinas +cereals 
+cocoa +BY +photometric +brightness +fulfilled +Faso +Jordanian +fines +killings +Ruslan +invoked +condemning +Leyla +ads +Casa +champagne +Surrey +Buena +polymerization +Australasian +profound +sadness +extradited +Lyons +Gaiden +Bandai +Saturn +nineties +Residence +Wolff +mimic +uncertainty +Exhibition +Hat +Ulla +Purchase +Lil +reminder +genital +LST +ambitions +disintegration +despatched +fief +Cochrane +signatories +instruct +dispatch +contradictory +vain +gulf +Ships +appalled +exclude +masked +dismasted +discarded +Sphacteria +excuse +horseshoe +scrambled +fireship +corvette +gunnery +afloat +exploding +entrenched +camped +Protocol +compromised +petty +Carniola +Peanut +dishes +desktop +Educated +Nationale +Chevalier +Agricole +hid +endurance +lifts +newfound +knocks +premise +highlighting +sorted +typhoon +cheer +fictionalized +climactic +Yoji +CBC +shrines +expatriate +emblazoned +upside +sidekick +deserves +Audiences +Mighty +Beethoven +baronet +Calthorpe +fess +lyricists +Gets +enlisting +Schubert +anglicised +matriculation +stirring +Burma +pamphlets +Quit +Kusala +formulating +Alexandre +Mallory +Tied +Fetisleam +McDole +bottles +doubts +Ortiz +massacre +overlaid +transfusions +Communities +urge +poured +sympathies +perpetrated +anthem +Hemingway +Conservancy +Sarthe +Ave +flyer +Mondays +ethylene +polyethoxylated +surfactant +ester +neutralized +formulations +soils +harmed +indicative +fascinated +Oakwood +Sharqi +receipts +Halkirk +modernist +Randolph +untitled +Nolan +photographed +Directly +Mirrors +destiny +Nizhny +mediator +Sidelnikov +Liszt +invitations +Westchester +Tchaikovsky +Violin +fin +Catalogue +Recital +Musicians +Showcase +Unless +electrically +Dorinish +Lennon +Beatles +Pooja +Nivetha +cinematography +transliterated +ARK +HR +Amulya +Nandu +Veera +cop +flashy +treats +dear +Berset +Armasuisse +Hornet +landfall +consorts +Workout +responders +ladders +friendlies +Oct +overlooks +Notice +polytope +meandering +galena +Creed +downhill 
+skies +Valka +Schengen +Laxman +Kurukshetra +IMF +Col +Obrzut +Belcher +Ma +Tai +Mega +turret +elevate +throwers +sink +bombard +maneuvering +Pandavapura +exodus +Twangste +Electors +Document +oblast +Dmitry +Voronezh +fauna +statues +Leonhard +Pregolya +navigable +empties +inlet +Danzig +Strait +vodka +Trains +Adler +hegemonic +ousted +militarily +ethnicity +consulate +Lidl +predatory +blends +improvisational +Prodigal +Performances +Selection +exponents +Radcliffe +Kerr +Kishan +Raju +Dady +Ngoye +Baer +ACM +cheated +okay +forgives +bolster +medically +quartermaster +platoon +dislodged +Breaking +weakening +overpowered +boarded +Factory +Lilly +Woodlawn +hanged +Ritchie +FRSE +Leonardo +armoured +evenings +Trans +Telekom +nearing +swap +interpretive +drainage +signage +fronts +Humane +pavilions +au +Holman +Wilkinson +furthest +stumbled +inclined +jagged +plastered +Larisa +Hubble +og +Anita +Smyth +decayed +shipbuilder +Severn +Welding +workshop +robotics +Magnet +Greyhound +aggregates +Judges +Appellate +plurality +Marxian +leftist +hackers +Abma +Prins +Sheryl +netted +Runs +Innings +Earned +SO +Strikeouts +forewings +Eriophyllum +planets +Jupiter +Bernardini +Mauritius +Cambodia +Weekend +Rochert +ZIP +Footy +SFF +originator +Spalding +Thel +cream +upbringing +NC +Pearls +thank +imagine +shy +Dad +twins +balloons +sampling +disapproving +gags +despises +Osama +Laden +Hershey +fashioned +CGI +tumors +splicing +encoding +aven +OBJ +Stream +Speleological +brewed +Lancaster +PBX +Experiments +advantageous +shelf +hydrolysis +Energetic +binder +improves +cracking +richness +Axiata +badminton +Jazziz +Humerickhouse +upland +Preparatory +closeted +Owl +Bless +Anthem +antagonist +Liam +blockbuster +Quaid +planetary +civilisation +simpler +Ymir +refine +Astronaut +futures +aviator +Tenchi +alludes +Porta +Describing +lectured +bestowed +Padua +Battaini +jealousy +maid +Ostrich +mini +troubles +standout +Yan +westwards +morale +LTP +astronomers +Wide +handwritten 
+telescope +archived +degradation +sequencing +deduce +scans +Crosse +Interim +jamming +Elvis +nominees +wetter +administratively +scuba +polysaccharide +soluble +fractions +endoplasmic +reticulum +thickened +depressions +pore +entangled +equating +midpoint +remodeling +shuttle +mediated +Viruses +fluctuate +suggestions +owning +saints +Brewmasters +Lipa +Jun +Susanne +CHUM +commitments +Urdu +Aquarium +elevating +Gayle +ODIs +detriment +Farrell +icy +intensified +knees +Sheridan +Regarded +Christy +Tier +Downloads +Selling +Sponsored +Paisley +steeple +magnificent +Goodman +surgeries +Halifax +Hartleb +Doctors +Shiga +Prefecture +axle +brackets +uneven +airlines +Marina +Downey +Sixteen +Regular +tragically +monographs +Justus +Fredrik +risky +westerly +routing +totalled +Amtrak +Rainier +Hiram +Omnibus +Hattie +Hurd +Milling +Haskell +sewer +Nana +Channing +Vouchers +deviations +incurred +Angrist +compensatory +Scheme +incentive +inspections +parochial +proponents +efficiently +practicable +chooses +Zelman +earmarked +thinker +safeguards +violate +Establishment +precedent +Pensacola +tally +duplicate +DeVos +priorities +Amendments +tagline +infects +accosted +paranoid +tankers +chased +lodged +Bowman +contacting +formulate +drafts +Olof +Enugu +Shan +phantom +Bloody +monsters +imaginary +Trees +undergrowth +pagoda +Sundays +Tuscany +Teacher +Artistry +Thilo +Horror +guess +Kaj +Helsinki +Elgie +Willie +Nashboro +Peacock +Cleve +teen +Ewha +braided +unify +diminish +polytechnic +Nagar +freelance +Kurdish +mistreated +incarcerated +reiterated +Yusuf +Omani +Mahmoud +Oman +mediating +semicircle +Yehuda +Golan +dunams +pylon +pedestrians +unearthed +accompaniment +charming +il +advises +astonished +defends +excursion +panchayat +analyze +orbiting +Explorer +reflectance +latitudinal +Rouse +Compton +Tucker +vegetated +exploit +assessments +intrinsic +variables +Almond +aspired +RIAA +Gamble +Uptown +Eli +Booker +verifying +recorders +adherents +polygamy +outlaw 
+paragraph +DEA +perceive +forbidding +warnings +Corrections +Merrill +remixed +Minasyan +Osteopathic +Deaconess +Brain +Transfusion +Safe +Binz +Uetliberg +Toomey +tattoos +Competitors +Assault +logs +qualifier +sandbag +sandbags +progenitor +unrest +AO +Gamma +aka +IK +ordination +incumbencies +Synod +Sorokdo +Mainstream +Ghosts +mastered +Giorgio +downloads +Quarterly +Ethiopia +Chicken +Pi +Nuttall +Rudy +Giuliani +Graves +Broadband +unconcerned +ridiculous +RAND +Innate +Magdalen +resonant +Elsie +Benito +align +Hertford +Lambert +Overseas +Ingersoll +compressed +intuition +postulates +enhancement +articulation +Ko +dean +Dieterle +overlooking +Updated +toggle +specifics +Keaton +Belleville +Arrested +illustration +plunging +Kauri +Sweetwater +Waipoua +bark +Roure +Villages +Organized +Gardiazabal +Rubial +artery +Roxboro +Batista +liberals +Maternity +VOX +accomplish +cuckoo +wasps +familial +Clyst +clockwise +coursework +Minnie +granary +HTV +bulletin +Committees +overturn +Namibia +deriving +overturning +Regime +confirms +cruel +Ananda +artiste +Rukmani +Ole +brasiliensis +Bud +brainchild +theologians +arcs +Beginnings +Zoological +MBE +railcar +Distinguishing +stickers +Terra +Universidad +Responsible +Scientist +wagons +Firstly +modernised +signalling +Either +superstar +Cecilia +admirer +Bloc +stockpile +Preparedness +Managed +contingency +caches +ALA +Gates +Chavez +Nepal +straits +Nora +Grain +Gottlob +query +Valera +Milecastle +bacteriophage +Highly +humiliating +Tibetans +Armoury +educating +aristocrats +utilised +pile +guarantees +Kermanshah +Imperfect +foral +Balearic +Municipalities +nationals +Palma +Unity +Canarian +Ruehl +SIL +Aram +huts +absentee +Camel +turnover +Fraport +Etihad +freighter +Antonov +Trading +apron +Landing +leases +Conventionarii +Nativi +Gecko +password +browsing +tabs +beast +Blog +majored +malonate +sodium +deficient +displaces +SA +Salisbury +trainees +Tomorrow +biosynthesis +clinically +carbidopa +metabolized 
+decarboxylase +psychoactive +mayoral +nam +palaces +Lucho +Bronx +scout +IIHF +Citadelles +Aces +charities +Polska +everywhere +Reid +Kindergarten +certificates +galleries +Rees +Interpretation +mediate +helminths +IgE +lysis +Banff +canvases +Tanami +bowls +Moving +Supported +glaze +motif +echo +Maurus +Troger +Marble +crafted +Garten +Exterior +unprotected +Location +Mosfilm +STV +Elizaveta +crumbling +hairy +pads +Con +Tryggvason +Gail +HRH +boosting +Caring +Sessions +backroom +fortunes +Striker +Naylor +debuts +expiring +rumoured +expiry +Watford +Inverness +Reflecting +internationals +thumping +expire +pound +stoppage +scour +Croft +thigh +matchday +appointing +Argonauts +MPLA +Malacostraca +Crustaceans +taxon +Pancrustacea +sessile +unchanged +biomass +barnacles +glands +hatch +ubiquitous +salinity +krill +zoea +precedes +spikes +Cambrian +Burgess +crabs +bony +Cuisine +Quetta +Balochistan +Eureka +Auza +Talsi +catering +retreated +Govan +reselection +Libyans +Chiayi +Jabir +unofficially +spanned +Pasteur +pharmaceuticals +rankings +accompanies +Mechanised +Caucasus +vocable +tempo +rhythms +virtuosity +phrases +treatises +antiquarian +Mor +remnant +harpists +lute +Craobh +procure +Raghnall +fingering +McMurchy +warrior +lively +cry +brave +signatures +MacPherson +archival +Watt +Salaman +hardanger +restores +fraternity +recitals +Kinnaird +flautist +disseminating +revisions +affirmed +Coptic +vizier +Talbenny +Listeria +granular +Dalton +harshly +Walkerton +illustrates +Clef +Camacho +Islander +Froilan +Covenant +conditionally +kilowatts +Steph +SheilaTV +Episodes +Partnerships +Maroun +Harrington +Slade +Rebecchi +toes +regulars +Goodrem +showcase +parental +Dutton +ZX +disrupting +faded +excepting +thermophilic +strollers +outsourced +UPS +Torv +pedestrian +Frederiks +tile +Bike +metro +Hitler +renumbered +parked +dormitories +Bath +Anatomy +Cariboo +Statistical +placenames +Diveagar +trick +prescription +tribunal +Confessions +Sal +fist +Request +Colm 
+Heatseekers +assigns +narcissistic +Buddha +telecast +portrayals +Adelaide +TS +Regret +Riko +escaping +Misaki +communicates +shocking +Hai +Samay +Krutika +forcefully +Surke +Thaili +Rajan +Woda +Kedar +Priyanka +plainly +Clash +GUI +Adobe +LLP +Shawn +Stryker +analyzes +Firm +Sloga +Rasina +Novi +figured +consolidate +Partnership +Drones +Francesca +Were +portraiture +Institut +Bahrain +tapestry +cremated +Aerial +flaps +jigs +Norseman +Emilio +Geophysical +Caltech +Rudyard +Rewards +litter +Cure +Herrera +kilometer +latitude +sporadically +Exodus +sandwich +Mars +Moylisker +Neue +funk +humour +Laguna +Keuper +Spektrum +McDonnell +randomized +Sultanate +CONCACAF +Thoreau +Alcott +Mack +Leiden +Jamaican +NHS +branded +PPRS +Caselle +sweat +metaphor +imitation +Sites +Papal +squared +reliefs +fortify +passages +antique +lions +Revolt +Nautical +Kodokan +judo +Karate +Yamaguchi +consequent +choke +insults +symbolize +seoi +twisted +Kill +JWA +trainings +mouse +Diario +clinch +exchanging +injecting +Gay +Hospitals +Balnarring +equestrian +Tiller +supergroup +disband +Crucified +Moffett +beautifully +Takes +Catbird +Cod +grafting +retrieved +Christoph +transplantation +Fushih +utmost +Cells +paving +aeronautics +Dharmesh +Tomar +scutata +undescribed +Amin +philanthropic +Gustave +tunic +masterpieces +Reynaert +playful +Seghers +lends +NLSIU +Incheon +plebeian +tribune +Destruction +Weapons +mustard +acceleration +problematic +Closure +Detva +redesign +arcades +proprietor +Gottfried +Kiir +Forestry +cultured +Katun +subalpine +Ctesipho +Ronnie +DMSP +kerosene +potestas +maxim +inhibited +litigants +expressly +Jus +McFarland +Instruction +suffragan +graceful +contradictions +ensembles +wa +Vnanje +Slovenia +selections +TEMP +Joneses +Aloysius +Tavorris +gastrointestinal +anus +Parasites +latrines +Renvyle +BSA +Trango +Usman +Tariq +Yamada +Saori +Gonzo +referenced +embarks +min +Maiden +Penrose +anxiety +thrill +authenticity +Items +electronica +upsetting +fairness 
+Pareto +Suppose +indifferent +Define +domination +infinitesimal +compony +argent +Usually +Nanjing +damselfly +azure +casino +Carano +casinos +Tropicana +Leisure +protesters +disastrous +addicts +assured +eclipsed +Visakhapatnam +Coordinating +Intellectual +Vannes +pious +Wilding +Listen +excel +kidnap +Ayesha +Jeep +Hussain +opt +Advancement +marshy +Cardigan +Malay +posture +kayotsarga +Wessex +criticisms +Dilemma +favorably +excerpt +imitate +poultry +Honda +Lunatic +Lampang +Mai +baht +zoomed +windowing +UI +iPhone +panning +Garrett +reconciling +Khorasan +Basket +timeline +Roughnecks +Swope +princess +ducal +Martina +Chandrapaal +Sujeet +Suddenly +grabs +AHL +surely +Bismah +Scan +Gibbs +borrow +alcalde +governorship +Delcy +suspend +Subahdar +Shitalakshya +pirate +Departments +Chrysobatrachus +grasses +Councils +Cladrastis +biochemical +Morales +Washoe +Seniors +kicker +Damonte +instructors +Prell +Francesco +Via +Frazer +hauled +McGuirk +Englewood +Bidwell +Homestead +planners +inflated +expose +asphalt +predicting +KMFDM +Konietzko +Elias +Ebenezer +telnet +Papers +frantically +doom +Steely +Tamura +Loomis +USDA +cryptostegia +razed +Moors +pitu +Cao +harmony +danzas +Razor +Duran +funky +Bee +Banshees +bankers +solicitors +Contracting +intermediaries +AMAIDI +Nicole +Gardenesque +Zulu +Kaizer +racehorse +basemen +thanking +Westmoreland +PNC +Deakin +Highwood +McFarlane +Absence +copied +Gossett +Cornelia +battled +Ditmar +adaption +repurchase +Harbor +continuances +duces +diligence +initiation +motions +pretrial +omission +justify +Jail +wakes +winery +Bordeaux +Mons +fetched +Petit +obsolete +Radekhiv +Lviv +Facing +displace +industrialization +persist +workmen +Redfield +Proctor +retracted +wealthier +illnesses +Berkshire +Woodrow +Beacon +IQ +Fenian +Emmet +AOH +Boru +liquor +realities +loyalists +affection +GraceKennedy +equatorward +heats +continents +Models +ecosystems +chronicling +anticipate +Sahitya +selecting +rupees +defied +Narayan 
+quintessential +overtly +websites +Holmboe +structurally +alto +prosecuted +impaired +faculties +obscurity +lucrative +forcible +Rainer +Camponotus +Colonies +stainless +butt +Incredible +Atlantis +Dorada +Tecnico +Voices +ang +Dulang +Essay +Eliza +Maranan +Nakamura +nitride +Reichsmarinewerft +keel +Versailles +disarmament +Ship +Palmas +maneuvers +KzS +Erich +Bofors +naiads +meadowhawks +Naiads +Sherlock +birdwatching +vibrant +TTC +GO +Tap +rant +Jarrad +Providers +pendant +Wanchalong +Lumpinee +Saylorsburg +Vedic +Palermo +surrendering +sidelines +Aristotle +equivalents +Linnet +Swing +theorized +disposing +Thea +Gallo +analysing +inequality +pedagogy +Rani +Consultative +motorboat +Zaragoza +Pick +haven +colourful +sack +Restricted +cabins +schizonts +erythrocyte +Egernia +Mulberger +submachine +Garand +FN +alloy +hinge +battlefield +lavishly +Vance +biased +Dunn +Namsan +maturation +Tomb +mural +flap +livery +categorical +Mellie +Julien +clocks +craggy +Alf +Fusion +Coleman +Palaiologos +plundered +Keezhmalainadu +Vadakkumkoor +Mosque +erstwhile +KSRTC +Sub +Mini +Dhanwanthari +Ayurvedic +Josephs +Illikal +easternmost +connectivity +DV +equations +exponential +Ecclesia +occult +Hymenaeus +Commentaries +chartering +Wasserman +Weiser +Magick +attainment +Guardian +conceptually +Versus +Initiation +Thee +Leaving +Titan +Trilogy +Pengo +shoes +Kazakhstan +redirected +Cyrillic +artefacts +Macquarie +oyster +Deco +CBD +illegally +evade +detector +Miocene +gram +Wearside +Lindisfarne +Cuthbert +Whitby +plough +Friars +Shore +Haverton +ammonia +petrol +Coke +Brine +cavities +Polo +undersea +monster +Coalfield +furnaces +Ewart +coils +estuary +Turbine +Kielder +supermarket +incandescent +Banks +RTC +investing +Aycliffe +Procter +Rolls +Royce +Ashington +Nice +snacks +RF +Easington +cough +Blaydon +vividly +Goswick +Braid +Cypress +Brabazon +equalled +Cramond +unitary +boroughs +butterfly +Unemployment +carriageway +Bergen +airports +Greenfield +LEA +Echo +headmaster 
+Levels +sorority +ventifact +eroded +sculpted +Ventifact +Mojave +abrasive +psychically +Ernie +Brea +retort +Audience +Rest +Thoroughbred +Emirates +Lens +organises +rename +Claiborne +encompassing +deferred +interviewing +Lietuvos +ACB +Cromer +Deoji +montana +IAAF +Rowland +bubble +rubble +pooled +parity +stern +Menon +ONE +Zonguldak +Coma +Gil +cheerleading +Wants +continuity +Removed +bullying +insane +homeless +Sticky +clever +Dungeons +Kilrush +paddle +shipwreck +tide +swimmers +cove +Divers +racquetball +Merton +hostel +chains +Contra +Schoolhouse +benches +parted +IPO +raping +compliant +Fairfield +Dexter +stirred +Adair +yells +paperwork +predisposition +Bros +mainframes +genesis +misuse +payroll +mainframe +boot +timesharing +BASIC +Supervisor +MOS +SNK +Net +Palo +smartphones +Phone +fraternities +ECTTS +cupola +onsite +Curriculum +offerings +Provost +outsiders +sustenance +faintly +standardize +Langley +Think +Wael +trivial +Hanya +Suspect +fictitious +Veronica +Campori +endure +papacy +intrinsically +Commodores +everybody +Aston +Ripon +Crawl +intellect +Colpitts +allt +Pionki +gmina +Radom +stadiums +Franky +Edvard +microcontrollers +overflow +opcodes +IXCs +subscriber +routed +Catharina +rumored +courted +Evert +Taube +ambition +Dwars +herpetologist +irregularly +colder +Kitson +Gracias +encodes +Varsity +apples +Magdalene +Wren +annex +MCR +toast +JCRS +quorum +Snowball +SCBC +Bumps +Harries +Kaen +Huai +distal +Paleontological +Maha +institutes +Rebellion +Hiebert +Addington +Abramoff +filings +Bute +archipelagos +Lerwick +Lamb +ha +Hoy +Iqbal +promontories +diaspora +Donato +arises +projection +greet +composure +favours +Constable +bestows +wonders +exceedingly +dictionaries +narrates +Inimitable +Feudal +barangay +methamphetamine +quarrel +prokaryotic +catalyzes +hydrolyzes +polypeptide +rotate +fusidic +Staphylococcus +mitochondrial +Stubbs +Qualifying +Shinoda +snippets +accrued +disability +saxophone +newscasts +NFC +Renamed +Menz +Nee 
+Roughly +songwriting +Kinney +bitterly +Sabbathday +shilling +Covered +Oscuro +Laughter +Barty +Varippuli +wrath +befriended +Bosillio +Clippers +liquidated +adherence +suppressing +tunable +intentional +Frequency +reconfigurability +spherical +polarization +Melinda +Virus +KACC +Berezovsk +crocoite +Isapur +freeway +halftime +Jennie +indistinguishable +coincidentally +Ogou +outward +Kris +Roddy +Melara +gothic +undrafted +Anders +Welser +nuns +sabotage +Biochanin +biochanin +Markets +Connections +TNT +Idris +Wynne +Bessie +posthumous +symbolically +Sanskrit +sage +procession +Thiru +Holocene +Eurasia +Carleton +braincase +enamel +folds +labial +Kiffin +sideline +backs +tallied +warrants +Rashed +Nasr +Damascus +Barnet +Vita +Debra +credible +identities +Seaven +Sting +Yardley +solutes +Dynamic +Nutter +Darin +inconspicuous +Stupendous +Yappi +wooded +tarot +bellhop +lighten +Buster +Beck +crisp +Newcombe +Era +sighted +torpedoes +sequenced +Roadblock +hike +Pagoda +bricks +tractor +peg +Harcourt +Glan +Navigation +Airfield +berth +prevailed +Saigon +Viet +persuading +Believing +infiltrated +outlet +overthrown +electromagnets +timers +cobalt +windings +reversing +brushless +accelerating +armature +Deventer +Cynon +Kirkuk +Chilcot +Inquiry +buds +uplifting +Waratahs +agitation +majuscule +uppercase +spacing +Bulmer +faking +Bold +typographic +minuscule +glyph +capitals +Font +metrics +metadata +PostScript +OpenType +mobilised +obligated +Broz +puppet +Serbs +evacuate +Imotski +Axis +Lola +Ribar +typhoid +rightful +Harwell +courtroom +axe +McLennan +dealer +inspect +undercover +sheriff +notified +Phillip +apocalyptic +cults +HRT +mobilized +Ferret +unprepared +Newly +Arm +countered +bulldozed +negligence +assaulted +disaffected +spokesmen +Schneider +provoked +Danforth +homemade +FLIR +bulletproof +Nichols +snagged +galvanized +Retro +centimeters +terrifying +leaks +Tampere +Ari +TMOU +compass +Jiangsu +buffer +milu +Reserves +tears +absentia +ICTR +contended 
+Aramean +typified +repetition +itinerant +Seeking +sociologist +boycott +interracial +Israelites +Austrasia +Dagobert +Austrasian +Meuse +Alsatian +Etichonid +mirrored +HortaPharm +Pharmaceuticals +Epic +Franjo +proudly +Rollins +Brewer +Kinema +Citrus +Weston +Gunns +Polypodiaceae +Dodgers +Majestic +Realty +Howie +Backstreet +retractable +Yellowstone +skirting +swampy +Piolo +ranching +watersheds +Saddle +Islip +Soldier +landslides +Rivera +scrub +forks +irrigate +diggings +staked +Conflict +percolate +quarries +Zarya +Tailgate +tailgater +reclamation +batting +Dinesh +altitudes +Stick +proliferation +labs +Kierkegaard +Commitment +Lorestan +Cement +Crucifixion +cheilocystidia +Stein +Olga +ripped +footprint +Waffle +Cruiser +usable +Fitzherbert +Kunigunde +Sikorski +Majewski +goby +Kingsdale +Keld +Doe +steeply +Ordovician +Lubin +nationalised +ventures +bargain +Valls +syntheses +thematic +Ramanagaram +airbrush +Seu +Apostles +Entrepreneurs +Ordinary +Opole +Ashby +Yarrow +refitted +Patent +sq +NAS +lemon +Curt +Siodmak +Sovac +Wuyuan +Breakdown +Lenin +Honour +Digi +RZA +sired +Newmarket +guineas +Gotham +um +ECHO +Awami +Mujibur +Flinders +bribes +Zucchet +Whatman +behinds +deficiencies +Albina +Hales +tentative +UB +Belizean +SAT +Ronan +Cobh +Vallancey +Holloway +installations +Botswana +atlas +Thermal +Dalthamban +Sahi +Jhilmili +Amar +Bahadoor +Pratap +Singhdeo +Samar +triads +Stewardship +allergies +cocktail +oysters +wares +Dani +Karavan +cared +presupposition +rationalism +verify +subscribes +generous +Faust +Darjah +Utama +Bakti +Cemerlang +muddy +Assyrians +spinal +richer +Mandavi +Basidiomycota +sac +recombination +stressful +nervosum +Rivka +unfamiliar +Sudry +Alexa +Reporter +VFL +Wight +Franconian +hewn +majorities +reconvened +Courthouse +abolitionist +polypyrimidine +FSIA +Ritter +proprietary +kopsteini +liberalism +initiating +Fractionating +reflux +Cooney +Quick +Penetration +bigram +Offizier +professorship +Rugians +Rugian +recapture +Baja 
+Moxy +Hungaria +orbits +Divide +Jiao +dodecahemicosahedron +Magnus +Swazi +unequal +ratified +Fireclown +BB +concession +authorising +duplication +standardization +Geospatial +Consortium +ontologies +Incubator +Snohomish +shortening +turnip +sagas +stabilizing +nitrate +actresses +Rats +Orwell +Mendlesham +Baylham +Caen +Stowupland +overland +towpath +Creeting +barge +waterways +Pipps +Badley +storey +convergent +Monsieur +wig +profiled +Betty +demonic +LPL +atmospheres +dialogues +arthritis +autoimmune +CCIR +Elimination +instrumentals +Grants +Danville +Opening +Mansions +baptism +stegosaurian +cooperated +Wuerhosaurus +vertebral +Dacentrurus +Saitta +Rochdale +neoclassical +Manire +sangita +spectra +hemagglutinins +Audley +Elinor +maroon +binomial +IUCN +parrots +amphibious +constricted +Chesapeake +earthworks +Forts +gunboat +flanking +productivity +Abbas +Homa +molten +silica +Carborundum +precursors +oxide +alumina +arresters +Schottky +MOSFETs +unleashing +Moissanite +jewels +fluorescence +Misawa +inactivating +Convair +aircrews +Keflavik +deter +ON +Bernadou +lays +Topeka +Fighters +TIP +Lathrop +elephant +Monzie +Corona +Fram +fins +Prints +Rev +despair +pamphlet +Cruelty +nationalistic +ragged +Taste +countless +Handmade +Litvinov +Limits +topological +converges +epsilon +topology +Tyrolean +Kix +furrows +Slingelandt +pensionary +Biophysics +thyroid +clone +iodide +Ravera +Myre +meltwater +oni +Kokon +Came +nonfiction +Gracia +assimilated +unification +isolating +Monique +multipurpose +Precious +remade +Prose +Arthurian +handmade +Vermillion +nonsectarian +Beagle +Testudo +taxonomy +volcanoes +phantastica +duncanensis +Genetic +Negra +Breeding +whaling +bromeliad +instant +mutualistic +conceding +Nest +incubate +Fauna +Mahmud +TriStar +Belgrave +Brythonic +Carys +Mohun +Warberries +Brewers +artisans +cholera +Crimean +Conan +Stoodley +Chesterton +respirators +newcomers +login +dwindled +unchallenged +SWD +Pumas +FunDza +Sen +Llanymynech +miga +thinly 
+tiling +Centered +octahedron +Rex +pistols +delisted +Scrivener +codex +Hikaru +Dusty +combo +Strider +overwhelmed +Rosemann +Gladiator +Seekers +Vazquez +insufficiently +franchises +Silvia +Capasso +Cleto +Esquivel +Birimian +trending +gravels +Ashanti +ounces +bauxite +Neck +Syedpur +Pasir +Teo +Ng +Hypertriglyceridemia +xanthomas +Framework +Shotton +Arnulf +aspartic +Osbern +ochre +Kira +Stowell +Pilsdon +Vernal +woodwind +cor +Wiener +conservatoire +musette +Lorraine +parkland +Skjoldungen +Harford +prelims +NKOTBSB +puffer +Vanderbank +Entities +Stride +misleading +Bout +Moves +Danger +Aiko +Sueur +Yadkin +laboratories +IPL +Thistle +Guevara +Gulick +consultants +McIntosh +Chrysalis +Haydon +Gontarczyk +Reentry +trajectory +AMaRV +axial +Turteltaub +groomsmen +shortwave +Cannery +Gympie +Abd +Alptigin +Tsuda +Egyngolia +Canty +hooked +Dischord +Unitech +Momase +bidding +Altman +renters +deregulation +GARC +Taafe +sidewalks +policing +Demonstration +Precinct +Grupo +promotoras +Braeburn +Pin +ISD +Mandarin +Westside +HPL +Ciner +Moved +Morelos +Coulter +millwright +Lennard +AAF +Tisha +Buffy +Nasedo +Congresswoman +Covenanters +APIA +spear +extensional +Niigata +bedload +Zephyr +proclamation +scrolling +Stern +nitrogenous +Aryanto +scar +Chola +Sault +Ravichandran +prosthetic +Rajinikanth +plague +nett +MYR +Off +unicellular +Chemnitz +Nicene +disclaimer +Tunmen +Kirikiri +lords +geld +Ves +prokaryotes +mitochondria +dormant +organelles +obligate +Whispers +Lands +Admits +fuels +Weibull +Feehily +trended +Wiwibloggs +Streams +Spotify +su +Quercus +Polystichum +Cistus +contiguity +Seminole +Dania +lows +adhered +corridors +Goldsmith +Fortuna +Crassula +Bouchard +Coasters +Xiaochuan +denominated +Calderon +Rociada +Selar +boops +Holtzbrinck +Readers +revenues +Vail +Ameritech +Mobility +wireline +Marx +Ludovika +Carpathian +Skorzeny +FCD +LV +Kryuchkov +retroviruses +ribonuclease +cDNA +strand +insulin +Shia +operatives +Anvil +Courageous +Avatarium +Jidell 
+Chuka +Azerbaijani +avenues +Zcash +cryptocurrency +bitcoin +Ethereum +Bugoslavskaya +corona +Burrard +Nanaimo +Burnaby +Pen +Kingsway +parkade +Lafflines +Rumble +SkyTrain +Keg +uptown +Burr +Muir +Naturopathic +Pocomo +Ada +Yasuda +Winkler +AECA +Brahmoism +Hemendranath +Sivanath +ropes +Garm +trainers +Trametes +Battalions +Wellingtons +TA +Searchlight +Maloney +Hypericum +delphicum +Arai +Och +Neotropical +Lacouperie +oriental +decimal +Sharpton +Seyi +Marceau +monocoque +outboard +FPE +hairless +carpels +Lighthouses +mothballed +Iinoura +UTC +HDI +Splash +FY +Ricans +Edinmore +Trimble +cots +Lias +Pleistocene +Epiphanes +rallying +Lasthenes +Faction +radioactivity +Debt +Tamar +lymphocytes +maturational +autoreactive +thymic +rearrangement +Matkal +Oblate +Foucault +Nishino +Luariz +Cador +Bigyan +Mohammadali +Khosravi +Buckaroos +Irrigators +Downfield +ablation +climatic +interglacial +LGM +Ferranti +Taplin +whooper +condor +cygnets +Rachmat +Lusatia +Sivagami +Freight +Genshagener +Heide +Pax +Toluid +Genghisid +Xia +Qara +Shi +Heima +Manchuria +Jochids +Ismaili +supremacy +Mamluks +Abaqa +Duwa +khans +Ilkhan +Similkameen +Skagit +Sumallo +Skaist +Inductees +Immediate +Umpires +Conductors +FASA +EgyptAir +Jannarone +railroads +Dreyfuss +Silverstone +WEC +cowry +melon +coregency +Tishri +Nisan +papyrus +Sternberg +Visiting +Haddon +misadventures +constructor +hyperspace +hitchhiking +Beeblebrox +spellings +spine +Usage +Entries +inconsistency +intergalactic +Islington +guidebook +Megadodo +punctuated +nonsense +Phases +NPR +Kingsland +Workshop +Dolby +Souster +Maggs +reprising +Zarniwoop +Lumley +Saeed +Hexagonal +Hawking +inaccurately +plunge +sacrifices +shack +fundamentally +Haggunenon +Krikkit +poised +Humans +Quandary +obliterating +dangerously +Flyer +misses +Quintessential +Hyperion +awaken +Infinitely +Asgard +Unabridged +leatherbound +Krikkitmen +Signature +Segments +setbacks +Def +homeworld +collectible +Ink +Hate +HNBL +Audioworks +Composer 
+Nullify +Shimon +Pearce +Animation +BAFTA +Hothead +novelisation +layouts +Shepherd +breakdowns +instantaneously +Babel +Appreciation +towel +beeblebroxi +OK +referencing +Utterly +Starship +introductory +amendments +liberties +Weatherill +reconsider +budgetary +frivolous +abolishing +Bavaria +Seanad +centrist +Slave +provincially +championed +gaseous +Frontiers +ecumenical +seminar +Reinhold +Fuller +perspectives +startling +generalization +majestic +curricula +Knuckle +Racquet +knuckle +impunctata +scorpionfly +impuctata +Avittonia +Nysh +coefficient +microscale +micrometers +theoretically +micro +slides +nanoelectromechanical +repulsion +Repulsion +Sumant +experimentally +Argonne +simulating +LAMMPS +Parallel +Homidia +Erebidae +Druce +Observer +Corea +bait +jailbait +Shandi +Susie +Teen +gown +Doctrine +Popes +BYU +Cougar +Fiesta +unsportsmanlike +Clarkson +Suzanne +reprints +Midtown +ionized +conduction +Conduction +Operational +Introduction +Geophysics +Canberra +RAK +runes +Scandinavia +stafa +marga +inscriptions +Vs +Hutchinson +Auditorio +Telmex +Guadalajara +mechanically +Harrodsburg +technologically +ton +tyranny +Parson +Revere +Mosman +tablets +allegorical +Bond +racetrack +Atwood +Floating +MDC +excavating +dredge +Hawkes +Walden +Bristow +cactus +Hilltop +MBTA +SCTV +Andreevich +Abram +Regionalist +splinter +stud +Punch +tractors +insurers +Sox +denizens +Tensile +necking +deformations +dictate +correlates +linearly +mac +Rathanadav +mounds +burials +Superman +Gadot +reprises +Ezra +Connie +Pavlovna +Irina +Daniil +Jensen +Murdoch +BJP +ascending +Dynasty +princesses +maidservant +acceded +Xuan +judgement +Inquisition +repressed +queens +Semikron +Electromagnetic +Microelectronics +Controlled +CAL +packaging +Konan +Mi +sae +NHK +Rena +Katsushika +Daughter +Bothan +hut +etymology +windproof +watertight +draughts +fireplace +spade +perched +Thrower +Martyn +suspense +examines +Roberta +Priorities +mimicking +riff +confides +pantomime +McCann 
+aspirations +Treatise +Oyo +renounce +Mani +Ibadan +brilliantly +tasty +solos +CMJ +Wax +opining +gel +nasty +Seconda +sawing +Sambenedettese +Coppa +Categoria +refounded +Birchmeier +Malak +henna +extracts +aqueous +Isolation +photoluminescence +maximized +ether +Lafayette +Tau +Triplets +forums +SARS +Subcommittee +Homeland +caricature +topsail +jib +irrelevant +Bortolaso +Kardzhali +Marathi +suitors +gauged +Jayshree +Sudhir +Solo +backstories +predictive +ballistics +teamwork +Automatic +projectile +customized +perk +consumable +Elixirs +Talismans +Raul +Menendez +narrows +equip +ATV +traversal +Immersion +tipping +overseeing +hinting +sparing +Gideon +vampires +instructing +Titanic +Artifact +activate +Delphi +inhaling +zombies +Zeus +inputs +Hoping +Perseus +slaying +winged +Masaki +detour +Apothicon +Aether +engages +willingly +elemental +awakened +Ascension +Elemental +Rushmore +Dubbing +Apothicons +Element +swears +instructs +Siberia +Summoning +resorted +retailer +Harden +numerals +Redemption +lithographs +tiered +purchaseable +microtransaction +Blundell +Ponce +Vargas +refute +disgruntled +duplicates +Vonderhaar +unlocking +eruption +Strobl +TSG +SV +Werder +koresh +Sportsmen +popularizing +sportsmen +amplitude +Anhui +Lobbyist +Kang +Philips +Josepha +Assche +Alphonse +robin +subcontinent +Kullu +Manali +Butyriboletus +cloaked +Developments +EM +confuse +refractive +optics +murky +shrouded +metamaterial +surroundings +metascreen +cancelling +reflections +scalable +overtures +keyboards +Ensign +Menorca +Buccaneers +Tams +highs +Sails +fringing +Closed +Tanzania +ecologically +Biodiversity +Giant +Coconut +repopulate +Mazenod +Maude +boarders +Convent +Boarding +sprung +IB +Mato +Gato +rapids +waterfalls +Usha +Slovakia +Hinnells +nurture +Hillcrest +deeded +Farnsworth +Mears +Craftsman +lecturing +Residency +Helios +Furman +Gregg +Hembal +accidents +widening +Pits +carnivals +motorised +worshippers +harshness +Harriet +Scanian +Siena +Nyborg +viceroy 
+Kongens +Urne +af +Tinchy +Aloud +crediting +Guangdong +Khalid +Ndereba +erased +Jeptoo +retroactive +Fundamentals +Computation +Whipping +Bogovinje +boosts +Allamuchy +Lehigh +ornithologist +ornithology +falcon +afield +warbler +naively +apparatus +downed +Recognized +finches +Raptor +peregrines +reestablish +Peregrine +hatched +pounding +Antisemitica +uncredited +Waldo +Sight +contemporaneous +Fumero +superficially +discernible +rump +suffusion +cheek +duller +violet +beak +devoid +Papin +breeders +Steiner +analogy +LPFM +WWJL +WMKX +shuffling +Brookville +simulcast +programmed +ophthalmologist +Rajyotsava +Infielders +Transitional +RWE +assumptions +forecasted +Durmitor +Javorak +Bukovica +Maud +Lanzarote +anglophone +Huskies +finishers +Bisson +Davids +peerage +Leonora +predeceased +Silkeborg +Monteiro +shootout +Spartak +Kursk +Volgograd +Niger +Selected +Shikoku +unstaffed +itaku +carrot +shiny +tuberous +hemlock +poison +unsaturated +aliphatic +Occasional +Materiel +wheeled +Trades +Dhankuta +Aeromicrobium +Yunnan +Sven +waltz +anda +visto +es +li +tranquility +inward +Quiet +unbiased +transcendent +Confucius +mindful +Qigong +internalize +thoughtful +Chou +empowerment +filial +repose +deliberation +Anann +Lowgators +Lacey +Frankie +FBA +Keeper +Coinage +reclassification +imitative +Valentinian +keeper +Chow +Salad +immensely +Madam +Violetta +Arnaud +Celia +Glynis +Bayless +Gus +Carne +Vaughn +Lesley +Barre +Wyatt +Maisie +ironic +surprises +rekindle +Pierrette +verge +comical +rigidly +reply +remind +exclaiming +weaving +Archive +Alva +Transylvania +Sopranos +retaliation +ruining +carelessly +Artie +kiss +sloop +distract +broadside +provisioned +Roseau +decks +stowed +Wescott +respectable +unquestionably +chalk +Gemba +Shades +Caridina +hybridization +botanists +Botanic +Towards +Kew +Pyrenees +Bentham +broker +Iyoas +chronicle +Janata +Powertrain +pinions +dampen +whichever +smoothly +Synchronizers +gearsets +detents +vibrating +Castrol +Burmah 
+validated +Ragnhild +polka +dedicating +Bulge +amputation +gangrene +Patton +Waltz +Tunemixers +Welk +Browns +Parrish +Stole +Weird +Hundreds +Councilman +submissions +Held +Launched +MovingMedia +Inclusion +Trochita +Fleming +magically +housekeeper +undo +insulting +potion +piston +invaluable +Frisching +Quemado +Rough +neat +Murex +tropics +guava +papaya +rotten +ovipositor +intermedia +Eggs +Fly +cherry +morphologically +phylogeny +australis +behavioral +sensu +morphotypes +microscope +maxillary +Thorough +Ry +trombone +Dylan +Regis +Janis +Joss +ZZ +Fender +Stratocaster +Cognitive +characterise +profiles +redistributed +Bestseller +charismatic +UPF +recommends +Aldrich +interrogation +Ilyich +pens +subplot +allude +Grenada +frontage +contractors +shunting +Manufacturers +sleepers +sheds +specials +railtour +traversed +Aggregates +YMLA +Bois +brutalist +Architectural +Curator +recessed +Monuments +Darlinghurst +Rushcutters +articled +SPAB +Naworth +Cumbria +Yamanaka +Sengoku +ornament +Katsuhisa +Confederacy +Maliseet +Acadian +ranger +lest +Katz +skier +Beyza +coefficients +Lists +shoreward +Prussians +Vytautas +Trakai +Magdeburg +Voivodeship +Castles +Rada +Ost +recaptured +APCs +Byelorussian +Surviving +massacres +Barbarossa +herded +ghetto +synagogues +ghettos +Haradnichanka +subtype +Yanka +Parma +Batory +polychrome +Remains +flourishing +Fontana +Cultures +Annually +Benrath +Pixies +solicited +Surrender +Wizard +meanings +Blondie +thirtieth +deluxe +Huge +Prodigy +disparity +fouling +Started +sanctioning +Turned +License +Depending +Cyclisme +Cycliste +FIAC +sanction +NORA +Snap +Transworld +BMXer +placid +yelled +aggressively +messed +Malaysian +grained +diffuse +kiln +shrinkage +Evolutionary +Anthropology +woolly +patrilineal +priestly +discrepancy +determinants +modernity +Haber +heroism +acknowledgement +Spinning +Despair +McHale +Salford +boiled +waterproof +Oilcloth +carriages +boil +waxed +leakage +bonded +endorsing +Raza +boring +Rosie +skipped 
+matriculating +intern +Feld +Lyndon +Hardberger +Cafe +Butt +Discussion +commencement +confidently +un +bullied +authoritarian +PAC +rectified +Mara +tighter +Erica +Survive +Engineered +Turbonilla +dina +angles +submedian +lip +PFC +Prophecy +Gillingham +Warwickshire +powerfully +Paraguayan +Kaunas +morality +ir +Aspern +Mertanen +Latonia +SZSE +Vanguard +refrigerated +trailers +shred +confidential +VGA +transposed +Contras +shredder +Nicaraguan +Dinner +Sugerman +addiction +overdose +bookstore +FRSC +Zoology +Diversity +Hashtrud +Etta +Wilkins +Rhythm +Fool +stanza +attaching +nut +handguards +avoidance +warping +alters +Shigeo +Saga +Taiwanese +Toyo +Hiroshima +Minute +prevails +Inspection +Anzac +Sturt +Sidi +baths +Qualcomm +telco +jack +subnational +Lot +Taguig +Kwong +Makati +cantata +prodigal +gigantic +Forge +Rochambeau +classmate +Drowne +mournful +gratitude +Camila +Keberle +Kendrick +Copeland +lightness +Borneo +Duntroon +Timor +refit +Naturalist +Ilya +Blanchard +Steevy +Tahitian +reorganisation +Lorena +baseline +workday +amend +Pay +Equal +Debate +Wilk +shameful +veto +burdens +Assemblymembers +Chesistege +ministerial +cortezi +indexes +regimes +reinforcement +SYRIZA +Isolde +Helmwige +Giacomo +Il +dem +Hutt +reconciled +Sikhs +juniors +MacInnes +inquire +Macdonald +gatehouse +Numbers +lately +ATI +PCI +chipsets +Comparison +Crisolli +nikah +Sasanian +Jahiliyyah +refined +dislike +tread +bedding +preferably +sermon +bridegroom +fulfillment +guardianship +bridal +insist +neglectful +arbiter +Mahr +transliterations +fulfills +scripture +chaste +Anyone +GOD +disbelievers +nullify +Forced +Indeed +connotes +Likewise +suckling +allures +ascertain +Sharia +justly +injustice +Understanding +Nikkah +motivate +fasting +navigate +stigmas +Mustang +rewarding +Kern +Raceway +Roush +Daytona +Haas +Marks +Toukouroua +maldivensis +elongate +dashes +stigmata +costa +Hindwing +Artemisia +intermittent +Visigothic +Changing +pledges +enshrined +repealed +usurping 
+Habsburg +formality +Sancho +Luisa +legalized +Sale +Prohibition +taverns +Hedrick +Belin +Flett +Sammy +Hidden +Lenny +Jacobson +sojourn +colloquial +dough +cylinder +chunks +buttocks +Nosiola +hillsides +dei +Amongst +Pinot +DOC +Kengere +Unconstrained +algebra +benchmark +superset +Bamert +Presley +Ingalls +refresher +Activity +Availability +readiness +Venture +Efficiency +bilateral +decommissioning +Kirklees +Dike +Pennine +Fell +Ramsden +Samiev +LJN +numbering +Septuagint +Vulgate +benedicite +Orlande +Version +enjoined +Spurgeon +midrash +ritually +kohanim +pours +Dominum +Toba +Archaeologists +sobriquet +Deccan +Patwardhan +Nizam +Hyderabad +conquests +Sangli +waning +punish +immersed +scriptural +skulls +loser +Kokkinen +Ichiro +Kagoshima +Oleg +misnomer +sulfate +Zechstein +disturbances +overlying +Heligoland +saltwater +Counts +principalities +Landgraviate +Weiss +Documents +archbishops +Willsch +SpVgg +Unterhaching +Stealing +mandolinist +KBE +accountancy +Butter +Narendra +staunch +statehood +Mukherjee +Vasant +Humayun +aligning +Sheregesh +TOPS +Marta +Gorilla +knob +Carpathians +tapered +brutal +stinking +etymological +folkloric +omits +symbolizes +naturalistic +flats +Middleton +cultivar +agronomic +sakia +stratification +Maceira +agronomy +kitab +agronomist +propagate +agronomists +liking +Umayyad +Improvements +waterwheels +aqueduct +Menahem +fruitful +farmed +proposing +horticulture +widen +Michele +sizeable +screws +Fairchild +Squatriti +concise +usefulness +theses +Cayman +Shining +Thorax +midleg +hindleg +fascia +outwardly +inwardly +protrusion +cilia +tuft +Tbilisi +Candidates +terrible +Eastop +aphids +Treasurer +Nares +Fires +Generator +Discipline +Crash +Alps +Maier +collaborate +Frozen +Orchestras +Sint +Maarten +Cantonese +CRHK +Begin +welcomes +Intercontinental +Cabinda +Chunnam +Stavanger +Noordzee +Helikopters +Vlaanderen +adjourned +Sherkat +lovers +MCC +Lucille +Lortel +Brantley +adolescent +contrived +easygoing +ponder +LaBute 
+eloquence +resonate +illuminates +insecurities +stumbles +ears +Lyceum +Marin +overheard +Almeida +Billie +Dramatic +Laurin +Jenna +cookie +chocolate +Latta +upscale +Levitin +Chopra +Texans +Sanders +SSR +bandura +Melodiya +angiography +angioplasty +imaging +Acute +op +CT +renal +pancreas +outpatient +mycobacterial +TB +Psychology +Psychological +Eating +Adolescent +psychiatry +cohort +adolescence +referral +paediatric +Payload +interchangeable +outfitted +docking +Logistics +proximal +Huari +Germanium +Gallium +PCB +darkened +Melanin +combats +oxidizes +photodamage +Ultraviolet +ozone +colorless +alleviated +wrinkle +shielding +whiten +Niels +curing +Tenerife +blond +sprays +Travelers +Tracked +microbiology +informative +Tori +Spelling +Unusually +Oroville +Butte +Elm +Opron +Aided +synergies +Marcello +Giugiaro +Sergio +ISTT +Dulce +Un +resurfaced +TMV +Mosport +simulate +dexterity +dice +positional +Blockade +symmetric +asymmetric +Nirajului +trillion +Eternal +endeavors +topple +affectionately +caricatures +Fazio +Glide +Rios +acquisitions +Swamp +snake +rye +uplands +grasshopper +sparrow +hydrological +shorebirds +Spectacular +forage +geese +slough +putt +BL +ambushes +manifest +Delegation +PLEO +Politics +hostages +outnumbered +Excluding +discrepancies +GOP +fovea +deviated +Safari +unplayable +LAPD +deadlocked +Mob +Heidelberg +scouting +Ida +Helene +sexuality +psychopathology +Oral +pessimism +cleanliness +conscious +orderliness +monotheistic +prematurely +Significant +Hollies +Charing +Woolwich +antemedial +whitish +extrajudicial +Serial +Combatant +Personal +Detainee +warranted +Designated +violations +Arabiya +CNN +localization +bipartisan +Fukushima +serviced +MWe +abstentions +Zsuzsanna +Developmental +Eighty +Subject +magnetite +neutron +fission +flaw +resuming +Powered +outage +mismatch +Strict +lux +Presentation +della +Maggiore +Palazzo +Braccio +campanile +Danti +Piazza +Alessi +sala +sarcophagus +relic +Oratory +Luca +delle +Gian +Sala 
+housekeeping +straighten +pediatric +Permanente +Endocrinology +Interviewed +circumcision +Circumcision +Ritual +Tetronarce +Sermersooq +massif +Courtauld +Frederiksborg +Fjord +skeptic +Myths +superiors +debunk +abductions +secretive +shapeshifting +Gunmen +Soona +assassins +Investigating +middleman +hoax +fakes +distraction +abductee +angrily +Kurtzweil +heals +gestates +culprit +betray +artifact +Kritschgau +explanations +ritualistic +rejuvenated +unstoppable +newborn +hopeful +needle +motel +Fourteen +Tad +Struggle +premonition +foreseeing +motives +precedence +hacked +faked +smoked +audible +Shiban +conceive +Colonists +Manners +mapping +Payne +Starpulse +awry +Iverson +ostentatious +Meghan +Millman +unbearably +Wanting +scathing +Pounds +Alekos +Iliopoulos +Nikos +Fantasy +reprint +Tatum +digitized +variance +pyramid +Spider +Riders +Buguese +Peril +catastrophic +Pentecostals +Emerging +planks +Caesar +Monkey +fundamentalist +determinant +liquidation +insolvency +NH +traverses +GPS +appreciators +tenancy +Contrived +tenancies +landlord +Woo +Evan +Hillerman +breakout +Ojibwe +Cowboys +Rosamund +Noah +thrower +Till +Mahaveer +Shot +Gavin +Basotho +Hazel +vitality +Transvaal +outlawed +criminalised +Session +Pharmacy +Abuse +Trafficking +presumption +invalidated +decriminalized +marijuana +uprooted +herbicides +Mario +cannabinoid +decriminalisation +commercialisation +classifies +authorisation +Sinan +Cardale +eastbound +northbound +westbound +stagnant +plunder +Digby +mismanagement +Calico +Dacca +Irfan +Ashok +industrialized +deindustrialization +bans +handloom +artisanal +TISCO +Ganges +multiplied +Gilmour +unskilled +Encouraged +Stores +hindered +treasury +constituents +Lang +unite +formalised +rims +growths +Ludlow +unambiguously +coralline +Hebei +stereotyped +bandy +daisy +Zec +Chichewa +Catholicism +Kenan +Tetsuya +Komuro +Disco +Eurobeat +PLANET +Gielen +DJs +dimmed +understated +Finger +Ayumi +countdown +Nico +Fourier +admiration +stir +deputed 
+intensification +fracturing +jets +additives +shear +thinning +viscosity +additive +muds +rigs +predictions +Typical +hydrocarbon +impede +Brayton +Spokane +petioles +ivory +floral +taxonomists +palms +foothills +understory +woven +thatched +accustomed +intrigue +vigor +turmoil +lurking +cleverness +Scrubs +Generally +pledging +Salvatore +punching +IBF +Taliaferro +deducted +Had +regulators +rang +Gianluca +Leija +Mayweather +recuperative +Antonin +strap +Rodrigues +pathologists +Cattet +Forzani +oppressive +Magritte +unmoved +Riverina +Permission +Premiers +Bent +Mildura +Gwen +Lex +Macon +Thainston +KR +Sonic +Bwlchyfadfa +Adelphoi +deified +nauarchos +classicist +Epigraphical +Thera +tighten +Corinth +anecdote +taunted +Cos +Lenz +Mado +Tianjin +THATIC +Hygon +microprocessor +Dhyana +kneeling +Spoke +frail +Putnam +acreage +markers +idealist +pointless +adheres +plead +wields +blinding +jaded +persona +desiring +Kisaragi +mound +impersonating +Muroga +boar +backstory +embody +kunoichi +fits +boorish +silenced +physique +obesity +consistency +deflect +Jingoro +cocky +Jubei +flexing +armor +sewn +slither +dagger +sheathed +sticky +shogun +chieftess +needles +ascends +painfully +tempted +tenacity +lusts +regenerate +reservations +memento +drifted +Apprentice +Chikuma +shreds +compels +sheer +Amayo +smearing +creep +Remaining +dragged +teased +bo +monkey +underestimate +tangled +grasping +stiffened +swollen +antiquated +smashing +interrogate +strands +Kokujou +slice +Chosen +comrade +undoing +hinted +consented +bulbous +expendable +dislikes +secures +regret +sway +Tendai +Yagyu +awesome +protege +rashly +manipulative +depictions +groomed +draped +Scrolls +inheritor +Butterfly +Squid +attendants +Inverted +Halberd +Rinne +Kelley +Scur +mussel +Laureate +Bilger +Principles +Bain +Artesian +Solutions +Peepshow +burlesque +ditches +Grass +toponym +Jacobean +Evangelist +lancet +Bartholomew +Atton +Tunbridge +unpopulated +fringe +Sand +decays +decimated +FPSs +Franchise 
+ceilings +acceptable +Baramulla +Chakra +Wilmington +Blaine +Mat +CSX +Bingham +Alec +Archie +Bray +infusing +Erlewine +Wynn +unsung +bluesy +shelving +Bit +Tyner +Matador +Gulls +Luton +Leyton +reformer +Vellore +Hijri +Arifbillah +Hijaz +Makki +Hadrat +Aziz +Spiritual +Shariah +Thittachery +Keeranvi +steadfast +calibre +spoon +reproduced +Claims +cereal +Fixed +bogged +Nickelback +loonie +printer +mauve +multicoloured +tints +redeemable +interleaving +tugboat +CBN +refrain +Gunderson +Moraine +Mounted +centennial +orchid +colouration +lithographic +lithography +smoother +Regina +uncut +sequential +transactions +unfold +conscience +Tiananmen +dissent +pal +opposes +Endowment +Release +Guo +arched +embellish +Kickers +Bayly +saloon +Always +Estrella +Featured +Marked +Strange +Din +Mysterious +Luigi +Kildare +Marlon +Epilepsy +VfL +machining +unproductive +Arrondissement +widows +importer +acquires +Perham +Bangor +paralleling +plying +taunting +Lame +Pelts +trophies +predisposed +insulation +Nile +ermine +robes +Surrealist +Object +Example +manners +vitamins +scavenger +airborne +infest +pests +shedding +infestations +fluctuations +residual +fats +putrefaction +collagen +insecticides +daylight +pigments +staining +Nitrile +gloves +straw +nozzle +HEPA +Greasy +Processes +unnecessarily +polyester +polyethylene +facilitates +undue +nontoxic +mouths +safer +toxin +ethnographic +Ethafoam +enclose +fragility +tiled +rodents +Sense +curators +Wendy +tailoring +molded +yak +glued +directive +Banno +Aichi +Knollys +Porthamal +knowable +monotheism +purified +intervenes +tendencies +Judaism +adjusting +truths +Rupert +Dzolo +Dayi +Draconis +emitting +chromospheric +coincident +Pilar +Eighties +inhumane +Loizidou +Ms +illegalization +rulings +Bitiyeva +Robins +Pruneyard +secularism +sharia +spheres +SpyVampire +Rogue +uninfected +alerts +scan +infecting +Spyware +Participants +Mar +hails +cricketers +Ram +Ranji +echelon +ChinaSat +integrates +alkene +alkyne +apology 
+Marylands +verdicts +motivations +Christchurch +Louvale +Gundam +Abstraction +Everson +Closer +Steinberg +Cartesian +subvert +Responsibility +Guided +Abstract +Thorp +Gottlieb +Sharpe +Forms +Text +Vosges +demobilized +Wine +Schweitzer +vineyards +roofs +Galumna +Harland +Ionian +hegemony +manoeuvre +coercing +fledgling +expeditionary +nationalists +liberate +raged +overran +revolutionaries +cessation +mediation +despatch +impetuous +assesses +brigs +Messinia +impassable +daring +Recognising +slipping +lee +Pappas +provocative +desist +navies +hulls +dogged +Genoa +shackled +involuntary +Capt +Letellier +mayhem +unacceptable +risked +FOR +captains +cutter +inflicting +muskets +peal +demoralised +Varna +jubilant +Buoyed +suzerainty +Constantinople +precarious +Tory +disapproval +memorials +Memorials +Pavan +aroma +flours +fattening +HDTV +GeForce +downloading +unavailable +voiceover +Springwatch +Merite +Bakumatsu +hooded +Mille +mesh +scarf +glasses +reinvigorated +contenders +intoxicated +Yomiuri +bunt +nods +fouls +infield +veers +rewritten +expired +Mansion +Kuno +Graphic +Commenting +Hanshin +Swallows +leaping +Yokohama +Magnum +gripping +slugger +locker +Maslin +CinemaScore +Redford +redeemed +CCTV +Peabody +Abels +Ella +Choate +Rosemary +Jayne +nightclub +spliced +Vanoff +Primera +stints +Deportivo +Eyes +Hector +Samaja +Doric +Bracegirdle +Galle +Reginald +deportation +Colvin +anticipation +fortnightly +Appeal +dames +comique +libretto +Talens +scaling +Wanted +Wharton +Spaniards +solidarity +Koran +Islamist +Puerta +PP +Rumours +Maragall +unequivocally +Instituto +midday +mosques +condemnation +condolence +vicious +weep +Response +Prodi +Schily +assurances +updating +Annan +militants +assert +perpetrators +urgent +alarm +ringing +reawakened +Gogh +Vida +Rowling +Pays +Loire +Aldo +Campeonato +Vacancies +officership +Erroll +Dunsford +Pompano +Yoon +SBS +framed +escapee +ethoxylated +polyoxyethyleneamine +ocular +transient +tissues +endocrine +Gau 
+Reichswerke +Ministries +eighteens +challenger +Idols +Kapoor +Auld +manse +internally +fountain +Fugitive +pre +Heaton +dispensation +Fartown +Headingley +Evgeny +Triumph +deliberations +Mendi +Aviv +aesthetics +Malyi +Clarinet +Premiere +Salle +EDITION +Recorded +MSR +strikeouts +diagnose +pulse +cataracts +Timi +hippies +tent +Geetha +Haarika +Hassine +Hegde +Murali +Navin +clerks +Aditya +Yasu +ledge +Appala +Naidu +kidnapping +solves +parentage +acknowledges +Aravinda +Sametha +factionalism +Eiffel +Shreya +Ghoshal +Blaaze +digitally +Grounds +Sankranti +unveiling +Firstpost +witty +Rohit +Vaikunthapurramlo +Karthik +mediocrity +Sharma +bang +Luxembourgish +Manche +gliding +Centaur +Aurora +Gripen +Ukhta +subordinated +Cabot +sovereigns +styled +Stefano +routines +Kettlebell +barbed +Sahara +Elevation +MTN +Tularosa +detonation +juniper +partitioning +polygon +polyhedron +honeycombs +Cretaceous +aeromagnetic +snout +Dive +CTI +Kjetil +Aamodt +Moe +yielding +Pskov +journeys +Hobby +Step +kindergartens +Kaithal +Jubilee +summited +Pal +mountaineers +Agra +pronunciation +Pacers +Ants +QEH +Yau +Wah +Kowloon +hospice +Wong +Formed +Surgical +Imaging +Kampon +traversing +reload +aircrew +SBD +PT +Isamu +Mandya +Sudan +Ottokar +Crusade +Truman +Presidium +Facilitated +Leningrad +Sergei +Pionersky +Kneiphof +Dohna +Rossgarten +Oceans +accommodated +regionally +Juditten +Wrangel +Rauch +squares +Interpersonal +backdrop +Centrally +graph +Vistula +Lagoon +Ports +Chkalovsk +Zelenogradsk +Svetlogorsk +trolleybus +televisions +perceptions +Jerzy +contests +visas +stifled +supermarkets +Mervyn +Wicks +safest +densest +Passing +Wards +Bensham +Whitehorse +Potomac +Dennison +Keys +exited +Vanuatu +Steeleye +Tongues +Viji +Bramble +Kind +Jools +Kit +Dom +Flemons +Score +luthiers +Krishnan +Lark +Rise +attributions +Bada +Bhojpuri +Anil +Monika +Anuj +Tiwari +Congolese +fertility +thrilled +hesitantly +aback +furthering +Queer +repulsed +Transferring +dengue +adjutant 
+Disembarking +Thiepval +coolness +Clearing +Bark +derelict +attackers +Boursies +reprieve +graciously +judicious +investiture +Ammunition +Footscray +valor +boldly +Peach +obituary +Cambrils +Massacre +Tarragona +mekaniska +verkstad +COGEN +cogeneration +proposers +Hutton +Melville +Allende +notoriously +intelligences +Inteligencia +Feller +doubleheader +groundwave +longwave +cage +radiator +insulators +undesired +twister +Steele +Silverton +bustling +masse +blizzard +stimulated +dismantling +stabilization +townsite +Structures +Photo +blasted +visionaries +EX +DG +HSM +rectilinear +Irix +zooms +protruding +Universelle +Adolphe +Chemins +fer +Gare +realtime +USB +FreeBSD +compile +specialise +vantage +horizon +monolithic +Andromeda +Mon +dim +Tek +encompassed +Indrevevring +mergers +Southville +Finds +Confessor +Avon +silting +tonne +coalfield +Opposite +greyhound +Nor +Bradshaw +Gloucestershire +Samir +Orbital +warehousing +footballing +gunman +Fuchsberg +refereed +coordination +Terheggen +Vulnerability +Coordination +EQT +mitigation +bounties +Batting +RBI +frazione +Zanjan +irrorated +discal +termen +Elmore +YG +Instagram +scouted +Gord +Swinford +probes +Nunciature +Delegate +Congregation +MOPS +hunger +Startup +Bakery +captioned +composites +appreciably +Labrador +parlor +enjoying +boyhood +bedroom +misdeed +gremlins +minivan +salesman +assures +scuttle +Chrysler +Millard +flattered +Zippy +enlightenment +epitome +Pastis +Swine +parodies +mashup +Bin +CG +Colossal +Leah +Forkhead +forkhead +shakehole +bends +Downstream +Pothole +HMX +binders +insensitive +Explosive +Thermochemical +Sarasota +Allegro +danced +Badminton +Muse +Highnote +Chronicles +Reubenites +Hagrite +steward +wiping +Slam +matured +Beltrami +coniferous +Norris +Rapid +horseback +Comes +Gifted +Fargo +Orm +Bigelow +Callahan +Nite +Zack +unspecified +Streisand +Jaume +Brenda +terraform +colliding +restock +Problems +delaying +Periods +Planner +Refugees +Humanitarian +Peacekeeping +Isekai 
+Seikishi +Monogatari +directorship +VAP +Funimation +journeyman +Karlsruhe +disgusted +Venetian +Historia +Historiae +academies +Teguh +Tuti +Indra +Casting +storytelling +Putu +Tempo +Citra +Bandung +minesweeper +uptempo +Shimane +Ichibataguchi +Meyrick +Beiping +Shanxi +warlord +defile +CPC +wildcards +ASTRON +LOFAR +ingested +LifeLines +neurology +Euclid +visualisation +Judicial +Duval +Goulding +Judd +Juliflorae +resinous +symplastic +plasmodesma +plasmalemma +sugars +desmotubule +Flowering +Daltons +constrict +unfolding +tagging +gated +catalytic +macromolecular +defective +Reconciliation +Adhikari +paediatrics +Florentine +Adoration +Hermitage +Glens +cycles +Zenobius +Eduque +Import +besting +awardee +Nair +Arnon +Flynn +Atticus +Freyer +fugue +Switzer +Phyllis +Shaftesbury +jubilee +Ghosh +Musician +conservationist +mornings +intertidal +endeavor +Dhawan +Brix +Tarzan +halfback +rib +tearing +accolades +mighty +Brickley +tacklers +jar +blocker +intercepting +Tuam +brightest +Offaly +Shekhinah +Hippolyte +Blanc +diminishing +stairs +header +Graydon +nicked +NII +Krakar +Rolin +feminist +hotly +Lebel +Delisle +Lucien +assign +Sher +Guldbagge +Okumura +cab +gearbox +axles +tyres +pneumatic +Pancho +Trucks +Convoy +Bonito +Angela +McMullan +Lisdowney +Gymnasium +sullied +Villanova +Invitation +Carolyn +frustration +idealistic +blames +mobster +thorn +Months +Quartermaine +Lila +amnesia +fabricate +Cramer +Willoughby +Ingvar +disembarked +arduous +enlistment +Diver +exploded +Ogemaw +Briggs +ridership +bothered +ordinance +tabled +SSH +Starlight +Downing +Tacoma +McKenna +Tenino +Architect +Federally +Weatherbird +Zant +Flour +citywide +MWB +Medizintechnik +Nokia +Akwamufie +Akwamu +WHA +Blades +Edmonton +Oilers +Paine +denominational +Wrong +incentives +instill +underlie +externalities +Indifference +Hoxby +Voucher +PACES +incomes +SEP +postulated +Parents +concurs +slums +Levin +PKR +advertises +posited +reflective +Scholarships +perpetually +socialized +GI 
+Pell +defray +disproportionately +prioritize +Eskelsen +curiosity +arguments +discriminating +discriminate +neovoucher +substantive +barriers +justices +disbursement +evacuees +simplifying +Betsy +usher +ingenuity +creationism +Contrary +unsolved +Mitch +disarm +chastised +Bronschweig +unexpectedly +attends +whisks +disrupts +domes +telegram +sketching +responding +Lata +scary +Explaining +Flattery +blueprint +achievable +Dust +Metacritic +Turan +complaining +Believe +foretold +Glenea +Aurivillius +Someone +Kengtung +Marseille +Montpellier +leper +Cour +miracles +Adil +Lachaise +hunchback +costumed +Publishers +Cherokee +Braille +crow +Gschwend +choirs +Hal +Colla +Take +musicality +Poetry +anthologies +Skating +Faithful +Vesey +Midwood +Marcy +Adrienne +Savoy +Malaco +Crume +Affair +prescribed +Womans +shades +blouse +oppression +alienation +collared +outerwear +polish +Awa +Ban +unconditional +suitability +unmarked +stepping +Sliver +cervical +spying +Alyssa +contradicted +semicircles +EB +radii +DE +Bnei +moshav +regrouped +SNP +Teatro +Finale +inn +Una +che +habits +vi +replies +Spiriti +Matthias +Egbert +Yatsuka +Skytree +womanizer +clumsy +impresses +luring +deceived +excite +whipped +fidelity +smiles +entertained +shoe +Sichuan +Farges +inaccessible +Bareilly +Mela +Katse +Sputnik +Cosmos +meteorological +Nimbus +Landsat +ERTS +expanse +dilemma +multispectral +quantify +NIR +mathematically +functionally +pixel +coded +subsections +calculation +derivatives +Radiation +Verba +Dovells +Motown +Highlights +Palladium +Caucasian +complying +packets +sniffing +handsets +recorder +telephony +packet +Florian +Hannover +FSV +Gerstungen +Thuringian +bigamy +Morrill +professed +peyote +Feather +eagles +Charges +claimants +Penitentiary +Asatru +prisons +discriminated +uniformed +Lajos +lastly +Yes +snatch +surgeons +Leavitt +helipad +CyberKnife +malignant +Invasive +Gynecology +cystic +orthopedic +transfusion +Infections +NAT +thalassemia +Voluntary +Statue +Aghasi 
+Sargsian +Sargsyan +Sihltal +Bahn +grower +Alliant +Affiliate +podium +Fikowski +Sanchez +handstand +Tanja +thruster +burpee +squats +hymenopteran +Stroganovs +Grigory +Terrible +Urals +transparency +isaotakii +Petri +Zai +PECT +Interscope +multitrack +turnaround +airy +reassert +evinces +Rakyat +persists +inroads +feast +pilgrimages +delinquency +Burundi +Consolation +glazed +recipe +suspending +Hendersonville +buyout +unilaterally +inoculated +vaccination +behaved +LaFalce +Hochul +subcommittee +Cons +wealthiest +Jeb +ardent +repealing +ACA +prostate +implantable +deductions +lawmakers +Asked +apologized +ethics +Immunotherapeutics +improperly +nonpublic +email +Gravitational +Extreme +RSC +Attribution +CC +Chains +Emory +Sarnia +Napier +Trunk +Recommissioned +auxiliaries +Proceeding +Mare +necessitating +Rand +Brands +Thunder +quantal +phonetic +consonants +vowels +Proposed +formalizes +nonlinear +aspiration +Ondrej +Paterson +Simone +Ryo +Margarita +Julia +Ju +Kramer +Megan +Cobb +rehearsals +Nippert +Rehearsal +overlook +showcases +TDP +Featuring +Toll +Levy +Neal +Tambor +Tunes +Nemo +Alias +tumbling +historiated +Figures +Swallow +Log +turtleneck +shirts +illusion +overlaying +sleeves +Brigitte +tops +sexy +Sarajevo +Radiostation +IPTV +Symptoms +chlorosis +verb +lesion +ITS +agar +plantings +scientifically +PTA +zoospores +cinnamomi +Feral +Waikato +Hilary +Trials +phosphite +Phosphite +sap +prolonging +Alonso +realist +Octavio +empathy +Alvarez +Steinway +Intergroup +Rougemont +Hillsborough +nestled +Cordillera +Flaveria +oppositifolia +Tanaka +Gerardo +Fulgencio +corrupting +unresolved +fraudulent +insurrection +sentiments +Restaurants +GOLD +lounge +Courant +habilitation +impress +emerald +wasp +subfamilies +hottest +driest +Hydon +Tahoma +Aged +Surgery +Cymru +Swansea +latterly +Broadcaster +Catching +Yiqin +Bai +Regulations +Dispute +enforcing +WWF +Ahankara +Sthree +Sundara +Abeyratne +offence +Kele +serials +Rune +Frankof +gum +novitiate 
+penitentiary +Jae +lion +Coaches +Haggard +smelter +roller +coaster +Stadler +Circulation +Goods +DSIT +Myanmar +KH +motorists +embossed +Slovenes +Zagal +Electrocatalysis +fundamentals +correlations +thermodynamic +Conicyt +ISE +Opinion +Macrocyclic +macrocyclic +GT +diagonally +Pascal +endings +rook +Voleibol +Ary +presides +Gabriela +Yanli +woken +Abdu +Thrissur +Aligarh +jewellery +convene +Bose +Kozhikode +Posts +Abdurahman +dwells +Oru +emergencies +Inventory +airplanes +shelves +Stations +Superstorm +HHS +stakeholder +antiviral +Kamakura +Hiei +pianists +pianos +Quartette +Librarian +Salinas +Cesar +librarians +tilting +Barracks +Bosporus +USGS +Wien +verification +Logic +Algorithms +Logical +CAV +Distributed +Docan +Rudkhaneh +milecastle +Turret +epithelial +millimeter +villes +greenfield +Bouygues +Cycling +Dazang +Eager +Dzasa +Norbulingka +Calcutta +Sikkim +aristocracy +downfall +demoted +Tsipon +Shakabpa +Hindustani +Clarity +emo +Nash +allocating +Winchell +toy +Councillors +Valencian +Valladolid +Gains +Sarid +Jezreel +Moshe +profitable +Natan +Heinz +proscribed +equidistant +frontline +USAFE +Schriever +charters +Instrument +Parking +Antiqua +maneria +assessionable +Conventionary +camino +scanning +Hyatt +Firefox +AOL +hypermedia +tab +Crimestoppers +hydride +solubility +carbanion +nucleophilic +Protocols +Boland +Felisha +conscription +intramural +appetite +placebo +augment +antidepressant +physiologically +Potential +Across +doses +mania +peripherally +valve +tryptophan +inhibitor +Excess +medications +insignificant +Griffonia +herbal +overweight +inlay +Rattanakosin +lai +Zenobia +Angelica +Milagros +cheat +GKS +Katowice +entourage +Loicq +scouts +Dea +Horvath +Quakers +IHL +hesitant +Stanislav +Flyers +booking +earpiece +Gabriola +gregarious +breaststroke +Kahly +Blossoms +Greer +courtship +silly +Grayson +dumping +daycare +prenatal +revolutionized +hyaline +streaks +ringside +Martial +Attorneys +Testimony +uniformity +McCain +regulates +USBC 
+brackish +cylindrocarpa +estuaries +lyses +eosinophils +antibody +cytotoxic +eosinophil +eliciting +incubated +catalyze +trastuzumab +monoclonal +polymorphism +Happiness +formative +potter +Takaezu +sensibilities +Curtin +interpretations +Willem +lustres +Warrick +scraping +Queenie +slanting +masking +assemblages +Marbles +Karlu +yourself +Altenburg +Hildeburg +Boxler +Placidus +Munggenast +Holzinger +Hoppl +Socialists +crypt +excised +unbroken +Fayziev +Selyanov +Elena +RealD +multinational +breathtaking +cyberpunk +remotely +Producing +Lisovaya +Taychenacheva +scratch +facades +rusty +suitcase +bubbles +dimensional +Mistress +Sserumaga +KB +businesswoman +Altrincham +welcoming +Olympiad +slump +Riggott +reinvested +departures +loanees +Bueno +expiration +whistle +Caledonian +Pringle +rumour +Kilbane +embryonic +Burnley +Regardless +Window +Ayr +inquiries +laughed +Millwall +exhaustion +reaffirming +hamstring +Priskin +groin +Tottenham +Cywka +Addison +collision +Tudgay +Brett +crutches +spells +opulent +Abaris +Cyzicus +Luanda +wedge +Neto +Lopo +mestizos +disproportionate +arthropod +subphylum +hexapods +Stygotantulus +moult +nauplius +branchiopods +Triassic +Krill +copepods +carcinology +cephalon +ganglia +sperm +oceans +supersedes +mantis +burrow +Oulu +mentored +Uppsala +Khaliq +Hazaras +eyesight +miner +diggers +grievances +unrepresentative +Stockade +Ballaarat +headstone +decorator +stepfather +symbolical +Bharatpur +EETPU +Healey +Militant +Libya +Suburbs +premierships +sarcastic +sectional +Seabrook +Albuquerque +hostess +amazon +Komatsu +Launch +warhead +Naruto +Bogdan +Mogilev +brunt +honorific +Bagpipe +complexities +convey +Related +stylised +progressing +interpretative +Warning +fanciful +MacLeods +predates +appropriations +grounding +Documentation +Garbh +Bag +Morison +lament +prestige +tunings +Niel +Intended +genius +airs +MacDonalds +diseased +disgusting +shrill +gamut +Cheape +appropriating +notations +MacLeod +Gesto +Pipers +ornate +traceable 
+urlar +cleansing +transcribing +dampening +Ailein +Virtuoso +MacKenzie +bagpiper +Purser +Harlaw +milieu +harps +outlining +harpist +Dooley +poetical +Morsent +Lesens +Maison +les +Caliphate +Caliph +Clavatula +listeriosis +Bartor +cooked +quaternary +reiterate +mailed +Sylvester +Sablan +Benigno +Fitial +Babauta +Investments +pros +TPD +Beauchemin +CPGA +Ranking +reshuffle +Sepp +Jairo +Guedz +Roadrunner +Cavalera +Must +Ruhen +focussed +Zeke +FremantleMedia +psychiatrist +Skip +Stingray +Moloney +Ariel +scammed +Toadfish +Bliss +Additionally +Lynne +Geyer +Ty +reminiscing +Phillipps +Zeppelin +skateboard +Madge +Lucerna +hymn +Polyhydroxybutyrate +biodegradable +monomer +PHAs +granules +Monsanto +Biopol +microbiologist +Firmicutes +Comamonas +Aspergillus +Gower +promontory +desolate +Gamle +Ceres +Docklands +proposition +outwards +prioritized +hubs +taxi +popping +Arriva +propelled +elevators +sidewalk +encircles +Skyline +Wagon +Foch +Compagnie +ravaged +headquarter +Midcoast +Bowdoin +ATEC +Chesterfield +Hop +Pupils +Confidence +Sava +Styria +confusing +Shrivardhan +coconut +turtles +Ganpati +incitement +Slant +Kellman +strut +Bert +advert +Setanta +haunting +harmonica +narcotic +deed +lore +Kazuo +Enix +navigates +Menu +Partway +roam +Nagasawa +chases +dubs +Ushio +Tomoka +Realizing +curse +murdering +fades +Geemu +anthropology +Dil +Mein +reciprocates +blackmail +duping +drown +sorrow +Sivaprakasapillai +Somerville +riots +normalcy +Khai +Anju +Dipa +Sitaram +Kattel +Dayahang +Qarqafti +Frenchtown +Brownstown +interchanges +Travels +MySpace +mad +designates +Application +simplify +Nugola +Locke +Particular +Reuters +hedge +Darlene +Reinsurance +Client +cartel +Entrepreneurship +CFO +Founder +Innovators +entrepreneurship +Lawyer +Jonny +implicit +Taunton +prefixes +professions +Rear +helm +Blakely +Jedinstvo +Jagodina +Radnik +OFS +Tutin +Yilan +Exton +gentleman +societal +Reunited +Madeline +Bassett +Attenborough +Eileen +BFI +Authorities +Forgive +Offenbach 
+seizure +outlived +CBE +Arunachal +draughtsman +Frits +docks +Norco +Macelwane +approximation +Steinhardt +archaically +pubs +Would +forge +Weald +Merriman +Batemans +Trujillo +Seoane +Avelino +thoroughfare +Judas +BAF +Shaheen +infrastructural +PEC +Eerste +AZ +Jill +Teed +Stargate +Seasons +Ulrich +Ideal +Shiite +Mirza +uncles +Vaqueros +Erwin +remedial +Debrah +fronted +Elk +Brandstorp +Trondheim +Cardiac +Arrhythmia +Suppression +infarction +paradigm +antiarrhythmics +CAST +endpoint +encainide +flecainide +proarrhythmic +Akbar +persisting +singularitez +Houbraken +Purcell +Knox +Uva +Amos +Bronson +Louredo +credibility +embark +shuffled +Bluestars +Scream +molestation +shelved +Ying +Pharmaceutical +appraisal +NICE +Financially +discounts +Borgaro +Settimo +Torinese +characterises +axes +Jaap +handy +selector +Boneh +Chae +Yeon +MBC +Seo +redeem +Wynter +premiership +plucked +fowl +jesters +corpses +sauce +Confronted +Mausoleum +rainwater +Mehmed +repelled +bastion +lime +leopard +Kemal +vases +Shipwreck +quitting +gari +Masutatsu +makiwara +Kato +Maracana +Expectation +challengers +ouchi +ippon +subdue +breathe +chop +biographers +Waldemar +Carlson +vale +tudo +takedown +Noite +Hosted +headbutt +antiretroviral +inhibitors +Economist +Roma +examinations +diagnoses +stemmed +CrI +Apoorva +orthopaedics +crossover +Oyster +Silence +crowdfunding +foreboding +Speakeasy +debuting +Samoy +Nitro +culminate +bandmember +Cornerstone +notifying +toothpaste +Cristobal +Miscellaneous +autologous +adipose +prognosis +frowned +Lift +downtime +Luftwacht +Dholana +Samajwadi +HH +Latifa +Rabat +Sternaspis +Idi +Kisumu +indentured +laborers +Asians +migrating +Fernand +Huts +strives +Wim +Jef +Karel +atlases +depots +academics +Jordaens +Caermers +Ghent +vos +Rockox +Teseum +Ambedkar +UNHCR +Registrar +Salzburg +Representing +Amity +Conventions +Kyaggwe +Quintus +Ogilvie +arisen +plebs +Fiscal +neutralization +simulations +Employee +Peterson +Tipsport +Riddarhustorget +Gamla 
+Munkbroleden +idyllic +enlargement +Centralbron +alley +pilasters +Gustaf +Ryning +Amadi +Gbudwe +Yei +constitutionality +Sudanese +Africano +Mande +minorities +Moro +borderline +Karakol +Buddhists +Scythian +permafrost +excavate +Petroleum +Gazprom +Altaian +Argyle +Adelphoe +Adelphi +Aeschinus +emulate +decrying +goodness +ut +id +Adalbert +Lucca +regent +chronicler +Marozia +Castel +pillow +Teach +mishap +propellant +loft +senators +Delegata +discretionary +Exceptions +Juridic +Election +Pontiff +Societies +Faculties +Lucie +Ozaukee +Lessons +customize +quiz +thru +Mutrie +Dioecesis +Sigifredo +Noriega +Tijuana +Moral +Ordination +astounding +summon +utterly +Suzy +witless +skip +weekends +Allotey +Bid +Hildegard +SHL +Keeping +Momand +McGinis +Bella +resonated +Memoirs +socialites +linguist +Hempstead +leaning +academically +ingestion +Intestinal +sanitation +intestines +stool +protozoans +hookworm +underdeveloped +tapeworms +Zane +Lowe +Farhadieh +pronouns +mantra +Aweil +Bahr +Parbat +pursuits +Kaghan +pitches +bolted +crevice +Pony +cups +confess +Atsushi +Ito +otaku +ASCII +confessing +reunite +Getsumento +Heiki +concerted +Missing +figurines +Descriptions +petrology +crystallize +Lamont +relive +insecurity +darkest +impulsive +bowels +fantasized +archetypal +antecedents +collectibles +collectors +dignity +Hoarding +interferes +prefrontal +compulsive +Weisman +Ladislav +Accademia +Suk +renaissance +Cello +Inoki +Akira +Osamu +Yamazaki +Countdown +Nishimura +theorems +singleton +suppose +Xk +economies +paly +blazoned +billetty +gyronny +Roommates +Capri +Casinos +Vicksburg +Properties +Vici +Worldwide +avert +denatured +Rajah +Benoi +ply +Jalan +testify +Activities +Appointed +Gauliga +Araku +apparitions +Carmelites +Frankston +sportswriter +rickety +Anant +Sulochana +Feature +humiliation +Impressed +shake +Rauf +Javed +slaps +Moti +screaming +Aja +Re +se +hilarious +Saba +Qamar +Baton +Rouge +Teasdale +Meramec +Climb +inclement +Aitken +Albatros 
+Lieutenants +Kayotsarga +unflinching +recites +homotopy +Nectan +Augustinian +Hartland +Townshend +Howson +pews +Weekdays +slaughters +scratching +cow +smelling +sawdust +woodlands +layering +Aberavon +MUI +Moulton +pardon +Gentlemen +Moh +Mueang +Nam +snail +zoomable +browse +multivariate +arbitrary +joystick +Smalltalk +Bederson +Zooming +creationist +zoology +BayHawks +Hofstra +TBB +Trier +onetime +retainer +Wanskuck +opener +Whitecaps +Svenska +QPR +Elector +Wolin +Kannada +Geeta +smells +lips +Prisons +unannounced +enquiry +RedHawks +bored +romances +saleswoman +Tian +Pretending +Sahere +Worst +reggae +Led +Shit +adapts +Twilight +Kick +grinds +invective +uncompromising +hopelessly +Alta +wade +Cricklade +Purton +Hajiganj +Sonakanda +Mughals +exercised +trader +Meghna +Ichhamati +Magh +eager +cannons +battleships +crushing +legate +pontiffs +baronies +Tulla +Derg +Woodford +Tynagh +Settlements +Mindanao +Erpeldange +Yas +nu +Rhymes +legume +Fossils +simplex +Smear +Napalm +sticker +Differently +Anneke +Freshman +JC +Mustangs +Champs +Raider +NJROTC +Rocco +Carbonara +Anime +Vittorio +Santi +Maurizio +marque +Madeira +Fernandez +rendezvous +whalers +bump +Folz +fullback +Shibe +Researchers +Curse +Wootton +briefed +chairing +Electrification +Temporary +Garland +Winfield +builders +approvals +tungsten +Atlantica +exposes +GSMA +Vodafone +Sejny +rockfill +impoundment +hillside +Razon +timeslot +Coenraad +Freemasons +Matrimonial +iterative +solver +softmax +Euclidean +innermost +headlined +Uzbekistan +Pennepek +Calvinist +Eriksson +lowbrow +Sturges +amusing +pretensions +Role +hija +burdened +cumbersome +Schaliach +Us +likened +Stepping +jidaigeki +Animeigo +Sleepy +Mask +Tamao +Tomisaburo +Miwa +Michiko +Kataoka +Satsuma +pickpocket +Bunch +Directoriate +Mersin +Gaziantep +accusation +petroglyphs +Cowboy +Barbour +Dartigue +Havea +appellation +Perfecto +Corral +Universidade +Sonia +Hevia +Tejedor +shawm +hurdy +gurdy +snare +tamboril +flutes +Nadie +Remis 
+Ourense +blowpipe +tuned +chanter +chromatic +asturian +joyful +alalas +undefined +Auglandskollen +Vanessa +Fitz +synths +stuff +dreamy +Escort +Tallyrand +Cale +Horses +Polydor +Showgirls +lush +sublime +melodrama +recalling +Kiss +hailing +jolly +publico +NGOs +Conduct +aspire +nonprofits +commodities +deliverable +branding +Engaged +Thibault +Wadley +conjured +heady +Yamaha +ambiguity +intoxicating +evoking +disguise +Wits +Glove +homer +excelling +sterling +slugging +Sandberg +Wrigley +inning +Coates +Softball +sarcastically +Mets +Rosell +Kananaskis +Burslem +Halving +Bones +Mead +Documentary +falsely +narrator +reconnect +Nutts +Northolt +Hercules +Anodonthyla +Upazila +chimney +Edgemont +formatted +virginal +McCree +Dykema +contender +languished +Neilson +Sandglass +Stoker +Kadawunu +Poronduwa +Jemini +Magul +Nayagam +misgivings +relented +Hadisi +borrows +Fannie +amplified +Cops +Humberside +Cheshire +Mentorn +Isadora +outscored +Magna +Fogapoa +unforeseeable +constitutions +Courts +Factors +consents +Coverage +Sheppard +alibi +propriety +thereof +adversary +ETV +Odia +Tasty +Focus +Utkal +Vijaya +Performer +commendable +Nikolaj +junkie +wardrobe +cope +distraught +whereabouts +haemorrhage +bleeds +curling +directory +Cru +periodically +Placements +vineyard +Franc +Macka +MacKail +Certification +Barron +Bluff +Qarah +Developed +EDS +Geodetic +Cakran +Illyrian +Alush +Cooperative +Gisborne +inventions +bottoms +Kazimierz +Conradh +Hampden +Ely +Pulaski +Patch +Livestock +outbreaks +irony +occupations +Lowell +apprenticeship +Chicopee +Ruddy +DuBois +Carpenters +Ultimately +unionists +dangers +urgency +Tigertown +Connor +exhaustive +Majority +incentivized +fraternal +martyr +comparably +Cause +Curran +Hibernian +Banquet +Alderman +Irishmen +hearts +Elms +Avery +Proclamation +deacon +namesake +Presiding +Houghton +divest +subsidiaries +poleward +thermally +meridional +Halley +rightly +Hou +convergence +thunderstorms +compression +subtropics +monsoon +Surat 
+Elphinstone +Ashram +Parishad +Bardoli +Sardar +Rajkot +Aga +Jabbar +Tran +Mahadevbhaini +Bazar +Gandhiji +Waterman +metalura +composes +Rolf +Advancing +pneumonia +elicited +precedents +contingencies +ripe +resold +Rapla +Amstel +Benin +nearcticus +oaks +Lapua +Homenaje +Dos +Fantasma +Rudo +Garza +Kraneo +Diamante +feuds +Nuevo +Passengers +Ingrid +Razia +Cleo +crooked +Reuel +Aguila +Daigdig +Maikling +Braga +Isang +para +Coroza +Kabataan +Balino +Iti +Violet +Poems +Goodbye +LCD +Ogawa +Semiconductor +dictates +cruises +interruptions +harbor +NIACC +blueprints +welding +launchers +harbors +recrossed +Crete +Reconnaissance +Funchal +shorted +zur +Guatemala +Leopold +Wever +minefield +Blenheim +Strander +Bucht +Fortress +Raeder +overhauled +refloated +drydock +Werke +clouding +naiad +hooks +ants +mosquito +ephemeral +affords +dragonflies +Varberg +bladder +seabirds +Gesta +Clarks +Marsha +Bluffs +scolaire +catholique +Parkway +Walking +Lotru +Jie +Maoist +Hired +Letter +Valor +Newt +Gauntlet +preventive +affectionate +Albans +militaristic +callers +ambulances +resuscitation +Outreach +Alarm +loudspeaker +Bolivian +Verona +Divisione +Latina +qualifiers +nouns +mingled +onoma +Halloran +vagrant +canvassing +cautious +Fuerst +abducting +molesting +Redmond +Larkin +heartbreaking +mysteries +Chamos +Econometric +malnutrition +lagging +statistician +amenable +Dictionnaire +pratique +socialisme +UAFA +Zamalek +Varun +Kanpur +Cameroonian +UD +escapist +Bundjalung +Interviews +punishable +MardiGrass +Bong +runners +bucket +dogs +Premises +Buslines +Tweed +mackerrasae +sasai +Armalite +FAL +suppressor +HK +confrontations +uncontrollable +wounding +shooters +clashed +rifling +Caliber +Stoner +Numerical +hammer +prongs +ergonomic +slanted +nail +backward +USAF +Gyeongju +reconstructions +tombs +coffins +Hwangnyongsa +Myeonghwal +Meyer +Mallorca +spindle +tulip +reverence +slalom +backflip +Planned +filibuster +Liv +exhausted +couch +spitting +Soft +surrealist +replied 
+Profile +Dancing +tabloid +microfilm +Morrow +Spectroscopy +sophomores +APS +newsletter +Maureen +redistricting +Andronikos +Macedonia +Elasamprathi +protectors +Thampan +Trivandrum +Chalamkode +Theni +Tekkady +Sabarimala +Kochi +inter +Aluva +Sabari +Periyar +Vengaloor +Vaidyasala +Highranges +admired +Uravapara +Kallu +Kanjar +Hydroelectric +Cheruthony +Elappally +Wagamon +shrubs +elephants +Swantham +Shanti +Ghats +Dyakonov +anisotropic +exponentially +Gnostica +Grady +caliphs +prophet +caliph +organisational +Montauk +Wilfred +Hogg +typeset +Regardie +illumination +annotated +administering +meticulous +Antiquarian +Marjorie +cofounder +Trip +avant +Ezequiel +Cauca +Generals +External +Mercer +Archeology +alerted +corrective +Tetley +Residential +polemical +Fusinus +Spindle +Kazakh +Dialectique +fern +Shortland +Menzies +whence +breakwater +Morpeth +freighters +collieries +Tighes +Kurri +Cockle +Messrs +Lever +RSL +Meteorology +heed +Bluebell +Collision +beaching +RAAF +Sabre +parachute +Marchamalo +Tercera +Harz +harass +dismembered +Vienenburg +boasting +manslaughter +Bennen +superficial +smiling +murderous +pinnate +sepals +astronaut +Professors +Gem +Garvie +plowing +Piotrowski +misdemeanor +fraud +Nilsson +NUTS +Cheviot +Bede +miserably +Conqueror +Pictish +Precipitation +Tynemouth +frosts +petrochemicals +bleaching +Losh +soda +Felling +fumes +chimneys +sulphuric +uneconomic +detergents +Lotte +feedstock +ConocoPhillips +Ensus +SABIC +cracker +Cerebos +Hovis +Tioxide +titanium +Candlish +Teesport +extracting +outcrops +seam +Buddle +overseer +colliery +Killingworth +Ceremonial +Accidents +Witton +Eston +Marley +Issac +Bessemer +bandit +Sahaviriya +Skinningrove +shipyards +Scotsman +soak +Algernon +NEPIC +PD +inventing +bulb +acrylic +Acrylics +SMEs +Carbon +Neptune +Renewable +Able +Chemicals +Brave +KP +Crisps +Balliol +Fossil +LG +Tanfield +Goodyear +Dunlop +Lynemouth +Easy +Well +Pontop +wafers +bearings +syrup +remedies +Peterlee +Cummins +Lingfield 
+belly +uniting +Lipton +Milburn +McDonald +Gosforth +Ridley +Races +Alnmouth +Alister +eastwards +Roper +Stow +thrilling +medalists +Bekele +multi +millionth +Tracey +grasslands +Conception +pregnancies +Secondly +Brexit +Nomenclature +ferries +NE +SSAT +Martyrs +Macmillan +prep +Kimbolton +Buloh +Tun +Available +Heads +Breast +administrator +Prospective +Macau +Realm +Touchmenot +Catskill +geomorphic +aeolian +Ventifacts +mushroom +facets +immobile +pitted +windblown +undercut +interconnector +objecting +adjoins +breakers +kV +indicators +levelled +tagged +telecoms +Terrence +Surgeons +Magnificent +stumble +Arbogast +envelopes +mayonnaise +Funk +jokes +puns +Tar +booming +loudly +camels +slam +Beaufort +Keyserling +Margaritolobium +Silesian +mare +stakes +bridle +Tulloch +Riutta +Tompkins +Harlan +Recommended +straddles +reptiles +dispar +earthen +Pinnacle +Registered +RISNA +relinquish +FNHP +Fatima +AFT +Brattleboro +BK +Napoli +Euroleague +Seaside +Footprints +Jaipur +Jaigarh +Sawai +Maharani +Yagna +Horticultural +Ouhadi +Jeux +Francophonie +Youssef +Vladislav +Konstantin +Nikolay +Mkhambathini +Humanity +IGCSE +Handicapped +Recovery +stimulus +ITXC +ISPs +Networking +interconnection +SuperJanet +userbase +bandwidth +allusion +Viczay +curation +aggregation +artful +Strangers +thief +illiterate +guessed +piercing +Temptation +seeming +ugly +McMichael +stepdaughter +disdainful +jock +Winona +attentive +guilt +consummate +Colbert +Evelyn +strained +pacifist +Onyx +insisting +likeness +fetish +insofar +livelihood +stereotypical +adulthood +fiery +Bunnery +Cassie +counselor +horrible +laughing +housewife +beatings +flamboyant +dialog +Neef +Duggerna +Gradually +Notes +emphasises +whitewashed +lodges +Intrinsic +Aubrey +hectic +Pollock +beginners +Olympia +Cois +Fharraige +Hostel +Alameda +oxen +Fallon +Donner +rests +preexisting +Sheep +Originating +Enlisted +profiling +psychometric +pictorial +HP +NNL +lapse +Hilton +Jud +Buck +umpired +misheard +Headhunter 
+NURFC +fielded +Kalaijakis +NERFU +Middlebury +Cortland +ZDF +SE +Neville +Hougaard +squash +Tatenda +Tawanda +Mayors +Chitungwiza +Viasat +Promax +BDA +Lace +Epstein +Horton +strychnine +pinch +dispose +gentlemen +overstated +antiwar +Explanatory +queue +tampering +runtime +OSes +vendor +manually +PLATO +implementations +Honeywell +ARPANET +DEC +Modcomp +dependencies +portability +exemplified +macOS +Licensed +Amstrad +floppy +commands +microprocessors +Neo +Dreamcast +Psion +GSX +Server +ECC +faulted +Blount +Greenville +Scales +softball +Including +Vidant +VMC +Trauma +Laupus +botany +signet +Joyner +comprehensively +oversee +FBS +outfield +Herring +Staton +Gerlach +Crandell +Calgary +Avett +Adding +dwarfed +Navatkadu +Kaluwankerny +Seeing +sweltering +Recognizing +foreseeable +hue +eccentricity +photosphere +asterism +Cyclone +airframe +Prototype +Composite +Zuurberg +Housni +Muammar +edgy +Oued +Chott +Alwin +Doris +redefined +choreographic +artistry +Medaille +muscular +Apparent +mutilation +preoccupied +perceiving +intensively +Malai +congratulate +attendees +Maffeo +Aeterni +Patris +streamline +Ludovisi +discourage +exhaust +IFT +Misuse +Dmitri +Tot +bounced +punting +punts +Homer +Halil +bantamweight +chapbook +Editions +Ulrika +Dubbla +fusing +Tamayo +nightclubs +interpreters +Silvio +jade +nerii +dots +Wendlandia +instars +reunification +Thing +Massimo +Eurodance +Sharky +Maloy +Lacy +Ace +KC +microcontroller +pointer +Cosmic +fetch +byte +unprefixed +fetches +converts +Applying +Interexchange +telecommunication +Identification +dialing +CICs +Existing +Eleonora +chamberlain +cooled +aliases +reproached +mortal +unfounded +Vaxholm +Berendries +diurnal +lizard +stripe +lick +wax +Weinstein +JGR +Willamette +Wesleyan +Editors +Learned +Lichfield +townhouses +louder +reproducing +Staircase +Undergraduates +ensuite +bedrooms +perimeter +prohibits +Term +Halls +fellows +concede +Amen +Lt +Lecture +bequest +Baroness +negotiates +CUSU +raucous +sexist 
+misogynistic +disciplinary +Balls +rowed +beams +Khok +Sanambin +femur +Chakri +Sirindhorn +Highness +Mahidol +Thais +Popsicle +originals +reaffirms +prairie +homesteaded +straightened +Scholastica +Lapin +Beis +Avrohom +seawater +Shapinsay +Scarba +Rockall +habitation +NRS +Argyll +Ayrshire +Stirling +Losses +recognisable +causeways +causeway +Ceann +Dubh +landlocked +crannogs +Zimbabwean +Graduates +Dobo +Comfort +Chikondo +Headmaster +Chikumba +Fishers +insurgency +theorists +Authors +profess +reliance +restricts +collateral +urges +buttle +Jevons +docile +Deverill +Silversmith +Thank +solicitor +Ganymede +butlers +scarlet +stunned +foibles +illustrators +hints +waitress +Damsel +Did +chiselled +finely +Proud +manipulates +misty +jaw +knowledgeable +phosphorus +momentarily +stuns +hangover +dubious +Makes +lookout +tang +cheers +Bertram +appreciative +mess +Carry +Bells +downplayed +Hadik +Poole +Filipina +Barangay +Lovelie +finalized +str +Domains +Elongation +stalk +rRNA +SRL +peptide +subunits +Lindsay +Davenport +Crumlin +Belvedere +Aviva +Oriel +Premiership +curtailed +Clarion +underage +Honore +Aware +GP +Loudwire +riffs +raps +Koski +sweating +Videos +relinquishing +Hogan +Childers +Dulcie +Wickremesooriya +Mendis +Peiris +auditions +fuze +Colgate +Whitehead +Congresses +resented +abolish +Judiciary +Barham +Audrey +actuarial +refunded +taxable +repaid +MRA +layoff +Unused +FERCCA +Pastre +Banana +Ratings +Stoldal +DayONE +Gillan +Sheena +interceptions +Redskins +Doc +AFPA +Muncie +revamped +Celts +meantime +collars +confederations +EuroHockey +Ginette +Connors +quarrelled +Bruff +pantry +swallowtail +pence +sneaks +Angered +unleashes +Tangerine +wanders +Halfheads +rampage +MVC +Preseason +brew +bottled +soliciting +beers +Lager +coding +antigenic +anticancer +pelvic +modifying +Reconfigurable +dynamically +impedance +Pattern +Polarization +polarizations +Compound +Founding +Lankans +pathology +entomology +cherries +Gitonga +Strasbourg +transitioning 
+KACA +NARC +Ursula +Puffin +realises +Sukumar +Rashid +legato +Jarvisville +Natasha +Checker +unplanned +patois +dangling +bleach +Thunderbirds +amour +fi +Hey +Mobilian +Choctaw +Chickasaw +finha +dans +Yoruba +Setting +Milo +liki +Cochacucho +Quechua +qucha +Woomble +cannon +UNITAS +mustering +Challis +RAKA +shortlist +postcolonial +Shiibashi +Ohki +Fiona +Habib +intrigues +Gonzalo +Federmann +Cundinamarca +Omagua +abbess +bulldozer +Wages +impending +Cy +Patil +Ahmednagar +Arrington +Hidell +CoA +syndicators +LISC +homelessness +Homes +Bostyn +Zulte +Waregem +Stunt +Thrones +Elba +Alicia +Spotlight +Hein +Kelsey +Katja +Annette +Uzo +Downton +Penelope +Yael +Radoslav +Philadelphus +Rathasapthami +innumerable +Vedas +Veda +Mandala +Steeds +pooja +Multan +Ekka +Important +Mada +Prabha +Lettera +sciureus +morphometrical +cladistic +Eighteen +Carletonomys +jaws +interorbital +mandible +strikingly +weakly +squamosal +microstructure +cusps +crests +encloses +tubercle +rusher +Cornerback +Torin +unanswered +Zach +outscore +Clemons +kickoff +UIC +Phish +expansive +Saeb +Salam +Homenetmen +Antranik +Mohamad +Hamdan +FLBB +nailed +Graecae +Goodvibes +Hardly +rheophytic +Leone +Sewa +Commissioning +Shanley +plight +Songes +lutenists +Fitzwilliam +Deller +Scores +Marck +Worcestershire +interstitial +manifestations +bookended +bumps +Zelma +vague +Dukenfield +slit +consults +deduces +emulated +rigged +nicely +Reviewer +Yampa +Iduna +Shooters +foaming +paneled +Darrell +Ridgeway +cartoonists +Nobby +Pulcinella +Pit +Toya +Leg +Salvage +stuntman +bamboo +dish +riverbank +flip +washer +Shelley +Monetary +euro +eurozone +bagel +Hospital +lingering +euros +GWh +kWh +liri +payable +Employed +Cluses +Arras +Kia +Nurse +waving +Vietminh +northerners +noisy +locking +dispatching +bureau +sects +multiphase +oscillations +magnets +interconnected +magnetizes +toothed +ripple +lags +asynchronous +formulas +wrapper +voltages +inertia +unloaded +inversion +Excitation +KCB +Pretoria +Anglo 
+Novlenskoye +rebelling +closeness +shredders +Baathist +Ballot +Eupatorium +amabile +tetrahydrocannabinol +euphoric +Lamarck +landraces +Strain +chemotaxonomic +relax +Porsche +disclosed +deserted +Wallabies +Biarritz +Surveillance +Signet +Leander +Jamesons +Leith +indefatigable +softer +Monotype +keyword +bolder +diagonal +Condensed +algorithmic +Knuth +specifies +Grades +cursive +strokes +Fonts +roman +digit +kanji +superfamilies +traps +bounding +Core +Garde +swashes +alternates +schoolbook +sympathizers +swore +Rade +organisers +detachments +Chetniks +Sutjeska +Proletarian +Sinj +Dicmo +reinforcing +Adriatic +rejecting +curacy +Chaplain +Suspecting +stockpiling +Firearms +probate +salvation +notify +Breault +McClennan +cardboard +refrigerator +makeshift +Fatta +Mag +audiotapes +injuring +stun +screams +outbuildings +Criticism +leveled +snipers +harming +blaze +inhalation +breaching +Autopsy +Accountability +unlawfully +undisturbed +Fagan +Racketeer +Influenced +Corrupt +Organizations +bodily +whatsoever +defectors +lunatic +Thibodeau +pyrotechnics +imperfect +yearlong +cans +pouring +pertinent +provoke +obstructed +igniting +Posse +Comitatus +helmets +silencers +Incorporated +Violence +Columbine +timed +Sommer +disagree +piecing +transcripts +longhorn +Merced +poise +semester +Ziegler +Speakes +Cynthia +tomorrow +Powerful +Bellino +Saluzzo +Ottavio +puzzlehunt +transposition +Hunan +Nanyuan +unpolluted +fabulous +amphibian +Decentralized +Solution +Ein +Butare +Tutsi +Rwanda +slaughtered +militias +Gregor +dismayed +Historians +debated +oppressed +Awakening +sermons +Mays +Wilberforce +starved +autocratic +empower +Pentecostalism +Creoles +Hebrews +Birth +Chlothar +Theuderic +dukedom +heartland +Liutfrid +Etichonids +Martel +Aquitaine +Francia +Elisatium +Disc +Pate +Franciscus +Frane +Platonism +Cres +galley +Ferrara +continual +Discussionum +universis +Sneed +Structure +Est +troll +trolling +Sybra +animate +Yards +coxed +eupolypods +Aspleniineae 
+Pteridophyte +Phylogeny +circumscribed +cladogram +Dryopteridaceae +Tinker +Mandalay +impasse +runways +renderings +Mortenson +vanity +resolutions +Pakthtunkhwa +harassment +TransAction +Ofsted +Participation +sourcing +rostrum +dignitaries +Firehole +roaring +stagger +Locsin +Pascual +disappearing +desperation +rancher +Ribeira +Azam +Bahawalpur +Nawab +Hollow +QAMC +draining +riprap +Rainfall +headwater +trailhead +Mermaids +Windy +ramp +riverbed +southwesterly +Largo +swings +outflow +Pico +Cerritos +reverts +Anaheim +Fault +chunk +uplift +foothill +clays +Gabriels +riverine +damming +emptied +wildfire +channelization +arroyo +canoes +tar +cultivating +willows +ranches +colonists +Potrero +Kearney +prospectors +stagecoach +sands +gamblers +massively +landslide +precipitous +floodwaters +aquifers +Watermaster +mountainside +fecal +coliforms +Haynes +Sadko +Obruchev +Onkilon +grilling +salads +Palmetto +Simpsons +Nomadic +backpack +Valledupar +minstrels +troubadors +vallenatos +Emiliano +Escalona +Abel +Mendoza +Silvestre +essence +Nuestra +Asbestos +auctioned +subarctic +Suranga +Dilhara +dismantle +Chandimal +Veensgarth +Meadow +Gymnostachyum +warrieranum +fishmonger +inhibition +Wolfman +Astonishing +suggestive +cyber +redundancy +Quibus +bursting +Dru +Shomali +gaze +alluding +OLS +regression +restrictive +serially +Phylogenetic +circumcystidia +uppermost +softwood +Groove +abuts +Cheney +Cameroon +pornographic +Golding +divulge +exclaimed +swims +Failing +etchings +Guernica +lashes +eats +Desire +Tail +ass +insatiable +Airborne +equips +Cancellaria +nutmeg +navigating +reckoning +Minotaur +Monmouth +Steinbach +Lentekhi +standpoint +Dat +Franciszek +incompetent +sympathized +Cieszyn +unprofitable +Narutowicz +foreman +nationalities +Ziemia +Przemyska +Complaints +Pogorzelski +pavement +Silesia +Adele +Katey +Alvaro +Entourage +Distance +CSI +Duhamel +Gundishapur +Glynn +Encounter +Wuhu +Carvin +Masley +boasted +Swilla +Lonsdale +unconformity +Craven 
+emphatically +Pershing +Ninth +Nasir +Mehmood +Jhelum +Eparchy +Narek +combustion +outbid +grille +consultations +Axelle +Clotilde +Valter +WSOP +Novels +Tiehai +Trained +Minsheng +Bund +evolving +Samba +Profit +startups +mentorship +BCIS +Lubelskie +spree +robberies +Cooksey +warden +Vouga +Thornycroft +Tejo +Antioquia +sonar +ensign +amphibians +stockholder +Wildcat +cockpit +penicillin +Bayview +sore +honey +yolks +colds +Aybolit +Thirteenth +Ridges +wasted +Invisible +Qin +Jiuyuan +Xiongnu +Mopang +Hemu +Ivanovich +Usual +Eves +Ilyin +Fatherland +Maxim +Gorky +Sovremennik +baptized +Inspectah +Diagrams +Remi +foaled +Oaks +Stamford +Sweepstakes +stallion +Tribalistas +Bernd +guested +Doomsday +Frohes +Fest +Zelluloid +Amphi +showcasing +Unter +Musik +ist +Shady +Yours +Pepper +Perm +Uttam +Ziaur +facilitating +Seyyed +Mahidasht +Rizzolo +Teana +grammatical +vocabulary +prewar +MX +Collingwood +addictions +Intervener +Ashlee +Rosenbaum +COAB +collaborates +STINE +Hermosa +Belmopan +UCB +Cayo +Caye +Ruta +Mochuada +smugglers +bombproof +communion +Mitchel +Meagher +hone +Viscum +Cyclopaedia +illustrative +Ridgway +Everybody +Ayers +Khwaju +Morshedi +Bhaiyathan +Surajpur +install +Dharamel +Joda +Baland +Maharajah +Granted +avian +Villejuif +Carlsbad +Stoneman +enact +Cage +Manakondur +alteration +chorale +sur +sustainability +Bivalve +Hygiene +Fishery +mussels +Amnesic +Poisoning +connective +accordingly +scallops +omega +cod +finfish +Composition +Reference +Dietary +Herrgott +Gall +Krozingen +Pistons +Delft +rearrange +Particularly +regulative +multiperspectivalism +Poythress +Scripture +irrationalism +deterministic +Philosophers +regularity +Blackford +Valerie +Job +Absolut +unpredictable +sonata +oratorio +subdistrict +BYOA +VPN +Gobitrichinotus +radiocularis +Rawalpindi +Gaowan +Jian +Sima +Ji +repertory +Spartacus +taraxaci +dioicae +Kushadhwaja +Dikarya +subkingdom +Fungi +haploid +diploid +grandiflorum +Haredi +Hasidic +Mordechai +warms +Keough +Yiftach 
+believable +illustrate +touches +Azulay +delicately +ambiguous +observance +AIF +Tugwell +curacies +Hanzo +chanbara +Shintaro +Burgstall +Thuisbrunn +Burgstein +mediaeval +Urnfield +Hallstatt +terrace +footbridge +BSc +Merle +Hamlin +Dred +Whigs +articulate +Yancey +Buchanan +Guthrie +Lowndes +Herschel +Vespasian +neared +coercion +secessionist +secessionists +stifle +Paraguay +PTB +exons +skeletal +Immunities +liens +Kriegsmarine +WBS +Sunrise +Callulops +Sanana +tympanum +Observers +Lechia +Noted +Beebe +Morrissey +provokes +Clavelinopsis +rubra +subtidal +malacologist +siphons +siphon +foraging +coasts +Bredal +nisi +consensu +mixtures +Vigreux +beads +Raschig +condensate +condenses +distillate +downflowing +upflowing +wetted +Hendrik +entomological +Trebehor +forerunner +Losing +bulls +bullfighting +Mandir +Vithu +Deubai +sanctum +Swami +parcels +Talking +Rockies +Cochise +Wash +networked +capitalization +Bayesian +Isadore +Gudak +effortlessly +enciphered +dreamed +surpass +disused +Ope +ornithologists +CMCMA +Dossin +asteroids +NEOWISE +lightcurves +Lightcurve +Assuming +albedo +eclipses +discoverer +astronautic +Hangzhou +Peking +Aeronautics +Dual +stellation +RECLISA +Targeting +buff +umbilicus +pearly +umbilical +Kanonenbahn +Wetzlar +reparations +Lahn +Divisions +Hessian +Daewon +Blind +Ontologies +interoperability +annotation +ontology +contextual +generalized +rivalries +Rabotnik +swedes +swede +cabbage +Hervarar +handbook +videocassette +Been +hobbyists +Kodachrome +Portability +enlargements +Maximilian +Wife +Cusack +Guildhall +discouraged +Hesse +Occupied +terracotta +ornaments +Jessop +Baynes +trespassing +Rennie +shareholders +expenditure +Catchment +IWA +Claydon +Haughley +Watercourse +Coddenham +statuette +superphosphate +Sproughton +Chantry +weir +Linsangs +Javanese +linsang +youthful +Juglans +Microcella +reveling +Compston +Henshall +commissioners +photographers +bipolar +Moyne +Loyola +licentiate +Catalina +Steward +lukewarm +Funes 
+scriptures +encyclical +Marquette +learners +Rheumatoid +commonality +Uzbek +OIRT +Rn +matrices +LMIs +stemming +dilated +stabilizability +credentials +commercialization +vocalists +Partysquad +Haichen +Hwang +Lanman +Jiang +Curwen +Sutcliffe +Haworth +editorship +brethren +herbivorous +skeletons +Mjos +disarticulated +ossified +tendons +Aathal +Christiansen +JRDI +spines +maxilla +ilium +spongy +remodelled +rosette +millimetre +ducks +dump +Kentrosaurus +GamePro +cursor +JS +Lagatta +reelected +Wadsworth +MacNamara +Genesee +Biltmore +Patron +dormer +Regionalbahn +timetable +Kerim +Fuat +Famagusta +Resistance +concentrates +Braemar +remarriage +Lairds +Abergeldie +Terrace +Amalie +Dhumpa +Kabisurjya +mnionaetes +Penk +Kremlin +Bely +spectrometer +Tinsley +Jamail +condominiums +Agglutinins +agglutination +Reaction +lyse +vested +relaunched +UCL +pullout +KCSI +Mountbatten +Navigating +Compass +Cadell +cheeks +edging +pacific +conifer +reared +invaders +impenetrable +Pork +Huger +respectful +Parke +kedged +contraband +Datow +Bandar +zoologist +highrises +Butkus +McDaniels +DUI +probation +pectoral +carborundum +radios +coke +Moissan +corundum +Mohs +Virtually +stardust +anomalous +ferrosilicon +resistor +crucibles +glassy +sublimation +kilns +bandgap +Zircaloy +printmaking +divacancy +CCF +Yokota +Nang +Phan +Rang +Commando +GA +TFTS +equipping +Pave +inactivation +insertion +Gu +Celt +Diloma +dived +astern +DF +evaded +slowed +discontinue +Martini +Card +Burlington +Touring +Wayland +Southbank +Fairway +Replacement +MU +petitioned +dolmen +Gironde +Seat +Po +motorcycles +Supersport +Superstock +porolepiform +Frasnian +Geologist +fishes +siltstone +Fluorochromasia +fluorogenic +fluorochromasia +evils +Increase +nourished +Imports +Defoe +Manner +Size +Subjects +moralising +slum +distiller +campaigners +prosecuting +dilapidated +crazed +Which +Pinch +pavior +housemaid +trinity +refresh +hoop +patriotic +practised +unregulated +populate +serpentine +Liotard 
+enameller +Anecdotes +footnote +meagre +Inspiration +Felton +gloss +Dickens +Sketches +reused +Sportowy +Bugiem +Ruch +Unia +favorites +Sentenced +integer +Method +Appendix +rigorously +Bolzano +finitely +Hausdorff +xH +Junge +Ulivo +Pensplan +Miliduch +knyaz +Saale +Foveaux +Acorn +Chasers +Qix +Huachucocha +furrow +Fifty +henchmen +Beckman +Sibylla +Merian +Kaspari +Proceedings +Symporter +shoreline +glaciation +savanna +pelican +Hallerstein +Epprechtstein +tinplate +bet +Watanabe +Konjaku +Fourouclas +Actinobacteria +guanine +cytosine +phyla +Syd +Gorodets +Volga +Bagley +AVC +Harrisburg +immigrated +seaman +Tecumseh +Seaman +Mickus +droits +Sociology +Sorbonne +nationale +presse +Feliz +Graduation +LILA +whiteboards +Dec +aggression +rectory +Protestants +Ivica +keepsake +Slavko +Chevalerie +coronation +Tambre +inexperienced +Intercollegiate +Nikolaevich +tastes +paraphrase +McClung +Crawfordsville +faltered +gigantea +Aldabra +mated +pairings +chathamensis +darwini +hoodensis +porteri +Chaco +Mitochondrial +hatchling +inhabit +reintroductions +anchorage +Weights +gigantism +osmotic +crossings +withdraws +Saddleback +sexes +scutes +Opuntia +saddlebacks +overhanging +dew +cloacal +forceful +hind +certainty +slightest +urine +easiest +turtle +ratifying +rearing +progeny +goat +perch +sterilised +migrations +Calotomus +Billah +Vestron +Premonstratensian +Briwere +Haldon +preponderance +watersports +Cavern +Dumnonia +quarried +Corbyn +Torquinians +Livermead +Mallock +Boleyn +Bushell +Paignton +Lincombes +Hesketh +Bon +Watcombe +privileged +sewers +sanitary +Devonians +shutters +comrades +relieving +rioting +Trouble +Romanoff +Hound +Agatha +Oldway +seaplanes +RNAS +Fascists +Drill +fascist +Fascist +Wardens +ration +Babbacombe +airmen +blockship +tragedies +Bye +Taka +Tripoli +ponors +McNamee +Doubles +Wimbledon +oligarchy +collusion +foresaw +TWC +WatchESPN +apps +Seacrest +NPD +Anytime +encirclement +Usseglio +Stoddard +Culberson +Toth +Edinburg +Runoff +Westley 
+Neugebauer +Wakely +unseated +Gallego +Cuellar +Babin +craftsmanship +pranks +Eccles +junk +salons +recumbent +Leafs +champs +Aurillac +Vodacom +UJ +Literacy +organically +Storybook +Dash +numeracy +Coleen +Worldreader +Yoza +airtime +Ulwazi +SMS +Atoll +Unorganized +Llanyblodwel +layered +sliced +hyperbolic +rhombicuboctahedron +alternated +trigonal +trapezohedron +cyclotruncated +antiprism +rhombic +disphenoid +JX +Headquartered +Dhabi +IDEX +Sections +Hungry +Gyaritus +hiragana +Flintstones +Slate +Brontos +speeding +antebellum +Chopin +Wanamaker +Avren +extremity +Seminoles +universes +stat +underperformed +combos +swapping +containment +Ryu +Symbiote +infiltrating +Xgard +Satsui +transforms +programmer +weaknesses +Guardians +bracket +Toy +Figueroa +Valdez +simplistic +diminishes +Reeves +aggregated +inclusions +Reviewers +GameCentral +Zapala +hardship +Peronist +Dirty +Campbellton +Robichaud +keywords +battering +Habitat +Platysace +Jimenez +Kibi +Akim +lowlands +Minerals +PMMC +byproducts +Esen +Sunamganj +Shaharpara +Ratna +Jame +Mosjid +Representation +Sengkang +Akimoto +honeycombed +Sursurunga +plural +quadral +fibrate +Niacin +statin +statins +Ardabil +EsperTech +EPL +sided +endowed +Magnesium +supplementation +NOAEL +Widespread +doubtless +Legenda +Aurea +Koleji +Basketbol +Ligi +inked +Ironi +CRISPR +Bayan +Collett +Jong +Hah +Automobile +Lara +colt +pertains +contrapuntal +dissonant +Hode +Jewel +Reynold +Izatha +vaudeville +attribute +phonological +deficits +Algiers +Halorhodospira +Photoactive +anglais +timbre +transposing +embouchure +oboist +laundry +tessitura +Sinfonia +Concertante +Haydn +Guntram +Boehm +Releasing +Roxy +Beechview +Bullets +Coccothrinax +macroglossa +gemelas +Ernesto +Norbertus +Dinaburg +Cerrito +Joppa +boulevard +cloverleaf +Timonium +Sunnybrook +ford +Kohas +swum +Cleeve +Patrolman +Baldo +Marro +Laurice +prizewinning +muskrat +starfish +deduced +Wildcats +Sakarya +Afternoon +dissemination +Kneller +Hallett +Hayman +reacting 
+rococo +Marat +succeeds +redirecting +APIs +Payment +Parameters +commenters +CMS +Characters +Blow +hos +unge +IID +Westcliffe +Procurement +Enemies +Wonosobo +forgiveness +Relationship +TVS +RX +Hobday +Annotated +reunion +Homesick +Papillion +Bromley +Geske +circulate +Holocaust +Piotr +Bochnia +methodological +noc +Maneuverable +MARV +AMaRVs +frustum +taped +abducts +deliberate +booby +scarred +Harkin +distracts +Creator +python +Sumatra +rouge +Barrister +Tus +Buyid +Sallarid +Marzuban +Shahnameh +rim +Noweco +snowfall +emblem +Husseinic +Jabalpur +Saugor +Nerbudda +Ingham +Lally +Pirog +Tzadik +Guria +Sherrell +Hopedale +Retail +NRG +Genco +functionality +Yolanda +Pereira +Escritores +Letras +Correo +Liqhobong +Kimberlite +blows +shale +Cros +Jaune +Houstonians +DRG +Goodner +Gaines +Publics +Beatrice +malls +Begley +Beechnut +Delinquency +lobby +Probation +Campos +mom +Banco +TACA +flourished +Promise +Controversy +PACT +Cholos +Trustee +Ribbon +Mistral +Childhood +Kaleidoscope +excludes +flea +HCCS +Medya +LaFourche +Teche +Opelousas +Cloutiersville +Natchitoches +arouse +Hornby +ES +Claxton +Aleppo +Sewer +Aquatic +Damage +Adirondack +Knollwood +Hanna +Knott +curb +clad +Nancarrow +Anonno +Arifin +Crashdown +Antar +Whitaker +husks +Vilandra +Zan +Granilith +remembers +Destiny +mindwarped +powerless +wary +McLachlan +Weeks +Indiamen +Summerfield +Covenanter +Brig +pardons +Robstad +Start +Vindbjart +Aeolus +Belyuen +Advocates +affiliates +bylaws +Miley +Entitled +Fogi +sarong +prefectures +Tohoku +deltas +varves +Acanthocalycium +Coreocarpus +arizonicus +florets +Grude +Stjepan +Ravno +JNA +Bosniak +Republika +Tomytronic +Nucleosides +nucleobase +ribose +thymidine +abundantly +digestion +Preble +MacMonnies +bas +Yuniawan +Filmmaker +Cartoon +Kochengat +Saivite +Sthalams +Radomir +Milovan +Mikaina +Interscholastic +Nes +Finsand +Aascar +Gopi +Upen +Santhanam +Ramkumar +Weta +Filmfare +infatuated +Osma +unwittingly +inject +Enthiran +Sivaji +Padukone 
+hunchbacked +Twitch +staggering +Trypanosoma +antiquus +trypanosomes +metatrypanosomes +Triatoma +dominicana +credal +Lutheranism +Andreae +Philipp +Smalcald +creeds +doctrinally +norma +manhunt +Acton +crawling +CP +Speech +interscholastic +violently +coed +Djanogly +cofactor +functioning +Antibiotics +endosymbiotic +engulfment +symbionts +chloroplasts +cocci +Myxobacteria +ribosomes +inhibiting +Flagella +Fimbriae +conjugation +pili +secretion +Endospores +endospore +endospores +Clostridium +Chemotrophs +acceptor +mercury +anaerobes +introns +competence +phage +motile +twitching +behaviours +antibacterial +Manual +biologists +digest +lactic +inhibits +workhorses +genetics +Ehrenberg +germ +Cohn +Ehrlich +WGL +OpenGL +Edict +Nantes +leaved +twinning +Tramore +Confirms +Looks +RTE +forbids +Cosby +Nakagawa +Osaki +Horna +Direction +Raise +Phonographic +angel +Bop +Vevo +lads +Fix +chamotte +Sarkozyst +balladurian +Sarkozysts +Orto +Botanico +Polypodium +Achillea +Centaurea +Ophrys +Orchis +Romulea +Soldanella +marinas +Bahia +Tequesta +stockade +Stranahan +Stacy +yachting +hurricane +AutoNation +Barnett +Riverwalk +yachts +stole +Culex +fetal +Basehart +peril +Sunk +brainwashed +Strive +Tsitsi +Kamhlaba +psychosocial +crises +airlift +Foetus +Immolation +recess +obnoxious +Tanner +Tavern +proclaims +disables +Victoire +Donnissan +Rochejaquelein +Gemsa +Barrhead +Pharmacology +succulent +Kelvin +Loren +mailman +happier +Cast +escalating +Molly +Vicent +Heilongjiang +Flipside +sportsman +Vizcarra +Traverse +Stylus +Brookes +Andaman +Zigao +Hauser +Farrar +Cudahy +ascended +Mozzafar +monopolistic +liquidity +Kingsbury +cumulative +SYSTEM +Carterfone +MCI +NCR +telecom +McCaw +TCI +Olivetti +billing +Telecom +GTE +BellSouth +stockholders +Advertising +hotspots +Leap +finalize +uphold +Foundations +Lana +Krauss +MYMP +Jamboree +forbid +punitive +Yugoslavian +Waffen +priceless +Exile +AK +PGU +VA +Jordin +Iwahara +Standings +proliferate +Retroviral +RT +Rous +sarcoma 
+Creation +antisense +PCR +uncovering +Qaida +Reaper +Euphrates +Rhythmic +Oquirrh +callsign +KUDD +Gleneagles +Foursomes +handicap +amateurs +Bracket +Candlemass +Mats +Rickard +Chogoria +Modernization +Baloch +Anthospermum +laundering +Jalpa +Gandini +Poloniex +Winklevoss +Cboe +partnerships +NYDFS +brokered +Qayqayt +steamboat +Jargon +Pattullo +boggy +Trapp +Chinook +Piffle +Salmonbellies +Amika +Dumont +Adsiz +MADtv +Caliendo +Caeti +Mexique +communicant +Bhowanipore +Sammilan +Tagore +Nobin +Aerodrome +hangar +Havilland +Musiciens +facial +Rifles +sustaining +Bourlon +Howards +Havrincourt +Sambre +Bty +HAA +Storyland +Colgan +Cockayne +stunting +Mossley +Grantham +skated +Wiggles +Estoril +Brownsboro +Tumblety +Microwave +Kinloch +Spean +Cairngorms +Allt +habilitated +Ching +Akkadian +Stanislas +Oldest +Babylonion +Sheshghelan +Chertov +Ovrag +Wappingers +Dutchess +feces +typewriter +Prunus +GameSpot +Parramatta +TFL +Nuneaton +Steaua +Yeovil +Emeryson +Mischogyne +bushes +thecae +Ashraf +peeling +Repair +repainting +Stucco +cleaners +polishing +Vandenberg +inefficient +MIDAS +IIAS +Samos +Charlier +Sanin +Stapleton +Rosendale +nonpartisan +agglomeration +intrastate +Texcoco +Xochimilco +Circula +IMECA +pollutants +decentralization +Coacalco +Huixquilucan +Naucalpan +Pachuca +Circuito +staffing +Slope +Tandon +coverup +Patronato +Archivo +Greenfaulds +Subuh +Brandsnes +Hedmark +Gather +Monsters +Missy +Lineage +Hucker +Waitemata +Puketapapa +Roskill +Cots +faulty +Marlboro +Kolesnikov +occitan +Pardol +Ruisseau +intruded +Dogger +Toarcian +limestones +occurrences +baryte +Hinshelwood +Sunbury +Jacana +Eastbound +Hellenistic +Apamea +Soter +Lysias +Heracleides +Cilicia +mercenaries +Philometor +Sidon +undisputed +Nicator +Apollonius +dominion +strife +purge +Balas +Tyre +soldiery +offensives +deteriorate +dynastic +Tryphon +joked +EAA +Consejo +Mundial +Viernes +transmutation +demonstrator +radium +isotope +buttress +subfield +transcendence +functor 
+receivership +sedimentology +Notch +Expression +autoimmunity +coreceptor +medulla +myeloid +Dill +Casablanca +fleets +Meissner +Magicians +NLM +intercontinental +Schiphol +complimentary +commando +Sayeret +Nagaragawa +Jaulian +Mohra +Sahiwal +Workshops +Gallup +Mormons +Agnew +Bantamweight +MMA +dribbled +jumper +Flu +Toni +Shandon +Costas +reddening +Wreck +entanglement +guideline +shotline +pillaging +looting +Truk +Doria +trimix +Petertide +Thynne +Wraxall +Draycot +Hagarty +Nahin +Srimangal +csc +Lehane +Oblates +evangelization +Khadim +NBR +Leuchars +derailed +Tycoon +Ling +Ding +Equatoguinean +IAEA +ramparts +Rauschen +wherefrom +Bangla +actioner +imposition +Plataforma +Ciudadana +Defensa +Craugastor +tabasarae +Amakusa +JMSDF +Avin +Startups +Sheely +DANU +Bengals +salon +Tabernacle +Overture +Romance +Turkulainen +Ardler +Clatto +Craigowl +Macalpine +Brackens +Camperdown +castellan +Hetman +reset +RESTORE +Debugging +Deglaciation +cryosphere +isostasy +feedbacks +TRF +kori +extirpated +Kellogg +placings +Schwerner +Stil +Oper +Jutta +Kleines +Moks +Vakpo +Atta +Landgrave +Dietrich +Osterland +Burchard +rue +Parachela +Patachela +siamensis +Kenyon +dreaded +Manasquan +Kean +Tomaszewski +Teatr +Mime +Mazovia +Vitreolina +Guterres +Alkatiri +Ahvazi +Ficquelmont +Stahnsdorf +Halle +Teltower +trackwork +Jalal +Ain +Galilee +Wadi +geopolitical +khanate +Ikh +Khagan +Dai +Jurchens +overthrew +Khamag +Tatars +arbans +zuuns +Mingghans +tumens +Transoxiana +slaughtering +Taoist +shamanistic +Tolui +Wanyan +Shouxu +Kochu +Koten +Tumens +Chormaqan +Naiman +Rum +Kaykawus +Eljigidei +Sorghaghtani +Oghul +Qaimish +Kitbuqa +Alghu +Jochid +Mamluk +Tokhta +Ozbeg +Chupan +Eljigidey +Oirat +warred +Toghan +Dzungar +ortoq +Basnicki +Curdton +Forester +quad +chairlift +Calabuig +Auschwitz +secretaries +registrars +Umpire +umpiring +Grieve +Zorra +Beachville +Harebrained +Returns +karma +cyberware +totem +PAL +Antichrist +Ubu +Tubular +MTH +HO +BLI +Trophies +Accord 
+Visualization +Wagga +easel +Taras +Miki +Comart +Pepinos +Moche +pepinos +pollination +thioester +Deficiencies +Glu +carbonyl +Mayes +synchronisms +Arpad +bullae +Cumulus +Pulkovo +constellations +zodiac +officeholder +Hinchman +Colfer +Hitch +omitting +hapless +unpleasant +bureaucratic +Galactic +Improbability +eons +supercomputer +roving +Ursa +excerpts +Radiophonic +Leadon +Joby +retitle +meshing +Griffiths +Horrocks +Pryce +Vann +Rula +Lenska +Lintilla +Hudd +Milliways +compere +Margolyes +Smelly +Photocopier +Blofeld +Trueman +Raffle +Jaffrey +Slater +Wonko +blurb +Gently +Magrathea +hardback +conspirator +teleporter +outcasts +Golgafrinchan +inept +Golgafrinchans +readout +subconscious +lettered +Scrabble +Brontitall +continuum +Slartibartfast +dolphins +InfiniDim +Fenchurch +settles +McMillan +anguished +Earths +Artemis +instalment +Bowerick +Nano +Prostetnic +flung +Leave +Elements +Cuse +Genre +Sanjeev +Bhaskar +Planer +Loonquawl +screenplays +Zooey +Deschanel +Rickman +Viltvodle +widescreen +UMD +Langham +Prak +ledges +Theatr +stagings +footnotes +shorten +Cule +Learner +Phill +Veet +Voojagig +Eilonwy +Cauldron +Oswin +LPs +Dyall +Cataclysmic +Hotblack +Milne +Dove +ISIS +Supersoft +sunglasses +datafile +PAN +Starwave +Publications +Preiss +Leialoha +Vokes +Nyberg +Cullins +Schenck +poke +laptops +merchandising +Touchstone +Plural +Demento +Collectibles +foam +Celebrated +Towel +hitchhiker +Answer +bidenichthys +erechthias +Thom +onboard +Elon +Roadster +heliocentric +starship +gratuitous +tricameral +democracies +revising +scrutinise +Bills +uncodified +obstruct +harden +Bundesrat +Peers +Seventeenth +Gael +Fianna +Grouard +Grosvenor +Alloys +Metals +Pupin +Ember +Examining +indeterminacy +Idea +Mediations +Beloved +Liturgics +identifiy +constructive +vivacity +humanists +utterances +humanism +Skeppsholmen +Castillo +Skool +panorpid +stiktos +Daohugou +Ningcheng +scorpionfles +Miriholcorpa +Nogliksky +Tym +vanishes +hoc +incommensurate +shearing +mesa 
+sheared +reproducible +nanomechanical +ultralow +stiffness +Teflon +repulsive +Waals +glycerol +quartz +lubricated +mucilage +Brasenia +Thermodynamic +macromolecules +entropy +superfluidity +dissipation +Nitinol +magnetoresistance +nonmagnetic +ferromagnetic +superhard +diffraction +Anirudha +Mira +Massively +computationally +ReaxFF +OpenMP +collectives +leveraging +microelectromechanical +TEDX +nanoscroll +sauteri +springtail +Achaea +leucopera +Pittner +Educator +Monaghan +Peckham +Rotherhithe +Septet +Chick +ECM +temptation +Aspidosperma +darienense +Finnessey +Darby +Fineveke +Mua +Lanutavake +google +Doctrinaries +Congregatio +Patrum +Doctrinae +brotherhood +Sadis +Dottrinari +catechetical +LaVell +Leszczka +Rodale +Amphoto +Gramercy +Giada +Mindy +Jillian +Nat +Wartels +overstock +remaindered +McBride +sublease +WaterBrook +Multnomah +photoelectric +charcoal +rollers +Radiophysics +Laplace +palaeomagnetism +ANU +Rundata +pronoun +Midgard +cosmology +Freyr +theophoric +byname +steinn +staves +Sm +alliterative +Ek +veit +Holmstein +rynasta +stein +Appleseed +cider +Diriam +centerpieces +Arimatea +Moyao +Parres +Hummel +Namibian +Oolitic +fabricator +Ketcham +grist +Newburyport +overpass +Shoes +woolen +Saugonians +Hone +Melzar +rotary +Waitt +cigars +airmail +aviators +Bancroft +Shurtleff +underbrush +Barber +Shop +Salemme +Whitey +Bulger +Mercurio +Stackpole +lingered +ESPY +Blacksmith +Oaklandvale +Nahant +Lynnfield +kitschy +roadside +neon +concurrency +Commuter +transferable +biennially +Ladder +SFD +programing +Advertiser +Item +WROL +WHDH +vivo +Riemann +Fet +topologist +variational +calculus +Danilovich +Aleksandrov +Castellano +PCAS +proner +directorate +PRCAL +crossbreeding +stallions +mares +Bullocks +Weinholt +Kaliyur +Sathyamangalam +Erode +Nilgiris +Tirupur +Algemene +Bijzondere +Ziektekosten +AWBZ +Fairleigh +hurled +Orioles +Ftu +tabulated +pascal +megapascals +SI +equivalently +newtons +recoverable +elastically +plastically +tensometer 
+nondestructive +metalworking +Carron +Rathnadov +Aengus +Nad +ringfort +Rathnadrinna +rath +henge +Cavill +Gal +Cyborg +titledThe +Momoa +Zachary +Asher +Prey +Fantabulous +Jurnee +Winstead +Dinah +Lance +Bertinelli +recur +reintegrated +Arina +Mariia +Polina +Egorova +Marrit +Veronika +Pridhoko +prowl +Sitting +Tickets +Treanor +Vertigan +Waterworth +Piggott +INC +Dikshit +NCP +Default +alliances +Jieyou +Zheng +Carnivals +Iztapalapa +reappeared +charros +wander +palegande +sant +Vithoba +Vijayanagara +Samadhi +solkhambi +Vitthal +Ellwangen +smith +thyristor +Technische +Compatibility +Axial +freewheeling +ruggedness +reliability +Uwe +Scheuermann +Oscillations +Achievements +Cosmology +Makate +hana +Gendai +Encouragement +Okinawan +Chanchara +Nukemairu +Rie +Eriko +Renka +Nakajima +Kaoruko +Oranda +Saikaku +Ihara +Nakayama +Dazzling +Aoi +Suggestions +bothan +mattresses +blankets +Elsick +Lairig +Leacach +improver +Horwood +Attingham +walled +Fionnghuala +Hellers +Darkover +Hilt +Dear +ballads +intruders +constrain +Haarlemmermeer +Slug +Ginger +Astaire +revue +Langdon +genie +Winnup +banjoist +Puffo +Clown +gruff +Toyland +ersatz +Vega +helpless +arousing +Lava +Isileli +Ofa +Abdulaziz +Nyako +Olusegun +Obasanjo +CNM +polity +irrespective +dues +Uchenna +Ahamad +Gubernatorial +Senatorial +Abiodun +Olasupo +Adeyemi +Adepoju +Ibarapa +Olusunbo +Oluyole +Lam +Adedapo +Akintola +Taiwo +transcend +Designers +rhythmics +Aphex +contemplative +TVT +Tri +rummage +creeps +hypnotically +cheesy +shit +unexciting +brutish +wring +vestigial +sonic +IDM +Marche +remount +Riccione +Massimiliano +rescinds +Sauro +Anconitana +phoenix +Terza +Stadio +percussive +Blackness +Adapted +hennotannic +hyacinth +tattoo +Lawsonia +Inermis +biodegradeable +chlorophyll +Strecker +naphthoquinones +methyl +Impatiens +balsamina +Cazenovia +Binghamton +Bixaceae +tetraporum +Chacoan +gillivraei +planchonii +vitifolium +Fallen +LiveJournal +felony +Kung +resellers +Gifts +trestletrees +crosstrees 
+backstay +foremast +hoisting +mourn +jibs +Jiading +Gongqing +Yangpu +Zhouhai +Rapahamae +Vicariate +fom +Kableshkovo +Chernoochene +Chatak +Uma +Bhende +grocer +Shankaranna +vet +Narya +benefitted +suitor +Lalita +Nilu +Bal +Karve +Shrikant +playerbase +Teasing +betas +DLCs +hitscan +personalization +Operator +personalized +loadouts +upgradable +multipliers +buffs +modifiers +craftable +Quads +scavenge +collapses +spawn +Cymbal +Fortnite +Modes +playstyle +respawns +Donnie +trillionaire +botched +enslaving +Mirela +Bonham +werewolves +Delacroix +Necalli +Stanton +apparition +cavern +hallucinate +gladiators +Snapping +demigod +horde +Scepter +Ra +steed +Dempsey +Takeo +Belinski +Shadowman +Mechanism +cryogenically +weakens +omnipotent +teleport +Zavoyski +Gersh +sucking +unleash +sucked +zombified +interacted +shard +APD +Pyramid +Pernell +teleported +Ludvig +devours +cryopods +Briarton +Marlton +Stuhlinger +Russman +powering +Agarthan +Kotaku +playtesting +Rockstar +Blizzard +Firing +COD +Nebulium +lootboxes +Contraband +Mauro +penciller +Polygon +customizable +unfavorably +monetization +Orders +inciting +buckling +inescapably +underperforming +Doubly +Arroyo +Ulf +Tyringe +SoSS +Eishockey +Rosenheim +Algotssons +Lundqvist +goalie +quarterfinal +Salo +grandeur +Levski +Hoffenheim +kurash +TASSR +sambo +Ulyanovsk +Tatarstan +RSFSR +Federative +Naberezhnye +Yerevan +Ulaanbaatar +Alushta +Termez +Khorramabad +FILA +Jakarta +Chala +Dakki +Girrom +salutation +Tokta +Bekar +Bengshan +Budgeted +Jang +lobbyist +TNmS +Barons +Schoonhoven +Erard +luxurious +Balat +noblemen +Chateau +Ter +Elst +Eois +mediogrisea +songbird +Mailee +Thaatch +pulchriceps +Boletaceae +spaceships +scatters +uncloaked +Fictional +Romulan +invisibility +spatially +frosted +coatings +camouflaging +bandwidths +RCS +Metascreen +micrometre +polycarbonate +micrometer +Choi +macroscopic +occlude +cloak +acoustics +Chiswick +Smash +Hepworth +timidly +Bedchamber +colonelcy +Dragoons +Dragoon +Ligon +bests 
+regressed +Butch +Breda +Caldwell +composting +vegetative +greywater +filtration +ecotourism +Lyly +biodiversity +Epinephelus +Roseate +terna +Robber +Repatriation +Rielly +Darling +Scarp +WA +McAuley +Fitness +Wandju +mesoregion +Horizonte +microregion +auriferous +Morro +Emancipated +Ferreira +Doce +mansions +Peixe +Rudraprayag +Madhyamaheshwar +Madhmaheshwar +Tungnath +Deoria +Anirudh +solemnized +Mandhata +penances +Utsav +Doli +Omkareshwar +Goddesses +Guptkashi +Akashvani +Miroslava +festschrift +memorialised +Almut +Hintze +catalogued +institutionalized +entitling +selectmen +oblige +Eisstadion +Schwenninger +Barbro +orienteering +Arja +Rabe +Annichen +Sundsvall +deserters +moratorium +Hamelin +Mewan +Imerys +Fowey +Par +Newquay +Refinery +Briton +Truro +Hennah +DL +Asquith +Bonar +Beatrix +Rosamond +Minsterley +Viscountess +Stattholdere +coadjutor +Bishopric +Margrethe +Baronesse +Vordingborg +Gyldenlove +Iver +Krabbe +Sehested +effecting +Peder +Griffenfeld +Charlottenborg +Grubbe +Margrete +Knyphausen +legitimated +Delmenhorst +Frederikke +Augustenborg +diprosopus +breeder +noses +staring +Armelle +deLaforcade +Owner +temperament +catapulta +arrows +Iyaz +Stryder +Tangled +RedOne +Runners +Kimondiu +Deena +victor +Galen +Rupp +Michalek +Rutto +Berhane +Adere +Liliya +Shobukhova +EPO +Symposia +FCT +Rent +Liqeni +Mechanicville +pheasants +admiring +Mengel +Josselyn +Miksch +figuring +Thunderbolt +Bf +Upupa +Epops +Ornithological +Ayle +Arbor +cardueline +Feduccia +feather +Carrol +birders +skyscrapers +falcons +analyse +pastimes +Lehtimaja +Aale +antisemites +Screenwriting +Anymore +Comment +regularise +Inte +Dun +shading +Cocks +Cinnamons +Inos +deepens +Greys +Opaline +lightens +Lacewing +aviaries +beginner +intermated +Dervan +Bulletin +normals +aviculture +Herr +matings +autosomal +fe +alleles +WPXZ +watt +Transition +GIPS +Utriculofera +aplaga +Rossel +Louisiade +Mahipathi +Madhwacharya +Hubli +Nimbal +Bijapur +egis +ophthalmology +Prashasti +FIE 
+Dedeaux +Outfielders +brokerage +Umweltbundesamt +Staatsrat +administral +Umweltsenator +REpower +Innogy +Wildtier +kalte +warum +Klimakatastrophe +nicht +Nir +Svensmark +Bauyrzhan +Orazgaliyev +Yogeshwar +Dutt +Krnovo +Golija +karstic +Leighfield +Jonah +Doman +FRBS +carver +Selina +Bust +Highfields +Putney +floodlit +miniaturised +miniatures +figurine +Arrecife +Hajjiabad +Hoseynabad +Anar +speedway +Miotello +Calumet +PCHA +francophone +underwrote +Athletique +Muzzy +Wejherowo +Wynford +Warminster +Prebendary +Ivor +Felsted +Lanarkshire +Roch +Lydstep +Torben +kommunalreform +Kommunalreformen +Midtjylland +Nuno +Universitatea +Pandurii +countrymen +Tosno +Avangard +Rui +Zizyphia +cleodorella +Yuki +Kaifu +Muroto +Sanoma +bolanderiCicuta +curtissiiCicuta +mexicanaCicuta +occidentalis +maculata +rhizomatous +leaflet +umbel +tremors +retrograde +Ingestion +Marlin +Reggimento +Fanteria +Catania +Mechanized +Logistic +Commissariat +Maneuver +IFVs +LR +Vere +Balmandir +camelliae +Allsvenskan +Sandvikens +Suelto +Heraclio +Zulia +Gualberto +Ibarreto +muchacho +por +lleva +entre +sus +cachos +hijo +Lucifer +varios +nombres +Carora +Coge +cruz +palma +agarra +agua +pongas +lloriquear +hacer +porque +asustar +hacerte +vayas +pensar +ron +tabaco +gusta +ese +suelto +puede +Literally +collocation +Hanfeizi +Daoism +disciplined +perfecting +forgetting +oneself +worldly +Rational +meditate +Idealistic +sagehood +emptiness +veneration +serenity +unaffected +cleanses +spirituality +dictum +Choose +guiding +rejuvenate +meditations +compassion +manifesting +abiding +Lebor +fairies +conflated +theonym +Paps +breasts +Danu +Sanas +collegiately +Terre +Sycamores +Prebaetic +Vuelta +Chava +Castalla +Molds +tumulus +charred +Cozens +Minchenden +Sutton +Hoo +coinage +Anastasius +Bunford +Concept +Pageant +Jurie +Chin +carefree +Roaring +Ransom +Choreography +tuneful +Paddick +Farjeon +Codron +Barlow +Grimaldi +Mdm +Hibbert +Helda +Dubonett +Waring +Mirvish +Albery +Quayle +Disley 
+Gemma +Dann +Rutland +Eyebrow +Rehearsals +Dilys +Millicent +Moyna +orchestrator +jazzman +Schirmer +Beauvais +Paxton +Secombe +Sag +Goodspeed +Bethe +Carlin +Eshelman +Faugno +Darcy +Pulliam +Barnhardt +Briedis +Otterson +Krysta +Santagata +Souhrada +Kirsten +Caprice +errand +mannered +domineering +unison +scolds +flirty +Busby +Schlesinger +Neon +Andover +Imperioli +Tensions +Rusty +rolls +hobbles +Nudged +embarrassed +Beretta +Giubileo +Fegoli +Fegolis +commending +Rate +Pandour +pierced +provisioning +asleep +cutlass +Galatea +awholehearted +electroclash +genba +Taiichi +shopfloor +Wandering +instinctively +Nastos +crowning +atyid +omnivorous +sympatric +interpreting +Melrich +ethnological +cinchona +Arranged +alphabetically +Malton +Baines +locating +liverwort +Hooker +Russel +trekked +Trombetas +Joas +Solomonic +Zerobabel +Merrit +Gugsa +Kenyazmach +Akli +interregnum +Narbir +Bhartiya +Badshahpur +Rewari +Mohar +Mahavir +Jatusana +Sohna +portfolios +Cooperation +Stationery +Gurgaon +OPEL +Synchronizer +Carryover +Nm +synchronizer +Andresen +Heddal +Johannessen +Notodden +Storting +Saunemin +Collinwood +Chet +enthralled +Zelodec +Trolli +WJAY +Joliet +hangout +frostbite +Lombardo +Vinton +Vaclav +Romy +Loeffelmacher +Whoopee +Duchow +NARAS +jokingly +Tick +Tock +Wolnik +Modeled +juried +calyptraeaformis +trochiformis +Pig +witchcraft +befriends +Berta +cantilever +unreliability +Cheju +enlightening +diease +WDA +Eugoa +africana +Junkerngasse +Abeille +patricians +Maverick +Loizides +Helm +Samaha +Polyvalent +sapote +pumpkin +curvicada +epicarp +hamata +hatches +carbohydrate +ludens +Braconidae +Ichneumonidae +Diachasmimorpha +longicaudata +Doryctobracon +crawfordi +Compendium +morphotype +Sagrado +backyard +tehuacana +mojave +Euphorbia +molecularly +synonymize +curvicauda +littoralis +picciola +proseni +recurcauda +nigrina +latus +distinguishable +Immature +antennal +Cephalopharyngeal +immature +synapomorphies +Morphological +dichotomous +Genus +Republica 
+Cooder +Dire +Montreux +gigs +Kracker +Cockroach +amp +Traveled +Inconvenient +Joplin +Provogue +Innerevolution +Steamroller +Crossroads +Waxman +Influence +sunburst +Klon +fuzz +pedals +Axana +Clavus +dolichurus +gastropoda +figs +Biologically +Architectures +DARPA +embodied +transdisciplinary +Schenley +outplayed +telegraphed +Bankers +Joining +Sixsmith +Patriotic +Komarov +janitor +skim +Carey +recruiter +flashbacks +Anatoli +Grishin +Turkin +bureaucrat +derailing +endgame +personified +ultranationalism +royals +Fist +banker +Swayze +Polkovnikovo +Kosikhinsky +Cemex +Reach +safeguarded +Redpath +Bottle +Christies +Diagram +turntables +overbridge +Fig +railhead +boulders +Caldon +conveyors +Conveyors +Luckie +Merritts +Ragsdale +Remus +Activist +Tommie +Alpharetta +precast +Concrete +splayed +planter +ignited +vehicular +Sydneysiders +upmarket +Darlingurst +Watsons +Speakman +Aynho +Standen +Grinstead +furnishing +Faulkner +Gables +ironfounder +Rounton +Barns +Briarmead +Linton +Gimson +Shikanosuke +Shikasuke +Amako +Amago +Akechi +Mitsuhide +Hashiba +Hideyoshi +Ukita +Kikkawa +Motoharu +Mariposa +Controller +Mogul +Redfish +Wabanaki +Falmouth +Acadia +carcasses +Aukpaque +retribution +Hurons +Bastide +circumstance +Marcenaro +Sepidan +Banesh +Hezar +vibration +diametric +Grodna +Rurikid +Yaroslav +borderlands +neighboured +uprisings +Grunwald +powiat +Sejms +Poniatowski +Antoni +Bisping +Uprising +Bolshevist +Ober +voivodship +NKVD +nationality +Belarusians +Mordehai +Yaffe +Cossack +yeshiva +Slovodka +Judenrat +deportations +looted +Jew +Zamkova +Lasosna +Yurysdyka +Kupala +Scotto +sumptuous +Kalozha +Gleb +Ruthenian +faceted +tint +frescos +pitchers +Probably +rocked +Bernardine +Basilian +Bridgettine +extravaganza +curiously +Nyoman +FHC +Ritm +middleschools +Grodnenshchina +Hamnett +Wilkinsburg +Furthest +Krambatangi +Smyril +Silo +Saltsilo +Hassels +IDR +neogothic +Lime +Nirvana +Influences +Diid +Oz +Indolent +EPs +Winton +Blur +Parklife +BPI +Pleased 
+lacklustre +Hodder +Stoughton +interviewee +Invincible +Honey +Bizarro +Parlophone +Dubstar +Vitriol +tracklisting +MacLure +Crouch +Shaped +Dodgy +Kieron +Lamacq +Frontwoman +TFI +manna +glamorous +pejorative +drab +amused +Fronted +Armadillo +Sanctioning +Supernationals +Height +sponsorships +Invitationals +Encinas +Jumpers +Freestylers +PURSE +quietest +Veltman +Summernationals +ins +collarbone +Actually +chill +NBmxA +Racer +leach +warp +phylogenetics +Kohanim +Judaic +unpalatable +Lactose +appreciable +discomforts +dairying +LBK +Shennan +behavioural +Heifer +Midstream +Sullivant +Wondrous +Pics +Doddle +ribFIG +Satomi +Flixton +Ramsbottom +Achyronas +Liopetriou +Romford +enameled +weatherproof +Boiled +weatherproofing +sienna +umber +tenter +Overlaps +amalgamate +dusted +pumice +Seams +unsatisfactory +overcoats +inevitable +flanneled +Chicana +Chicano +Akin +Gump +Strauss +Hauer +visioning +councilmember +Caucus +Resilience +Affirmatively +Furthering +Unlikely +Waking +renegotiated +protestors +marshal +Keisling +decried +reinstatement +decriminalize +Ratti +Esao +pyrams +protoconch +helicoid +teleoconch +intercostal +incised +sutures +Simurq +Absheron +Gjoko +Flavour +nightlife +unfussy +Awarded +Northants +Bedfordshire +Wellingborough +Manga +blitz +Chaparro +Viktoras +myliu +vien +Lithuanians +Lietuviska +Amerika +fortius +Macrambyx +suturalis +Donaustadt +extinguished +Terhi +Eveliina +RoKi +Naiset +Naisten +ZSC +Zurich +Leistungsklasse +Kenton +Dinia +subapicalis +Nagano +Matsumoto +Meisei +Monon +COSCO +Fawn +Annandale +shredding +orchestrating +Sugermans +Dolph +speciation +Gasterosteus +Guelph +Adaptive +Ricklefs +Paralar +Neaty +Murrey +Harding +Technosonic +Lindgren +Sweetie +quatrain +syllables +stanzas +rhyme +couplets +Wingspan +Hawker +forend +handstops +Handguards +crossguard +MOA +Barrel +propped +bipod +KeyMod +Presented +Gathering +Productivity +formalization +SMED +Sayings +Strategies +Continuous +Improving +Trams +Malvern +Showgrounds 
+Prahran +Orrong +Tramways +Glenferrie +MMTB +Ghrib +Borj +mosaics +Abdelmajid +frigidarium +affirmation +Snapdragon +Adreno +Unicom +casing +SIM +microUSB +TFT +capacitive +touchscreen +MIUI +Updates +fixes +updater +manifests +Uses +Told +AFP +Cantata +Philam +Shang +Quan +Enoch +CITA +AdvancED +NCA +CASI +boundless +dextrous +Lexington +enlistments +enlist +Cotesworth +Pinckney +Trevett +Marietta +accomplishment +Mound +reburied +eulogized +oration +worthies +avail +manly +virtues +yon +corse +steepy +bosom +recollection +melancholy +aboriginal +solemn +dirge +gloomy +nodding +Metheny +Claudia +Espinoza +Cia +Skylark +Almazan +Nectar +Sunnyside +Shai +Bashiri +Jody +Sachal +Find +Shine +Gina +Schwarz +Pannonica +Porgy +Bess +Chinen +agile +discographer +Tarakan +Leeuwin +repatriation +Impressionism +Bolotowsky +Ugo +Giannini +Dealers +Munier +Tefana +Hakka +Samine +compatriot +Efe +Cirsonella +consobrina +mollusc +FLSA +codifying +exempts +meal +Tempore +Mullin +Rendon +Wolk +Vidak +Roth +phasing +Supporters +exclusionary +vetoed +coauthors +clarification +Privacy +Bilateral +Clearwater +Kinsmen +Crassiclava +Shasky +Crassispira +Vilan +Malans +Taschinas +Elliniki +Republics +historiographic +ND +centrists +bipartisanship +capitalise +coloratura +Seebach +Gabriele +Conradin +Nachtlager +Hessisches +Staatstheater +Wiesbaden +Amadeus +baton +Konstanze +Gaetano +fille +Inez +Ernestine +Ernesta +censuses +interpolated +literate +lynched +Foxes +drape +stead +missy +McInnes +senatorial +Sayre +Mafinga +Iringa +Neckinger +prettiest +Chamfered +jamb +Bridewain +Walton +Retracing +Stary +Zbrachlin +southbridge +ULI +Wehrmacht +Twelver +Nikah +Quranic +zawj +aqd +shaadi +biye +harems +remarry +allege +nuptial +enjoin +forsake +annoyance +mujbir +Imam +celebratory +precede +asserts +companionship +channeling +mistreating +mustahabb +reminded +reminds +immorality +bully +propter +righteous +judicially +lessened +Hereafter +dowries +disbelieving +mujtahid +Hanafi +puberty 
+Hurayrah +Messenger +coerce +jurists +Prohibited +suckled +Surely +Merciful +Distinction +Ahadith +confirm +fosterage +Islahi +beckon +beckons +Unbelievers +unjust +monogamy +Polyandry +Sororal +polygyny +cohabitation +contestation +reaffirmation +Divorced +ISPU +childrearing +Salamet +Chaka +Bekchan +Qira +Hotan +Outlaws +Sprint +Waltrip +Staropoli +McAnally +NAPA +Camping +Keselowski +ThorSport +Eldora +Roval +Vogler +Reddit +Ineligible +Ceromitia +Banyo +Adamawa +Fula +Fakultet +za +vaspitanje +Univerziteta +Crne +Cantharidus +Thalotia +interstice +lamellose +Cucullia +orbicular +reniform +dingy +absinthium +consultative +clerics +protofeudalism +Leiria +ratify +assent +convening +burgher +punishing +aggravamentos +artigos +capitulos +prerogative +promulgate +circumvented +bourgeoisie +Infante +eassembled +inheirited +Coimbra +Guzman +Absolute +Gerais +Portuguesa +legalize +Volstead +Throngs +Invasions +nitroglycerin +DuPont +Margaretta +officiated +Lammont +Carolene +Stollenwerck +Durant +Polk +widower +Auchincloss +Rutherfurd +Beene +Celestine +Emu +merienda +dipped +kneaded +breads +pinagong +slang +Euroregion +Trento +wineries +Cavit +drinkers +Teroldego +Rotaliano +Laghi +Vino +DOCs +Constrained +Input +Format +decoder +subroutines +Knitro +CUTE +AMPL +SIF +MPS +QPS +Netlib +Maros +Meszaros +Hock +Schittkowski +Dembo +UNIX +Hanselman +Genesco +bobsledder +FIBT +Willenbucher +Pascagoula +retrofit +canister +Harpoon +qualifications +Nose +refurbish +berthing +deployments +Underway +Spending +sortied +Missile +SOUTHERN +antisubmarine +Mayport +upkeep +Transiting +chopping +crewmembers +Spruance +Historically +Longdendale +seeping +Brownhill +DAB +Emley +Moorside +clears +Harriers +Cartworth +Crowden +Bareholme +Laddow +Holmbridge +Woodhead +cols +Bjarne +Blel +Kadri +Shahrom +Tajik +Istiklol +Samiyev +tilt +paraphrased +entitles +Nonconformist +exhorts +solemnities +Levites +predawn +caretakers +extrapolates +congregants +Midrash +Tehillim +Yochanan +Birkat 
+Hamazon +Simeon +ben +Pazzi +Kohen +Zohar +Blessing +pronouncing +handwashing +levitical +Levite +firstborn +Sukkot +Hagadol +Siddur +Avodas +Verses +netilat +Arlo +Duba +Lassus +motet +Psalmes +Ise +Mie +alternatively +Kameshima +Kajima +Shinto +Yatsushiro +Kofun +Muromachi +Mishima +Mahratta +Ruler +Hyder +ascendancy +Kilmer +kiosk +RUEP +bikers +NJ +fearsome +wrathful +Kartik +Amavasyant +Margashirsha +Purnimant +Kalashtami +ashtami +Dandapani +Danda +Swaswa +Trimurti +slighted +ganor +Instilled +Brahmahatya +expiate +beggar +expiated +vigil +arati +Shaivas +libations +oblations +vahana +curds +holier +weekdays +devotee +Bhairav +Vaishno +Ashta +Kala +garland +middleweight +losers +Junichiro +Koizumi +Pseudoechthistatus +granulatus +Honored +Worker +Valerij +Bandmaster +evaporite +epochs +Geestinsel +Schauenburg +Holstein +Hartwig +Reventlow +reincorporated +reversion +bats +septentrionis +inconsiderable +Visits +leached +Archidioecesis +Erzbistum +Electorate +Reichsdeputationshauptschluss +secularized +ecclesial +Aachen +capitular +Caspars +circumscription +amounting +archdiocesan +Regionalliga +Hofstetter +newgrass +VSV +goldmine +Equalisation +duodenal +Requiem +Nudgee +parliamentarian +Corneliabai +Accountant +Wardha +Pranab +NKP +Betul +Privileges +Indira +Rajiv +Narasimha +Nirman +Thereafter +Harish +Forman +ESRA +synergize +Tashtagolsky +Kemerovo +Coclois +Aube +disbandment +synthpop +Silly +Gillies +Tender +Staring +Lichen +carnation +disjunct +southeasternmost +glaucous +purport +noxious +lovelorn +Propagation +deadheading +Fusarium +Wilt +droop +revisited +Saponins +Potted +closeup +bud +halo +tinge +husbandry +Mukhtasar +spices +flax +wheat +almanacs +catalyse +Abbadid +Windpumps +Fayyum +Guadalquivir +Cordoba +Saruq +Khazar +dwell +abounding +silkworm +copiously +plentiful +splendour +Goths +Crops +mechanization +Dorn +centralisation +archaeozoologist +Moslems +pomegranates +Arabist +Paulina +Fatimids +mango +shaddock +unimportant +detracting 
+Eliyahu +Ashtor +Campopiano +durum +sorghum +commonplace +Sassanid +cultivators +watermills +Sonderweg +millennium +Calling +Squatrini +archaeobotany +Cosmopterix +floridanella +Jamaica +Labial +annulated +tegulae +Femora +foreleg +tubercular +indistinctly +hindwing +SFSR +Trud +Trybunalski +FIDE +Tournaments +FLS +FRESB +aphid +Interpreting +professorships +Entomological +quarrying +ESC +Reuter +Yehudi +Menuhin +Chas +Graaf +Annexe +Starving +DGM +spinoff +improvisations +Shed +Soundways +bodhran +ZAUM +Nasty +Springsteen +faat +jai +Breakfast +Cantopop +Mandopop +Eason +Leehom +Stephy +Vanness +Kwan +Belas +Gyeongnam +Ulsan +Hyundai +Norge +Sola +Reiten +Helicopter +Royall +Draugen +Ormen +Kristiansund +congregational +timbered +Chah +Margan +Hirmand +Sistan +Baluchestan +Shape +awkwardness +sprawl +anew +uncommonness +individuality +discontents +richly +Sadoski +Perabo +naturalness +delicacy +belies +Kieran +Mathieu +Bibb +Cookie +Wired +flavored +almond +griddles +cookies +machinist +Goodloe +Strader +Fosdick +Bonner +Hoagland +Azealia +Okayplayer +Remixed +Sinatra +VP +Scarnecchia +Heisman +Kostantyn +Heorhiovych +Merited +bandurists +Andriy +Omelchenko +Kobza +bandurist +Bandurist +obstetric +gynaecological +interventional +echocardiograms +newborns +luminary +Toshiba +Aquilion +scanner +firsts +pancreatic +endocrinology +transplants +accommodates +acutely +nontuberculous +avium +leprae +Disorder +Consultation +RANZCP +Consultant +psychiatrists +irreversible +referrals +Neonatal +Dependency +sepsis +Translational +interchangeability +acommodate +Spacelab +Drawer +Middeck +laptop +adapters +drogue +NDS +berthed +CBM +Gasman +Karian +Minab +Pterygium +unguis +scarring +nailfold +lichen +sarcoidosis +commercialize +plating +solder +pastes +alloys +epoxies +complexion +Melanocytes +pheomelanin +eumelanin +oxidative +Melanogenesis +melanogenesis +sunlamps +dermatologist +Overexposure +mutate +impair +triples +melanoma +carcinogenic +activators +psoralen +Tanning 
+pallid +sunbonnets +parasols +lightening +Finsen +lupus +vulgaris +leisured +Coco +Chanel +sunburnt +Tanned +longing +Parisians +idolized +Patou +capitalized +ailment +bikini +undesirable +cocker +spaniel +tugging +reflectors +FDA +Mattel +Malibu +lotions +perforated +swimwear +stainers +dihydroxyacetone +Spray +sprayed +Traveler +BRAVO +Hamm +Vergara +TVtropolis +Instant +Brides +TLC +Giving +Thicke +McAllister +Tres +Sycamore +boarder +Centrale +aerodynamics +facelifting +Renaults +prototyping +Giorgetto +Coggiola +Trenchless +trenchless +Osar +Callar +Ipecac +Zapopan +Hormone +ANTEMASQUE +Rainbows +ORLG +Mobil +Classified +Positional +Mancala +Reicher +McNeill +Brokenshire +bucking +eke +redistribute +notional +Xinpongnaobao +Zwanenberg +Aliments +luncheon +Spam +Aaliyah +Alleyne +congressman +moorhens +roosting +garter +Upland +mallards +curly +Rodent +invertebrate +tadpole +clam +conservancy +pepperweed +Grassland +burrowing +Riparian +Nesting +Sinks +assemblage +pheasant +dove +wildflower +grassland +Umbrella +legumes +burr +thatch +forbs +wildflowers +Wilms +Theampall +Supplementing +emplacements +QF +Brigades +unauthorised +presumptive +unpledged +leaned +Leeland +strapped +Diebold +disparities +Vote +miscounting +Quin +Retinal +foveas +occipital +Abnormal +Anomalous +Nedbank +Brand +Crescenta +Stacey +videotaped +batons +Ventura +Lench +Gorillas +Edinboro +Bleuler +psychoanalysis +Berliner +Psychoanalysis +Psychoanalytical +Alix +psychosexual +fixated +enemas +infantile +masturbation +disinclination +passivity +obsessional +neurosis +melancholic +prooedipal +oedipal +vicissitudes +psychotic +neurotic +dementia +praecox +hysteria +Pharaoh +Amenhotep +Olde +Bruford +Hurstmere +Chislehurst +hops +farmbuildings +oast +Rhinoprora +palpata +Palpi +tufts +irrorations +Forewings +speckled +specks +crenulate +Cilia +detainment +Internment +Kristine +Lamberg +Kafele +Petition +Certiorari +habeas +unclassified +CSRT +Detainees +Jed +Rakoff +redacted +repatriated 
+awaited +interrogators +Jenifer +Fenton +DIA +Paksi +VVER +feedwater +Fuel +wholesaler +Villamos +Reactor +replaceable +hovered +Ganz +Alstom +Forints +Varga +Kiriyenko +winched +spilling +criticality +moderating +boric +Ammonia +hydrazine +Framatome +Neutron +Detector +Malfunctions +Abtei +Trappists +Blessed +Bottenbroich +Oelenberg +Kulturkampf +abbeys +bookshop +liqueur +Luceat +vestra +Feast +Vezey +Cranleigh +Leicestershire +Diener +upraised +Inscribed +unsigned +Cattedrale +Duomo +archiepiscopal +Bevignate +lozenges +Arezzo +Loggia +Montone +Fioravante +Fioravanti +Pietra +Giustizia +repositioned +Galeazzo +pulpit +Cosmatesque +Crucifix +Polidoro +Ciburri +Carattoli +Chiesa +aisles +Baglioni +Urbano +Chiusi +Pinturicchio +reliquary +Bino +Cesarino +masterworks +Agostino +Duccio +intarsia +Giuliano +Maiano +Domenico +Tasso +Baglione +Sacrament +Pentecost +Nebbia +Baptistery +Giannicola +Deposition +Barocci +sacristy +frescoed +Pandolfi +architectonic +Pisano +Meo +Vanni +Agnolo +Dottorato +emperors +Own +Lowry +Unexpected +chaperone +luckily +endocrinologist +Eastbay +circumcised +uncircumcised +Ilona +Prokop +Tulln +Liese +Administratively +Gino +Lindbergh +summits +Bonington +Cortauld +Kangerlussuaq +doomsday +Mythology +whims +falters +cloning +SETI +Arecibo +unhinged +Duane +assassinating +theirs +Contacting +Hosteen +nursed +Paperclip +infiltrates +operative +guise +silo +canary +Covarrubias +Tunguska +gulag +dissuaded +assassin +mutilating +decoy +gestate +nanorobot +debilitating +incinerate +rubbings +desperately +taskforce +impregnated +Fully +Reluctantly +Chloramine +Kersh +Follmer +disfigured +indestructible +Rohrer +looms +depopulate +explodes +thread +pretentious +UFOs +reappear +relaunch +extinctions +Rabwin +reacted +Annabeth +masterminds +mytharc +inscrutable +fizzled +Deans +Sordi +Geek +groundwork +VanDerWerff +Sauter +coherent +Cressey +derided +hiliades +Leivaditis +Mimis +Dinos +Kaiti +Mer +Leivadiris +Prodromos +Meravidis +Menelaos +Gabri 
+Auri +Auru +Dobele +desegregating +Lovecraft +WTBO +sonorous +luminaries +aesthetically +smoker +Berklee +Magma +Aqune +crumbles +Portia +dressings +Baranya +Borrowed +nightmarish +rampant +strategist +Theocracy +Erring +pessimistic +statistic +Dictator +Darwinian +prophecies +Scopes +predictor +predicts +Kargil +megawatt +constructions +megawatts +Universitesi +Tuzla +Anatolian +Zaheer +Fencepost +Yahzee +NYPD +Slipknot +Squanto +Hostiles +Juanita +Saulteaux +Farley +Mowat +Touched +Jury +codetalkers +Marines +JAG +Skinwalkers +shaman +Knee +Fond +Lac +Henrickson +Favreau +Backstrom +putter +Panchkula +Devlen +Hindaun +Karauli +Fazza +FAZZA +Medals +Put +Discus +Throw +Goddess +cricketing +Crimble +offbreak +Tendulkar +Talkin +panelist +decriminalised +Khoikhoi +Khoisan +Afrikaner +recipes +teas +Longitudinal +Interpol +Voorman +sapped +Coolie +Consolidation +Coolies +storekeepers +prejudices +Excises +Chanock +Dangerous +Tyen +Weeds +occupant +Concern +Substances +unjustifiably +infringed +Narcotics +aerially +paraquat +posing +Amapondo +SAPS +Iqela +legalisation +Stobbs +ingest +Marijuana +Inkatha +legalise +unproven +criminalisation +legalising +Medicines +reclassify +Couple +spasticity +disallowing +infringement +transgression +chairperson +Bayever +Zondo +Soheuksando +Namhae +Gageodo +meteorologically +Ethanol +overrun +Rajat +Kanta +catastrophe +depleting +impoverishing +Poppy +Ferguson +imperialism +impeachment +famines +fatality +urbanised +commercialised +buoyant +deindustrialized +stagnated +spinners +Prasannan +Parthasarathi +protectionist +Shireen +Moosvi +handicrafts +Factories +imparted +Jamsetji +spin +Dorabji +Jamshedpur +Haridwar +Chintadripet +Dalhousie +Bori +Bunder +mileage +Headrick +budgeting +jute +meteoric +Jah +thallose +tubular +disaggregated +phytodebris +Jonker +cuticular +Niklas +Smocovitis +Strother +formalise +nematoclasts +epidermal +waxy +parenchymatous +stringy +Caradoc +cuticles +Downtonian +Maughams +lowermost +Cooksonia 
+Steganotheca +Whitcliffe +lichenous +lichens +coalified +biomineralization +Butterfield +charcoalified +taenia +lobata +Laevitubulus +Shenyang +commuting +priciest +snobs +Nedre +Brickellia +huahuapana +Mambiche +barrio +zec +chasse +Missionaries +Chauncy +Maples +Nkhoma +Mlanda +Kamuzu +Roppongi +TCV +Millenia +trance +bundled +MATRIX +NIGHT +TRANCE +Techno +Hamasaki +LAST +FINAL +wallscreens +CNRS +solids +physicists +Chirac +Sherbrooke +lubricates +rheological +roomier +annulus +Bentonite +Barite +Salinity +derrick +driller +overlapped +subsurface +logger +checks +cracks +seep +wellbore +YP +SWACO +Schlumberger +Baroid +Oilfield +Weatherford +Waste +legislated +Palouse +decommission +unbuilt +Naked +dioecious +decasperma +prostrate +rachis +dasyneura +Bernal +Aphandra +natalia +anxious +Stedman +Navarre +Babbington +plume +oriflamme +spoiled +swish +serialization +Marston +dash +farcical +Burnham +Denys +Bookman +headstrong +undisciplined +dissolute +spendthrift +mildest +terrorising +madcap +cobwebbed +highwayman +robs +bigness +phenomenally +recognizably +slackens +Bash +Presidio +Stonehills +Wormwood +freehold +Clubmark +Ludorum +Karaoke +Matchplay +Floodlighting +competitively +Tedo +Didube +Pantheon +Vincentian +Latham +Duraisamy +Lourdusamy +Somalo +Endures +Ratzinger +clamping +mired +Plawen +Goutchev +Salazar +outpointed +Dropped +Rocked +uppercut +Ruelas +Munoz +boxers +Jukabuwski +Terronn +Millett +overhand +scorecards +Branco +Dorin +Doroftei +Damgaard +IBA +McGirt +brawls +punchers +Acelino +pathologist +bruises +exhume +Coroner +toxicology +quipped +asphyxia +toxicologist +Bianca +giallo +Commenge +Cognard +Blade +Sitges +pleasures +visceral +flopping +nervelessly +relentlessly +Parliaments +Goulburn +Strathmerton +NSWGR +Tocumwal +Suitable +Reader +Rooster +Eben +clustered +Sigmundsson +Brynjar +Veigar +Lengjubikarinn +Haukar +Valur +Leiknir +glimmer +equalised +parentheses +Maxida +Annika +Seducer +Gaffa +Melodifestivalen +Lira +Garmarna +Elin 
+Theoi +Athenians +Arsinoe +Sibling +incestuous +Sotades +leaden +Woodthorpe +viceregal +Launey +geographer +epigraphical +strategos +Itanus +proxenia +Callicrates +emends +emendation +epistates +Hieron +Methana +phiale +panhellenic +excusing +Hauben +Patroklos +Koroni +deme +Ilioupoli +Sounion +harassed +attests +safeguard +Isthmus +beleaguered +Gonatas +hesitated +thalassocracy +Mongi +Landhili +Lyz +Glamour +orthodoxies +lesbian +Biden +Piedmont +Aosta +renounces +Omodeo +Changement +notre +transparence +hijabs +Cedent +Emond +CPUs +Zhaoxin +VIA +Chengdu +HMC +subcontracts +processor +Computex +Entity +hampers +EPYC +Waniphok +hustling +tantamount +ministered +pastored +enveloped +evangelistic +COOLJC +Barger +Emmanual +busloads +boomtown +bungalows +reinstate +homicidal +Darkness +ointment +torment +resigns +combatant +shikomizue +sensuality +madly +frustrations +belittle +criticize +Captured +futility +vassals +snuck +distrusts +underestimates +Koshiro +slashes +secrecy +shaved +unsatisfied +tomboyish +Tomboyish +Kunoichi +unapproachable +vomit +sleeveless +kimono +hakama +Daisy +perceptive +Jimushi +slices +grotesque +malformed +gangly +prehensile +secretes +fling +toying +glue +scythes +defiantly +spat +kouga +deformities +spit +Distracted +usurped +natured +ninjutsu +tricking +Yakushiji +revels +hesitate +bystanders +Himura +Genma +conceals +ravages +unbeknownst +purged +absolvement +ostracize +secrete +pores +squirt +mist +stab +whirlwinds +suck +kama +woodcarver +carve +ruthlessness +arrogance +disturbs +Loathsome +cowardly +Jingorou +viscous +tightest +ooze +dehydration +shrivels +immerse +Forcefully +melts +petite +drapes +defers +feminine +comically +tantrum +overwhelm +viper +unmasks +summoning +delusional +smile +simian +boisterous +infatuation +lethally +Draping +wrap +Boa +skewer +writhing +wildly +underestimating +defenseless +strangles +statured +temper +felling +unarmed +contort +grapple +threateningly +caresses +brash +garotte +alchemically 
+flay +stiffen +calves +cleaver +sheathe +twilight +hateful +pains +revoke +enacts +alcove +subordinates +entrusts +Aekuni +vowing +forgot +Tenkai +Tokugawas +rages +saintly +Ashina +Kenjutsu +reined +demons +godmother +redeeming +risking +abdicated +Oeyo +witted +Tadanaga +Dainagon +Onimusha +ghostly +elegantly +mustache +dislocate +Dreaming +illusions +Firefly +Katon +Lyre +Lasoo +Yasha +Itaru +Tsuibamu +Kazusanosuke +swordsman +Kokuuzou +Bosatsu +Adamantite +Tadgh +zebra +Fenagh +townlands +Gubroe +Killmacsherwell +Rossy +Dasmurt +Ahmadfedaleh +Sardasht +Dezful +Keller +Hutchison +Sowa +forecasting +Casewriter +Wachovia +OGE +INSEAD +Dogshit +Camille +Briefing +briefings +Finalists +BlueCamroo +Cirrus +Insight +Accenture +partake +Satisfaction +BPMonline +Crowdtap +Absinthe +Fx +Door +Bunny +Peep +Bexley +Kawartha +portage +cellars +Laidlaw +Continuation +Nipissing +Aros +Aylesbury +Oxfordshire +thegn +Giffard +Ashfold +Decorated +piscina +Sanctus +chalybeate +spa +Leamington +GWR +Halt +Chiltern +Neah +NPS +backpackers +Mihama +radiocarbon +Makahs +halibut +bows +knives +intertwine +homebrew +raycasting +Zombie +Udai +Hardoi +Dajipura +cordon +Ashoka +Yellin +Sylvania +Southview +Cougarettes +UArts +Lizbeth +Seagate +Fulbright +Biennale +hybridizing +rethink +Arapahoe +Sparta +Slavia +goalscorer +prolifically +underrated +Critic +righteously +sideman +immediacy +sax +saxophonists +Turrentine +spirituals +Idle +Acid +Cuscuna +McCarty +Epiphone +maximizing +Kidderminster +Jermaine +Grandison +youngster +Holops +Dehlvi +Nagore +Qadir +Hakeem +Jainul +Farsi +Qutbuz +Zaman +Abdus +Munazara +Usool +ul +Latheef +Darul +Uloom +Deoband +Peshawari +Reformers +Zameen +Attur +Ghouse +Thambi +Aalim +reformists +Khanqahey +Baqiyat +Tariqa +Qadiriyya +Sawlatiyya +Syllabus +Sunnah +Minded +Izhar +Izalathush +Izalathul +Poul +Preben +assertions +sleight +Paranormal +slander +reputable +feats +erratum +unintended +misinterpreting +malice +reckless +Kroeger +cyst +Trick 
+concurrently +Lausanne +multicolour +conceptual +watermark +Borden +vignettes +Idlout +Baffin +seiner +Polymer +dismal +BABN +printings +Karsh +virtuoso +Lunenburg +Hedley +Doty +Offset +complementing +counterfeits +Banknotes +wrappers +loonies +Acceptance +Withdrawal +tailing +Persecution +Baiguang +Expenses +injustices +perpetuated +Peng +Alimujiang +Gulmira +Khenpo +Karma +Lobsang +Dhondup +Tenzin +Delek +dissident +Guangcheng +Wirz +cotta +inset +flowered +Golema +Peyvand +Kitchener +Umbro +Lynxof +Lynx +Treble +Astros +Invaders +Ryerson +Beddow +MRCS +LRCP +Theosophical +gangsters +Scala +Naples +Hammerstein +Lunts +Lunt +Trock +Winterset +Cauchon +Dore +Thuggee +Gunga +Stands +Attila +Sophia +Hitchcock +Detectives +Houseboat +Vittoria +Cimitero +Seizure +epilepsy +Reuber +Karlsruher +Timo +BFC +Kessler +preseason +Wolfsburg +bernois +Minvilier +Orval +smelters +Moutier +Tavannes +Birs +Mont +Girod +Pontenet +Sorvilier +administratif +blazon +Argent +Gules +pales +widowers +cantonal +payers +agnostic +experimenter +conditioned +unconditioned +Pavlovian +SOP +counteraction +Ghirlands +Ibadullayev +inverted +Hines +nicks +Caribou +bypasses +Canaan +Puritan +Interregnum +Borrowing +entertainments +Weaver +Midsummer +Falstaff +Bouncing +vintner +gulled +Valour +Humorous +Stallion +Custom +Pharamond +prefigures +Beggars +Bampfylde +furriers +Animals +skinning +fleshing +Mammalogy +untrue +mummification +innards +dehydrate +taxidermic +mountings +taxidermied +recreations +chinchilla +sublimating +wolf +trimming +PETA +evokes +Imagine +Furniture +Alvar +Armchair +reindeer +Rozin +bitforms +faux +kinect +pliable +supple +delicacies +soilage +sterilized +Infestation +piles +frass +casings +Rodents +droppings +humidifier +dehumidifier +soften +Motes +Exposure +soiling +grease +altering +Wearing +Friction +Stitching +unraveling +rips +musculature +physicality +rhinoceros +varnished +naturelle +Provided +brushing +dislodge +Stiffer +muslin +filters +vulcanized +sponge 
+sponges +soaps +dyeing +swab +absorbent +spraying +misshapen +restorative +freezer +pared +bulbs +sparse +Filling +spandex +tinting +premetalized +backings +ethers +evaporative +resins +rabbit +unbuffered +pharmacist +taxidermist +Taxidermist +Borax +arsenical +crystallizes +moistened +flask +chlorydric +vacuumed +twill +flatten +Tyvek +Straps +wrinkled +mannequins +Acrylic +Padding +Exhibitors +footcandles +exhibitor +Silverback +banging +zookeeper +MPM +mannequin +gorilla +implanting +LaMalfa +Yutaka +Breconshire +Ashburnham +shamanism +tenets +hylomorphism +Heaven +Augustine +Hippo +gnosticism +limbo +unbaptized +purgatory +neoplatonist +Varieties +perceives +connectedness +broadest +harmoniously +Sheldrake +maizes +COCOBOD +Kpedze +Afadzato +Adaklu +Tongu +Andromedae +magnitudes +dimmer +arcseconds +blackbody +circumstellar +periodicity +starspots +Tratado +Estanislao +caudillo +Sarratea +Preamble +navigability +Gervasio +centralist +Artigas +expansionist +Quadrilateral +Corrientes +psychobilly +Matchbox +satirises +preseidency +lusting +Yaya +Darlaine +Coulibaly +ASFA +Yennenga +Brama +Monofatsi +Heraklion +Asterousia +Agia +Arkalochori +judgements +justifiable +lawfulness +compensated +stateless +Batasuna +Herri +Chechnya +Zura +Shakhid +Nura +Luluyeva +indiscriminate +Novye +Aldi +Kononov +Vassili +Duma +balancing +upholding +diverge +Dahlab +headscarf +Dogru +Kevanci +Sikh +Lautsi +crucifixes +positives +exaggerates +click +Remover +conveniently +Adware +Spy +Loews +Merigot +Promenade +screenings +Milkha +Satwender +Swaran +unlucky +Reims +Barbadian +Wetherall +SinoSat +geosynchronous +Participating +Nocera +Catalysis +chiral +nitriles +chromium +isomerization +Hospitallers +Survivors +offences +unreserved +Darklands +psychologist +molested +extradite +Garchow +Americus +Lumpkin +Chattahoochee +riverboats +Ocmulgee +Altamaha +Brenson +Portraits +brooding +turbulence +Dona +Ostendarp +Sigmar +Oliveros +decompositional +Harithas +eighties +Waltemath 
+delineating +fluctuation +dualism +Insel +Neuss +sureness +Auf +Yaddo +Albee +Heiner +Pietzsch +Weatherspoon +Uytendale +Bryn +Mawr +Neuberger +Formative +Formlessness +CORA +COHEN +militaire +Champagne +Crunk +warlike +vines +pinot +gris +Riesling +Kientzheim +pseudokhoii +mite +loam +Lagerstroemia +notogastral +areae +rostral +interlamellar +dorsosejugal +sensilli +eyalets +geostrategic +precipitating +overlords +Fearing +unilateral +philhellenic +Maj +stall +derisively +sympathiser +Philhellenic +Exact +Leteiller +Compiled +interdict +Zante +outraged +Abney +Itea +directives +Patras +shadowed +picket +unabated +futile +impracticable +deploying +unarmoured +smoothbore +ironclad +rifled +admirals +pounders +carronades +Frigates +warhorses +cannonades +raking +gallant +impressment +brig +Fireships +wreak +graphically +Scipion +impeded +extricate +signalled +abreast +replying +allotted +trumpets +fireships +musket +summarised +horrendous +midshipman +Gangut +Iezekiil +rejoicing +bonfires +mountaintops +Parnassos +Celebrations +Rumelia +echelons +grossly +provoking +gravely +untoward +deepened +restrained +biding +Hellenophile +centrepiece +Helonaki +Pylos +philhellene +Navarin +Samassa +Trieste +Sveti +Helade +Eh +Janam +Tumhare +Lekhe +Puran +defatted +Culinary +thickener +enhancer +pastries +lightest +roasts +Powdered +Defatted +bidirectional +splitter +NVIDIA +HDCP +decode +tuners +Nvidia +WDM +XP +Scalable +bios +initialize +SLI +KCMG +multilateral +Fuels +FCO +Lemps +Voiron +Industrielle +Berthon +Ader +Philotechnique +philotechnique +Electricians +parachuted +Vercors +Palmes +Polonais +avec +Glaives +Iwatsuki +Musashi +viscount +kazoku +Usuki +Bungo +Goggle +Miglia +goggle +DHB +casuals +Aya +unsuspectingly +Famer +alienates +Dubois +commiserates +Hiroko +swallows +homophones +sportsmanship +contagious +complicates +outs +NPB +Merrick +Complicating +Matsushita +braving +Meitetsu +shuttling +Kanayama +Makoto +Kannon +clanging +Sengen +ballplayers +Yakult 
+Okazaki +Taiyo +Whales +BayStars +insignias +donned +Senichi +taunts +tauntingly +Leron +expat +shave +moustache +quips +racks +Diehl +Siskel +formulaic +lackluster +Mohicans +marketability +recouping +Sneakers +Screenings +Gymnoscelis +idiograpta +Prout +Baronetage +Calthorpes +Barony +pommel +Yun +ploy +mouthed +shouted +Wenchang +librettist +Dougal +Pelham +Hun +Allyson +Thin +Celecrations +Arlen +Lerner +Producers +Facundo +Sisto +Deportes +Urretavizcaya +Montevideo +Tanque +Mainly +UCAM +Murcia +Jalkapallokerho +Independiente +Kine +Trotskyist +Kandy +renounced +Ludowyk +deport +whisked +Lavinia +Debating +Esmond +Wickremasinghe +Ranil +Saboteur +Constructive +Abhayawardhana +Samajists +BLPI +Mankind +bourgeois +SLFP +Goonewardena +Soysa +Speier +ou +trompeur +Troubadour +Deceiver +Afanasieff +Fitocracy +geek +Greatist +Lifehacker +Mashable +Edris +Yassir +Kilani +Criminology +lamb +Garry +Roundy +Litoral +Mbini +Kittens +Turlay +kittens +Caldecott +demonstrators +Paseo +Castellana +Moroccans +Kangoo +congregating +bashed +dustbin +lids +Rubalcaba +sue +Demonstrations +Pasqual +Ibarretxe +Acebes +unambiguous +echoing +Letizia +Antena +TVE +Telemadrid +Riay +Tatary +Sympathy +tightened +toughened +condolences +outrage +Visible +Intermodal +ferocious +jeopardized +condemns +hasty +Inocencio +unapologetic +Kofi +complied +explosions +responsibly +Stocks +Airline +Emotional +Luz +Casal +Oreja +cinco +Canto +homenaje +Quinteto +Hemingways +diaries +Poetries +Loir +conventual +Geraldo +Espinho +Padroense +Bracalli +Varzim +Trofense +freshly +Bellbrook +Centerville +Magnussen +Tuesdays +Rooted +Polyethoxylated +emulsifiers +wetting +agrochemical +hydrolysed +oleic +palmitic +stearic +myristic +linoleic +amines +nitrile +exothylation +impure +MON +Inert +Ingredients +Surfactants +uptake +Washout +EPA +Glyphosate +midge +dilutions +genotoxicity +teratogenic +developmentally +subchronic +potentiating +Mycetobia +gnats +Tertius +Duddeston +endeavour +armaments 
+Anglicanism +polyurethane +elastomers +Quite +Erzbergbau +Sauckel +Brandt +functionaries +Seiling +Jadid +Kuhin +Pretzel +AEG +Intermission +Pollstar +Antes +Skinnet +glebe +skyscraper +courtrooms +Dearborn +Brownson +eternal +Farhad +Declared +Mikoszki +Hanging +WMC +Transformer +Dewsbury +Hunslet +Featherstone +Odsal +Batley +Landlord +Thornes +Castleford +Cretu +Cherednyakova +liturgies +Regards +Soloists +stifling +premieres +concertized +Maariv +Badische +Benedetti +Concerts +monographic +Quatuor +Cherednikova +Tupikin +Preludes +Exotic +conservatory +Baranov +Shnittke +Gilels +Kirchzartener +ballades +Aula +Schulhaus +Marissa +Poesia +FIRST +SECOND +Brahms +Ballades +Sonetto +produsser +Squires +Savva +Gennady +Vladmir +Philharmony +Witt +Lew +Burdette +Roller +Espondeilhan +subdermal +traumas +Repeated +excision +Clew +Rawle +Yoko +Ono +Aravind +Radha +banners +Harsha +Vardhan +Vinod +Nooli +Vaikunthapuramulo +accidently +Manohar +inexpressive +Appal +Paidithalli +Vaikuntapuram +speculations +Rayalaseema +jayanth +Tabu +Sushanth +Lido +Samuthirakani +roped +Raghava +lyricist +Sirivennela +Seetharama +Sriram +Kasarla +Shyam +Anurag +Mangli +Diwali +Arha +Kash +Chaitanya +Ramajogayya +Sastry +Armaan +Yesudas +Shruthi +Harinarayanan +Angu +classy +Dusshera +Hemanth +wordsmith +latch +Koimoi +bgm +Neeshita +Nyayapati +Keramalu +Quint +Vennela +Pethuraj +wallow +Bhavana +Sangeetha +Dundoo +feasts +Bretteville +ETH +Swissair +FLORAKO +calibration +OPA +testpilot +Dassault +Rafale +Eurofighter +aviatics +Directions +ADDC +Payerne +Schenkeliobunum +tuberculatus +harvestmen +Komi +Poggi +Neoclassic +Monarchical +Titles +flurry +iVillage +Poparic +rammed +Albrecht +Screamers +Cabo +Arable +Irrigated +sirocco +harmattan +haze +moderated +summertime +wintertime +Aousserd +Sparse +Absa +Bonus +Malpais +Oscura +cosmogenic +dune +pahoehoe +dissected +zonohedra +triangles +polygons +equidissection +Proprietary +sitework +Billiton +Trepell +Paleoproterozoic +Mesoproterozoic 
+metamorphosed +overlain +extrapolations +pinpointed +McArthur +Mt +freibergite +Endotricha +chionocosma +Amselina +stagonophora +Airto +Moreira +Lillehammer +Kvitfjell +Girardelli +Wasmeier +Olympiabakken +Tapa +Edelaraudtee +Elron +Vilciens +Hallsberg +Tornio +Korvpalli +Linnapark +Vabaduse +Sepa +checkpoint +bestowing +mountaineering +Dass +Supplies +Mewa +RKSD +Shaheed +Baba +Mamata +Phawararang +Kanth +Morni +Inderhara +Idland +outlay +PWD +Randeep +Surjewala +Jindal +Amneet +Bachendri +Santosh +Apa +Khumbu +sportsperson +Augrabies +schotiaphaga +Simochromis +diagramma +cichlid +Tanganyika +Tubby +Bakersfield +Sportino +Nikkie +naturalized +QE +Accident +Clinics +Tei +Sin +Youde +Successfully +Healthwork +Tsim +Mong +Thoracic +Radiotherapy +Radiology +Redevelopment +Ambulatory +Coleophora +bactrianae +disclosures +clamps +lenders +Implemented +CFR +Greenspan +subprime +superfiring +Akigumo +Cdr +Savo +Krishnarajpet +pincode +Yellerker +Microphthalmini +Virna +Lisi +Donatello +Bongusto +Nastro +Clio +Enter +Dimitrios +Andromedas +Championnat +Expatriate +Calliostoma +hirtum +exclave +Kalinin +Sambian +Tuwangste +Sambians +crusade +secularization +fiefdom +Latinised +Brandenberg +Koenigsberg +Attlee +repopulated +delimited +Ivanov +Medvedev +Wityaz +Pregel +Neues +colonnade +Bolshoi +Puppet +Friedland +Friedrichsburg +remoulded +Cosmonaut +cosmonauts +Alexei +Romanenko +remembering +Leningradsky +promenade +Bridges +Oktyabsrky +Staraya +Pillau +isotherm +showers +pleasantly +foggy +Abraomas +Stanislovas +Lik +Garmonika +Stari +Ostmark +specialities +salad +bovine +tripe +Borshch +okroshka +Pizza +sushi +Shawarma +Khrabrovo +Devau +Passazhirsky +Ostbahn +Elbing +dayly +Pobedy +VMZ +trolley +FEZ +Ericsson +Telebalt +Cadillac +outstripping +militarized +Chernyakhovsk +Donskoye +imperative +hegemon +overflights +counterparty +Podbereski +repopulation +Baltika +Putin +Biedronka +spinner +Birkenhead +Appointment +Brentham +Willesden +Overground +Selhurst +Beulah 
+Hutchins +troopers +skirmishing +Fredericksburg +Rappahannock +Brandy +Loudoun +Pennock +Bristoe +Ulysses +Overland +Trevillian +Appomattox +Armies +Taneytown +cavalryman +Conus +gigasulcatus +cone +Fiji +instructional +Leggott +Span +pipa +Motherless +Kambara +Bootleg +Briar +Righteousness +fRoots +Rusby +Kellie +Acoustic +Proms +Murmurs +Topic +Grinning +Aqaba +Ervin +Ekstrom +Cottesloe +Sabse +Dhiraj +Thakur +Kabra +Renuka +Pandey +Ayushi +Brijesh +Amit +Heera +Yadav +Rakhi +Sawant +Madhukar +Anand +Rodez +AF +Yzeure +Diplome +Doctorat +coherence +Gayby +Lisecki +awkwardly +stunted +underling +reinvigorate +longterm +condoms +devastates +exclaims +Satisfied +ruse +Enticed +relents +enthusiastically +cuddles +Merlinka +VC +despatches +Instructional +Hobart +HMAT +troopship +ANZAC +hospitalised +Fleurbaix +Sausage +trenches +Praised +downgraded +Bernafay +Casualty +pyrexia +Barque +strongpoint +hospitalisation +Stationary +Demicourt +feint +mislead +Hermies +Rallying +shellfire +Lagnicourt +Whittle +KING +undermentioned +utter +dispersing +Broughton +Meritorious +Maintain +bunting +GAR +Kennesaw +Bentonville +Holder +Dies +appendicitis +Entered +Date +Capture +Tortosa +Fajardo +Requesens +trialed +ISBA +Duilio +Loi +freemason +HB +Berglunds +Thulin +Zagorice +Lykoshino +Jodice +PRCPE +Crum +Balfour +Microscopical +Bleaton +obelisk +Bilecik +Onfray +Eugenia +XII +Blindado +tanquetazo +uncoordinated +sedition +declassified +Jara +aggravated +warn +skywave +omnidirecional +dipoles +coordinative +Deutschlandfunk +Heusweiler +Mainhausen +transistorized +DRM +polarisation +inductances +radiate +radiated +ionosphere +dB +ionospheric +Fujita +schoolhouse +Sherburn +Faribault +farmsteads +timbers +spearing +Freeborn +unpaved +unimproved +passable +Scenic +collaboratively +BLM +brochures +Townsite +shingle +sheathing +Mastedon +Tents +Kimbrell +Trends +Utne +SLR +autofocus +damped +Petitjean +Artistes +decorate +Beurre +standardizing +tvtime +Tvheadend +bugs +LinuxTV 
+locator +cantilevers +cantilevered +monotonous +hallway +bisected +eventuates +overhang +Petros +Katholos +Marconi +Stallions +Monocerotis +Luminous +Novae +outbursts +Sogn +Fjordane +Sunnfjord +Vestland +narrowest +Steindalen +vafra +formannskapsdistrikt +Schei +Riksdag +beydd +tesserae +ropewalks +Jarrit +Meads +Packet +Vauxhall +Spraying +overflowed +Chessels +Hartcliffe +Racecourse +flapping +NGRC +Eastville +Bishopsworth +Portishead +Phipps +Meetinghouse +Quaker +Ripley +octagonal +Sammar +Umm +Salal +Rayyan +QR +Dunkirk +Evacuation +Chelmsford +toolmaker +stormed +Fuld +Hiscock +Almet +Jenks +Theseis +Syriza +Tsipras +Lankesa +Yousuf +Synack +linchpins +Jobert +Michiel +COO +Merijn +Marten +Bug +Maturity +Moussouris +codify +unsolicited +Salesforce +Dropbox +Yelp +Dragoneer +Ventures +Disclosure +VDP +Hack +Airbnb +Shopify +Hacking +Oath +Uber +Parkland +batters +Pos +Nese +Alzano +Azhdahatu +Mojezat +Vasek +Pospisil +Berankis +Agonopterix +sabulella +Walsingham +lanatum +Anrig +Maldon +serjeant +Winefred +Harrow +Seungri +Avalanche +Avs +Meelick +Restored +Subsystem +waveform +heliosphere +gravitationally +magnetospheres +Adriano +Egidio +nunciatures +nunciature +Titular +Falerii +Bergoglio +scandalous +Evangelization +Espenes +Grimstad +Skaggerak +Fevik +Televizija +Podgorica +Eutelsat +RTV +Moto +blockbusters +Comedies +afternoons +Becker +Heuters +Nikolaus +transcends +Sirius +FHFF +Compilations +crudely +gag +Granddad +Strips +pets +Barfy +mutt +stray +tabby +racket +Racket +daydreaming +parodied +gremlin +rummaging +scratched +smugly +picturing +absurdity +Mom +Iacocca +Iaccoca +pentastar +Sunbeam +captions +Dizzy +Gillespie +Nichole +Kathryn +satirized +compliment +surreal +Pinhead +Dilbert +Dysfunctional +entendre +webcomic +juxtaposes +dysfunctional +patriarch +Pinky +Drawn +Diary +Wimpy +Cutie +parodic +Rat +poking +harboring +Fun +Frisch +handbell +carol +Wish +redone +Ogilvy +Mather +Apicella +Willat +Exit +Ingleborough +voie +choked +Winch 
+Pitch +bottomed +Shaft +PBXs +Alamos +RDX +polystyrene +dioctyl +phthalate +Surface +Package +hexanitrostilbene +Fluoropolymers +brittleness +Elastomers +elasticity +Crosslinked +radicals +Rubbers +Estane +polybutadiene +Silicone +rubbers +thermoplastic +polyurethanes +nitro +azido +derivates +plasticizers +plasticizer +Thermomechanical +delta +softens +stiffening +crosslinking +porosity +vaporization +Ostwald +intragranular +Presence +Libertyville +Patineurs +MacMillan +Pas +Deux +Graziano +Balanchines +Valses +Nobles +Sentimentales +Offering +Celcom +Kuala +HighNote +Prestige +Savant +Hagarite +Ishmaelites +Naphish +Nodab +Manasseh +Gilead +Theodor +prophetic +Magret +Garros +Roddick +Lleyton +Hewitt +chickenpox +Bharatiya +Patharkandi +Helms +Cuppy +Qualifiers +ceding +aspen +characterizes +WMA +perimeters +Warroad +Laurentian +biking +backcountry +Trails +Zippel +Watchmen +Insidious +demonologist +Annabelle +Solverson +WTVT +Shorecrest +understudy +Curly +McLain +Raoul +pedophile +Adamson +Scissors +Lakeview +Dreiberg +slimmer +Earle +Farmiga +Cinematic +Bone +biopic +Rollie +Barbra +Sing +Sondheim +Mandy +Dagmara +puppies +Niven +marooned +inhospitable +refuel +sleeper +Harlequin +moons +hibernation +terraforming +arouses +unfrozen +realise +refining +refinement +travellers +sums +Biafra +Eagleburger +Hunger +energizing +Hamid +Karzai +Orderly +Departure +Kenshi +Geminar +humanoid +mecha +AIC +BeSTACK +Koji +Hideki +Hajime +Akifumi +Shoji +Yasuo +Yoshiyuki +Seira +Kagami +maxi +Energie +Karlovy +Vary +lockout +Seating +nr +DB +EXP +Wurzburg +Mannheim +termini +Rudrangshu +erudite +Yashpal +yashpal +Bookshelves +apocryphal +Manmohan +Deve +Baldassarre +archivis +liber +singularis +Girolamo +Adria +archpriest +archdeaconry +Trevigiena +Ludicra +Scriptoribus +excerpta +Bodino +Vossio +MotoGP +Blata +Superbike +Padmos +Hidayat +Padmo +Retno +Heru +Ipah +Martubi +Juminten +Riri +Arsal +Ibunda +Ayu +Usmar +Tiga +Dara +Karya +invokes +Kerta +Budiono +Darsono 
+Herreschoff +Inshore +Nocturne +Prettier +JYP +Matsue +disassembled +switchback +Centralized +Kawato +Taisha +Izumotaisha +Superliner +Dichomeris +obsepta +Ishtikhon +Samarqand +Wymore +Vinland +Xishan +Pacification +Theoretically +contingents +Huailai +Datong +Yanmen +Itagaki +Huaili +annihilate +Lingqiu +urgently +Caijiayu +Guangou +carts +Kuomintang +conceptualization +succumbing +blunders +Biao +Branchinella +denticulata +Cooperating +Petabytes +GPFS +Grid +OPTICON +compute +retrieval +genomics +stringent +eScience +Schomaker +MONK +Popovic +paleography +genotype +phenotype +SURFsara +Genome +MICADO +datacentric +Infoversum +Kiptenden +Soar +orphanage +PINAC +restraint +busking +gigging +Bontemps +Roulez +bassline +Angie +Apatow +Drake +Miyake +Warmed +Kuroshio +Tourists +Kisen +Miyakejima +amentifera +branchlets +obliquely +Coleochaetales +Phaeophyceae +Neighbouring +permeable +stromules +plastids +lamella +luminal +plasmodesmal +phospholipid +bilayer +Plasmodesmatal +gating +plasmodesmatal +interfering +meristem +chaperones +cytoskeletal +Actin +microfilaments +Fluorescent +mesophyll +stabilizes +Microtubules +associating +localize +GFP +hormone +Enzyme +hydrolases +Arabidopsis +thailana +Gain +mutants +Somali +Parliamentarians +parliamentarians +Somalia +embattled +Qanyare +Federalism +Abdullahi +Abdi +Mareye +Sudi +trios +Palembang +Jakabaring +Bowling +neonatologist +Mandela +nephrology +Zanobi +Magi +Nativity +tempera +polytych +predella +Angelico +iconographical +kneels +Biagio +Sanguigni +imitates +cruder +eliminations +Celis +Nemie +Villegas +Ginebra +Suprabhatham +Adoor +Thikkurissi +Sukumaran +Muthaiah +Devarajan +Vayalar +Bruna +Screenplay +Breakthrough +Moniuszko +homophonic +Mendelssohn +Amra +Ostapenko +Celik +CM +switchboard +installer +Ivey +rejoining +Citytv +forays +Glassbox +Broadcasters +Chakori +Nigar +symposia +inquisitive +combing +Lucile +Danson +Oceana +Gersemia +stewardship +kelp +underrepresented +Sloboda +Svoboda +Internationals 
+communicating +Shikhar +Sanju +Mayank +Bhuvneshwar +Shardul +Navdeep +Pooran +Touchdown +Oakie +Thorpe +unbilled +Ape +Weissmuller +hurdlers +diligent +brains +dodged +sprinted +drubbing +Aggies +savage +prized +surest +spice +circling +dwelt +Mercersburg +McConnell +Benbrook +Stagg +Scio +Mullagh +Ballinasloe +fostered +tasted +Cortoon +contemplated +Tipp +favourites +tribesmen +clawed +scoreline +hurlers +Birchwood +Mobi +Distruction +Boyz +Riky +SABC +Somizi +Dineo +Ranaka +Mpho +Mafikizolo +Mbongeni +Ngema +Highest +Download +Kwaito +skyline +wellbeing +cruciform +buttresses +stencilled +Repo +Searchers +Pushing +volley +leveler +storming +Keates +Rougier +Lebia +scapula +Anqi +cafe +Wanda +Baoshan +perpetrator +Lojze +Goethe +Cvet +pelina +Nekje +tam +robu +Levstik +Sonce +knjigi +je +lev +Cocteau +Paulhan +blending +seamlessly +Sollers +Femina +caucus +Hanford +Explosion +postmaster +Angeline +postmastership +Anaptilora +Autostichinae +Shahid +Babar +Saima +Ilyas +Khawaja +Waris +Yasin +Hazin +Noor +Guldsolen +Apotheosis +Tenzo +Ritsumeikan +driveline +lockable +Braking +driveshaft +FWDs +camber +spoked +haulage +pontoon +Demand +Peerless +Kissel +Surplus +Luella +Clintonville +barnstorming +plows +IWM +Duxford +Islas +Utila +Camogie +camogie +comebacks +Dunloy +Pearses +Majella +reflex +Principle +racked +Wanzer +Richie +Regan +shaving +Meadowlands +Concerns +Gonzaga +Wolfpack +Jayhawks +Khadeen +Desi +Ismael +Myles +Wofford +Terriers +Magee +Pickett +Villain +Digest +Germaine +Lansing +usurp +Corinthos +McCall +Damian +Spinelli +Jacks +exonerate +fool +doctored +terrorizing +avenging +apologetic +Lennox +Apsley +Pooh +ALARM +rhymes +Mayhem +obstructing +Culmhead +unserviceable +baling +Runnymede +Biggin +Arenac +Melita +Nester +Selkirk +Whittemore +Sisemore +Hells +BBB +supremacist +Collier +paraphernalia +Paradoxically +taxed +Bling +Traveling +realigns +Bucoda +Binghampton +Nisqually +Spanaway +Chehalis +renumbering +Yew +congestion +Supervising 
+Assisted +Amadee +Wohlschlaeger +Shackleford +spared +prosthesis +Reiner +Westmed +Lukas +GmbH +EHS +Sana +Kliniken +Bochum +Becton +Odeneho +Afrakoma +Petes +Stingers +reimburse +Tuitioning +indifference +prompt +ineffectiveness +outperform +subtracted +cohabit +outweighed +enacting +Preferential +Subsidy +Chileans +subsidized +friskolor +Myth +PISA +Bakhsh +bureaucrats +FAS +benefiting +Parental +Effects +skimming +FedEx +satisfies +CATO +Hayek +privatizing +Cory +billionaire +revisionist +EdChoice +reallocate +disallowed +aptitude +uplifted +segregate +Welner +donates +overcharged +disbursed +foregone +Dissenting +indoctrination +OSP +Appropriations +Majorities +Draper +refocusing +Danner +cavemen +seeps +Firefighters +hazmat +Charge +Michaud +waits +cornfield +grates +cocooned +regains +disregarded +Strughold +disinformation +codename +cracked +Spielberg +Liams +Pileggi +Landau +purposely +Glenne +Headly +revolve +MIDI +copyist +novelization +Ebert +enormously +sardonically +Alspector +pic +underwhelming +uneventful +scorning +charms +tritoleuca +Patience +Nigerians +protectorates +Abimbola +remarrying +Kawng +Kiao +Intaleng +Tip +Durbar +Deruda +Olympique +Amiens +Ajaccio +animatronic +adorns +rabid +Porte +Hyppolyte +historique +rehearse +instigator +Houti +Apocalypse +Catacombs +crocodile +gargoyle +desecrated +Gastromyzon +venustus +Vyacheslav +Plaksunov +Spur +Bluebonnet +palsy +Maclean +chestnut +informational +Ostalbkreis +Asperg +Swabians +Welzheim +Crailsheim +Redfearn +Warriner +Breidnes +Vestfold +cartographers +Replotted +ANARE +Sadeh +Boks +clinician +Musica +Mundi +KI +HS +ARCI +Choirs +Voce +Burdett +Techniques +Dalcroze +Eurhythmics +Maturing +Conducting +Saneakira +Heian +waka +Kintada +Diffutidin +Canscora +Diffutin +glucoside +Sofya +Kondakova +Allround +Breath +homepage +Zeche +SYMS +Merinsky +sportscaster +Cortlandt +Glickman +Tamarkin +Funeral +Yeshiva +underwriting +Iittala +Recipients +Oiva +Toikka +Kukkapuro +Orvola +Eero +Aarnio +Simo 
+Harri +Koskinen +hundredth +Hietaniemi +Jubilees +Theophilles +Hatchett +McDougal +operable +barbershops +Rufus +Basics +gyobok +blazer +Joseon +jeogori +Clothing +veil +Paichai +Hakdang +Hansung +durumagi +Sookmyung +serge +bonnet +Jeogori +hanbok +ChungShim +workwear +Equalization +pleated +Datta +Meghe +Yuvak +Shikshan +Dattaji +Atre +Layout +YCCE +Wanadongri +hiked +Elkins +Aprovecho +photojournalist +sectarian +Shon +Miwan +Sulaymaniyah +consular +Mothers +precancerous +Masoud +Illegal +innocence +lenience +Javad +Noam +Philo +Shafiei +Haaretz +Qaboos +Rouhani +geometrical +bisecting +Semicircles +Lemmas +circumferences +OE +Should +Yishuv +Safed +Beit +Ramthaniye +kushan +Hovevei +Hashomer +Nebi +Bernstein +Arabs +Negev +povstania +cuisine +Panoramic +diavolessa +Baldassare +evoke +rhythmically +voglio +andar +credulous +fossi +maritato +Nastri +chi +crede +annoyed +allured +undying +donna +apprezza +decoro +distingue +dal +nobil +vile +detto +tesoro +Colle +colle +dame +cielo +precipiti +Tenta +invan +suoi +vapori +misunderstandings +Conte +tutti +titoli +overcomes +pensate +prima +panders +tenero +affetto +wangle +glance +moro +lasciato +testamento +Sior +omo +generoso +Donne +bramate +terrify +whishes +devils +stato +dir +bel +diletto +deception +Veleggiar +secondo +vento +povero +mio +padre +qua +comparite +Bettina +Kremena +Vieweg +Doerthe +Wesford +Lienard +ICPC +Guerra +Asakusa +Servant +Ryoo +folktale +Chunhyangjeon +escorts +seducing +outing +astounds +consternation +assuaging +seduction +dismisses +errands +eunuchs +lowly +subservient +Hyangdan +madam +sleeps +remorse +uncooperative +implores +commotion +insolence +beg +confesses +Sarangga +pansori +Chunhyangga +lovingly +snowflakes +Buil +Busan +Baeksang +Thuja +cypress +Chengkou +Daba +foliage +stomatal +ripening +scented +nagar +Navami +Qalatak +Mashayekh +Doshman +Ziari +Mamasani +Maloti +Senqu +Matsoku +Semenanyane +indicator +assessing +TIROS +Radiometer +MultiSpectral +Scanner +Deering 
+biophysical +rangeland +confounded +radiances +Remote +Sensing +Kriegler +Goddard +canopies +photosynthetically +photon +nanometers +overheating +PAR +reflectances +VIS +numerator +limitless +compressing +qualitative +Atmospherically +Resistant +biogeophysical +Fraction +Absorbed +Photosynthetically +multiangular +PrecisionHawk +agriculturalists +Seyd +Whiteley +Borisoff +Gully +Knock +Decca +Struck +Huff +Regal +Revue +WMOT +Newberry +reinvented +Doo +duplicated +POTS +simplifies +Disadvantages +overheads +LAN +RTP +WAV +softphone +Streaming +Wartburgkreis +Marksuhl +valuing +respecting +unto +legislating +legislate +Universalists +Thornburgh +exempting +governs +authorizing +smudging +Peralta +Wiccan +Cutter +Inmates +Hildale +Jinjer +Fundamentalist +eradicate +polygamous +Filatima +kerzhneri +Wit +Bulle +orators +Klerk +Springbok +Roos +Afrikaanse +Getter +Beatport +sandbox +Swire +Pendulum +Knife +circumvent +treaties +Kampfgruppe +Milice +quelled +quilted +Papp +Gor +bodyweight +echocardiogram +Intersocietal +Rosalind +LeMoyne +Parkside +HealthSystem +MRI +Augustana +Credentialing +LEED +cyberknife +Spine +Minimally +Laparoscopist +fibrosis +downs +Downers +obstetrics +Ahmedabad +Nucleic +Transmitted +Eradication +Tavush +Sarkissian +Dilijan +Hakob +Kojoyan +evergreens +Wildwood +Wasatch +Garard +voided +Monona +Quann +bikes +gymnastic +Rager +Vellner +Tennil +Garret +workout +sequentially +dumbbells +sledgehammer +strongman +yoke +sled +Katrin +Davidsdottir +squat +lunge +overtake +Platytetracampe +Fyodorovich +Tsardom +statesmen +Lukich +childless +Iosif +Semyon +salterns +Kolskaya +Guba +Arkhangelsk +obrok +Kama +Chusovaya +Seizing +colonizing +druzhinas +Joasaphus +Rectiplanes +Antiplanes +Mpiskos +Diatsigkos +ESKO +Ethniki +artistes +Patoranking +Tuface +Osakioduwa +Crowborough +Hyson +Dragonheads +Spinefarm +Ensiferum +Hinkka +Janne +Mastered +Finnvox +Medley +Karjalan +Myrskyluodon +Pakisutan +Taishikan +fuzoku +Nihonjin +Diplomatic +Enclave +ltd 
+Tobishima +severing +Depeche +splashing +airtight +ventilated +falsetto +NIN +reflexively +Vimeo +iLike +constraint +kinky +odes +Larut +Selatan +Horry +Harbort +Nar +Sahrawi +Nayim +IANA +identifier +Ethiopians +Scheppersinstituut +brewer +Montaigu +vocation +postulants +Sterckx +apostolate +Pecci +Merode +venerable +sulphur +Sulphur +sanitarium +Sanitarium +Farr +Farmstead +Thruway +ginger +garlic +Barenaked +leveraged +Colfax +Gorski +Poloncarz +Lean +Brighter +Hepatitis +vaccinated +Assemblyman +inappropriately +smear +disclaimed +Lewinsky +Bellavia +Kastenbaum +Capabilities +roundtable +Conferenced +GED +pasts +Essential +waiver +Batavian +stimulators +Artvoice +defibrillator +TENS +unmoored +inflaming +Arnolds +deduction +egomaniac +probing +OCE +phoned +Pelosi +SEC +Broderick +Icons +Grassi +Formeaso +Friuli +Carneo +Genovese +Nicolo +Cassana +Dannet +Twopothouse +Doneraile +Schutz +Conditions +crystallography +Cheetham +Verwey +cation +valence +characterising +rationalise +synthesised +colossal +Meldola +Wayback +Perfecta +photoplay +MacNab +reciprocity +Rankin +Postmaster +Lambton +Printer +Baan +Thitiya +habitable +Brewhouse +Arbon +Oaken +parlous +Cowper +Moorgate +Havana +retraced +Aden +Departing +Gibraltar +Culebra +Honolulu +Magdalena +reentering +Conestoga +refilled +chasers +oiler +ARO +phoneme +inventories +articulatory +perceptual +Values +conversely +perturbed +Quantal +redundant +alveolar +puff +plosive +loudness +robustness +Curling +Universiade +Pinerolo +Palaghiaccio +Heidt +Zou +Milos +Krystof +Radek +Tomas +Novak +Margheritis +Yusuke +Tsuyoshi +Masanori +Tetsuro +Ogihara +Peju +Hayley +Katrine +Lu +Chunyu +Elettra +Sakurako +Asuka +Yogo +Liudmila +Nkeiruka +Ekaterina +Svetova +Jeung +Stina +Sigrid +Kamp +Niki +Chantal +Lysa +Hambley +Playbills +Freed +Classroom +balconies +atrium +soundboards +adorn +Baur +directresses +gargoyles +Schmidlapp +DVAC +mainstage +carpeting +TAIT +exposable +Balcolm +removable +Ticket +Pricing +Diplomas +Lyric 
+Makeup +Wig +prosthetics +Cirque +specializations +Abbeystead +Dunkenshaw +Tarnbrook +Wyre +Pickford +Milchan +Nikola +Grams +Charlize +Theron +Gotta +triplettes +Ngila +Rosenblum +Helgeland +Coppola +Djimon +Hounsou +Bello +Cooler +Pieces +Chiklis +CCH +Pounder +Kaczmarek +Meryl +Streep +McDonough +Boomtown +Steenburgen +Bateman +Looney +Mancha +Theft +Variimorda +villosa +Mordellinae +Apiaceae +Garnett +Yanow +sextet +Mabank +Malakoff +Neckline +waistline +Necklines +halter +Dress +necklines +gauzy +picots +accentuated +Ruffs +haute +bodices +Bardot +midi +trendy +resurfacing +FIPS +Estimated +Bosanska +Posavina +Moja +oomycete +yellowing +oomycetes +genitive +defoliation +Infection +amenity +entomologist +Maddison +Beever +Waipara +Clade +oospores +bumpy +optimum +microorganism +epidemiology +germinate +waterlogged +gnaw +snouts +tanekaha +mingimingi +rewarewa +Astelia +mycorrhizal +Northland +Biosecurity +Dieback +Plenty +tangata +Trigene +detergent +scrubbing +trialled +Waitakeres +iwi +Te +Kawerau +Maki +breached +Hunua +antifungal +injections +Horner +dosages +Ramon +Habana +Mostra +Grafica +Latinoamericana +Istituto +Italo +Foire +Contemporain +Cintas +Monsignor +Linder +draftsmanship +Connoisseur +Diez +Hasbrouck +Outwin +Boochever +Wounds +Portraiture +Hoodoo +Hamlets +Eider +rapporteur +Integrity +concurrencies +Azua +Domingo +yellowtops +sunflower +Tamaulipas +Kazue +Matsuyama +Iyo +Masaoka +Shiki +Akiyama +Saneyuki +Kiyoura +Konoe +Machado +Disagreements +figurehead +discredited +balloting +Avance +outcry +heeded +Conrado +denouncement +Osvaldo +Iturraspe +Majid +Futtaim +leasable +Emax +Hamleys +Clas +Ohlson +Areej +VaVaVoom +Nayomi +Armani +Hackett +Destination +Mamas +Bhs +Lakeland +Jumbo +fragrances +Dolce +Paloma +Abdel +Uno +Laiuse +Sootaga +Kaine +Billed +immersive +Commonly +Chrysididae +parasitoid +kleptoparasitic +mimicry +odours +ruby +Chrysidines +abdomens +pill +sawflies +Chrysidids +microhabitat +Compositae +Cliston +Beau +Geste +Fluxus 
+Amabel +Scharff +Vassar +VFW +Dobro +sam +ja +Nenad +Predrag +snowfield +Fryingpan +flatter +Ohanapecosh +Meltwater +Cowlitz +Ervauville +Loiret +Catawba +Belton +Arie +Hickory +Paintings +freelancing +Newsdesk +presenters +Secrets +Helicops +Nightwatch +Shillibier +Bullseye +DCI +Zhijin +Qiannan +Buyei +Miao +zhuanzhi +Zhengfawei +Habitats +insistent +GATT +IFAW +Fink +Exemptions +inuit +Liyanage +Piyasili +piilars +Trilicia +Kusuma +Merlyn +Thilaka +Jayampath +Balika +Suneetha +Bimal +Yoga +Poorna +Priyadarshani +Shesha +Matha +Surathali +Kalyani +Peries +Palihakkara +Ganga +Awaragira +Yahalu +Thattu +Dolosweni +Raathriya +Selalihini +Sudu +Himi +Ahimi +Uda +tele +Ratakin +Dag +Thomassen +Pors +Almenning +Neoterebra +auger +narrowness +eventuate +packager +koala +Agata +sopra +Ridolfi +Preachers +Tommaso +Turco +Disquisitionibus +moralibus +Creators +Quesada +manhood +Cort +Furth +Hambry +Benton +Shellsburg +Parkers +Jardin +actuality +enclosure +Zbrun +Margitson +Cloak +Hogle +stranding +speeders +centercab +FLIRT +Commodities +Promontory +rehab +Numbered +EMD +Facilitation +VN +Hrvata +normalise +TR +Moya +Talca +electrodes +electrochemistry +catalysis +electrocatalytic +Eduaro +Frei +Rudolph +Bulnes +Milenium +Zanlungo +chilean +ECS +Carbonaceous +Iberoamerican +coedited +Scopus +citations +electrode +keynotes +profesional +postdocs +Dodelet +Bedioui +Electrosynthesis +Chimica +scottish +Caiphas +Superstar +firemen +Presage +MacSoft +Digmo +Monks +freeform +Deathmatch +Gowan +databases +Toolkit +XBoard +Crafty +GNU +Rybka +Move +Searches +FICS +Przewozy +renewing +Sudamericana +NORCECA +unmistakable +Argentinean +laborer +KPCC +Gereat +Veniyambadi +Khilafat +Moplah +Riots +lathicharged +Kannur +Malabar +Subhas +sahib +Pottashery +Chennamangallur +Kodiyathur +Eriad +Nasrul +Orphanage +Indianness +Achuthan +Maranamillatha +Manushyan +amity +imbibe +Abdurahiman +Kunju +Narain +Hameed +Chennamangaloor +insinuating +Pavlo +Dieter +Tekin +warehoused +nondescript 
+Geographically +deploys +receipt +Marshals +vaccines +countermeasures +Walmarts +deployable +acuity +FMSs +Influenza +swine +influenza +Pandemic +Reauthorization +reauthorized +Iyokuni +Ehime +Bodhisattva +Precepts +precepts +Sanron +Mittler +Edson +Padwa +Glauco +prodigy +Benjamino +Gigli +Rami +multiculturalism +disseminated +Promote +Latinos +Binnie +bridging +SJSU +Racially +Ethnically +Diverse +REFORMA +Lakshmipur +Belbichhawa +Rautahat +Narayani +Shinonoi +Shinetsu +Shinano +Kawasaki +Birinci +Selimiye +Sabis +Thrace +pasha +Mirliva +Nureddin +Miralay +Mehmet +Emin +Fahrettin +Narta +Passersby +donkey +Chaleh +Siah +Mian +Nur +Mazandaran +Stillwater +Stolte +platted +Rapelje +Molt +Riopel +mercantile +gripped +Robideau +placard +Helmut +logics +Informatics +Adjunct +Refinement +checkers +FWF +Doctoral +Methods +Rigorous +honouring +checker +MAGIC +ERC +Harnessing +Checking +Nicolae +plenipotentiary +Uppingham +Rudan +Salamon +Walby +Stanwix +Milecastles +destructions +Counting +giemsa +virulent +lytic +turbid +lysogenic +phages +gingival +opiplasi +Spontaneous +proinflammatory +cytokines +Necrosis +nouvelles +importants +Trappes +tenfold +bicycling +Europcar +Immeuble +Quadrants +UEC +velodrome +Bommasandra +Anekal +Rhytidops +Dramdul +Youtso +hierarchical +Damdu +Khangnyi +compassionately +Chaksam +Darjeeling +ammunitions +Kalon +entitlements +Trimon +Chamba +Manchu +Aufschnaiter +Trabshi +disunity +centralised +restructure +Tibets +Drepung +fracas +Gangtok +Grwa +bZhi +dNgul +Khang +Trunyichemmo +Cawtang +mint +Trisum +Kyichu +girders +Potala +Cheleh +Gharb +Awarding +forgettable +Reviewing +respite +mirthless +Indicating +Breimeier +Freak +Assigning +shun +Runaround +ventriloquist +Knucklehead +Smiff +Mort +clicking +tote +Voting +concejo +coalitions +Juntas +consejo +consell +cabildo +Ecologist +Candidacy +Fauverney +Migdal +Jebatha +Jacotin +Karm +Ukhneifis +Khuneifis +Nazareth +Exploration +Daher +Dayan +Sursock +Zebulun +Grinding +Wheels +Pinchas 
+Shlomo +Fischer +Offences +Garda +TD +Bakke +Steffensen +Aalesunds +Flughafen +Kirchberg +Simmern +Ramstein +Bitburg +Spangdahlem +airfields +AFB +inactivations +Lufthansa +Qatar +Aeroflot +reloading +basing +Yiqian +AirBridgeCargo +terminals +Dreamlifter +Bullay +Autobahn +Earldom +assession +Customary +Villani +Liberi +Stipite +Villeins +Aqua +Keychain +Bonjour +bookmarks +tabbed +Vidur +Apparao +Netscape +Macworld +QA +browsers +WebKit +moveable +testbed +ONZM +Akatarawa +cyclopropanation +methanofullerene +bromo +derivative +diethyl +junctions +hexagons +steric +abstracts +proton +enolate +bromine +intramolecular +cyclopropane +halide +tetrabromomethane +diazomethane +Esters +methano +electrolytic +Nylander +Seaward +minesweeping +Elands +Malgaskop +HMSAS +Smuts +NARYSEC +theorical +Maryiln +terrifically +Carlita +wowed +Noelle +Jasper +Add +Catalano +Arianna +Hernandez +neurotransmitter +conclusive +methodologies +depletes +DanceSafe +Backing +depletions +abstinent +cerebellar +Cmax +dosage +hematological +causal +antidepressants +MAOI +SSRI +parenteral +precipitate +nausea +carcinoid +preclinical +EMS +contaminant +Tryptophan +hydroxylase +Tmax +enhances +dosing +decarboxylated +nutraceuticals +pharmacokinetics +impractical +attenuates +abolishes +responsive +serotonergic +intermediately +simplicifolia +satiety +IndyCar +Lai +gilded +Ayutthaya +Thonburi +Burmese +namya +horadan +detach +unpainted +thong +Bolero +Altos +Caceres +Ecuadoran +Arauz +Elsa +Stairway +Callao +Yma +drift +Puma +pulmonary +Bronco +Regals +WHL +Chevies +Millers +fluently +Curtain +Hokej +Hokejowa +Shero +Metropol +Kissinger +Aleksandra +Valeri +martini +Eagleson +Makarov +Dealing +Bossier +asthma +McKinsey +Payor +Provider +Olsson +Alfie +Hewett +campaigner +Nell +Thurber +Brick +Medlin +eloped +restrooms +Appalled +Impatient +childcare +audited +Amon +TXU +publicist +Garson +Pidgeon +Ill +unveil +Stinson +sculpting +pedestal +etched +Eupterote +nigriceps +speck +undulation +Foxwoods 
+Mohegan +Commissions +denials +suspensions +computerized +Hearst +Lepilaena +appended +Austral +marina +Marpessa +cytotoxicity +humoral +immunoglobulin +Antibodies +Receptors +perforin +proteases +nucleated +integument +Effector +PBMCs +mononuclear +Cytotoxicity +quantified +assay +radiolabel +scintillation +calcein +europium +Fluorescence +fluorometers +cytometry +assays +lysed +GAPDH +luminescence +rituximab +FcgRIII +myeloma +daratumumab +Purine +phosphorylase +Pippin +interpret +undiagnosed +Toshiko +Grazia +Deruta +Majolica +expressionistic +Kooning +eschewing +douses +intrude +majolica +Totem +Carnivale +Lustre +Pinnacles +lustre +apogee +dousing +paraffin +waxes +Liquitex +interplay +echoes +Kitty +recede +Coalescing +linework +incisions +inscribe +tracery +oeuvre +helpers +porcelain +pugged +repugged +Powerhouse +bluegrass +Trischka +Statman +Stift +Krems +Donau +craftsmen +Cumans +Hussite +Moravia +Heidenreich +Refurbishment +Ambros +Minarz +Swastika +Sturmabteilung +Knappek +Vitus +Prelates +vestibule +Judgment +Assumption +Natur +im +nurseries +Religionen +meadow +plum +grove +Apothekergarten +Apothecary +herbs +Stille +hives +hobby +Kreuzganggarten +Frescoes +Crypt +Garioch +Dividing +Macalister +hugging +Cosmoball +Galactik +CosmoBall +earthlings +Mironov +Yakovleva +Mosfilmovskaya +Babylon +coziness +hubbub +kaleidoscope +Relaxed +beauties +interplanetary +coexist +hovers +Amazonian +Bonanza +kinohit +Gambit +Furious +Stalingrad +Duelist +Twister +Andrey +Spaceball +Gatekeeper +Alekseyevich +Sidakov +Andreyevna +Singing +Shchukin +Mikhailovna +Yefremov +chroma +creepers +dusty +rags +seedy +sullen +nicer +stitched +Serj +SOAD +Arman +inertial +Complement +Nashe +Kino +Jinja +Sserumagga +rehabilitate +Caf +Bunamwaya +Rayon +Sculpted +Munkegata +cobblestone +calibrated +Closeup +colocation +Trafford +Farinet +Verbier +BusinessCloud +Equals +redress +CSR +Inflexion +ethos +maternity +ICCC +Influential +collated +Spraggett +Francophone +Bareev +Callie 
+Hollins +Browder +Godbold +Mims +Adidas +Goalkeeping +Ayala +Kilmarnock +protracted +Uncompleted +Waghorn +Rumoured +swoop +loanee +Blanchett +Notts +Connolly +rectify +tumble +Fees +Thefigure +Month +patience +conveyor +juddered +stalled +rusted +Lewin +Nyatanga +goalkeeping +Oakley +bundling +Ishmael +mazy +mauling +enquired +Perch +sandwiching +destabilizing +referee +clouded +Lafferty +slumping +sandwiched +midweek +Hotspur +Leacock +Ekstraklasa +Noel +Jos +Lallana +Tadanari +Danns +Bournemouth +Huseklepp +Ince +rupturing +patellar +Hadji +Diouf +Pitman +strugglers +Elland +Varney +eqaulising +runouts +midfielders +Stamps +Crooks +Lelan +Kwame +equalising +scorers +Metgod +Kneehab +quadriceps +Hoare +Antiquaries +succeded +Surveyor +Fabric +Verily +Templewood +Doliones +Fleece +Cyzicene +mistook +Dolionians +Fractionism +Comunistas +Carnation +musseques +predominance +multiracial +fractionism +Belamino +realisation +Mandibulata +stocki +myriapods +Triops +crustaceology +pereon +pleon +moulted +somite +sternum +exoskeleton +mandibles +pereiopods +maxillipeds +uropods +Crustacean +antennules +cavity +circulatory +haemocoel +haemocyanin +alimentary +kidneys +pleopods +hermit +Cymothoa +headfirst +branchiurans +mitten +Eriocheir +Hemigrapsus +Parthenogenesis +fertilisation +Marmorkrebs +fertilised +decapods +isopods +pouch +Branchiura +ovisacs +leptostracans +naupliar +Zoea +cephalic +mysis +megalopa +Belon +Systema +nomenclaturally +Morten +Thrane +Fundamenta +chelicerates +Hexapoda +nested +Mystacocarida +Canadaspis +Perspicaris +Maxillopoda +Ostracoda +Tesnusocaris +Hoplocarida +Phyllopoda +Aeschronectida +Canadaspidida +Leptostraca +Cumacea +Isopoda +polychelids +Ophiomorpha +Camborygma +Nurra +fluvial +burrows +crayfishes +lobsters +decapod +biomasses +consolidates +CUDI +federated +digitizing +geneticist +arthrosis +professorial +GenomEUtwin +Wellcome +Sanger +Gysel +Academician +Suuret +Finns +postage +Peltonen +assassinations +Embassies +Takhlakh +Trout 
+Pinchot +Basson +Chartist +Lalor +peacemaker +aggrieved +pauper +Pedagogical +formalism +Khrushchev +Baiba +Haralds +Inta +textural +decoratively +splinters +Laimonis +murals +muralist +fishermen +kolkhoz +Figurative +Decorative +Kupa +Deeg +Marathan +Marathas +Holyrood +trigonometrical +calculator +Pipework +Plumbing +Townhead +Kelvingrove +Anderston +Falklands +defying +McElhone +televising +Chapple +expulsions +Heffer +Meacher +PLO +WPC +Gorbals +Hutchesontown +Ancram +Penal +parole +mistreatment +Owing +NUR +Garner +Tiles +Hsu +Madari +Pillyalil +Sabashahr +Shahriar +Cessnock +premiers +Woodruff +unwritten +USTA +WKDY +Weakest +Gameshow +emcees +humorist +Concurrent +Rowthiram +Mangum +Vallavan +Kalam +Garnier +SmithKline +Beecham +Launcher +IHI +Disarmament +Fruits +Chibi +Fullmetal +Junjo +Romantica +yaoi +FB +LN +Stanislawska +cadres +Drutyskie +antitank +Yelnya +cadre +counteroffensive +Bukryn +liberating +MLRS +Strictly +beag +Lowland +vocables +learner +memorization +Nether +Lorn +Kilberry +simplification +tempos +embellishments +accented +singling +doubling +categorised +personages +Unjust +verifiable +Dunvegan +isle +clearances +emigrating +suin +Cha +Og +Campsie +reframed +Hanoverian +anxieties +aggrandising +romanticism +concur +arpeggios +Likely +Craoibh +Cumhadh +Matheson +crann +Ruaidhri +Hay +Freuchie +bowing +scordatura +Iarla +Easpuic +laments +Gow +Aristocratic +Uist +recounts +Morar +MacAilein +Tarbh +Speckled +simile +Largie +transcribers +dispraising +Sloinnidh +Piob +Pipes +MacMhuirich +screeching +limbed +slaver +filthy +Clanranald +Mhaighstir +Mhic +shout +Sighing +deft +shamed +untuned +Dull +Rousing +Bardic +Bagpipes +supplanting +dynasties +orally +Hannay +MacAuslan +supplementary +asymmetries +phrasing +collating +prescriptive +McCalister +Distinctive +Barrie +Orme +wrenching +extemporised +Dhan +Taigh +Bhuan +Leat +Dwelling +compositional +Molard +Brest +Pioba +Manx +Stivell +appropriation +Violaine +Marshalsay +ms +Info +Ceol 
+Burns +Caniad +Silin +Vaunting +Caswell +McGibbon +Violinists +Bruch +Granville +Bantock +reworking +Hardie +Ioarla +Wigton +revivalists +bagpipers +luter +Fiddler +Gordons +cerdd +dant +tabulature +reconstructing +theorises +Clymau +Cytgerdd +Clairseach +Ereanach +Thomason +Lively +Drochaide +Isleman +Timoney +MacRae +Va +Bec +marquis +streetnames +Bourg +marl +fertilize +deneholes +sainte +Nave +Lustschloss +Nasturus +Surus +Fatimid +nathaliae +Milford +Vivian +Listeriosis +deli +sincerest +Words +Officials +microbiologists +peroxyacetic +ammonium +isopropyl +refrigeration +disinfect +McGuinty +tainted +propane +CFIA +mousetrap +Atamanenko +NDP +Layton +tasteless +cheques +Rata +Claimants +Llave +Krasnoperekopsk +Tudela +juris +chairmen +CNMI +Korn +Halldorson +Balding +Knudson +anticipating +cohesion +Golfers +reborn +Arron +RBC +Opens +Beisiegel +Million +Talor +deadweight +rpm +Ferus +Smit +Delfzijl +ballast +tugboats +Coppel +discography +Morbid +Cogumelo +Schizophrenia +Arise +SPV +XXI +Clockwork +Eloy +Casagrande +Dollabella +Mediator +Hands +annuals +novellas +Felicity +Libby +Liljana +spokeswoman +Declan +Ringo +Liebmann +intruding +dodges +edgier +Sienna +Greta +Raindance +vlogger +Louna +fanbase +visuals +Woodburne +Jakubenko +Robbo +Rosenow +Morey +Calen +Brinsford +Imogen +Milly +Tenplay +Grixti +Heron +alarming +Adz +Rawlings +Mullins +Colette +Crocker +Angry +Janine +Caitlin +Stasey +Kinski +Saskia +Hampele +Revealed +Reunite +Junkeer +Georgie +Lachlan +Millar +Turland +Jonathon +memoribllia +Topps +Signed +castcards +Impulze +trifold +Toadie +Sonya +Revised +Hymnary +hymnal +polyhydroxyalkanoate +polyesters +polyhydroxyvalerate +polyhydroxyhexanoate +polyhydroxyoctanoate +Cupriavidus +Methylobacterium +rhodesianum +assimilation +glucose +polymerize +compostable +renewables +copolymer +PHV +Metabolix +fermenters +TephaFLEX +bacterially +recombinant +Tepha +proteobacteria +Acidovorax +fumigatus +Variovorax +paradoxus +microbes +Alcaligenes 
+Illyobacter +testosteroni +stutzeri +tripartita +Mozambique +toddlers +Jugend +Mita +Sakurada +Hijiri +ghosts +Mori +Zamoshye +Tver +Galleri +Vestre +Separated +quieter +Universitetsparken +Nobelparken +radiates +Skanderborgvej +Mejlgade +Gade +bikeways +Brabrandstien +Hallssti +taxis +Lille +taxicab +pedestrianized +shoppers +commandeered +ceasefire +Invalides +Ruhla +Gotha +eyewitnesses +Ohrdruf +Thuringia +Bhallowal +Phillaur +Jalandhar +administrated +Sarpanch +Caste +Finis +Opus +Coronat +diplomas +Rockland +dip +Invent +dorm +shrinking +Netherthorpe +Springwell +Rufford +Grimston +Lillooet +levy +confederation +pressuring +Transliteration +colonist +uninterested +Mersa +vowel +Raigad +rentals +Agar +Borli +Velas +paddy +Kolad +suru +belu +Harihareshwar +PUNE +BUS +Sheelahar +ganesh +murti +murthi +watchmen +speechwriter +Hobert +addict +counsellor +Stagedoor +Salafist +Benno +Annemie +Turtelboom +Vlaams +Belang +Morel +Crem +Anjem +Opstelten +Gerd +Leers +burglary +resisting +extradition +Dyblin +Lipno +partied +McGregor +Caramanica +Blender +Cinquemani +Kelefa +Sanneh +Warm +Inventions +Seminal +Jansch +Mornings +Wildflower +Doorway +chimes +spellbound +summation +Smile +Mojo +terrorised +Plavay +Moldovan +Murderers +Moodna +Wyntje +Mouth +moeder +modder +possessive +apostrophe +Semipodolaspis +jawless +Chadwell +geriatric +superheroine +interceding +Kirkland +SAFL +Norwood +Rebnise +Kusumoto +Ryota +Epics +Fumihito +distressed +Houki +Jikyuu +Nakano +indescribably +splattered +grinning +utters +guesses +capeside +Capes +saki +Acknowledging +Soken +Famitsu +DSiWare +Oku +Appli +Kkoi +Eijaz +Poorva +Gokhale +Sunidhi +bubbly +Guajrati +Punj +orchestrates +playboy +blaming +miseries +complicate +Premala +Thambyapillai +Designs +Junius +Fintown +Deepa +Niraula +Jeetu +Shiwakoti +Choreographed +Gautam +Khadka +Ujwal +Baniyas +Tartous +Pointe +Mouillee +Ash +Artium +Baccalaureus +Myself +Jabs +Saved +Falkous +WILL +KILL +PLAY +FUCKING +Patashnik +coruscating 
+unordered +Veitchia +pachyclada +widget +Programming +robotic +analogue +phidgets +Flex +Robotics +Livorno +AmLaw +Kendall +FTSE +insurer +GTS +biotech +Sokol +DiUbaldo +Dannel +TheStreet +Becket +TiE +Strategist +Dailybreak +Partner +bootcamps +Thrive +Vito +sorely +Reactions +reinsurance +executing +earners +trusts +Equality +Paulette +Corners +Trulls +nurtured +titans +SuperLiga +Surdulica +Varvarin +Noting +Gradina +Borac +Prva +Petoletka +Borko +Kraljevo +Pazar +Topola +FAP +Pairing +Toucheng +Keelung +Taihoku +Daha +Kalikot +Karnali +affable +extricated +indispensable +Folan +Craye +Stiffy +Byng +conniving +Steggles +Fairfax +Dany +Screenonline +flavour +Highclere +Wrotham +Halton +Stanway +Sidmouth +Clandon +Horsted +Keynes +Zakaria +calligraphy +censored +brutality +Tahrir +bra +gorillas +paradox +aniconism +internationale +Barbican +Monde +Arabe +DIFC +Barjeel +Perrin +Meneret +Parr +Grappenhall +righthanded +jurist +Offender +Haggadah +Klingspor +Offenbacher +Buchenwald +Eugenie +Wertheimer +urns +Nabam +Atum +Caudron +draughtsmen +Sopwith +BAT +Teterboro +flyers +Autogyro +Pitcairn +revolved +seaplane +tubing +fuselage +stringers +seismologist +seismograms +Seismic +Cascadia +subduction +Gutenberg +AGU +earthquakes +therein +eikonal +Astor +doorman +steeped +Bodiam +Puck +Fairies +Alley +Lockwood +Heathfield +Hawkhurst +Wealden +watermill +Darwell +Bexhill +Cricketer +Relf +frontman +Daltrey +Holmshurst +Lakedown +Kingsley +Amis +Callaghan +Salesian +Libertad +urbanized +RENIEC +Ayacucho +longitude +meridian +Humidity +Eidolon +Abominations +Skolnick +sincerely +beatboxer +beatboxing +Compulsory +Mona +Reijnen +Rohda +WVF +Vitesse +Arnhem +Zwolle +Eredivisie +Kaayla +Metropolis +Battlestar +Lasky +Halo +Unto +Fartullagh +Anneville +Dunboden +Rathduff +Rochfort +Tallyho +Enniscoffey +Carrick +Kilbride +Cythara +citharoedus +Linkerton +Welle +Conny +Plank +Tours +Roskilde +clarinetist +COMMISSION +Duo +improvising +Affaere +personen +Hohenbild +Dittmar 
+Tholen +cleric +Sayed +seminaries +Karbala +Sadiq +Mujtaba +Eminent +Ulema +Tenn +Jaxx +Maddux +underachiever +underperformance +Perunin +Absent +Headlights +Hoosiers +livredd +Keiino +Ulrikke +antiarrhythmic +MI +contraindicated +Wyse +infarctions +PVC +asymptomatic +Holter +ejection +PVCs +arrhythmias +ischemia +spelt +pergunnah +shiqdar +amin +munsif +arbitrator +karkun +subahs +sarkars +Pargana +Governorship +zamindari +zamindars +dastur +tehsils +Tonk +Florent +Piles +biographical +Romeyn +Hooghe +RKD +referendums +Elachista +kleini +Amsel +Punchi +Dissanayake +Schilling +honorees +Tug +philatelic +Philatelic +Guisande +Takhinsha +Haines +Chilkat +Riggs +Tlingit +Creighton +ACL +DBL +NBB +Mathis +Bluestar +Rickie +Jonsin +Unusual +Suspects +Mix +Kallman +auditioning +Bow +mixtape +Kadcyla +competitiveness +rebates +Alternatively +frazioni +Amapano +indefinability +EYE +Layla +Dias +Jayasinha +Ceylonese +Mahinda +Balut +Donbaleh +Jonubi +Dehdez +Izeh +Saranghal +intertwines +fevered +shark +extortion +Semirom +Isfahan +Padena +Holmberg +pockets +gluttony +disparagingly +transnational +Tentative +Philibert +Naillac +Seljuk +Boileau +Mausolus +Decree +Maussollos +cornerstones +cisterns +langue +manning +Eventual +assailants +projectiles +Cem +Bayezid +Fabrizio +Carretto +Docwra +smashed +slabs +Amazonomachy +Marmaris +Kos +Evliya +hamam +toppling +Mycenaean +Coin +Jewelry +Uluburun +Turquoise +peacocks +Yassiada +yondan +godan +Tatsukuma +randori +sparring +dojos +concussions +Shotokan +Gichin +Funakoshi +Chojun +Gogen +Oyama +dojo +Aikido +Gozo +Neichu +Toshio +Jiu +gi +Ibirapuera +harai +uchi +mata +Helio +ukemi +mat +armlock +Kokusai +arteries +partying +Katsushi +Murata +dantai +peritonitis +yakuza +Toshiya +Masuda +IPWF +Rikidozan +Masakatsu +Funaki +Imura +capoeira +smiled +grappling +judogis +hane +goshi +Correio +Manha +Balbino +capoeirista +headbutted +elbow +headbutts +disengaged +Mei +protease +adolescents +UNDP +medicaments +MSM +undetectable 
+prevalance +naive +plateaued +hovering +Prevalence +bisexual +Thane +focussing +cashless +cataract +HealthTech +Turisas +Somers +Merricks +Epikh +Syk +Sango +Rihanna +PopSugar +dreaming +BoomBox +MCMXCV +Vocalist +roommates +Scaterd +moot +Dennee +Pelt +Collective +Denison +Marrs +Torreano +moan +Abacus +Praising +presale +Marketed +whitening +baking +peroxide +imitator +reformulated +peppermint +minesweepers +Unclassified +Reconstructive +Broelsch +enticing +regenerative +regenerating +cranial +Adipose +mastectomy +Opting +Kotaro +Yoshimura +ADSC +engraftment +nanoscale +biomaterials +graft +neurodegenerative +Alzheimer +rejuvenates +liposuction +cirrhotic +Lilienthal +Reichsmarschall +swastika +Chaudhary +Charan +Lamia +Moulay +cleanest +sensitize +Hassanate +Khalil +Benharbit +polychaete +chaetae +chitinous +rimmed +filamentary +scooping +swallowing +eversible +sizable +Alibhai +Mulla +Mombasa +resentment +resentments +Obote +symposiums +Karine +Getty +Frick +Indaver +heavenly +Maerten +Vos +Michaelina +Wautier +Antoon +Dyck +symbolist +Valerius +Saedeleer +expressionists +Permeke +Edgard +Floris +surrealists +Vanriet +Atelier +Minnebo +Tuerenhout +CoBrA +Pedersen +pharaohs +tunicas +Schvartz +cityscapes +Blaeu +patrimony +artworks +Zot +Dymphna +Goswen +Norbertine +Tongerlo +Geel +Thuis +bij +Psyche +maximal +OER +integrally +Gossaert +Waasland +Constantijn +Huyghens +Rubens +aan +Stroom +DIVA +Snydershuis +cooperates +VOSSEN +Keizerskapel +Snijders +Vijayakumar +CLAT +Sasakawa +USIS +PostBank +Rooms +Cotting +Hortensia +infrequently +Hortensius +nomina +Urbinum +Umbria +Hortensis +Campania +nobilis +Cicero +Sempronia +praenomina +Ferentinum +Aulus +Assembled +Alternatives +systemization +Sulfur +robotically +Biotreatement +hydrolysate +biosludge +Biosludge +decontaminated +Problematic +PCAPP +Demilitarization +DPE +encapsulated +Samples +PuebloPlex +Orecta +acuminata +liga +Bystrica +modestly +extrovert +Riddare +huuss +platzen +Riddarehuus +Torget +Vasa 
+Increasing +Discussions +Slingerbultsleden +Riddarholmsbron +Riddarholmskanalen +Riddarholmen +Riddarholmskyrkan +Myntgatan +Kanslihuset +Kanslihusannexet +Chancellery +Storkyrkobrinken +Nygatan +alleys +Vasabron +Occupying +Vingboons +Adelcrantz +Corinthian +bricked +Isak +Clason +Riddarhuset +Nicodemus +Tessin +accommodating +Ionic +restorations +Sackenhielm +troubadour +Salva +accessibilities +availed +Baka +Mundu +Nabanga +Maruko +Belukha +altitudinal +Teletskoye +Beluka +Elysian +Altaians +mummified +tattooed +sacrificed +archeologists +Scythians +seizes +herding +endangering +Sofiyskiy +argali +steppe +Tombs +Menander +Aemilius +Exploring +permissive +spar +monologue +urbanity +openhandedness +pleasantness +dicam +quod +isti +facilem +festivom +fieri +vera +vita +neque +adeo +aequo +sed +Sostrata +chateaux +Olomouc +Oubapo +Ouvroir +BAnde +Maguen +Synagogue +Honduras +Guido +Mantua +noblewoman +senatrix +patricia +Liutprand +smothered +Theodora +margrave +Adalberto +nobiliary +enactments +Scholastic +Altair +SECO +LOX +boosters +typo +postflight +Oneida +zipcode +Rosette +Pofessor +Kavita +Lalit +Bahl +Bioactive +Lipids +Inflammation +triplet +delegari +delegatus +delegare +subdelegation +conferring +ROY +SCC +vires +Enabling +Yeates +Noarth +transact +delegata +Carltona +caselaw +novum +novissimum +codicis +Sacraments +Sacramentals +Pars +dynamica +Canonization +Journals +apostolic +Bakel +Tambacounda +Gastein +Sesil +Czechs +Committeeman +misapplying +Calvert +Answers +Pseudostomum +klostermanni +flatworms +Seas +Myoxanthus +Chaetocephala +Duboisia +Reymondia +Kuntze +Colchester +Zacatecas +Ensenada +Montezuma +Alphonsian +presbyterate +incardinated +Promoter +Presbyteral +Assessor +Savage +Wintour +couture +Ripper +Stalks +Widows +hologram +convolutions +Cotter +ethereal +steers +Menkes +decadence +Damien +breathless +stimulating +Thurman +bother +astute +Patrons +memberships +sprinter +Bingu +Mutharika +Kuchak +Shahr +Evaz +Larestan +Marshes +Brezovica 
+Veliki +Notranje +gorica +gora +altars +Zajc +Janez +Performers +Dido +Alcina +Theaters +Midwinter +Rinaldo +Sephardic +lieder +Virtutum +Jonatan +catchphrase +conceit +Safire +etymologists +Rosemarie +Ostler +topper +streetball +Mixtape +dunking +entrant +Noise +Dwan +Bodil +undercooked +protozoa +sicken +Gastrointestinal +irritation +Parasitic +uncooked +unwashed +anilingus +sandboxes +reinfection +piped +turpentine +anthelmintic +Ardie +Wick +Jakwob +Ting +Facsimile +Zayn +sexualized +heteronormativity +Polari +homophobia +Bote +treads +hookup +normalizing +Kowich +Ghazal +Lol +Nyaruet +Kar +Ayuel +Mabil +Akenyjok +Dinka +Suit +Ealing +Dighton +Mangan +Stratton +Tointon +Omey +Doon +Mweenish +cursing +Teampaill +Seacht +cures +conservatories +Comptroller +audition +mountaineer +parbat +Karakoram +Owais +Khattak +Margalla +Chenab +motivational +Khurram +Sarwali +Toshe +Neelum +Azad +Nanga +Awais +Kel +Judging +Videoland +TVB +Relying +Takayuki +Miho +Shiraishi +homages +Daicon +Toei +Maetel +Shift +JIS +chronicles +Kazuya +Jinkama +Ushijima +woo +Introductory +Sore +fAKE +metafictional +Rabbit +Koide +ME +STARTING +Replicas +Arrangement +lentiginea +viscid +obscurely +terete +elliptic +shallowly +Racosperma +lentigineum +Pedley +Igneous +geochemical +motivating +Collecting +Motives +scoured +Demi +cashmere +compensates +Psychologists +Freudian +propensity +nourishing +blanket +clings +Stuffed +bowel +frightening +flushed +archetypes +unemotional +guys +Petrulis +camaraderie +artfully +Tops +drawers +dumpster +objectively +piled +neurologist +subcortical +limbic +hoarders +hemispheres +Palimna +annulata +Delano +vigorously +implementer +Innova +Sonoloco +Loco +Nordin +Couture +tutelage +Mirko +Semen +Musicale +Chigiana +phonograph +Uneventful +Mstislav +inimitably +august +Fanfare +glad +cellists +gentler +sensitively +harpsichord +vibrato +glissando +alike +Michaela +Miroslav +intently +Gagliano +Competition +Bohuslav +Disque +Supraphon +streetfront +Hally 
+Seiei +Akio +undercard +Empresa +Mexicana +Chavo +Tatsumi +NJPW +Seiji +Sakaguchi +Maeda +Kido +heavyweight +Yoshiaki +Riki +Choshu +Masa +Hiroyoshi +Tenzan +Masahiro +Chono +Takashi +Ishikawa +BJW +Shiro +Heisei +Ishingun +Kuniaki +Akitoshi +Tatsutoshi +Michiyoshi +Inazuma +wrestled +Schmeidler +Assume +monotonicity +envies +Leontief +GIven +Weakly +Efficient +WPE +Kakutani +contractible +shrunk +Eilenberg +Svensson +EEF +maximizes +logarithms +Summing +subsets +Chokishgna +Lemuel +rectangles +tinctures +delineate +cadency +legitimized +chequy +Cullimore +gules +crescents +Diaphus +lanternfish +Shuishang +Marki +haywire +Arrowhead +Ceylonolestes +Kimmins +Anal +Segment +Tandem +davenporti +Shreveport +MTR +racinos +Presque +Nemacolin +Mountaineer +Racetrack +Salmson +Estelle +Baskerville +ignition +penalized +Jurong +Ayer +Cantus +Buranus +Corvus +MCHR +DNC +Liberties +Studs +Terkel +NSA +warrantless +Bergbaurevier +MSV +kleines +Revier +Schalker +Kreisel +Revierderbies +Oberliga +Ingo +equalized +VfB +slim +derbies +Borra +Theodoros +Skilakakis +Kyriakos +Papapanou +Marianna +Tzani +Tzannetaki +Mitsotakis +Bakoyannis +disagreeing +Antypas +Morbihan +Lourdes +Sebastien +archconfraternity +miraculous +petit +solemnly +Pardon +Sainte +Nicolazic +Inhabitants +voeu +Bouguereau +Osgoode +alderman +Scent +deCarteret +Mornington +Bennetts +Ockenfels +Promotional +Rosalea +Rosealia +Kleinman +strays +Bianco +Sawaal +Majha +Mane +Dhakti +Jau +Manini +tamasha +looser +lugada +Jyotiba +Arun +troup +sorry +animosity +lavanis +Jagadish +Khebudkar +Suman +Kalyanpur +Mantaha +Tareen +Eshita +Aashir +Rasheed +Shehzad +IMGC +cinemas +bosses +Pashtun +Zeezo +Omer +Sep +Rafay +Mahmood +Rubina +Behroze +Sabzwari +Atiqa +Museu +Transportes +Alfandega +Bowie +Perimeter +Institutional +BRCC +Jevon +choking +Grupe +furor +Cardia +Spiritist +Titchard +contemporaries +Atherton +Grapenhall +Bramhall +Dergenagh +townland +Dungannon +Killeeshil +Llanelli +Lampeter +reforming +floodlights 
+Begun +officiate +Jeannie +Longo +schoolmaster +Airco +Reuben +Hammersley +downing +Rumpler +Servais +DFW +gazetted +Hongkong +yogic +tirthankara +essentials +Digambara +Jainism +moksha +Arihant +Bahubali +Namokara +devotes +instructorship +simplicial +hermits +thanksgiving +rehung +rood +Ease +graveyard +meats +unrealistic +justifying +emulating +grazes +fertilizes +graze +pastured +coops +fertilizer +compost +harvests +Respecting +pigness +sellers +Pollan +efficiencies +slavishly +Handled +Safran +autobahn +candidly +Sheer +Ecstasy +pasture +localizing +Multilingual +LIP +SKUs +glossary +Glossary +Odiham +Crabbet +reappears +Pensioners +Dacre +executors +Neighboring +Hom +Ngao +Phrae +tambon +Generating +commensurate +Excavations +rai +thumbnail +ZUIs +hyperlinked +resized +CRT +Sketchpad +Archy +ZVTM +INRIA +Squeak +Grauman +Adriana +Zoomspace +PDA +Miura +HoME +Deepfish +homescreen +folders +Browser +Collage +notepad +Zircle +Gustava +Wheaton +Wingate +Neglect +Geologic +Sedimentary +Strata +Creationist +theologically +creationists +disgrace +provisionally +Themistian +Schelte +Siding +Chagall +Kharu +Baqeran +Birjand +lb +Jalen +Ollie +Scavolini +Pesaro +Juniors +Thabang +Thopola +Chippa +Abramson +Akai +Terukage +Tatebayashi +Utsunomiya +Equipe +Pianist +Urcola +bassists +Avishai +Universiades +abutting +Bleaching +Dyeing +mitigated +Supplemental +Cupen +Varbergs +BolS +Ljungskile +Superettan +Airdrieonians +RGV +Sounders +Whitehawk +Hamble +Albertine +Bogislaw +Sophienkirche +Geschichte +Schattkowsky +Raaz +Virendra +Premada +Bhavani +heroine +governess +Shreeraam +Badly +shouting +extorting +deductive +culminates +Visas +Oakington +Nationality +Biometric +Permit +enquiries +Gov +tryout +Yibai +Xu +Jinglei +Geng +flirtatious +briefcase +blossoming +Yao +despondent +Chuan +hawks +ashamed +contemplate +schoolteacher +bandages +Yadong +Jia +Zhangke +optimism +Maroof +captaincy +Abdelaziz +Nezha +Pembe +Extricate +Sledge +Faces +Funny +Nervous +Enough +Artful 
+swirls +Christgau +approachable +swaggeringly +unmarketable +scratchy +Hanley +loping +Hear +Sturgis +Hinckley +Ygnacio +Bautista +lagoon +embarkments +trackbed +Unsubstantiated +UCV +OAS +Bolivarian +froze +Smartmatic +Anzola +Jaumotte +Preparation +Preparations +Jeju +trot +Korail +Baek +Seongnam +Ilhwa +Sangju +Sangmu +Jeil +Jumunjin +Coaching +Narayanganj +Daud +Jahan +Ardestani +Shuja +Munshiganj +Dhaleshwari +Sonargaon +Abul +quadrangular +polygonal +bastions +machicolations +storehouse +Lalbagh +machicolated +merlons +tetraptera +Wheatbelt +spindly +Manius +Acilius +magistracies +aedile +praetor +peregrinus +Publius +Scipio +Aetolian +intercession +Titus +Quinctius +tribunes +concealed +Acilia +intercalary +Whiteknights +Slingsby +Infants +Loughrea +Kilconnell +Longford +Scalp +Slieve +Aughty +easterly +bisects +Mountshannon +Whitegate +frogs +Itombwe +cupreonitens +Reproduction +subducting +Luzon +Apo +Visayas +Bicol +Ierpeldeng +Erpeldingen +Ettelbruck +Burden +Ingeldorf +Rappers +Taalat +DMC +Dopey +Bodog +MCs +Busta +Bif +Limore +Twena +Navarro +unreal +Warped +DCS +Hertzog +Purified +Nowopole +Chak +Logar +Maidan +vase +Kharoshthi +Kushan +monophyletic +Faboideae +phylogenies +Styphnolobium +ICBN +inclusive +kentukea +Rudd +Dermatophyllum +secundiflorum +Reveal +Swartzia +Orvin +Geovanny +Firpo +Pato +Sula +Narlin +Nayelhy +grindcore +Foel +digipak +Greenway +criticising +Sophomores +alliteration +unranked +Biselli +FG +Lancers +Streak +Amantia +Anais +Kial +Tynan +Saville +Drumline +Lombardy +Pavia +Giovita +confraternity +Alagna +Germano +Souls +Purgatory +Groppello +purganti +Confraternity +Evangelista +Crucis +parrochial +Bozzole +Lazarus +signor +Antona +Traversi +Sannazzaro +Delirium +Ermengarda +Adelchi +Enrico +Albertina +Cavalleri +wrest +NWC +sailer +Phoebe +Cherub +imminently +transpacific +Joazen +Whampoa +Anchorage +stale +Gaspee +gale +tents +unsalvageable +wresting +Comiskey +toned +schoolers +Minersville +Frankford +upstart +Signal 
+Collings +Purchasing +cooperatively +Westbook +intensely +Rayburn +midwest +Westbrooks +downpayment +Cooperatives +Lanham +Carmody +socialization +kickback +Companhia +Baylor +Parlay +RESTful +JSON +OAuth +Zaruby +Avanti +Abruzzo +Talsperre +Reinerzau +Freudenstadt +impounds +Purpose +bunter +Overflow +spillway +forebay +Steinau +Caspar +Engela +refreshment +Baird +Seventeen +Heren +Chiron +Christoffel +Freemason +Goede +scalar +LOO +affine +Obtaining +computation +minimizing +Hinton +Sascha +Nivek +Pennepack +Markel +Aranburu +RadioShack +testicular +peloton +Denny +Smithers +Tunkleys +caravan +larrikin +Cremen +pratfall +Sandow +Competently +resorting +stereotypes +AFI +pesos +embezzlement +scam +Gorda +Pudor +Hibbertia +longifolia +Botanist +Mueller +Fragmenta +Phytographiae +Australiae +Rockingham +oss +feire +Celebrate +Dalbackk +Espevoll +bandmate +Skorpe +Dalbakk +Elisha +Keep +Someday +Westcoast +Anything +Falkner +Albrigtsen +Torun +Eriksen +Stroband +Ienari +jigaki +Tsuruta +Ichikawa +Raizo +laserdisc +Villainy +Katsuhiko +Kobayashi +Yoshi +Hamada +Taro +Marui +Masumi +Wakayama +Fujio +Machiko +Yoshio +Yaeko +Ryutaro +Gomi +Shigeru +Tatsuo +Endo +Mikio +Haruko +Mabuchi +Takahiro +Eitaro +Ozawa +Mako +Fumio +Minori +Terada +Kayo +Ehara +Matsukata +Hiroki +Masakazu +Takao +Ieyoshi +Mizuno +conspirators +unwillingly +Kinpachi +Funasen +Yoshihiro +Yanagawa +Portions +Raijin +Gackt +Buraihikae +Nissay +Gordie +Akyurt +Tire +Yozgat +Manisa +Adana +Bursa +bribery +Izmir +Fetullah +Avni +Nurten +Swell +Canyonlands +Schaafsma +pictographs +Hevea +Cryptostegia +modernizing +tracts +mentality +Yields +sisal +Wohlenberg +Wallander +Galicians +Asturians +supplant +Suebi +Britons +Suebian +Visigoths +springboard +Cantigas +corale +revivalist +politicised +spectacles +Internacional +Mundo +Celta +Aspiring +Campaneiros +Estou +vivindo +ceo +Roca +Xacobeo +Celebration +irmandade +das +estrelas +Vieja +Trova +Chieftains +amores +Berber +Seivane +Lebedynski +Llan +cubel 
+Interceltique +revitalized +pito +pastoril +fipple +ocarina +requinto +requinta +Plucked +Pimentel +flamenco +fairs +Faustino +Rivas +Feijoo +Harps +Quico +Percussion +snares +pandeiro +beans +rattle +vieira +rubbed +Charrasco +tambourine +canaveira +clarinets +Galacia +Zamora +Irmandade +Ovalle +smashes +darling +Xuacu +Gaitas +tambourines +parades +tumbal +grileira +redonda +tambor +inflates +ronquillo +sprightly +chouteira +alborada +Alboradas +Fandango +Capriccio +foliada +fancily +Melodies +polyphony +coda +swift +marcha +baile +giraldilla +Baile +palillos +espadas +arcillos +pasacorredoiras +rumba +Kristiansand +Kjos +Luxxury +Forza +Lyngstad +remixer +Tie +cowritten +Daou +Astin +Manimal +floaty +Gees +Patti +Lovers +Geffen +Verhoeven +Saunders +cornucopia +mandolins +cellos +starlit +sirens +attics +Malins +Vox +Zoom +OTT +nugget +disquiet +Khaled +barristers +Solicitors +brokers +quantifies +Litigation +Profession +billable +coveted +revise +cultivates +scoped +intermediary +Taproot +altruistic +combating +liaisons +synthesiser +trumpet +Bikini +Fortitude +Alastair +Galbraith +Varispeed +Rave +melded +Ridgewell +Snowy +Electone +Ambiguity +Releases +Asleep +shimmering +Moteer +Maseru +Vusi +Lamola +SAFA +Klipspruit +Soweto +Ratanang +Maholoisane +blistering +tombstone +Bataung +Avalon +clinched +overshadowed +mitt +infielder +Ryne +comfy +confines +Jerpe +tallying +Hoak +NL +Cooperstown +Apologizing +ovation +Buckeye +Rayland +saluted +Havlicek +annoying +Ungar +Heywood +Hale +Broun +inglorious +Maz +FSN +Kilsyth +Nunawading +Sukkar +Misty +Elbow +Lougheed +Swifts +Narrator +Reluctant +impale +swords +debunks +sickness +unborn +keepsakes +compliments +Lounsbury +falsified +disconnectedness +dramatized +disconnected +hearted +whimsical +musings +refutes +redefines +infinity +Bennie +Oosterbaan +airliner +Vortex +ILS +Roach +Piston +Airliner +Guangxi +Guijiang +Kancha +Ass +vallani +microhylid +Ambohitantely +Jhalokati +Jhalakathi +huller +Shudhangshu 
+Bhushan +Aswini +steamers +siren +Moka +Kaori +Edgecombe +Bungalow +Foucherans +transwiki +Hooyah +Oorah +Hooah +Seabees +Dmanisi +Kvemo +Boned +Bekah +Oleynik +Peoples +Zulfikar +Cass +Damon +Groefsema +Bieke +grownups +inoperable +Supernatural +Congreve +Bram +Aurealis +Kirstyn +Pug +Trudi +Judgement +Wanniarachchi +Negombo +Kantha +Vinischaya +infancy +BAW +Sangawunu +Pilithura +Umathu +Wishwasaya +Handa +Iranganie +Mathabedaya +Daiva +Vipakaya +Wanaliya +Vivahaya +Kawata +Andare +Jeewithe +Pujawe +Mangalika +Poruwa +reinvests +externally +Discount +Denahan +CreXus +Keyes +Pingora +servicing +Policing +Locations +Constabulary +Folio +Theakston +recomissioned +Courchevel +Helgesson +Nordics +Kostner +Tuktamysheva +Nebelhorn +Ranked +Mimi +biomechanist +Cum +Postdoctoral +quadrat +Integrative +Biomechanics +Wainwright +Medalla +Oro +Bellas +Emporia +UNR +sua +movant +prudent +testificandum +infringe +subpoenas +waives +ALR +restraints +Wingo +Loud +impartial +permeated +annunciated +concedes +mandamus +Odle +voir +Swindler +excused +remanded +Continuances +procuring +Discretion +legitimately +depositions +tending +unanticipated +issuance +entitle +tecum +intoxication +unjustly +prejudiced +Surprises +Lack +confers +Failure +Affidavits +induct +Rasagola +foodie +Pujara +Swada +Sachitra +Chandamama +Oriya +Miscreants +mafia +raj +blcock +Nayagarh +Nexus +Puri +felicitated +Khurda +NAC +Raipur +Migrant +Labourers +Janatara +Boudha +Pustak +Sanjay +Rout +Sambadika +Kaas +Lykke +Presentations +rundown +hungry +dearly +Waiting +stroller +trucker +cot +shaken +mitigating +Naismith +SaskTel +Sportsplex +Hence +Grocery +Bourgeois +annulment +Binaud +INAO +Lichine +Caisse +abseiling +Aussie +Rappel +Mulgrave +Hinge +Surfers +Qazan +Pir +Taj +Almas +Bijar +Moir +Williamstown +Exhalation +exhaled +propel +intranasal +fluticasone +polyps +Fier +eshe +Convincing +Registry +Frakulla +Latifaj +Pesujkut +mezhdurinave +eu +reapportionment +Hjalmar +Nygaard +Denanto +woodcutter 
+Heteroleuciscus +Hampala +Cyprinidae +Murfree +anticorrosive +Wladyslaw +Juliusz +civics +appellations +syncretic +settler +homesteads +Quickly +Elys +abodes +rechristened +tillage +rowhouses +rude +milling +Tragedy +perish +unsanitary +Emigration +stonemason +Croton +razing +ashlar +wrecking +Demolished +pave +subcontractors +Potato +immigrate +Marcella +unionism +Joiners +reorganize +Steelworkers +testimonial +Weavers +unionize +foremen +fining +satins +unionization +Pittsfield +hoard +earnestly +profiteer +lionized +inductees +Fergy +appraiser +assessors +Adjutant +Posthumously +Attending +Greenhalge +loomed +incline +Choosing +curbing +tuitions +waiving +Mummers +Treacy +Ancestral +Fridays +lauding +notwithstanding +patronized +Texts +Wherehouse +DBA +circulars +Transcript +Esmonde +Received +rousing +applause +reciting +Chapin +ramifications +Wyndham +Kettle +Nellie +remarking +Morse +mirroring +resonates +devise +deacons +energetically +Cousin +Fitzgerald +Emigrant +Feminist +Kirkus +derision +uproots +lace +Ploughshares +uneasy +interethnic +wharfing +Barbados +retailing +overtuning +extratropical +latitudinally +solstitial +TW +sensible +intertropical +humidities +warmed +adiabatically +paleoclimate +Palynological +biome +savannah +dryness +Saras +Haribhai +Jamnabehn +Gujrati +Durgabehn +Champaran +Verrier +Elwin +Rajmohan +thrice +Motilal +cyclostyled +flogging +Navajivan +Patel +Devdas +deadlock +Disobedience +clampdown +Belgaum +Gita +satyagrahis +Declaration +Vir +Vallabhbhai +Khudai +Khidmatgar +Gaffar +Farbas +Satyagrahni +Saratchandra +Vartao +Virajvahu +Prachin +Chitrangada +Viday +Abhishap +Autobiography +Jeevanktha +Dayari +Narhari +Parikh +Chandulal +Bhagubhai +Dalal +Navjivan +Amrita +Unworthy +Vardha +Eclipse +Righteous +Selfless +Anasaktiyoga +Akademi +disobeyed +samadhi +Gandhian +Samajseva +Oven +sonnet +ovenbird +Bloom +pear +Kylie +renditions +tunefulness +improvisative +tasteful +texturing +Roza +Quoctrung +Bui +tabulate +metaltail 
+gorjivioleta +hummingbird +Trochilidae +Tone +Alis +Groven +innovator +Eivind +Ragna +Thorolf +flavours +duets +Syv +impresjoner +Dances +Impressions +seventieth +Tonebilder +Elling +Raynor +Spuyten +untiring +Physically +debilitated +prostrated +fitful +tranquil +sinewy +Quixote +lustiness +honesty +commendation +fruitlessly +expended +daunted +Pushed +reformatory +strenuous +Timon +soubriquet +Embittered +epitomize +impassioned +zeal +apt +fathom +cogent +assailable +keenly +laurels +Conspicuous +gallows +ridding +forgers +bitterness +clung +Attorneyship +congenial +tedious +vastly +Resigning +ardor +exponent +Bowery +Devoe +engendered +impugn +genial +companionable +Lipstu +Dorrian +Ivie +Okujaye +Apostle +Nollywood +Alero +Holzhaider +sculls +Lasius +clypeal +concolorous +Prairies +Nests +reproductives +Orsis +Precision +adopts +cheekpiece +Varmint +Picatinny +CheyTac +Pairs +heels +terminology +Leyendas +Niebla +Hijo +Mephisto +Averno +Mascara +Guerrero +Valiente +Volador +Raeburn +Stockholms +Stambanan +Swedenborgsgatan +Mariatorget +wharves +Hammarby +SL +Hahncappsia +jacalensis +Capps +Mondale +Dukakis +Salli +Saffioti +Hunnigan +Damnation +Furia +Widow +Arcanist +Fable +Clawdeen +Celeste +trackage +Garrick +cp +Palanca +Wala +Akong +Litrato +Noong +Nasa +Kolehiyo +Jahanie +Maiquez +Sandali +Makalipas +Huling +Araw +Alwynn +Pakpak +Binunot +Tuvilla +Ilalim +Kuwento +Rogelio +Jimmuel +Kamatayan +Pampelikula +Barros +Lacuesta +Gracio +Ganap +Haba +Rodolfo +Kanto +Wakas +Katotohanan +Anril +Tiatco +Yugto +Layeta +Bucoy +Gagamutin +Magtoto +Teodosio +Tulang +Isinulat +Bata +Evasco +Tiyanak +Loob +Aking +Bersong +Dalit +Axcel +Ate +Aling +Ako +Laban +Nangungunang +Bansa +Calisin +Damdamin +Kuwentong +Pambata +Genaro +Gojo +Kahon +ni +Nanay +Cebuano +Corazon +Almerino +Richel +Dorotan +Mananap +Hiligaynon +Gid +Sang +Arroz +Tan +Joselito +Ilokano +Danilo +Antalan +Tabag +Ayup +Bantay +Reynaldo +Duque +Kararua +Ni +Nga +Immulog +Floy +Quintos +Lucasi +Gratia 
+Tantengco +Vincen +Angelita +Bombarda +Vincenz +Collapse +Separates +Cayanan +Heidi +Eusebio +Abad +Penones +Sigfredo +Lagamayo +Katigbak +Chikiamco +Erlinda +Enriquez +Panlilio +Junco +Iglesia +Ribadesella +Kagaku +leds +Shuji +Nitride +Ammono +ammonothermal +Nikkei +Everlight +Epistar +litigations +Reichsmarine +Displacement +minefields +Damaged +Ersatz +Nominally +retroactively +compartments +metacentric +Boveri +Flak +rearmed +Tbts +Zenker +Marinestation +Nordsee +alterations +Cocos +Wilhelmhaven +Arnauld +Souda +Sabang +Lobito +Konteradmiral +funnels +gaffs +Ponta +anchoring +Schillig +roadstead +Bachmann +Cagliari +Dardanelles +Padang +embargo +Massawa +Belawan +Surabaya +Reykjavik +unfriendly +Sudeten +Bight +gunners +degaussing +coil +Oscarsborg +Oscarborg +Sworbe +Gotenhafen +soldiered +uneventfully +Skagerrak +Oslofjord +leisurely +reinstalled +disinterred +Hipper +Scheer +Heikendorfer +Deutsches +mottled +striped +mayfly +basking +madidum +Bayard +Ora +Armeria +Seriphidium +Gentianella +Paniri +Dokuheh +Seh +Qaleh +Sarayan +SNCF +ANF +Saurer +skipper +MONA +Hodges +invisus +Osterhout +HMCS +Bytown +footpaths +walkways +Leverock +Westergaard +quadrants +bluffs +TDSB +Viamonde +MonAvenir +CSCM +TCDSB +Sylvan +Lakeshore +intercity +Morningside +amphitheatre +Help +discharges +Deepwater +spill +Thad +Xinhua +woodblock +nanjing +jiangsu +Yuanmingyuan +Unsatisfied +ministering +Cosmopolitan +Promised +Mingo +Biscuit +laced +warmly +handshake +vehement +Sydenham +shuttles +Wairarapa +dials +Dispatchers +defibrillators +Qualifications +NZ +Resuscitation +Organisations +AEDs +Penguins +Cadets +Caller +FEDs +whanau +familiarisation +Topics +Healthline +Pets +PRIME +GPs +unwell +outnumber +equate +Contracts +ACC +Boards +volunteering +lifesaving +Muay +Sitsornong +Bangkok +Pentai +Ponkrit +Chor +Chaisiri +Rovira +Genuine +Falange +Wivianne +Gunnar +Pocono +Arsha +Vidya +Gurukulam +Dayananda +Saraswati +Ledian +Albanian +Sarzanese +Liguria +Paganese +Chievo 
+Portogruaro +Scommesse +Reggina +Virtus +Lanciano +Ternana +Appelt +Pires +Vicenza +microphone +Benevento +Qemal +Stafa +Klodian +Duro +Biasi +Valdet +Roazhon +slumped +Rhema +propositions +Rhematos +translatable +verbs +grammarians +disciples +Patsy +maneuver +northeasterly +quickest +coupe +idling +enticed +babysitting +sitter +drifter +recanted +detectives +Shankland +cellmate +Dealer +purporting +infirm +beforehand +fearful +Locals +hastened +Ricketts +Stokes +Garmaker +Gola +McGaffney +Rasquin +intangible +Mesrob +NEC +Latif +Jameel +McKnight +Economies +Hanan +Jacoby +impairs +nutrition +payoffs +Sylvie +flipcharts +Kremer +Nauman +penalizing +homework +RCTs +Statistic +thenceforth +socialism +Politiques +francaise +Statistique +sociale +theorique +statistique +Progres +depuis +Bevolkerung +franzosischen +Kaiserreichs +Bevalkerung +Spaniens +Portugals +Machtstellung +Staaten +Allemagne +moderne +Legitimism +Dreyfus +Faisal +Fahd +Eleventh +Mahanagar +Trana +Schieren +patrolled +Tsafack +SD +Huesca +thrashing +Cartagena +Levante +Lugo +Lleida +Esportiu +smoky +counterculture +Whiyabul +Dreamtime +Nimbinjee +Forests +loggers +festivalgoers +permaculture +Interviewees +Terania +spiritually +hotspot +espousal +philosophies +Protest +Ganja +Person +rave +doof +fluctuated +leaseholders +leaseholder +Kavasilas +slap +accommodations +backpacker +NRMA +Centrelink +buskers +shopfronts +Shops +Telford +parasitize +pigmented +tropiduri +vacuolation +skinks +cunninghami +Watertown +Fabrique +hurriedly +phenolic +MAG +Rheinmetall +outclassed +CONARC +marksmanship +controllable +LeMay +calibers +conflicting +ARPA +impartiality +unjustified +gastube +tines +disposable +Mohanur +Namakkal +Karur +Tiruchirappalli +Carlyle +pagodas +Shamanism +Poseokjeong +Seochulji +Tapgol +Tumuli +Banwolseong +Gyerim +Anapji +Imhaejeon +Cheomseongdae +tumuli +gourds +Excavated +Heavenly +birch +Bunhwangsa +Bunhwagsa +Bomun +Bannenberg +Katsourakis +Designer +refits +Bleu +Croisieres +Fusus 
+elegantissimus +Meath +Groundwork +Metaphysic +Morals +imply +worthiness +fellowmen +selfish +obeying +virtuously +overshot +intuitively +angels +Mono +Skier +podiums +DeGeneres +TED +defund +Parenthood +Unprepared +orchestrate +Watching +Silent +Aretha +Lenika +applaud +depravity +persistencia +Melting +pocket +epitomizes +relativity +Prigogine +Camembert +reappearing +Hieronymus +Earthly +dreamer +iconography +exactitude +waking +Creus +Disintegration +fragmenting +Nobility +masthead +Membrey +Gosford +Micrographics +Pty +reel +Trove +Ongun +periodical +Magnetically +Confined +Plasmas +Multilayer +Dispersive +plasmas +freshmen +Vella +Assemblywoman +Ciattarelli +Freiman +Caliguire +Palaiologina +placate +Rusokastro +Thrippunithura +Km +Malanad +tamil +Desham +desam +Mattathil +Kovilakam +Marthanda +MATTATHIL +Neyyattinkara +Pandika +Shala +Karikkodu +Pallakku +Annamalai +Ninnar +Sarvadikari +Elasmprathi +Udumbanchola +Peermade +Punalur +Painavu +Neryamangalam +Kakkanad +materializes +Kalady +Meenachil +turmeric +changeover +Kanjiramattom +Kothayikkunnu +Mangattu +Kavala +Townhall +Thodupzha +Munnar +Kumaly +Panchayat +Panchayats +Karicodu +Chandran +Safiya +UDF +Chairperson +Kuriakose +Ayurveda +Chazhikattu +Archana +Azhar +Ezhallor +Smita +Moovattupuzha +ICUs +Thommankuthu +Kaliyar +Palkulam +Feet +hillock +Olamattom +KSEB +Ilaveezha +Poonchira +Spread +Ilaveezhapoonchira +hillocks +Kudayathur +Pathanamthitta +hydrolectric +Kulamavu +harnesses +Billion +permissions +graced +paragliding +buffaloes +Murugan +Vasanthiyum +Lakshmiyum +Pinne +Veruthe +Kadha +Samastha +Keralam +Pappy +Swapnam +Kondu +Elsamma +Enna +Marykkundoru +Oridathoru +Sringaravelan +Swapna +Evidam +Daivathinte +Drishyam +Om +Uthup +Thoppil +Kattappanayile +Rithwik +Swarna +Aadu +Kaypa +sleepy +Kaatadikkadavu +midlands +ghats +Pala +Kattappana +discoverers +biosensor +magick +Tempi +Nellis +Soror +invoking +Agape +reconstituting +Silenus +formalising +jure +AL +Motta +Germer +Marcelo +preeminent 
+Sangrael +Cordis +Cincte +Serpente +Stansfeld +aspirants +Conversation +Adeptus +Account +Occultism +illuminated +Pyramidos +Devoting +vel +Folly +Sutin +DO +WHAT +THOU +Kaczynski +Churton +Biography +Macfarlane +occultist +esoteric +Aberth +Bransford +forewords +Vel +Aeon +Inward +Liddell +MacGregor +grimoire +Footage +Maclise +MacLise +Kugelberg +Swofford +Psychic +Fractured +Reset +garde +Christopherson +Colegio +Ospina +Cipriano +Mosquera +Subsequently +Rionegro +Largacha +Eliseo +Payan +Dios +Tanco +Eustorgio +Borrero +Regeneration +Panteon +Proceres +Notman +Folsom +Dimitri +Hadzi +Doner +yew +hacker +Hagbard +DOB +Urmel +Egg +rained +Giggleswick +Chatham +SAGE +overviews +Perspectiva +salsa +Gilberto +Lo +Nuestro +Syracusan +syracusanus +Registrations +Coughran +Birrpai +Darkinjung +hearth +Lancelot +Mulubinbakal +Mulubinbakalleen +Gidley +unruly +Kingstown +commandant +gaol +Nobbys +Prisoner +Steamship +Namoi +plied +shelled +Parnell +breakwall +Merewether +Waratah +smelting +Broadmeadow +Sulphide +Hemisphere +Candle +Bulker +throughput +arresting +Longworth +deco +NESCA +Berlinches +Albacete +Fuenlabrada +Roda +Totmacher +sterilization +shipboy +Elbe +epileptic +ate +plundering +Pleid +Zorge +Hoffmann +Konrad +Axthieben +zonal +Kriminalpolizei +Schutzpolizist +Celle +Landgericht +returnees +boastfully +speculating +Hula +Steelers +Gomez +Tropical +Marilyn +Gambrell +Mufasa +Rutaceae +glandular +boronia +dictation +Procured +negating +mixtapes +befitting +Buzz +complimenting +deactivated +Gabrielle +Pixel +Hit +Blazing +scouring +farmlands +Telly +Morasko +disproving +regmaglypts +Aerolite +Meteorites +intrigued +Hockeyjournalisterna +Teesdale +frontiers +Segedunum +Pauls +Gospels +Higbald +harrying +heathen +rapine +Northumbrians +Summers +sunnier +coastlines +Dundonald +Leblanc +alum +hydrochloric +fertilisers +Urlay +Nook +Egglescliffe +Sadler +aniline +alzarine +dyestuffs +Solvay +invigorated +caverns +strorage +rationalization +anhydrite +creosote 
+Nylon +Invista +ovens +naphtha +refineries +biorefineries +biodiesel +bioethanol +aromatics +impervious +glaziers +fifths +Bottleworks +underpinned +Asa +Yarm +Seaham +Wynyard +cauldrons +smiths +coals +cylinders +Humphry +Bishopwearmouth +Whitehaven +resounding +Monkwearmouth +Thatcher +gasification +Accessing +lifeblood +ironmaster +Lowthian +resilient +diffused +furnishes +neapolitan +startles +haunt +invincible +Lackenby +boomed +ironstone +Menville +Hendon +shipwrights +Pickersgill +Haw +Thorneby +Pearse +Ribband +seagoing +collier +Bowes +Mitchells +Slipway +Continent +Hunters +colliers +liners +transatlantic +Riband +Rosse +slumps +Armstrongs +Elswick +Palmers +Hebburn +Shipbuiding +Furness +antipodean +Cramlington +UKs +Netpark +Printable +Centric +Logistical +Tesco +Value +Biotechnology +Renowned +foreseen +Turbinia +turbo +Whitworth +crane +Shortages +acrylics +Nayernia +spermatagonial +UKTI +Employers +subsea +Estuary +CPI +biofuel +CHP +Sembcorp +Fabrication +Stonehouse +Steelworks +Matthey +Catalysts +Fujifilm +Dyosynth +Biologics +Exwold +extrusion +Aldous +perochemicals +Biffa +polyurathanes +Biofuels +Biodiesel +Greenery +Vertelus +Ineos +Nitriles +Vopak +Marlow +Santander +mortgages +Hazlerigg +bakery +Greggs +Longbenton +Findus +Contributions +Rowntrees +Armaments +Siemens +ingredient +Heineken +Carlsberg +Ale +Dunston +Tadcaster +Myson +Petards +ANPR +countermeasure +dispensing +AEI +Cables +SAFT +Batteries +lithium +Bellway +Cobalt +Remotely +Trencher +submersible +dealership +Halshaw +Hylton +Calsonic +Kansei +trim +Pennywell +Gestamp +oven +tyre +Liebherr +cranes +Berghaus +Vaux +ScS +Sofas +EDF +Doxford +Alcan +Aluminium +Hammerite +Cuprinol +Prudhoe +Delaval +aftershave +Clairol +Spice +McQuay +Bassington +Phileas +Fogg +Biscuits +Consett +caravans +Displays +cathode +Pity +Framwellgate +Esh +Schmitz +Cargobull +Harelaw +Electrolux +Thorn +Zumtobel +Heighington +Slightly +TKA +Tallent +hover +mower +Potters +GlaxoSmithKline +NSK +GWA 
+Gliderol +Mecaplast +Reckitt +Benckiser +indigestion +Walkers +Argos +Loans +Kitchens +Shrove +Ulgham +Organised +Spartans +Greenwell +Crook +Copas +greats +Hughie +Wilf +Raich +Gurney +Moncur +Fairs +incidentally +goalscorers +Porterfield +fondly +Waddle +Shearer +Woodham +Whickham +Geordie +Shipley +buckthorn +Hagen +Alliss +glowingly +undulating +Carris +Barabzon +Berwick +Ryton +Pandon +Brass +Brancepeth +Rockciffe +Slalely +Seve +Ferrie +Slaley +Priestman +unassailable +Cram +marathons +participateing +Kenenisa +Haile +Priscah +Farah +Butterwick +devolution +saltmarshes +bogs +Coquet +Farne +seabird +roseate +sanctuaries +wading +Saltholme +watcher +Magnesian +conceptions +stillbirths +Output +Indices +indices +LSOA +Wansbeck +Derwentside +Bernicia +Eurostat +privatisation +ECML +TransPennine +Leeming +controversially +Thirsk +rerouted +Dere +Hexham +DFDS +Haugesund +Bodies +Outcomes +recognising +Ingleby +RC +Whitley +Marden +Hurworth +LEAs +Mowden +Spital +Radios +Bilsdale +Chatton +Wooler +TFM +Rathergood +Ferryhill +Shildon +Pancras +Ingram +Kaloo +GCE +Taman +YB +Najib +Razak +Eco +Grimes +Kothai +Cyberjaya +RM +Hospis +Angkor +Sorority +Dunbar +Advisors +Boring +Pondok +Indah +Arbitration +CAS +Hirobumi +PMs +cumulatively +Yasuhiro +Nakasone +Barkaboom +abraded +Farafra +oasis +outcrop +saltating +bouncing +dreikanter +ventifacts +striations +Curiosity +Martian +Schist +Granite +Outcrop +Llano +Atacama +Altiplano +interconnectors +Tandragee +Louth +NI +Londonderry +Coolkeeragh +Carrickfergus +demerged +privatised +Viridian +EirGrid +Straidhavern +Taoiseach +Acquired +transformers +Carn +Omagh +Dispatch +Utilities +diagrams +escalation +escalate +linesmen +Eckoh +Answering +IVR +caller +HVCA +Normally +NASL +Culbertson +Landrum +Slobig +Koopman +Injury +Hampered +Estadio +Monumental +Vespucio +Liberti +Programmes +Organisational +accredits +Bereavement +Loss +Hospice +Youtube +instagram +Titikaveka +Avatiu +Pollentier +Flandria +Michelangelo +Dong +panam 
+IC +Tongyeong +Hwanghak +Dongshin +Daeam +Yongun +feathered +rebroadcast +smash +Poston +Longtime +ritualistically +bombastically +hermetically +mystical +billet +divining +Jokes +Sack +bloated +fleas +crotch +proctologist +Songwriter +Merritt +Saver +Letterman +Shaffer +Tract +farmstead +Barn +Gliricidia +lutea +Polkowice +sire +Canonbury +Randwick +AJC +Sunline +topweight +kilos +Flemington +Vientiane +embodiment +snowmobile +Snowmobile +WMPL +Snowdance +superstitious +Negaunee +Irontown +Fishing +bonfire +Teal +Snowblower +myopia +presbyopia +symposium +Optometrists +topographic +Cubillas +caretaker +napping +Sillalah +Myotis +sodalis +Chrosomus +cumberlandensis +Ecosystems +Dendroctonus +frontalis +Adelges +tsugae +Agrilus +planipennis +ash +Lymantria +Logging +Robison +doorway +Hensley +Skyland +Campground +tripoint +visitation +cancellata +IN +RNs +Woonsocket +RI +disaffiliate +Copley +clinicians +Rehabilitative +Retarded +Blais +VEF +Anwil +Eldo +Eurocup +Sidigas +Tecnyconta +EuroLeague +Ainars +Rytas +bandstand +Brandons +gales +Compensation +Impresario +Piers +rig +Openwide +redeveloped +Aravali +Kachchwaha +Rajput +MahaRaas +fountains +greenery +Aravalli +Maharaas +Ashvamedha +lushed +Garbha +Griha +parisar +Dharbawati +Lamhe +Jal +Anthyllis +Vulneraria +Scopoli +Primo +Nebiolo +Odlozil +sprints +Femi +Ogunode +SARMs +Organic +Sasha +Varlam +UMgungundlovu +Mkhabathini +isiZulu +eMkhambathini +acacia +Pietermaritzburg +Cato +tutoring +Shackleton +Formers +Phab +NG +Advantage +CNG +Reinvestment +Deloitte +Touche +WorldNet +BackOffice +Blooker +Fortin +Beauce +whitened +fittings +benefice +Blagdon +Ubley +emphasising +NETwork +NRENs +GEANT +Peering +JNT +UKERNA +UMRCC +SERCnet +sped +Coloured +OSI +backbones +SMDS +quintuple +winch +sponsons +propellers +ASDIC +Ivanhoe +Uxbridge +Leros +Forni +Humbert +Firebirds +Batavia +Muckdogs +Greensboro +Grasshoppers +ups +initialized +Verbny +Staropoltavsky +Zichy +Zich +Phir +Hera +Krazzy +Deewane +Huye +Chalte 
+Pagal +Pehchaan +Kaun +STAR +Chunky +Jhalak +Dikhhla +co +Kaanmasti +VJ +Devrek +Ducret +Screwgun +skronk +kaleidoscopic +groove +conjuring +uninitiated +Dahlen +nuances +Ture +Basie +Geraldine +Aramapu +harelipped +bondsman +worded +racism +monkeys +bigotry +stupidity +valueless +shrewd +resourceful +shunned +timpanist +Wayward +wigs +inordinate +Sedaris +knuckles +whore +whores +Hedaya +mauled +traumatizing +Klan +Horsemen +stepmother +slithered +sugary +sweetness +Raisin +drunkenly +despise +consoling +masturbate +entranced +Speedo +verbally +Stew +unthoughtful +devoured +Trojan +nude +idolize +shame +sneaking +resentful +McGee +chronologically +Dinello +letterman +romancing +candlelit +clueless +callous +Cherri +abstinence +Puffybush +Duffy +pops +caskets +Jazzy +Pinatubo +Abdo +reciprocate +crestfallen +disappoints +obsessed +nerdiest +Megawatti +Sukarnoputri +Littlenut +grasp +sleazy +Flatpointers +paranoia +Frequently +bullies +deadbeat +intimidated +Beavers +Tickles +jocks +loneliness +Stare +arranges +heavyset +Communes +Neefs +Cill +Chaoineadh +Ita +lamentation +Doonbeg +lodgings +Godolphin +Gleanings +quaintest +plies +liberality +Chatterton +Herne +cranny +mercifulness +scarifiers +extols +traveller +Exactly +Fulmar +Farrihy +Troon +Inishtrahull +consignment +evocatively +Homan +Rathcormick +replenishes +headland +triathletes +triathlon +toughest +triathlons +Triathlon +Cousteau +SCUBA +Tivoli +walkers +Dunlicky +Pollack +tees +tee +Ennis +Colour +Izibor +Hostels +Hisatsugu +NORINCO +warheads +radars +clapboard +redwood +milled +Roscommon +mule +Livermore +belfry +ALVHS +rents +Comorian +arbitrates +Groups +Operated +Aimee +Anshe +Chung +avatar +Ailin +Graef +Referred +arbitrage +EXAKT +Bergman +scripting +Starwood +BMG +OnRez +Linden +WSE +Wii +Waikeria +rapist +Banditi +Praga +Tatishchevo +Tatishchevsky +Saratov +Kosovan +KF +Regiments +Transformational +Millionaire +bestsellers +Seychelles +Loretto +principals +Expat +expatiate +Wink +Grow +Types 
+interlink +Butterflies +Screw +Disciplined +StepUp +Bullet +Rogan +Satchel +NAL +Ruppert +Umaglesi +Dinamo +EBM +bandmembers +Berlaymont +Atomium +Codenys +Corbijn +polite +COMA +FreeDB +Eskimos +Aghast +ThouShaltNot +relaxed +carioca +Chowder +Disappointed +Dacey +Competing +ECRC +PPL +Tubridy +maddogs +sevens +Wheeling +Schellbach +Bethmann +Jacqueline +Dahmen +Benz +convertible +Higgins +Muyaba +bulldogs +Prosper +Elton +Lameck +Ashlon +Courage +Chaibva +Tafadzwa +Kasiyamhuru +Maroto +Nyasha +Misheck +Tangai +Rhodesia +Boxer +Dereck +Chisora +Farai +Rwodzi +Matonga +Mugabe +Chengetai +Ngoni +Makusha +budding +Harare +Poseidon +Tsautsau +Clerks +Feist +Velux +SVT +schizophrenia +incarceration +Content +animator +Snip +stripping +Priscilla +Mayflower +imitating +delusions +cheerfully +lonely +elderberry +spiked +perished +digests +Frankenstein +bumbling +deprive +uncomprehending +Worrying +null +lustily +summed +macabre +Lardner +Higham +Forties +Bethel +Mennonite +Gunter +Brewsters +synchronize +specification +debugged +Symbolic +compilers +queues +heap +batches +supplanted +Automated +reassign +multistep +monitors +spreadsheets +gadgets +robots +Mythical +sixties +MFT +Overcompensating +timeshare +buggy +SCOPE +MACE +Kronos +NOS +DTSS +TUTOR +chat +patterned +Burroughs +ALGOL +ESPOL +Unisys +GCOS +MAC +TENEX +Batch +BPM +BTM +UTS +RSTS +VMS +VAX +Fortran +iSeries +minicomputers +Motorola +Zilog +rudimentary +hobbyist +identically +kilobytes +circuitry +MMU +Pong +porting +Yaroze +interfaced +videogame +accreted +Shack +commercialized +PowerPC +SPARC +AmigaOS +retrocomputing +EPOC +NTT +Docomo +Fujitsu +Symbian +PocketPC +ESX +KVM +virtualization +workloads +Leerdamse +uit +LRC +Klasse +Millennial +Certaldo +Stimulate +ECTC +assistive +Warehouse +Elmhurst +FieldTurf +retreats +Mattamuskeet +NRC +Intramural +Belk +paddleboards +Manteo +Estuarine +Harriot +Admission +rehabilitative +Addictions +Disorders +Dentistry +stutters +gastric +diabetic +Biofeedback 
+posttraumatic +fertilization +Myrmekiaphila +neilyoungi +Aptostichus +Algonquian +repositories +Hoover +Communism +communism +Moseley +Lambda +Theta +Cheerleading +EDC +departmental +Chancellors +officio +Mitchelson +Buys +Employer +McDougle +Nia +Imani +Acclaimed +Bullock +Roughriders +Valuable +Alumnus +Garrard +Titans +WWE +seaboard +outpouring +leftover +pitfalls +mire +Friendship +Empowerment +Jephcott +LTTE +Eelam +liter +technicians +Thiraimadhu +Seedling +Kallady +Navalady +waterless +conducive +resettlement +destitute +Wittig +Latinized +ecliptic +subgiant +Bayer +IAU +WGSN +refitting +untapered +struts +designators +Bomber +Refueled +refueled +Corp +pag +Hive +DRT +signified +Schauer +Beale +Oblivion +Wookie +Clutch +Passive +remoteness +inaccessibility +Adequate +relentless +unforgiving +Tippecanoe +Rundbogenstil +Mayer +Universalist +UU +Neolarra +vigilans +blogger +Asfour +Wafa +Jabal +orient +Firas +Otaibi +Shaher +outsource +revolutions +blogging +buyers +Hosni +ousting +Colloquial +caricatured +congratulations +satirizing +Bashar +Tiltsch +Macedo +Reguiba +endorheic +Touggourt +Chaoyang +puppetry +Wigman +Truda +Column +Massine +Stated +Repertoire +Moog +synthesizer +Danse +Contemporaine +Menotti +Vermeille +Capezio +Circulo +Criticos +Tiffany +Nik +traveler +MOBILES +Props +Mobiles +overused +barefoot +tactile +motional +clang +stomp +Stratus +Scenario +fiberboard +aural +theatergoer +sensorium +spectator +Receiving +spotlight +Abrons +KALEIDOSCOPE +AND +CANTOS +headdress +accentuate +Mayboroda +Maiboroda +Woodroffe +khum +Banteay +Meanchey +Elophila +tenebralis +zigzag +Lebling +Deadline +ZIL +Ashcroft +bluebloods +rub +moseys +gunbelt +stashed +snooping +feelies +Filius +popes +Scipione +Ludovico +Garzia +Mellini +nuncio +swarm +Cyclostoma +molluscs +specie +trematode +Aspidogaster +evaluative +Addiction +IVV +Visualisation +contextualisation +dataset +Matusevich +Rodionovich +Arslanov +Kyzylzhar +Ugliness +Nu +Rosina +SIAA +cleanup +yearbook 
+Tennessean +fanned +Woodruf +flied +slammed +Jetty +juggled +Neely +Kuhn +McCullough +Benning +Scotty +Sewanee +punted +Grailey +Berryhill +onside +Swenson +Zipp +Kaya +Ruthin +Curate +Culham +Cuddesdon +Crippled +Intellect +Hafler +Incremental +ISBN +booklets +ego +percussionist +Colley +musique +Brutum +Nuisance +Skozey +Interrobang +Orchid +Collider +Seghezzi +Birgitta +Mannheimer +Swartling +Sommar +Sveriges +Bara +ett +Bortom +varje +rimligt +Quicksand +FLX +Processen +Czarna +Krasna +Zalesie +gminas +Policzna +Kaphuka +eloquently +solidify +Onesimus +Muzik +Maskal +Exhale +Altagracia +Esteban +Amadeo +unforgettable +symphonies +Ravinia +Sphingidae +Palaearctic +pupa +Larva +somites +ocellus +blotched +Rubiaceae +Uncaria +tailhorn +eyespot +instar +Caterpillars +Alstonia +Pantai +Langkawi +JKR +Erzgebirge +Aue +Eintracht +Intertoto +Stover +rap +innuendo +countrified +hillbilly +Diffie +Moriconi +canoer +repechages +Tunzha +Paspaulskoye +Choysky +Isha +Bossa +Lozanes +Gipsy +Ilka +Illi +hemorrhage +Gee +Deutschland +sucht +ein +Does +Europop +europop +Bites +vibe +Chico +Say +Huey +Ha +Hip +Dancemania +beatmania +lookup +operands +accumulator +IAR +Raisonance +Interrupts +Indirect +Load +opcode +BCPL +BCCM +CPW +LDW +Calls +intraLATA +IntraLATA +LATAs +IXC +intermixed +uplinks +COs +softswitches +ITSPs +interexchange +Presubscribed +Codes +dialed +CIC +prefixed +slamming +cramming +PICs +humiliated +elope +dowager +abduct +Tullgarn +Pomerania +wast +marquess +Liano +Bigot +Ulrica +Brahe +Charlotta +Freemasonic +confidante +Eleanore +censured +Gustavian +poisoners +Rydboholm +Slott +Barnbruden +Pottungen +Laestadius +uphill +Zwalm +Michelbeke +Eneco +madagascariensis +rainforests +nostril +Andasibe +Ranomafana +pollen +quarrelsome +simulates +symplectic +Poisson +Ginzburg +Kobe +Lempira +Paz +Lucinda +Orytha +Asbury +DePauw +Wasco +Corvallis +Pound +Whidden +Goetz +Olds +carcinoembryonic +Tripos +Lambeth +Tait +Powis +Incorporation +especial +desirous +sober 
+affording +Lensfield +benefactions +bequests +rebus +woodwork +panelling +surcharge +Blomfield +tricolon +postgraduates +traditionalist +Demetri +cloisters +redecorated +dexter +countercharged +crozier +Mitre +Annulet +Corinthians +corked +JCR +carte +nobis +donis +quae +Tua +largitate +sumus +iis +muneribus +Tuis +laudem +gratisque +animis +Jesum +Christum +generosities +thankful +Benedicamus +Domino +Laus +gratias +disrespect +irreverence +Lecturers +Onora +Regius +friendliest +supervisions +Servery +Swaps +dine +gowns +inline +Streeting +treasurers +JCRc +MCRc +dramatics +Kiwi +Newnham +Cromwells +sexism +hazing +leanings +Controversialist +equinox +accomplishes +Cuppers +Vickerstaff +Bursary +headliners +Mumford +Rowers +Hollander +RHP +RIBA +Cloisters +Chubb +Deben +Rt +Sentamu +Pentregarth +Beckingham +Winkett +Zia +Mody +Tambon +Nai +azurite +Sudham +Pratu +footprints +Khua +theropod +hen +Discoveries +Phuwiangosaurus +sirindhornae +souvenir +Arkadia +Astarita +preeminence +Appelbaum +Wolverine +Lanigan +makings +ionic +Poelzer +Schedlosky +electrification +teetering +transformer +repurposed +Buses +Ish +Shekol +emails +Gambling +dealings +imager +Zamboni +Furthermore +Shana +Traurig +launder +Montague +Medfield +Solway +Lomond +Staffa +Flannan +geomorphology +Mull +Tiree +Sanday +Archaean +Lewisian +Gneiss +Corryvreckan +whirlpools +Pentland +unenforceable +Comhairle +Siar +Pabbay +Fuaigh +evictions +Hebridean +crofting +Noss +Declines +Arran +decentralise +Uists +Munros +Marilyns +Grimbister +Lochindorb +Leven +Inchmurrin +Maree +Ruairidh +Scalpay +Bernera +Hunda +Burray +Walls +Housay +Oronsay +Monach +Ear +Shivinish +Hirta +Orkneys +Rosyth +Ronaldsay +Ailsa +Colonsay +Egilsay +Whithorn +Isleornsay +Brint +Gluss +Sullom +Voe +firma +Liever +Awe +Aoidhe +Glas +Holmhead +Holmhill +Oldany +Mhealasta +Mealista +Crannogs +Nhema +Rhodesian +Chachacha +Chinogwenya +grooming +Burombo +Upenyu +Chamson +Tapiwa +Munyaradzi +Muhlahlo +Matamba +Mavedzenge +Tumba 
+Matongo +Tongogara +Amai +Mangoma +candle +Unki +Borsak +irrecoverable +Especially +theorizing +Ogarkov +Technological +foes +globalization +BattleSwarm +Builder +ardently +blunting +Rommel +Khafji +linchpin +experimenters +airships +dissociate +paragon +epistemological +cropped +distancing +disassociation +dehumanizing +Defeat +Londoner +Ask +earl +dandled +constable +psychical +Wendell +swindled +Worplesdon +Thistleton +seedcake +lapses +Chuffnell +Watkyn +Bicky +Bickersteth +masquerades +disguising +walrus +impersonates +Witherspoon +rehires +extricating +belle +fend +forewarn +youngish +dignified +Woostershire +Jaggard +Keggs +Distress +godlike +gleaming +Demonstrating +banjolele +courteous +dutiful +discomfort +expressionless +eyebrow +twitches +imperturbable +beard +Boko +sweaters +flannel +visibly +tottered +supposes +persuasive +soothe +calming +irate +leaps +beholds +acquaintances +Mayfair +Glossop +jewelled +gentlemanly +intelligently +unobtrusively +noiselessly +capably +Incredibly +encyclopedic +beverage +Worcester +pepper +typing +Finn +incapacitate +unscrupulous +Capable +Sippy +Sipperley +Inferiority +raincoat +boathook +Impending +gong +cosh +gratefully +crossword +hobbies +Appreciating +Spot +unworldly +devising +extricates +machinations +Somehow +frightful +appreciates +gratifyingly +garish +amiable +Mentally +Hearing +softened +commends +sofa +swiftness +Encarta +episodic +Faulks +stately +Brinkley +Totleigh +inconsistencies +Offing +Christmases +Edwardian +idealised +Illness +Futak +Catalunan +Talomo +Councilor +Sectoral +scandals +Sumera +councilor +Sangkola +Encoded +guanosine +triphosphate +POST +mimics +ternary +multicopy +GTPase +GTPases +hydrolyze +peptidyl +transferase +anticodon +loops +dissociates +Protein +codon +deacylated +Ribosome +dissociation +isolates +dissociating +thiostrepton +stably +dityromycin +Dityromycin +aureus +paralogous +subfunctionalization +archeal +homologs +rpsL +rpsG +tufA +spd +Rennae +Meghann +Shaughnessy 
+Clijsters +Orchard +NIB +SFAI +Fingal +Drogheda +Sligo +Derry +YC +Shamrock +PFAI +BATE +Alkmaar +Maccabi +Zenit +Horgan +Rejoined +Melendez +Overbrook +Britney +Spears +Osbourne +Ozzfest +Suburban +Noize +Odile +Singa +Odiah +Sidibe +Minutes +Midnight +Thousand +turntablist +LPTVs +rumor +SONiC +iPad +Encore +Autoball +Lanxess +Makuhari +Catucci +irreparably +Florino +Artistdirect +Unterberger +Popdust +Grierson +Hyden +preening +Vaguest +flipping +crappy +Playoffs +Perri +Tomkiewicz +electro +undeniable +intensifies +Greenwald +trippy +Genero +Jem +slid +Clock +Beetles +Dharmaratne +Foursome +Saxophonist +Shiromi +Dalrene +Noeline +Winslow +Priya +Eranga +stardom +Kella +Bookshop +hangouts +Bastian +Wijewardena +CHB +Sarasavi +Kelaniya +Harischandra +programmable +Marle +Hilliard +barring +Newcomb +Pretlow +memorialized +Tidewater +Myra +Holley +Chompoothip +Numanohara +Ishikari +rehired +Rehired +Nonappropriated +Instrumentalities +TSP +withdrawals +redeposit +voids +entitlement +repurchased +furloughed +severance +downgrade +recalculated +factored +cashed +Married +annuitant +Erroneous +annuitants +Tuxedo +Badini +Bouchet +Anno +Domini +Picking +Charley +staffs +Strip +Binion +reevaluate +Greenspuns +simulcasts +Eyewitness +NewsOne +cornerback +Lewisville +Divisional +Playoff +Redman +Gocong +Courier +bragged +Fullback +lineman +Ingle +Evas +Bourbon +adjuster +mishaps +Hagemeister +rescheduled +Evanston +APFA +Luckily +Brecks +rushers +Conzelman +Nevers +FIH +Relegated +Seeger +Rockhampton +Capricornia +ALP +QLP +Juno +CFRB +Lightfoot +Gregorash +Dodson +Myrna +Lorrie +Mercey +Cockburn +GRT +Eupoecilia +anebrica +Bandahara +Brettridge +Cope +Jermyn +grandchild +disobliged +Gwynnes +Hereford +Gwynne +sinecure +Jacobite +Ratoath +folklife +scoop +originators +inventors +templates +tinted +Deacons +catalogs +woodworker +lidless +pail +meticulously +acrobatic +stunts +sprouts +treehouse +bolts +pestering +circuses +starve +Baltinglass +eagerly +Stinkers 
+uncontrollably +laughter +antidote +tame +Fumble +devious +Landry +Determination +Midseason +AAC +Mavericks +Sixers +microbrewery +secondhand +personify +trucking +Schell +Burly +Lyndsie +organelle +Penetrans +pneumoniae +CDS +orthologous +hemadsorption +mpl +immunodeficiencies +Streletskoye +Krasnogvardeysky +Belgorod +actuators +beamforming +switchable +multimode +reshaping +soundtracks +Kemner +Measles +Kiraitu +Murungi +Kamau +Oraro +Nairobi +arap +Moi +Harun +Leakey +Anti +Corruption +Michela +Githongo +hampering +Mwai +Kibaki +overruled +Lumumba +prosecute +Arusha +Uhuru +Kenyatta +Arbitrators +Moray +Harrap +sparkling +longs +unrecognisable +inexplicable +Lav +Jadavpur +Vani +Prakashan +Jemeel +Moondoc +NoBusiness +pianoless +Ornette +slurred +pleasingly +plangent +Berezovskoe +Berezovsky +Yekaterinburg +raskolnik +Yerofey +Beryozovka +Pyshma +Sverdlovsk +aikinite +vauquelinite +cassedanneite +pyrophyllite +phenicochroite +embreyite +Lehmann +chrome +earthfill +Penganga +Kalamnuri +Shembalpimpri +Pusad +Umarkhed +Hingoli +shelve +Highlighted +Cutters +Lastie +guidon +OffBeat +Listeners +lingo +drumsticks +ashtray +Hawkins +Skeleton +Rainforest +Oren +Warshavsky +affirming +Atco +Wexler +Leiber +Longhair +Irv +Bannister +Chipaka +Jockamo +Shaweez +juiced +hustlers +Polynesian +Dustin +Tench +Hangover +McKeown +clefs +outfits +Lyn +Ricochet +Niebergall +Linguists +nonsensical +melange +nou +Linguist +Kimball +fehna +Chokma +Offbeat +Evershed +Amuzu +Akan +Ewe +Ewes +Vodun +rites +Musicologist +Sublette +Kata +Taino +Fi +Mandingo +Sybil +Kein +inclining +vengeful +Petwo +ethnologist +Rigaud +Shalodeh +pentameter +Vilcanota +Cusco +Canchis +Puno +Melgar +Pomanota +Jatun +Navigator +Radiohead +Teenage +Orcadian +metaphorical +Oto +afterdeck +Bolkow +marines +Conolly +drills +Kamilaroi +Unaipon +Retitled +playscript +Kooemba +Jdarra +UQP +Lesbians +cyberspace +delve +Kiowa +converging +Quivira +Moutrier +Ihor +Oleksandr +Acidman +Urayama +Satou +Takeshi 
+schoolboy +Staffordshire +Hednesford +Brom +Brierley +LIV +SoH +Anodize +Lazy +Mutha +Fucka +Unbreakable +Josie +Dorigen +Disuye +Kilowatt +Anonymous +retrieving +Augsburgian +Flintheart +adventurers +Nicolaus +Woodchucks +Guidebook +Unknown +digs +stinky +Oblivious +untold +Roley +Strawberry +Bettignies +Synanthedon +Gadakh +Nifad +Partyfrom +Mahany +Plata +Groundbreaking +Twentieth +Schofield +Contractors +sandblasted +rotunda +phytochemicals +clover +alfalfa +chickpea +phytoestrogen +amide +hydrolase +reductase +dihydrobiochanin +syndicator +Skid +Lofts +Hagan +Supportive +Debbie +Burkhart +RSS +Zawisza +Auditorium +EST +PST +SAG +honoree +Carpet +DiCaprio +Revenant +Brie +Larson +Newsome +Beasts +Vikander +Gerda +Wegener +Liev +Slattery +Tucci +Keir +Artur +Kabelo +Raelene +Dayna +Dane +Natascha +Schalk +Cody +Monelisi +Tomoki +Malibongwe +Anneli +Thapelo +Inge +Yasca +Reon +Fleur +Avril +Latifah +Spacey +Cards +Underwood +Annalise +Keating +Transparent +Maura +Pfefferman +Aduba +Raquel +Laverne +Kimiko +Vicky +Selenis +Taryn +Dascha +Samira +Wiley +Boian +Levan +Borislav +Rowley +Erol +Milen +Sian +Annabel +Danko +Sarandon +Munhwa +Hyun +miscellaneous +textual +mystiske +Magha +auspicious +mythologies +Gayatri +Mantra +jap +Savitr +puranic +Hinduism +Rig +steeds +symbolism +tremulous +thou +fastened +ratha +Zodiac +awaits +Ugadi +Kashyapa +Aditi +Kamboj +terminally +Bhisma +breathed +Rathasaptahmi +ekaadashi +Sapthami +fervently +Konarak +Biranchi +Bhimdev +Chaulukya +Navagraha +Martand +purification +benevolence +indulges +Argyam +Naivedhya +Sahasram +processions +Aak +Erukku +Calotropis +Gigantea +Tulsi +samidha +ceremonious +Kolam +Cowdung +cake +Vaishnavite +Srirangapattana +Venkatesha +Venkatramana +Mangalore +Kodial +Teru +Mangaluru +sapthami +Brahmotsavam +Malayappa +Swamy +Sridevi +Bhudevi +deities +mada +encircling +Balaji +RathaSapthami +prabha +Chinna +Sesha +Chakrasananam +Kalpavriksha +Sarvabhoopala +Venkateshwara +Veedhis +Tirumala +Tirupati 
+Corwood +Cricetidae +Isole +Nuovamente +Quattro +Suoi +lizards +purports +Trachylepis +atlantica +Amphisbaena +amphisbaenian +Storrs +mammalogist +Sul +mys +Pseudoryzomys +molitor +Oryzomyini +Sigmodontinae +distantly +oryzomyines +posterolateral +palatal +spinous +Skull +occipitonasal +postorbital +obscuring +interparietal +squamosals +jugal +incisive +foramina +parapterygoid +alisphenoid +ovale +subsquamosal +suspensory +tegmen +tympanic +masseteric +capsular +mandibular +incisor +PI +uniserial +myomorph +anteroloph +posteroloph +anterolophid +mesolophid +mesoloph +cusp +posteroflexid +entoflexid +lingual +entepicondylar +perforates +humerus +pelvis +hindlimbs +femoral +acetabulum +rectum +femoris +vulcanism +preying +passer +Quarterback +Gophers +Shortell +kickoffs +turnovers +Utes +Kalil +negated +bettors +Carswell +Toney +Grimble +slack +tailback +rushing +Nickell +Jawanza +Starling +Robey +JEMP +soundboard +frenetic +Collette +musicianship +filler +workouts +wherever +Zouheir +Helmi +Wafic +Nsouli +Bechara +Riad +Galatasaray +Militaire +Hisham +Jaroudi +Tammam +Hekmeh +Abou +Tygran +Manara +Ghaleb +Richi +Bou +Mazen +UAE +rebounder +Hariri +Maktoum +archrivals +doublet +Eskedjian +Nour +Chkeir +Talar +Marcusian +Nisrine +Dandan +Shandra +Trivirostra +corrugata +cowries +Giurato +Stampa +sizar +Broadgates +prebend +prebendary +Bintree +Oratio +Obitu +Rogeri +Aschami +Linguae +Spicilegium +Scholae +Westmonasteriensis +Progymnasmata +Institutio +Lexicon +Joannis +Crispini +Constantini +scriptis +Humphrey +Breviary +Brytannicae +Gratulationum +Valdinensium +Disputatiunculum +Grammaticalium +Strathfield +Trot +Walkley +Knew +grandiflora +Aiah +Lebbie +aerated +haptera +elevates +Lebbiae +Koukoutamba +adversely +Dearbhla +Edgerton +romantically +reaper +cockeyed +optimist +broody +winningly +flawless +romcoms +urbanites +ridiculously +flecks +sweetly +Ayres +viol +Doughtie +Bertrand +downbeat +discern +galliard +veiled +Yow +Walke +soe +utilisation +jostling 
+Petrarchan +staunchly +patriarchal +madrigal +viols +Lachrimae +Teares +Figured +Passionate +unattributed +Virginal +tonality +modal +Pears +Bream +tenors +Edin +Karamazov +Trow +Hawkesley +Yeomanry +Limb +interchangeably +fcc +bcc +substitutional +metalloids +intermetallics +discontinuous +dislocation +serrations +ductility +hardening +unconnected +psychics +gypsy +entrails +cops +astrology +tromp +Bopper +lifeless +postmark +pie +blonde +reinforces +nick +clutching +pills +leery +fatalistic +fallible +withhold +reddit +quip +autoerotic +asphyxiation +Cline +Newhart +Stu +Charno +Lindala +gelatinous +lighthearted +Farrand +Nitpickers +Possibilities +Topping +Vitaris +Cinefantastique +blissfully +deadpan +titanic +Handlen +likable +indelible +Topless +Changed +Unauthorized +Ellerman +Sicily +Kapitanleutnant +Hanns +Essylt +Disciplines +christen +thee +hatchet +cabled +Yacht +Christened +Nordstern +Fassini +Camper +Lenn +Feinburg +salvager +Staten +Witte +mahogany +Antonine +Maillet +Rude +Genie +WGC +Cartoons +Huy +BHD +racers +Montoya +partway +Viewers +Anh +Phong +TikTok +Vinpearl +untouched +noodle +cakes +sampan +tofu +buffalo +recite +Bahnar +loom +watercraft +Ganh +Dia +wakeboard +Datanla +Rope +ostrich +Trang +Drink +Magnetic +Resonance +oneness +Brace +blicca +crossroads +multilingual +ICT +Maastricht +Peninsular +brisk +inflation +lira +contributory +Contributory +registrar +dependency +reshape +earns +spouses +gainfully +pensions +Ede +ASPTT +eking +Miranda +Langlois +Lizanne +Nirra +anthems +lopsided +WNBA +Palacio +Congresos +Tamekichi +Teiji +Tuyen +Ninh +spurn +accent +stumbling +cyclo +powerbase +quash +totalitarian +Stalinist +undecided +ARVN +Hamlet +Cong +barricading +lenient +veering +fatally +Ngo +colonels +paratroop +fomenting +discord +depose +puppets +Gia +alternations +servomechanisms +retarding +Reluctance +squirrel +coercivity +magnetically +magnetized +magnetization +aligns +Hysteresis +servomotors +gearless +elevator +straightforwardly 
+servo +Vector +Trapezoidal +Circumferential +keybars +footings +synchronization +damper +overheat +supplemental +nears +accelerates +electronically +polarity +emf +demagnetizing +overexcited +synchronism +Leechleaf +delissea +Scherbakov +Salenko +Nahornyak +CMG +DTD +Ficksburg +Aviz +Konstantinovo +Vologodsky +Shadrino +Vieri +Pichichi +Lazio +Eightfinals +Gwilym +Pentre +Holywell +NUJ +Denbigh +Ioan +Opposition +Spokesperson +Miliband +reselected +Cousins +Iraqis +Qusay +shredded +Ghraib +unsubstantiated +Kavanagh +swung +dissidents +Envoy +Halabja +Kurds +hindsight +pressurised +publisise +Genital +Mutilation +Robe +Gorsedd +Bards +Eisteddfod +Carmarthen +Dryhurst +Cochylimorpha +eberti +shrubby +Asteraceae +ovate +corymbs +achenes +villous +Cannabaceae +Schultes +laxly +Loran +conforming +Subcontinent +polytipic +insomnia +terpenoid +Sour +Diesel +internodal +bushier +sausage +Coachella +Californians +hallucinogenic +fibrous +relaxing +antagonistic +agonist +sedative +numbing +Plants +propyl +tetrahydrocannabivarin +THCV +Sud +Drapac +directeur +sportif +LPR +stagiaire +Saeco +Laurent +McPartland +kayak +Reiherbach +Raudenbush +stereotype +Lenore +expectancies +Graders +statistically +subconsciously +replicability +psychologists +hoof +skeptics +questioners +unintentionally +Deno +Herrell +preconditioned +gravitate +precondition +confederates +observational +Hawthorne +Oppenlander +Guara +speleology +Balmain +Northholm +Ricoh +scrummaging +Achilles +tendon +Barbarians +Shute +Win +WS +burgess +Sine +Metu +seafaring +Symington +Wigtownshire +Corn +hustings +fount +fonte +antimony +compress +alphabetic +shorthand +readability +renderers +DTP +designations +systematize +Frutiger +Univers +Italics +Metafont +TrueType +Developer +Semibold +Grotesque +parameterized +Myriad +TheSans +slant +Oblique +Italic +ligatures +calligraphic +exaggeratedly +katakana +Naskh +immanent +unicase +Narrower +prepending +Compressing +slimmed +monospaced +Palatino +optimised +Chauncey 
+pantograph +phototypesetting +bulked +overpowering +numeric +ascender +descender +sidebearings +kerned +Sabon +Arial +Helvetica +ITC +Avant +Croscore +metrically +minuscules +Typefaces +customise +Futura +Bembo +Caslon +Sassoon +tabular +superscript +subscript +Acumin +Grotesk +digitisation +Subsetting +Vanda +Oleksandrivna +Maslovska +Partisans +neglecting +hinterlands +Kardelj +Partisan +instaling +Starac +Kninska +Cincar +Livno +flanks +Jablanica +Prenj +Typhoid +Hypothermia +Kifino +aeroplanes +Piva +slippery +Chetnik +Gornje +Donje +Bare +Romanija +Proleter +Petrovo +collapsing +disobeying +Cassibile +Becucci +hinterland +synchronised +Krajina +ZAVNOH +AVNOJ +Jajce +Shatt +Klis +offload +Kardeljevo +Orientation +careerism +Lucija +Rede +Kirtlington +Burley +Wharfedale +McCracklin +Garlic +canisters +Houteff +prophecy +apocalypse +Armageddon +await +photographing +interlopers +Fitts +Coker +Rodenville +Wayman +insanity +procreate +Pomona +brides +Weyenberg +unregistered +Firearm +Affidavit +BATF +nonchalantly +strewn +nexus +DOD +Chojancki +cordial +Kalani +apprised +arming +crouching +scampered +flashbang +hail +Hustmyre +Jaydean +Schroeder +fanatics +enclave +endtime +overrode +tactically +undercutting +peacefully +minors +diatribes +rabbits +stockpiled +MRE +apocalypticism +Jonestown +Recalling +approving +CEVs +Loudspeakers +procured +Liquid +NICO +Pyrotechnik +monoxide +arson +spasmic +cyanide +insertions +dissipate +inhaled +Dayland +examiner +autopsies +consensual +Magaw +superseding +abetted +abetting +sufficiency +sentencing +fatalities +Lovelock +doused +Tort +negligent +recused +Andrade +DeGuerin +Trooper +wrongful +Caddell +Helicopters +pretext +deprogrammer +telephoned +MacWilliams +anticult +Portrayed +sociopathic +Gladwell +audiotape +Religio +mishandled +misfeasance +bloodless +Reason +Newsweek +ominous +Craddock +testimonials +noose +psychiatrically +suicidal +tacticians +Disciplinary +Allegations +downwind +accelerants +punctured +Stalkers 
+unsupportable +flawed +statutes +lawfully +Ramsey +revolvers +pouches +levers +Kevlar +suppressors +mislabeled +Bunds +Abrams +Handgun +briefing +Semiautomatic +Murrah +bumper +docudrama +Ambush +firefight +disowned +Linedecker +Barkun +millenarian +demonization +nontraditional +flamethrower +aluminized +juxtaposition +spokespeople +Allard +negotiator +Noesner +Kitsch +Armenta +dramatizing +fueled +Parcast +Erick +Dowdle +Branchinecta +longiantenna +Branchinectidae +Obispo +Carrizo +Banos +extirpation +Czerwin +Localities +Izzat +Kondapur +Miyapur +Erbon +Seneca +Buechel +Insecure +loner +forecaster +Buzhardt +Baruch +deathbed +Kuralt +Kurtis +putatively +GMA +Couric +Jarkko +TPV +FF +HJK +Hjelm +Ilves +Hypocassida +Ciriaco +Giusto +tmou +priori +indoors +experiential +marshland +neolithic +antlers +Basins +Shaanxi +zoological +Whipsnade +Ramsar +paradise +harmonious +ScaleSeven +Cogeneration +UNFCCC +WADE +broadened +decentralized +PV +apartheid +Woollens +Ermine +Heath +Dinant +Amwell +Lampits +Bridgeways +Agostini +Paar +Mouse +gondola +strive +Ngoma +genocidal +Epte +Munyeshkaya +Francois +Molins +corroborate +suffice +Gobelins +Vertumnus +vices +Metaobject +Bobrow +PARC +brevity +Closette +metacircular +Attendances +Publikliga +Freedmen +atheist +surreptitiously +enslavement +Abdulrahman +Protestantism +Helped +Gillfield +Raboteau +Fallin +expounded +Regularly +revivals +abounded +Fisk +laymen +disfranchisement +religiosity +oratorical +probationers +schisms +AMEZ +shortcomings +activism +Giggie +emotionalism +Worship +worshiping +Assemblies +reemerge +syncretist +hoodoo +Gullah +Warith +Deen +Elijah +proclaim +secularists +Estimates +Oshun +Zgierz +Grabovac +Stevan +Sindjelic +Joetsu +Ducatus +Ducatum +Herzogtum +Frankish +Merovingian +stabilise +Pactus +Childebert +Theudebert +cession +Burgundy +Burgundian +faithfulness +Thitherto +grafio +Romanised +Nordgau +Sundgau +Gundoin +inferred +antipathy +Arnulfings +tripartite +Arnulfing +Alemans +Carloman +Pepin 
+Carolingians +Carolings +Pious +alsicensi +Verdun +Lotharingia +threefold +ducatum +swear +Meerssen +Uto +dux +Swabia +Olivella +Leucosyrinx +lancea +turrids +bathyal +Pratas +Creepshow +Stomp +Compact +GW +Maztica +Empires +SSI +Avatar +Bloodstone +Battlesystem +FROA +Accessories +Sourcebook +Wizards +Georgius +Ingolstadt +Flacius +Platonic +Torquato +Geometry +versi +peripateticorum +libri +philosophia +impugns +Ionians +Presocratics +emanated +corporeal +materialistic +disbelief +adequacy +Peripatetical +Pancosmiae +mundi +corporei +principia +Sallabelle +Atkison +homemaker +Stegmuller +Ceratina +Jametz +Jamet +Arpaio +Maricopa +Lemons +monikers +satirically +indelicate +fucking +pussy +Blaeberry +Kootenay +Peigan +Athabasca +Kicking +sidewater +Masset +Haida +Gwaii +Loggers +Taan +maculicollis +Dotto +Hakku +Tachibana +Magnitude +Tatsuya +Hamazaki +Kow +Otani +Yuuka +Nanri +Cyberconnect +Comp +Nao +Mitaka +Stockyards +Bertold +Mainka +Christenhusz +vasculature +grammitid +Hypodematium +blechnoid +eupolypod +Polypodiales +Pteridaceae +Dennstaedtiaceae +subtree +Didymochlaenaceae +Hypodematiaceae +Nephrolepidaceae +Lomariopsidaceae +Tectariaceae +Oleandraceae +Davalliaceae +alternately +FSL +Diablos +Rojos +Dewon +hats +Hacienda +Polaris +relocating +undersized +McCarran +Cashman +Bali +Goldman +Sisolak +Dorough +symbolizing +caliche +dynamite +rechristening +Aldebaran +concourse +MANICA +ETFE +backers +Beckham +Naseem +Administrated +Tribal +KPK +Leas +Cliffe +oversubscribed +maths +Multi +AIB +Blackrock +Ballygunner +Ahane +Roscrea +Sion +Kilruane +MacDonaghs +Patrickswell +Kilmallock +Tones +Newtownshanrum +Toomevara +Piarsaigh +Pairings +Managers +selectors +Olya +Shadian +Charuymaq +Glossators +singly +semiplena +Silurus +Arazede +Thumb +ravines +trachyte +tumbled +Philetus +Hoyt +Quenouille +Kaddour +Goffette +Filipinos +couturiers +Barba +Mikee +Malaybalay +Ranches +Bukidnon +crumble +humbling +Villaumbrales +Queimadas +Brava +Nossa +Senhora +Gen +Bahawal 
+endogenously +overhangs +courtyards +affiliations +SEPLAA +Asif +Mumtaz +Sukhera +Adeel +emptying +Severe +channelize +Elevations +unimpaired +Draining +gorges +bungee +Gulch +Gabrielino +Devore +paralleled +twisting +Chileno +Coldbrook +Hoot +campgrounds +OHV +Mesozoic +Glendora +depositing +drainages +alluvium +interbedded +inundating +artesian +Wetland +bioswale +fir +Dimas +Biosphere +hydrology +chaparral +bisect +Wildfires +mudflows +tinder +Drought +Curve +Steelhead +upriver +rainbows +Yuhaviatam +Chumash +oceangoing +asphaltum +Asuksangna +Sejatnga +Pubugna +Anthropologists +Cabrillo +Gaspar +rosebushes +Madre +ranchos +Bartolo +Californio +pueblo +Californios +ninety +Mexicans +Cajon +Crab +streamflow +Flumes +toms +waterworks +Saloon +careless +pokes +torrential +cloudburst +churning +whiskey +roulette +ramshackle +sprang +seasonality +zanjas +zanja +quickened +exporter +cultivable +Flowing +percolated +Beardslee +Vineland +Teague +groves +Olmstead +Aqueduct +Owens +dwellers +Bonds +capricious +lessen +Reagon +Arrowrock +burying +supervisors +Storms +Shoemaker +mudslides +outings +pastime +Hiking +Timberland +Opids +Rincon +Coldwater +vacationers +expedite +divert +concreted +impermeable +Groundwater +LADPW +surges +Peck +Forebay +adjudicated +recharged +Subbasin +hydrologically +KW +penstock +kilowatt +Vulcan +refill +repurpose +erosive +dredged +unsuitable +effluent +Coyotes +Superfund +Yakov +Matvei +Gedenschtrom +cartographic +Kotelny +Eduard +elusive +Laptev +icebreaker +shoals +postulate +fossilized +hypothesize +miraged +mirages +Neanderthals +erupts +Tailgates +Tailgating +staples +coleslaw +pimento +Sholf +tailgates +connotation +pong +casually +tailgaters +stereos +partygoers +underfunded +legalization +Upar +cumbia +Intangible +Urgent +bearers +kuisis +accordions +Aruba +Quintero +parranda +juglares +Consuelo +Noguera +paseo +merengue +puya +piqueria +vallenata +contrapunteo +Joropo +trova +paisa +roamed +brightening +binges +improvisation 
+Piqueria +Zuleta +Gota +Vives +Alejo +Pumarejo +Diomedes +Rois +Geles +Gigantes +Galy +Lisandro +subgenre +congas +Timbal +fusioning +porro +gaitas +merecumbe +Nueva +Ola +Kaleth +Premios +Dease +liquidate +trucked +Chrysotile +Neglected +tramline +snowy +Ranasinghe +Arachchige +Debarawewa +Hamabanthota +Deberawewa +SLC +wicketless +Premadasa +Kapil +Dev +Pallekele +Hambantota +bagged +pacer +Chanaka +Welagedara +fifer +seaming +rip +Dhoni +adjudged +Tamim +Chaminda +Lasith +Malinga +taker +captaining +batsmen +Gabba +Therefore +Vivarini +Murano +Carpaccio +Bellini +Tingwall +Vallafield +Kottakkal +Arya +Vaidya +Whale +whaler +stormy +wail +beached +wise +wrinkly +kindness +supervillain +overrides +Presents +Breakout +SOS +superheroes +Corrupter +Imus +Sunfire +Vault +Raft +Electro +thrall +foiled +Superhuman +supervillains +syndicate +Skrull +Bucky +rooftop +exude +revert +stickwork +eaves +UTSA +electromyographic +Reliable +fallout +electromechanical +biomechanical +PCC +bioresorbable +microcomputer +Microsystems +quantisque +malis +Allocution +Consistory +Revolutions +Revolutionaries +generously +Principate +pronouncements +Freemasonry +Socialism +Pinto +plaster +concealing +ark +bimah +Chadian +Funding +trigramme +Montas +shrewdly +relapsing +delight +passionless +Corsair +Upbuilding +Discourses +simultaneity +illogically +repulses +adhering +Soren +Treatises +Lowrie +allocate +disinterested +thinkers +symptom +Lillegard +Fathallahabad +Kuhdasht +Muger +Wonji +Longinus +Impenitent +sinful +woodcut +Clowes +econometrics +estimation +Differencing +unobserved +regressing +invertible +exogeneity +unbiasedness +asymptotically +estimators +numerically +homoscedasticity +FE +Osteochilus +repang +cyprinid +cystidia +campanella +junipericola +Mycological +bristly +spacer +clades +basidiocarps +hygrophanous +grooved +translucent +subdistant +ellipsoid +basidia +vaguely +tapering +cutis +interwoven +pigmentation +pileipellis +Clamp +Mutabiles +Sphagnum +sour +Mycenaceae 
+cork +rockrose +prickly +heathers +Afropop +Tusk +SAMA +Claire +favourable +rocking +Kenedy +Sarita +endeavours +Arriving +treasured +Onze +Demoiselles +Chant +Morts +barbarity +Blaise +Photographer +speculates +frontispieces +Khokhlova +Boisgeloup +jotting +explorations +blobs +idiosyncratic +punctuation +Braque +Cahiers +hues +indigo +blueish +ultramarine +agitated +poe +consecrate +propagandistic +prefigure +cockle +guts +pinky +crammed +sausages +contorts +casseroles +claw +gnaws +wipes +bourse +imbedded +Surrealists +passionately +longed +Spectator +Blunt +coterie +Quatre +Petites +Filles +Beauvoir +Camus +restaged +Hockney +Entierro +Condo +defies +Raphael +Alberti +reminiscences +Earthy +monday +unpunctuated +grindstones +whet +cunt +evocations +scatological +Bizarre +crusts +marinating +fries +cock +cannibalistic +Kahnweiler +sketchbook +encyclopedias +Ruiz +dabbled +sonnets +lapels +shook +Toklas +Leiris +Finnegans +dazzling +Trozo +Piel +Hunk +Cannes +Laughing +PA +demolitions +Barangays +Silangkan +Kaha +Sulu +subordinate +mollusks +nodulifera +Reichenbach +Oppelsbohm +Remshalden +Buoch +Steinach +readable +UUCP +Usenet +BBS +Fidonet +WWIV +superstructure +Hotchkiss +amidships +barbettes +Cleland +Cunard +navigated +Devonport +Nore +celestial +spotting +Loney +Quelpart +Samarang +Nagasaki +worsen +radioed +gunsights +refloating +salvaging +Margraves +Hohenbaden +Weissenstein +Liebeneck +quarreled +Beppo +Basel +Lichtenthal +Tskhenistsqali +Tsageri +Rioni +Tskhenistskali +Janolula +Khopuri +Keyser +batten +Clarrise +NRHP +lawn +Gustaw +Jakub +Felicjan +Dorota +PPS +dishwashers +legionnaires +exaltation +Residing +fascism +Pelczar +Starost +unconvinced +Cadre +Wiktor +Ceremony +Stieberow +Groch +Legionary +Ukrainians +Nowy +Przemyski +confiscate +epilogue +interpellation +legionary +downplay +Mariusz +publicists +commenter +Mackiewicz +Wojciech +Latiniks +Hofrichters +bent +fawing +Eugeniusz +Zbrojna +overload +impeding +morally +reprehensible +bravely 
+goodbye +malcontent +Anthurium +nemorale +Oviedo +infiernos +Nunca +Pierde +Huele +Goya +Cinematography +Remando +viento +Leveaux +SVU +Fleckner +Montecito +Deveraux +coven +Hottest +Ashaari +Rashidun +Rosalie +Wightwick +Bracher +Stuggart +Loveday +Statesman +Centuries +Static +Paradox +Klaus +Dignity +Xinlian +PLAN +Tuo +Odontamblyopus +rubicundus +eel +Dakatia +Sundarbans +Prolific +Chucky +Keyshia +rappers +Snoop +Dogg +Raekwon +Johnathan +Haggins +AZCentral +overproduced +luscious +soulful +Stay +bedtime +Waterfalls +totalling +Scars +Gragareth +Cuckoo +CCU +NPL +Bankstown +marvellous +crossbones +LAFC +NYCFC +IS +Stuyvesant +Fiorello +LaGuardia +Proficiency +Coleridge +Philological +Chenevix +Trench +Furnivall +unlisted +Bernajean +newsletters +Nawaz +Fawad +eparchy +Catedral +Armenia +Exarchate +Exarch +Eparch +Lasowice +tilted +VW +limousines +CNNum +concertation +deepen +Benoit +Ministers +Secretaries +Marisol +Yurminka +Bakalinsky +Poker +bracelet +Limit +rebuys +Rounders +Hold +cashes +farce +SH +satirize +colonialism +undergirded +conceptualize +Esporte +Fino +Brasil +Glencoe +Restigouche +Reforestation +Thyateira +coping +quarts +litres +Non +Mangalorean +SME +KEL +FKCA +Konkani +decoded +nonpaying +encrypt +encrypted +subcarrier +encryption +cipher +pigeon +Rozalin +Lublin +calmly +brandished +Pear +Rovieng +Preah +Vihear +Guadiana +Shipbuilders +Yarrows +superheaters +Sufficient +Armament +Leticia +Guise +Colombians +Randle +Northrop +forebears +aeroplane +triumphs +tougher +Unwilling +Schwendler +mortgaged +showroom +nuts +deference +Retractable +Airplanes +eraser +Farmingdale +Bethpage +Towl +deputized +Selden +Converse +taxiing +taxied +confiding +undercarriage +Hellcat +TBF +Avenger +Tigercat +Bearcat +canvass +momentous +predominately +solidifying +Intruder +Ag +turboprop +paramount +Excursion +diabetes +Plandome +Manhasset +USNS +replenishment +Juniata +flavourings +chilled +Served +Shulchan +hypotheses +creamy +blender +teaspoons +tablespoon 
+Humpty +Dumpty +Ewa +Googly +euphemism +Langston +Zappa +Notagonum +weigeli +Baehr +bookish +implants +behaves +horrified +Manley +hypnotised +Diabolique +Kehr +Hetao +Warring +Wuling +Linhu +Loufan +Guling +Linwo +Wenguo +Heyin +Puze +Nanxing +Wudu +Yiliang +Manbai +Chengyi +Guyang +derevenschiki +repressing +Kharovsky +Molotov +Communard +Berdyaika +Sultry +Privychnoye +delo +Africanovich +Gusarov +censors +Novy +Bukhtinas +leitmotif +Upbringing +Spock +amoral +Lad +ethnographical +cheerful +idyll +Immortal +Koshchey +amorality +collectivization +traditionalism +fairytale +Honeymoon +Zavtra +Sergius +Radonezh +Nikolskaya +Timonikha +scaffolds +mooted +Panauh +Undeniable +MF +DOOM +Geedorah +Nas +Untitled +foals +filly +Guineas +Hephestion +Woodcot +Peeping +unplaced +Doubtful +Shipton +broodmare +inbred +duos +Ultraleve +nihilistic +Henning +Holsten +Pilsener +Bloodline +Woodstage +Gebot +Luna +Absurd +Gastspiel +Wacken +Burgrock +Altena +Rockharz +Moderne +Zeiten +Vertigo +Peaked +deiner +Deiner +Flagge +Bundesvision +Ich +Winterland +Geboren +wie +wir +Audiobiography +Als +meine +Sprache +Alles +Zeit +Stadthalle +Bodensee +Dampf +Ohne +Strom +Schandmaul +Megazwei +Letzte +Mal +dich +gern +Leben +Pures +Potti +Sivaraman +Chinnasalem +Sampson +McNeil +Canterville +Slept +Favor +Biggest +Morley +Miner +Marlene +Whispering +Zagibovka +Bolshesosnovsky +Phtheochroa +durbonana +Kazi +Jahanzeb +Arbab +Khondaker +Mostaq +Comilla +Brahmanbaria +Osmani +Khandker +Nurul +Dismissed +Mushtaq +Bangabandhu +ambassadorial +Malaysiahat +charisma +Pineda +Correcaminos +Amenemhat +annal +Petrie +Engaeus +nulloporius +crayfish +Qasem +Joyosa +strippers +unreported +Galardi +Pesci +Fangatau +atoll +islets +Iolaus +ofere +shrew +tenrec +Vymazal +polyglot +Topolany +corrector +Snadno +rychle +aphorisms +Zrnka +Gerson +Barrantes +Herediano +torres +UNAM +foreigner +Sibir +Taibuga +Khans +Sokolov +Siad +Ferens +Kardinia +injunctive +declaratory +Tasers +Kendra +Jahar +Darryel +Findings 
+dismiss +interveners +amicus +Renata +Shauna +Intervenor +Oversight +Muniz +memoranda +Compliance +shootings +excoriated +stymied +facilitation +Reeve +Wheeler +proffered +COCL +Prudy +SVP +Busch +Haunted +Linens +Shoe +PavilionShe +Empowers +rationalize +decolonize +BELCAST +Nationalists +Intense +PUP +Stann +Freetown +Calabash +GPA +CXC +ATLIB +transferability +Guyana +Kom +ERI +Coordinators +RLC +Inis +Moanasticon +Rusien +Diarmid +Mangunel +Pyke +Galwey +forfeited +forfeiture +Joost +Keppel +Albermarle +Ballymore +Belvelly +Glorious +pounder +Westmorland +undefended +Bantry +alarmed +Dundas +Wiliam +spoil +infill +Eyre +Coote +barrack +bombproofing +Organ +Communion +Queenstown +mooring +buoys +rioted +Gardai +Burnt +Acropolis +Giniel +Switching +Finishing +Thorner +Sainz +Nasser +Gazoo +Kalahari +mistletoe +berries +cruciatum +Boliviana +Trompillo +Boliviano +ceasing +Wilstermann +Cochabamba +Servicios +Aeropuertos +Bolivianos +Sociedad +Anonima +TBI +Stormy +BDSM +lifestyler +lymphoma +uterine +Everyman +Athelstan +Ridgways +Specifically +hgh +Fitzhenry +Dialog +unrevised +Octopus +Titled +maple +tarnished +Junin +Ataucusi +Loves +Ubiquity +Kermani +Khwaja +nisba +Eshaq +Morshediyya +Shiraz +Hajj +strove +Arpa +Mozaffarid +Mubariz +Esshaq +Inju +tehsil +Rihand +Tamor +Pingla +Chhattisgarh +CSEB +cousins +Agnikul +Jagannath +talab +sardars +bagi +Jagirdari +Kaskela +Rend +Pakharias +bhaiyas +ilaka +Khilat +shri +Fateh +ILLakedar +Jhilimi +meritorious +Bindeshwari +Vindeshwari +Indraprastha +Garhi +paatalrav +mandir +Mahamaya +Purana +Chote +Yogesh +Basant +Ka +Atihasik +Addhyan +Fringillidae +honeycreeper +lehua +Polynesians +Finches +Oahu +informatique +Amyot +EFR +Bologna +renting +Truong +Pol +BlackBerry +Stand +walkout +shortsighted +Disrupting +Acleris +tsuifengana +mandal +Karimnagar +Gretsch +hexatonic +emphasised +inversions +nonequivalent +platitudes +Perle +maximally +hexachord +Ahle +BWV +Lyudmila +Shown +Colles +Berlioz +bracing +chromaticism 
+Alban +Ferruccio +Busoni +Fughetta +ed +Fantaisie +Bix +turnarounds +tritone +enharmonic +Thelonious +Sahera +Ustad +Mehdi +Gopriya +Carnatic +Worshipful +Fishmongers +Seafish +mediates +Countryside +RSPB +Directive +requisite +scallop +domoic +diatoms +molluscan +cramps +ASP +digestible +prawn +mince +squid +clams +allowance +iodine +selenium +SIDS +timeframes +Defra +quayside +mainstays +Demersal +haddock +herring +Landings +THE +Nephrops +cockles +Brits +Nutritional +McCance +Nutrient +Intake +RNI +Amount +Intakes +Fatty +Amino +Acids +Babal +SACN +Toxicity +Cefas +Pray +Deir +cartography +Artwork +Marquard +Hergott +Blasien +Maurist +Vetus +disciplina +monastica +Estates +creditably +Genealogia +diplomatica +Gentis +Monumenta +Domus +labours +historiographer +Staufen +Momodou +Westlake +stinging +Purdue +Matching +Trefl +Sopot +Gillis +Cornelis +Jacobsz +Barriobusto +Christianshavn +Holmen +morainic +Trekroner +Wadden +windstorm +Anatol +Jordsand +sedimentation +Jutlandic +Agger +Vust +headings +Lachesilla +kathrynae +barklouse +presuppositional +Fullerton +disparage +pander +basest +Christless +triperspectivalism +knower +interrelated +Vern +Kline +covenant +presuppositionalist +outworking +lordship +infallibility +Apologetics +Glory +ebb +exhaustively +nominalists +realists +universals +noumena +Intelligent +rationalist +Schaeffer +minds +logically +temporally +Derrida +Belhaven +Rhytiphora +corrhenoides +UKRI +Installed +IUPUI +geometrically +flush +maquettes +maquette +Vessels +Torso +Fragment +timbral +Bartok +Stravinsky +Madrigal +accents +syncopation +antiphonal +Monteverdi +violins +heartfelt +rondo +brio +ritornello +Shangma +Subdistrict +Chengyang +Ballklubb +Opphaug +Brekstad +IL +Divisjon +Kjell +Sellin +Sivert +detracts +Modem +Ryukyu +fosters +Pakhtunkhwa +Razmak +Tinkhamia +Qi +Dongzou +Shiwo +Bochang +Liaocheng +Jianxin +Di +Langhuai +Beiyang +Gaochang +Yanxiang +marquessates +Linji +Shouguang +Sui +Brong +Ahafo +Osei +Relic +Femme +Oenomaus 
+Predacons +Aecidium +compositarum +Kunze +Puccinia +rust +Veracruz +Pozo +Rajbiraj +Rajdevi +Taksha +Pivovarovo +Vyaznikovsky +Carpomycetaceae +Bessey +filamentous +anamorphic +molds +Phylogenetically +Nucleariids +Microsporidia +Chytridiomycota +Neocallimastigomycota +Blastocladiomycota +Zoopagomycotina +Kickxellomycotina +Entomophthoromycotina +Mucoromycotina +Glomeromycota +mycologists +opisthokont +unikont +meiotic +ascospores +rusts +meiosis +homologous +Aftermath +CGB +Eyed +Watteau +Diffa +Kanouris +Exide +Lakeside +polycarpic +Rif +lemale +doubtful +accumulating +condone +dispelling +misconceptions +memorizing +Hila +Feldman +Irit +Sheleg +Sharir +Aharon +Renana +Raz +Ido +Yossi +Weigl +Neta +Moran +Melech +Thal +Chani +Gurewitz +headgear +attire +dresses +coverings +undercurrent +restraining +kissed +Cinematographer +conveying +claustrophobic +Blurred +Arri +Malick +Eshkachech +Forget +Bris +yearnings +happiest +rrrrrrnnnnngh +insulated +Podhoretz +eerily +understandable +skepticism +Farran +Nehme +darting +exquisitely +radiantly +nuanced +blurry +inevitability +constriction +Hassidic +Esiner +feminism +forthright +discreet +Cavanese +Pleasanton +temperamentally +Ophir +Volpi +bootmaker +Cleared +Worrall +VFA +Poona +Lewen +Battersea +Farnborough +Kamisori +gekiga +Koike +interrogates +auf +Forchheim +Haidhofer +Egloffstein +Hellmut +Eggolsheim +Trubach +Gelegenhait +landschaft +mitsampt +furten +helltten +Landshut +Succession +prehistorical +rampart +Ahorntal +Kandelberg +Haidhof +Opferstein +drawbridge +Kunstmann +evinced +ashlars +manmade +Epidemiology +Gillings +MSPH +MN +Carla +Cerami +quadrennial +Nothings +Lincolnfrom +Cameronfrom +Chaseof +McLean +disarray +Mormonism +SenatorDaniel +SenatorRobert +bolters +delegations +Englander +Cushing +Crittenden +Rives +stave +Eastside +Lecompton +Fillmore +Johnsonfrom +Toucey +reconvening +unreservedly +Raid +Harpers +spirited +Goodell +Jacinto +disunion +Secessionists +turnouts +Sumter +incur +ostracization 
+secede +Gasconade +sectionalism +Ballots +slates +unreconstructed +Thurlow +Awakes +canvassed +respectably +tem +pp +declarations +foment +abolitionists +crevices +Northerners +intron +spliceosome +isoform +ubiquitously +splice +isoforms +Czarnylas +Inroad +construed +quartering +presumptively +incumbrance +subsist +diminution +inquestionably +swallow +trawler +Rated +Maschinenfabrik +wasassessed +Unterweser +Hochsee +Fischerei +Cuxhaven +Homann +Mayen +Hansa +Dornier +glider +badged +Hybrid +cutaway +drivetrain +Utilimaster +ElDorado +Goshen +StarTrans +TurtleTop +ForceDrive +NESEA +callulops +webbing +Ferdoos +motherly +Min +Fujian +Fengze +Lichengs +Considering +unsurprising +Quanzhou +pastoralist +Mundabullangana +Yule +promulgated +alimony +applicability +MEPs +Zbigniew +Jernegan +Woolard +Clemens +Dorst +Rollin +Daggett +Duff +Muzzio +cornerstone +Calathus +metallicus +Platyninae +Viva +Bona +Geri +Caulfield +allusions +Esty +queasy +orchestrations +Pharmacological +Mizner +pencils +Valerio +Adami +Larraz +montereyense +Stalked +stalked +ascidian +Fewkes +atrial +passively +Populations +copepod +Pygodelphys +aquilonaris +branchial +Crossroad +Spotsylvania +knockouts +Rounds +Veronicas +Borin +Ziva +masterminded +Engel +Horten +Leonore +nas +bez +Ecclesiastes +Senat +Privilege +Jagiellon +abrogating +pertain +wherefore +henceforth +onerous +injurious +distillations +glassware +vaporized +vaporize +refluxing +coolest +volatile +Fractional +liquified +multicomponent +worthwhile +Distillation +Effectiveness +reboiler +condenser +Reflux +Fenske +Liquids +packings +Crustaceae +Mollusca +Coleoptera +Hemiptera +Caoyuan +zhi +ge +Luo +Zongxian +Approaches +camouflaged +GEE +Oboe +FLAG +Reliance +GCHQ +Bude +Mastering +Surf +Guests +Porthcurno +Poe +bullfighter +bullfights +Algeciras +incurring +matador +bullfight +Ronda +inseparable +Toros +bullring +corrida +Ganapati +Siddhi +Vinayak +Ashtavinayak +plated +Pavanara +humbly +genuinely +Agri +Childless +Deaubai 
+infertile +Ramakrishna +Jambhekar +Akkalkot +mandar +svayambhu +prophesied +Nardulla +Sayani +mismanaging +Tipnis +scrutinize +petitioner +Keval +Semlani +Jolly +Pima +Boultinghouse +Covering +SRPMIC +Pavilions +Odysea +Comanche +Shoshone +Zuni +homeowners +Cote +LURHQ +dns +ANZ +Novog +Suzie +Katayama +Apollonia +screamed +Walcott +cryptologist +supercomputers +Hampstead +Besicovitch +Hut +decrypt +cyphers +nap +deign +dawned +chink +telegraphists +trigrams +codebreakers +Banburismus +vindicated +baffled +Michie +Colossus +Golombek +Macrae +Blacksburg +Apartment +popularise +anticipates +superhuman +Concerning +Ultraintelligent +Kubrick +HAL +autobiographical +IJ +Caj +coastguard +Chesil +shipwrecked +Branscombe +Shambles +shone +Weymouth +Argand +dioptric +Brotherton +persuasions +Ostrogothic +reconquer +reprimanded +Tufa +Pinos +Airstrip +Cielito +Lindo +Rosario +Bortolozzi +foil +Carbondale +Photography +WSIU +orbited +inclination +precoveries +identifications +Infrared +triaxial +concurring +Christophe +Palomar +Transient +outstandingly +Collaborative +Asteroid +calculates +Attenuations +lightcurve +occultations +bornborn +Yadvendra +Ideology +Qinghua +Chiao +Tung +hydrodynamics +Timoshenko +academician +Xian +Nouet +Quimper +Inde +Nouvelle +Martinique +Guadeloupe +nonconvex +hemipolyhedron +dodecadodecahedron +pentagonal +dodecahemicosacron +hemipolyhedra +projective +Wenninger +polyhedra +deprives +Swazis +Hannie +Aids +olds +UNICEF +Orphaned +permissible +OAU +Exploitative +Khulisa +labourers +trafficked +perpetuating +Piggs +Scirea +Dowler +Margarites +schantaricus +perforate +encircled +peristome +subproduced +columellar +striae +encircle +threads +breadth +predominates +Eichsfeld +Moselle +Verein +einer +directen +Eisenbahn +nach +surveyors +Thalers +Kanonenbahngesetz +Hohenrhein +Geismar +Malsfeld +Khalifeh +Almalu +skateboarding +skateboarders +cofounded +Deca +Dwindle +Fallis +annotates +Enablement +SSN +Ontology +Sensors +avalanches +intensifying 
+alleviating +schema +barometric +infrastructures +Advancements +Standardization +geospatial +OCG +slows +Heredia +PKK +nome +guerre +Sakine +Fidan +meilleure +counselors +Bitola +Pelister +Tulalip +Smokey +Turnip +Brassica +napus +napobrassica +rutabaga +rutabagas +neeps +turnips +hollowed +lanterns +Kohlrabi +Stofnun +Icelander +ok +lawspeaker +Palaeographical +redaction +Erlendsson +chronicled +camcorders +prosumer +Funniest +weblogs +Birt +Birtac +sprocket +Decomposing +negatives +perforations +clearer +cartridges +VCR +Videocassettes +VCRs +playback +PDAs +Pornographic +Zapruder +Diarmuid +Bitter +Niamh +Sorcha +Bryanston +dyslexic +Stephenie +Philippa +Tutankhamun +Condor +Burberry +Concise +realignments +Turnpikes +Grammer +screenwriters +Chabon +Miramax +Robots +Frisby +BAD +Dinosaurs +Drift +Rosebud +Nassauische +Harappan +unfortified +multiroomed +mudbrick +jars +profusions +carnelian +Barnack +recast +foresight +Sr +Adlingfleet +Driffield +Laneham +brickworks +Samual +absconded +apprehended +waggons +authorise +Manure +weeds +dumb +Upgrading +Amenity +footpath +joinery +Stonebridge +northwards +flint +chutes +Rattlesden +Wattisham +Armfield +weatherboarded +Barking +Creetings +Farmhouse +Ashbocking +storeys +breastshot +waterwheel +humped +gault +Shamford +Blakenham +Fisons +Fison +Somersham +Watercouse +Bramford +Handford +outfall +angiosperms +carnivorous +Viverridae +Poiana +Viverrinae +superfamily +viverrids +squirrels +Catocala +Carya +cinerea +putealis +alkaliphilic +pursuer +criminality +scaring +cockiness +scared +Automotive +Scotstoun +incremental +Bosman +Blatter +quo +INEA +contravene +Cham +caricaturist +Charlet +Delaroche +Proudhon +Histoire +ostracized +sidestep +Wilhelmina +Vogue +Pod +sate +imbued +Fatimas +Fordham +spectrophotometric +Planetary +firewall +vagaries +Wayfarers +Galileo +critiqued +Newtonian +ticks +polarimetric +Seyfert +Polarimetry +synchrotron +cataclysmic +nebular +neuroscientific +Comprehensible +Interplay +Religulous 
+environmemt +Laudato +encyclicals +tireless +Mendel +Biesbroeck +Jagellonian +idiomatic +rheumatoid +Patents +Specialized +Feba +Anadolu +TRT +buffets +manat +mouthpiece +Altyn +Asyr +dublage +Mediumwave +definiteness +lemmas +proofs +decompose +Projection +Lemma +generalizes +characterizations +Amherst +ambassadorship +Hurdles +filtering +ADP +enforceable +FL +Fruity +Gaan +Bazen +Parkent +raion +shortstop +Visalia +Rawhide +BayBears +AAA +Shelby +Socrates +Brito +Souza +Widener +Solak +PTNBL +Poche +McKinney +Liang +paralympic +Ponzi +estimating +liabilities +Vipond +Tritopterna +Olethreutinae +Zurab +Zviadauri +Doha +Hiroshi +Izumi +Khasanbi +Taov +Tuvshinbayar +Henk +Grol +royalty +digipaks +segued +Rickenbacker +Rickenbackers +Trichilemmoma +cutaneous +neoplasm +Multifocal +Cowden +hamartomatous +polyposis +tricholemmoma +SouthWest +xianxia +Untamed +Depwade +Ewanrigg +goldfields +Keighley +Sutcliffes +Christendom +Astray +Aristides +Rosh +Pinna +authoring +Boulton +Unamended +Clapham +Berean +reunions +Kimmeridgian +quadrupedal +paleontologist +exemplar +DMNH +HMNH +HMNS +stratigraphic +Hayashibara +Barnum +palaeontologist +Siber +stegosaur +Galeamopus +allosaurus +Rabea +Lillich +Nicolai +Susannah +synonymy +Miragaia +Meilyn +basalmost +sacral +arches +protrude +pubic +dubium +lumping +antorbital +fenestra +basisphenoid +dorsals +shoulderblade +indented +diverges +sideways +prepubic +cervicodorsal +tooth +Asymmetrical +histological +metaplastic +hollows +rotting +sheaths +insulating +smoothness +horns +beaks +Huayangosaurus +Chialingosaurus +Tuojiangosaurus +Lexovisaurus +stenops +ungulatus +stegosaurid +Loricatosaurus +dacentrurine +Mateus +heave +refrained +Plates +taller +palaeontologists +Padian +Windscreens +MATCH +RULES +TIE +gamepad +Shur +Daraq +Sho +Marand +spellers +upbuilder +policemen +upbuilding +incumbency +incompetency +Breck +Platt +Hendricks +triumvirate +Greiner +Wolcott +Whist +Lotos +golfing +Hilles +Sweeny +Bahnhof +Donnersbergring 
+Bessunger +Haltepunkt +Bessungen +Hauptbahnhof +balcony +Wiebelsbach +Kranichstein +HEAG +Sahibul +Saif +tariqat +Hajji +Fuad +khutbah +Lala +zawiya +Dergah +Prayers +Zikr +Mawlana +dargah +Corell +Balmoral +Glenmuick +Aberdeenshire +Rinabaich +reputedly +objection +Elgin +Farquharson +Invercauld +Mordellistena +emeryi +Mordellidae +Schilsky +Mapp +Kankan +Rosita +Kirwan +Aspinall +Addelitta +Cancryn +erations +Voge +honorably +Sheree +Aisha +Baladeba +satires +geeta +zari +troupes +Gotipua +thali +Governments +Zonal +Patronage +Digapahandi +Guru +Eleutherodactylus +Pristimantis +Horrible +Awatere +Savelyevich +Gorod +Boldino +Dorogobuzh +defection +Godunov +Kedrin +Contribution +Exoplanets +exoplanetary +exoplanet +Teledyne +subsystem +FGS +astrophysics +Explorers +Investigator +Jet +Propulsion +Arnholt +comforts +conveniences +Tuna +Packing +nightcap +Balboa +cavernous +PCL +Victim +Glenwood +jogging +nebulous +Skatepark +Taft +Westheimer +Salma +quadrant +McKevitt +Macleod +serum +diagnostically +Erythrocytes +replenish +Agglutinin +armpits +Honam +Selseleh +Mikhailovich +Ternavsky +Rostov +Shinnik +editorially +termtime +Publication +SPA +Commended +lecturers +AUT +NATFHE +eugenics +midmarket +journalistic +Coen +Might +Bryony +Rina +Kirby +Roar +Heythrop +marginally +Sennet +Kale +Eveline +Specialising +Victorious +HMNZS +Surprise +Destroyers +gyrocompass +battlecruiser +Flotilla +CB +Moubray +Simson +Tremayne +Jakubovany +Sabinov +genealogical +Archiv +BirdLife +uplisted +underparts +coverts +speculum +irides +Sonora +Chihuahua +Sinaloa +Nayarit +Jalisco +Decline +Amazona +finschi +nestling +nestlings +fledglings +amazons +Marginella +rosea +Blockading +linkage +coastwise +Dismal +desultory +begged +Redstone +earthwork +imbecile +watermen +unspoken +fleshed +Rebel +dickering +bales +Embarkation +Earning +seaworthy +Picket +laden +Zouave +surfboat +strangely +infantrymen +nightfall +Bartow +holed +Midshipman +redoubt +combatants +bereft +capitulation 
+Abbreviations +rebranding +Abercrombie +Voluntaryism +Voluntarism +Naseri +Siyahu +Fin +Machinists +dissenting +nonmembers +gambusia +livebearer +ichthyologist +Hubbs +Gaige +Karaelmas +Jinnah +Abbasi +Governer +Genos +Derwin +fumbles +punt +logjam +EAST +notched +blossomed +semifinalist +deflections +ILB +headlights +deflection +Woodyard +Nyanza +Stats +USAid +VILLAGES +Synthetic +Grains +sintering +unverified +Goodrich +Smelting +Aluminum +ores +Losev +Naturally +Murchison +carbonaceous +chondrite +isotopic +simulant +purity +Nitrogen +sublimed +dicarbide +disilicon +argon +redeposited +Cubic +Homoepitaxial +heteroepitaxial +Precursor +polysilazanes +Pyrolysis +CVD +thermalization +amorphous +blende +luster +passivation +melt +discontinuities +doped +Metallic +doping +Superconductivity +lapidary +durability +Particles +laminated +sandpapers +whiskers +superalloy +Chobham +infusion +Carrera +Bugatti +Chevrolet +Corvette +McLaren +Audi +VT +harmlessly +Gapped +gapped +arrester +varistors +Dunwoody +shipboard +FETs +Bipolar +transistors +thyristors +nitriding +JFETs +electroluminescence +brighter +spreader +rigidity +polycrystalline +telescopes +Telescope +Gaia +subsystems +Radiative +Filaments +Temperatures +EKL +igniters +Pebble +Bed +embrittlement +toughness +volumetric +gemstone +birefringent +sharper +Loose +moulds +undamaged +misidentified +birefringence +cleaner +maleic +collagraph +grit +epitaxial +decomposing +graphitization +attainable +entails +lattice +photons +configurations +ZPL +PACAF +drawdown +Redesignated +Ashiya +Pohang +Tsuiki +FIG +Yonpo +Hungnam +Chosin +Relocating +Pusan +Suwon +remanned +Counteroffensive +reassembled +itse +Pantom +Dagger +Phuoc +Tuy +Beak +interdiction +Airlift +Gallantry +Crosses +TET +Thunderchiefs +Korat +Okinawa +Elmendorf +Tonopah +Nighthawk +Shaikh +sorties +Weasels +Realignment +downsizing +Sikorsky +reestablished +Coos +Millsfield +Erving +Dallamano +Hokkaido +Consadole +EAFF +Outbound +ASV +coaxial +antennae 
+Adellen +Algoma +shadowing +squall +Inverarder +Eidanger +reloaded +Anadara +straggled +commodore +chap +booms +torpedoing +Eidangers +BdU +Sheykhlar +Shaikhlar +Kuhgir +Tarom +Sofla +lakeside +knot +Clinique +marais +Montmirail +securely +impedes +foldaway +caterers +cafeterias +backyards +pasting +utensil +drawer +Pucci +southeastward +SW +Oxbow +northeastward +Thayer +sloped +Synsam +Catharine +Palapye +Jwaneng +tiebreaker +Pain +Khabarovsk +Krai +Primorsky +Zubair +Bandukda +Export +Eqbal +Arif +Majeed +Peasley +Marbet +Rahayu +Supanggah +Jawa +Slamet +Davydovo +Decreasing +Spahr +ROTC +Portal +Loughlinstown +capstone +encyclopaedist +Agen +Jouvenel +Chautemps +forlorn +Cahors +Chambre +acknowledgment +reimbursement +masonic +Herriot +Travaux +publics +dockers +distrust +stalwart +Darquier +Pellepoix +mayorship +Indicted +Febvre +ADAC +Supermono +Alstare +Misano +Adriatico +Taru +Tomoko +Aprilia +RSV +Rallycross +Nunavut +Farish +tetrapodomorph +Tiktaalik +roseae +Fridtjof +Nansen +Glyptolepis +sarcopterygian +dorsoventrally +coronoid +fangs +coronoids +postcranial +fleshy +ornamented +radially +palynological +Gauja +Formations +supercontinent +Euramerica +benthic +fossiliferous +estuarine +Sarcopterygii +subclass +Dipnoi +dipnomorphs +Embry +Kanawha +Toluca +Futbol +dibutyrate +histocompatibility +chemo +fluorochrome +intercellular +Papermaster +esterases +Jemima +Paralympian +Issued +laughable +uncovers +bonhomie +Davenant +fad +prevail +licences +copious +loopholes +pseudonyms +prohibitive +enjoyments +intercourse +Vices +cheapest +Townley +Engraved +prohibitively +importantly +Copyright +squalor +pervade +aptly +pawnbroker +avaricious +Gripe +greedily +pennies +addled +syphilitic +sores +unheeded +stairwell +exaggeration +Dufour +workhouse +strangled +Drunk +penny +cavorts +frantic +haircut +unsold +Downfall +pawned +denounces +cripple +crutch +quieted +bawls +starving +emblematic +Bloomsbury +Virtue +cherishes +hellish +Vitals +preys +Madness +sickly 
+hopeless +sparkle +joyous +jollity +doughty +toasting +Barley +cooper +tankard +pint +porter +toil +Polticks +Imitation +Moderns +counterpoint +Englishness +pollute +fiercely +scrawny +burly +aloft +mutton +ham +industriousness +overflowing +paused +toasts +wearied +Fatigue +Toil +quaff +balmy +paternalistic +barrow +archetype +misery +ugliness +leans +Walpole +Dallaway +Bruegel +Maigre +Grasse +Heyden +overflows +emaciated +squabble +Idleness +rebut +Hazlitt +unfairly +coarseness +shewn +Sewell +puerile +subtlety +insistence +Certainly +pawning +Burrington +hundreth +wretches +wretchedness +wretch +oblivion +pittance +furnish +morsel +drayman +fondling +Copies +Adlard +reinterpretation +Smirnoff +Rowson +Cocaine +Pub +Binge +Foundling +Compare +Gulielmus +Wojskowy +Klub +miestelis +interwar +Siedlce +Lauda +Zygmunt +Wenda +dyon +Lipiny +Warszawa +Warmia +Grajewo +Kotwica +Czarni +Hallerczyk +Kresy +Wschodnie +Warta +Werkowska +Warszawianka +KPW +Ognisko +Pinsk +Junak +Drohobycz +Przeglad +Belorussian +pebbles +Oware +sown +minimax +pruning +Marzieh +Etemaad +Gerdab +Rials +inhuman +Afyonkarahisar +Edwardsville +SuperDraft +Bethlehem +Rhinos +concussion +Penquite +Vendors +pillows +Gloria +Forouzan +Sprout +creditors +biathlon +Pyeongchang +Alpensia +reschedule +Zeno +Elea +paradoxes +Eudoxus +approximations +fluxions +Tractatus +Quadratura +Curvarum +linearizes +mathematicians +Euler +Lagrange +fonctions +analytiques +opined +rigour +precluded +Gauss +etude +hypergeometric +converged +binomische +Weierstrass +arithmetic +affinely +topologically +hyperreal +hypernatural +infinitely +righthand +LaSalle +Magnolia +Bozen +Minority +Unterland +Modena +Thesis +autonomies +Pens +Volkspartei +Prodis +Partito +projector +Centrum +founds +Complementary +Pension +Miliduh +Lusatian +Sorbs +vassalage +Franks +Slavs +Bohemia +Lecho +Nussito +chieftains +Pedras +Altas +Peay +Hacker +Combinator +hypothesizes +rejuvenating +heterochronic +Phalacrocorax +chalconotus +dimorphic 
+morphs +chunky +shags +Taiaroa +Lepton +collide +unfilled +maximises +Electron +Comparing +Huachococha +wachu +Ancash +Fitzcarrald +psyche +Zsasz +Arkham +Issue +Azrael +crossovers +Calabro +Poison +Raine +Sychak +Montage +Michaels +storytellers +metaphors +neuroscience +Instructor +Technicolor +Techniscope +Duryea +Fuzzy +Broncho +rewards +resolves +exterminate +eloping +Zargari +ProTeam +Dordrecht +Pensionary +Govert +Dubbeldam +Tweede +Slingelandts +Wildt +Margaretha +Horwitz +physiology +pharmacology +transporters +NIS +Kaback +permease +Coli +Liver +lactating +Juilliard +linguistics +Modenutti +Peyret +Geysels +Darrouzet +Pourcher +Motif +Participates +Basolateral +Spadaro +Cardone +Kibbey +Shulman +Dixit +Pathogenesis +NAFLD +extrahepatic +Boutagy +Sinusas +forestalls +hypothyroidism +Preclinical +Echeverria +Renier +Vogel +Tse +Wapnir +Regression +metastases +Vitamins +Oikawa +Wauthier +Dinh +Selitsky +Carpino +Levine +Cardinale +Klimstra +Gaudio +Sethupathy +fibrolamellar +hepatocellular +carcinomas +Saenger +Gamez +Godoy +Muzumdar +Mutant +Causes +Stunted +Arriagada +Albornoz +Opazo +Becerra +Fardella +Michea +Elorza +Kalergis +Riedel +biomedical +Thyroid +Ingbar +investitures +Loeb +Alumna +Californian +Bemis +lobe +moraine +sinuous +wildfires +savannas +congregates +campground +Interstates +lakeshore +Interpretive +lieutenancy +lieutenants +Custos +Rotulorum +Fichtel +Kleiner +Kornberg +Geologically +orogeny +Precambrian +stump +Tourmaline +Fichtelgebirge +Eger +Waldsteinhaus +Photographs +consolidation +Sabraton +Somdech +Phnom +Penh +Sihanouk +Monineath +Narindrapong +Sihamoni +winnings +Cinequest +Omaha +overzealous +ribbing +horrors +max +bugging +ripping +lampoon +Yorinobu +restrain +Kurama +Ichihara +Tsuna +slash +Hyakki +Toriyama +Kumohara +disordered +boars +theorizes +wicked +Takizawa +Utagawa +Tsukioka +Hakamadare +Yasusuke +Jutsukurabe +impersonate +stuccoed +downturn +Fiametta +Decent +unclassed +Angelots +garagiste +vin +aurum +SKERMAN 
+McGOWAN +SNEATH +TSUKAMURA +bean +Kirov +Oblasts +subgroups +Volgaic +Uralic +Vetluga +Bolshaya +Kokshaga +Ananyino +Dyakovo +Moscovian +Heard +Kilpatrick +Bestor +Spurs +Maldives +Rankings +Taipei +Pts +GF +GD +Goal +seeding +underdog +PDL +Edu +slotted +Referenced +Metacomet +Gunner +Forecastle +Landsman +braved +Farragut +Christiane +Aulard +Bayet +hautes +libre +Guylaine +Christelle +cramped +shuttled +RMG +Equestrian +sycamore +Lautner +freeways +didactic +Tarzana +preschoolers +LAUSD +constructivist +Satisfying +necessitates +bicultural +Brevet +ACT +Grison +LaBonge +Companions +Nightmare +Bettye +Gig +Melvyn +Strelson +Carlotta +Mauridge +Siddack +Klanton +Neesden +Armour +Slavonia +Majstorovic +Adorers +Altar +Rosary +tamburica +Cepuran +anniversaries +deeds +endeared +Soldo +Gulella +puzeyi +pulmonate +Cortain +accusative +declension +spata +Nota +postdating +Brumadant +reforged +Corte +Saracen +Brunamont +chansons +geste +Aspremont +Renaut +Montauban +Belluno +Chies +Puos +lineups +outmatched +goaltending +IHA +sluggish +quirk +Elis +Freziera +jaramilloi +Pentaphylacaceae +Miloradovich +Decembrist +Karelia +metrical +Mirsky +Pisma +Russkago +Ofitsera +allegory +Vampyre +Simelius +Sergy +sabulosa +Pilbara +bushy +Eremaean +luthier +ukuleles +Emiss +Gillbad +vocations +preachers +Lick +Greencastle +Fairplay +Mammoth +Testaments +Aldabrachelys +lifespans +overexploitation +Gerardus +Mercator +Naturalists +Perrault +Tortue +Schweigger +Bibron +nigrita +disproven +Mascarene +Slevin +elephantopus +toto +corpore +Gairmard +Garman +Pritchard +californiana +oblitum +Geochelone +zoos +indiscriminately +Fertile +offspring +herds +abingdoni +becki +vicina +overwater +Survival +diverts +colonised +colonized +Alcedo +vandenburghi +microphyes +subfossil +Fausto +Llerena +bolstered +undiscovered +nonoverlapping +distributions +Montura +Caseta +Adalgisa +Rearing +heralding +Sur +reintroducing +scant +purported +dung +Lichens +scute +scaly +Fray +bodied +fortuitously 
+colonisation +extremes +correlate +biogeographic +arching +forelimbs +Lineages +Carapace +Saddlebacks +extremities +dimorphism +saddled +tails +plastrons +knobs +ectothermic +bask +meadowed +disposition +wallows +thermoregulatory +mosquitoes +snug +pallet +herbivores +Hippomane +mancinella +Psidium +Azolla +Tillandsia +bladders +overhear +overtaking +pacing +uttering +hiss +hopping +plastron +irritating +insidiously +ritualised +gaping +prelude +rams +nips +Mounting +straddled +vocalises +hoarse +vocalisations +noises +hissing +tiring +blindly +billiard +yolk +predator +FitzRoy +Covington +recollect +scanty +tenanted +mutability +restated +Mocking +Falkland +Chiloe +Inglish +counterexamples +defenceless +diluted +Dampier +extraordinarily +pullet +Colnett +delicious +tasting +roasted +Gauchos +restocking +harboured +unclaimed +sealers +buccaneers +depletion +trample +poachers +Threats +endemism +exportation +inbreeding +Fruitless +postnatal +culls +Goat +Marksmen +Goats +Efforts +keystone +trampling +flycatchers +Sterile +interbreeding +inspirational +Sripada +Vasavi +Kanyaka +Parameswari +Charitra +parrotfish +preisli +Paratethys +Cryptanthus +felixii +Almarhum +Maneater +lurks +FearNet +Sorbo +Angriff +Ravindran +Researcher +CMU +Theoretical +Discrete +Peplos +Feres +paleolithic +Claudian +Legio +subjection +seafront +Isca +Dumnoniorum +Teign +Teignbridge +civitas +sporadic +incursions +Cochintone +Alric +Falesia +Maidencombe +Chievre +Conquest +Cherche +Dissolution +Flete +Fleete +Torrequay +Torkay +goldsmith +Earls +Armada +Brixham +Rawlyn +Keyberry +uncomfortable +Venerable +fruition +Ruffian +favourably +Ferrago +brandy +Octavian +Panorama +Dieppe +tor +Mohuns +convulsing +Trevelyan +Watering +Isambard +Brunel +labouring +foetid +alleyways +emigrants +insecure +rioters +swarmed +posse +navvies +coastguards +magnanimity +sufferings +exacted +handbill +Tiverton +magistrates +precautions +nip +Syracusa +Headland +Gregoire +Stroganoff +retinue +Infirmary +trowel 
+Lincombe +Plaque +Disraeli +villas +Howden +milder +Babies +Westward +Profundis +Cry +Importance +Baskervilles +Origin +Ilsham +pane +Thorogood +Bamfylde +outlook +Salus +Felicitas +ailing +watering +leavers +residentially +intermingled +steamroller +Hele +Shiphay +Willows +Heaviside +patriotism +attired +Lyncourt +Blinded +Fusiliers +annexe +Maycliff +Toa +serviceman +Ruihine +flu +ravaging +servicemen +exhumed +Coastlines +Dubliners +Suvla +Anthroposophy +Chertsey +Marnham +Oswald +Millbay +Loyalists +ARP +Petrol +coupon +AMF +inhabitant +Wrighton +Walthamstow +Chilcote +pillboxes +Norcliffe +Foxlands +Postings +Dorchester +Metropole +Stanbury +Blitz +railings +Daison +jettisoning +navigators +intakes +logbook +Typhoon +Meadfoot +HSL +Whaleback +Denton +Teignmouth +compulsorily +insured +NHI +Hams +buildup +Quartermaster +GIs +Vicarage +Sherwell +Rathmore +Amphibious +MM +Rosetor +Nos +rescuers +Overlord +holidaymakers +Please +Pannell +dla +biotope +karstification +Tegea +topography +shattered +tectonically +dissolves +widens +ponor +katavothres +Exploratory +Natura +Geisental +Hana +Flach +Click +paywall +Authenticated +Bewkes +authenticate +authenticating +NBCUniversal +Bravo +CTV +exclusivity +mandates +Songwriters +Carnes +regionalized +multichannel +fiefdoms +NCTA +BTIG +FX +unskippable +frustrating +Cablevision +Synacor +sweepstakes +Janeiro +geoblocking +interlocking +Crupnick +SVOD +Amid +augmenting +NBCSN +GfK +CTAM +prioritizing +MVPD +counterweight +subscribing +Teeny +Sealion +soviet +Bessans +Wiz +cigarette +ATOD +Gohmert +Filak +Ratcliffe +Jaquess +Woolridge +Cargas +democrat +Scot +Gallaher +Hinojosa +Hildago +Chairwoman +Palacios +Pastor +Villarreal +Libertarian +Beto +Kaleb +Matta +Lubbock +unhappiness +Jodey +Phelps +Tejas +Vakil +Helotes +Kathi +Krause +Micah +Mauck +Farenthold +Nelda +Hayward +Zeffen +Hardin +Schafranek +Bernice +Lingerfelt +Sweeney +Ramsland +Veasey +Quintanilla +Filemon +Vaden +Narvaiz +Stockman +Salpointe +reservist 
+masterful +craftsman +screwed +Slide +Loading +disassemble +formable +totemic +rowboat +weld +Basis +semi +rowboats +playfulness +stemhead +rabies +Duckworks +Homage +Elia +Groves +Battleford +EPHL +Jehan +UKZN +Ruhan +Mylothris +splendens +NGO +storybooks +Bookly +NATIVE +Mxit +PRAESA +Asifundze +Ehlanzeni +McElwee +discounted +Cellphone +Shuttleworth +Puku +affirm +WikiZero +eLearning +Informations +eThekwini +Researching +literacies +multilingualism +cybercafes +Kibera +GLA +Mobiliteracy +Sovereignty +Howland +Kingman +Navassa +MCD +Pant +Oswestry +Nantmawr +vestige +Argentines +toasted +tramezzino +bakers +homesick +Sandwiches +polyhedral +tessellation +Honeycombs +circumsphere +cube +cuboid +trioctagonal +tetrahexagonal +omnitruncated +Kompakt +DIY +Hooj +HSwMS +Karlskrona +Tawazun +Merkel +CAR +Selim +Rafic +MacIntyre +PBY +Akutan +Minuscule +Palaeographically +parchment +Ammonian +Eusebian +lectionary +uncial +Alandt +Claremont +INTF +fol +Yoichi +Ozora +viduus +Pascoe +goalkicking +bettered +Hikari +uncanny +Lefty +confiscating +suspends +Sluggers +Worldvision +Specials +miscegenation +Abandoned +quadroon +scornful +naturalism +pervasive +Shakespearean +Birthplace +invalided +Stratford +Jowett +Cobbe +debunking +Marlowe +scriptwriter +Ruston +Clemency +DLitt +binoculars +metallicity +Maribel +Micalvi +Driven +weakside +reacquire +expend +tags +counterattack +Combos +knockback +lobbies +Beginners +Needing +deceives +intercepts +fuse +Valkanda +Dormammu +infused +Symbiotes +ejects +Hawkeye +mechanized +gauntlet +reclaiming +immortals +Origins +Norio +Unreal +nimble +Insomniac +Telltale +visions +forgo +dioramas +Danvers +Anywhere +DLC +Homecoming +Avenging +Hasbro +Funko +Suriel +divisive +Gamereactor +Destructoid +unpolished +Tamoor +Darry +overhauling +stalest +loathed +silky +abomination +Minotti +meager +laziness +Injustice +Cuphead +Tekken +Create +Klymaxx +Bernadette +CIAU +Abidjan +Lagunes +Medullary +carcinoma +infiltration +Amado +Cutral +Neuquino 
+annulled +Kirchner +Estela +Traversing +Messer +Hatfield +bribe +Vasula +speciosa +murex +Cappataggle +astro +vandalism +exertion +Culpo +Ferrari +Ravensthorphe +Ravensthorpe +Esperance +Tubers +kipfler +maxwellii +suckers +youlk +Gennifer +Moderno +Echandi +Ibarra +adhesion +Reformist +preachings +conspiratorial +squandered +Reformists +Jeziorno +Pra +Kwahu +Proterozoic +metasedimentary +Craton +Akwatian +Harzburgitic +crystallized +unstudied +faecal +profitability +privatize +Manitowoc +resell +smuggled +kaolin +pawpaw +cola +cashew +Bamboo +Okyeman +Epan +circumference +relaxation +souvenirs +Akyem +drumming +Marple +Goyt +Compstall +SSSI +carr +Sailing +entrants +Southport +anglers +Zoran +juts +Buzzards +Phlaeothripidae +thrips +misspelt +Idolothripinae +Phlaeothripinae +fungivores +Thrips +Terebrantia +Idolothrips +marginatus +Bryce +Jagannathpur +Islampur +Anuchand +Sunathonpur +Thegoria +Koborstan +Karlo +Compassvale +SMC +Buangkok +Hougang +Ang +Kio +Ser +Janil +Gan +Thiam +Poh +Zainal +Meng +Xueling +Hean +NTUC +Nami +Kanagawa +Miracle +Nakayoshi +Neighbor +Frog +Lizard +tornadoes +Terrie +calcious +Nationally +paucal +dyadic +Plowden +Leveson +Marylebone +fifties +monograph +Orchids +hildae +Prasophyllum +nichollsianum +Genoplesium +Kuonoto +Buol +Persbul +Elevated +hypercholesterolemia +predispose +lesions +chylomicronemia +hyperlipidemia +abnormalities +hepatosplenomegaly +neurological +Eruptive +Familial +dysbetalipoproteinemia +elbows +Palmar +crease +Preventive +Lifestyle +Weight +mildly +carbohydrates +Medications +fibrates +Epanova +Golabi +Fasarud +Darab +Rowshanaq +Balghelu +Declarative +Lascelles +Dumbarton +Lobanikha +Cherdynsky +bracketed +foliate +Valz +Tchiquaqueia +Jinan +Predecessor +Qilu +Courses +HSK +Qarqa +Abarghan +Sarab +Innocence +Millarde +Arnolfo +Ambrosian +simoniac +penance +Pandulf +Bernold +Thimo +Odalric +Gebhard +transferral +Erlembald +Tecla +Milanese +chelated +aspartates +adjustment +percentile +Aspartate +SCF +ion 
+Alburnus +leobergi +Azov +Tsimlyansk +benthopelagic +friar +Buckenham +frere +Austyn +Priory +pilgrim +Compostela +maidens +Chaucer +Jacobus +virginis +Arundel +Betrice +Roxburghe +Lyvys +Horstmann +Bokenams +Legenden +Monasticon +Anglicanum +Frere +answerynge +shewith +lyneal +lordis +honore +fro +MCCXLVIII +Abbotsford +Khote +Sikkey +Partho +Krishan +Atul +Agnihotri +Ayub +truckers +underworld +rob +getaway +wily +Vanoli +Homo +Pluto +spacecrafts +Soils +Hawalli +FFA +Campbelltown +Ante +runaway +symbolise +Dharawal +Athlone +Grampound +Cashel +Rickford +Komorze +Marjolein +Shamiz +Sanandaj +fuscosuturalis +Speculation +Fireplace +Bemidji +Moustapha +gutsiest +craziest +adventurer +inconceivable +Adrenaline +Bodhi +Falsetto +Overton +Chenault +Woodburn +Lewman +Wenona +Omnium +Sheepshead +hurdle +Triple +Filly +Haze +Kenn +Isacoff +Munro +meaner +tertian +constructionist +chordal +Feuilles +Elektronische +vom +Freitag +Licht +Karlheinz +contexts +bebop +sonority +subtonic +Gleebeek +McCartney +Gretty +Kington +Escheator +Nitheway +Roses +attainted +escheator +Clearly +esteem +incapacitated +Juyn +Notwithstanding +firmness +sages +furtherance +Jewe +Wivelscombe +Wydecombe +Molyns +Glanville +Critchell +chantry +Woolavington +monogram +Jaws +Enright +Rin +Darryl +Filmmuseum +Chartre +Ellipsoptera +puritan +katadiktya +downward +diktyon +picarella +Dowager +Weichs +Henny +Porten +Asta +Sadie +tightrope +Zwei +expressionist +Richthofen +misreported +confuses +graphemes +impairments +auditory +graphemic +allographic +Dyscravia +dyslexia +voiceless +Sapporo +Maputo +Countries +optimally +Ectothiorhodospira +halophilic +obligately +autotrophic +NaCl +Proteobacteria +phototrophic +halophile +betaine +hypersaline +crystallizer +sulfide +oxidized +citrate +photoautotrophic +proteome +KCl +microanalysis +PYP +Oboes +flared +vibrate +Sprightly +Playford +Inferior +nominally +scrape +Skilled +Subtle +Novice +bassoon +Arundo +adjusts +Slight +Plastic +haut +bois +joints 
+Circumstantial +Philidor +Hotteterre +boxwood +Denner +Stanesby +Jr +concertos +spurious +Akademiemodel +Hajek +Golde +reedier +harmonics +expressively +Philharmoniker +violetwood +Ebony +socket +Gillet +thumbplate +taille +Delius +Holst +heckelphones +heckelphone +piccolo +sopranino +bombarde +piffero +ciaramella +xirimia +zampogna +contra +Sartin +Faustus +Shorland +Primeaval +coloristic +Garvin +Coltrane +Yusef +Lateef +Nucleus +Indie +Sufjan +underscore +poignant +Ennio +Basil +Poledouris +Mbarga +Chartiers +Crafton +Statewide +Fayette +Seventy +Hazelwood +Brookline +fists +incite +Jani +Niilo +Friman +telenovela +Televisa +IMDb +Eucereon +balium +printmaker +Engelschman +Frans +Amelberghe +vanden +Academie +Maeght +Bruges +Brugse +Vrije +jay +pomp +Filips +Marissal +Alexian +mortuary +galantes +disporting +amorously +commedia +Tillemans +gentry +Southwick +Hascombe +Azimuthbjerg +Sondre +Mapping +Pytalovo +Mitau +swaps +Brooklandville +Goucher +Lutherville +Bosley +Merrymans +Ladew +Topiary +Towsontown +turnpikes +Knoebel +macadam +disjoint +Ryongwang +Ryonbong +wrongdoing +semis +Preliminary +Hardwick +Emblem +bolting +mash +Allstate +AMA +BSB +Bubbling +Dawa +Emad +Dhia +Nouri +unheralded +Ativan +awardees +Jacklyn +FPJ +Agila +Maynila +bested +Chito +Itanong +Sa +Buwan +Oculomancy +scrying +diviner +gazes +Minto +Formosa +Neustadt +Macintosh +polluted +leak +flashlight +plugging +riddles +Solvers +comprehension +Savignano +Allgame +Abandonia +Underdogs +Baskets +Lassen +OVC +forgoing +Townsville +Illawarra +Murrow +Quill +Fijian +Keyte +Mullany +inextricably +peinture +Covent +embezzled +hierarchic +Vertue +carryd +stilted +arte +Zoffany +Canons +Milner +Moser +Pall +Gainsborough +medallist +Yeo +Perspective +Neoclassicism +engravers +Girouard +eked +maximise +Priestley +cabinetmakers +Chippendale +Linnell +Zucchi +Farington +Veal +Slaughter +Honest +HaMillion +Armoza +Nahibi +Basirat +aspirant +Progressives +GuideWell +Pathway +Guidewell +GetInsured 
+marketplaces +nonmotile +NCH +ATCC +CCUG +CIP +DSM +JCM +VKM +Yongpyong +Elviss +Topvolley +LaFolier +cannery +Varden +char +Float +Densetsu +Wairudo +Geo +Toji +Sakata +Tsugumi +Sendo +Xiangfei +Ryuji +Sakazaki +Desperation +Attacks +flashing +counterattacks +taunt +unblockable +renames +Giza +Mou +Kokoro +Yuretarishinaide +Sekaijuu +Doko +wo +Sagashitemo +photobook +Kitahara +metallurgical +Mankato +Prawle +Vilhelmine +Herlich +Harald +Ilsted +etching +Frie +Udstilling +Vinding +childbirth +Julenat +Ollerup +silverware +Draftsmanship +Kunstindustriskolen +Kunstneres +Thorvaldsen +Et +barselskone +Oberleutnant +Schultze +MWM +RS +PG +VV +Calabria +Inishull +Lightship +Botney +sortie +Jarlinn +biologically +Grape +DeWeese +Davie +Shafiq +Kagimu +Zebra +UPL +GraphicsAudio +Drowned +Raccoon +Groot +Steal +Ant +Demon +Dieng +subdistricts +Chowdhury +workload +Monongahela +KDKA +Zworykin +Chalfant +Braddock +megapixel +CMOS +geotagged +LTE +iterations +Marital +Workplace +Tendency +Platyptilia +spicula +Townships +mascots +Rajasthan +betting +Intex +Raina +Saurashtra +TYKA +Oxigen +Astral +Kansai +Nerolac +Sanghi +Jio +Ateneo +Jocks +Courtside +courtside +Maharlika +Pilipinas +Alzate +Brianza +Snoderly +Dodd +Eimon +codification +Naifeh +Positioned +IPS +GASB +disseminates +CMFO +EOA +Benchmarking +Ago +Chappell +KT +Cullum +Cooking +Vinyl +Edsel +Dermot +Abertay +earMUSIC +Prahova +immunology +Eadie +Gorczyca +candidating +travertine +orients +bequeaths +Noguchi +Bicentennial +Riepma +Haggerty +Geometric +Lyrical +Minimalism +Charged +Claes +Coosje +Lyman +Elie +revelry +curatorial +losy +wybranych +powiatach +okupowanej +Polski +Grabowski +draconian +gossip +Borkowicz +Rzeczpospolita +tendentiously +conscripted +extermination +deconstructing +DESTRUCTION +falsehood +accuses +Filomena +suing +Engelking +defaming +ballistic +autonomously +homing +Refer +ICBMs +schematic +hairpin +yaw +Hydraulic +actuation +evading +locale +Saturdays +GlobalTV +Zriek +retooling 
+Jennings +Gauthier +reacquaints +Fain +neglects +Trying +scampers +Forging +hesitates +blackmails +Driving +reminisce +unsettled +inquiring +instigating +misappropriated +mourns +bachelorette +Freaked +Upset +Deducing +foreshadows +unopened +unheard +toughing +chandelier +pierces +restates +reassure +lawman +harpooned +darkens +evades +inflict +savagely +worrying +slams +incinerator +cellphone +shotguns +arrow +shears +prompts +Lillis +Attempting +RTM +EQAL +Beckett +digitize +Krell +Trzecia +Olsztyn +nonvenomous +peninsular +Bangka +Lingga +Riau +crepuscular +Hatchlings +stripes +postocular +shivers +reptile +Stull +curtus +Pauwels +Aubertin +picturale +vol +monochromes +nails +Documenta +Barristers +rustling +QC +Puisne +Apex +Olwyn +Corinda +Toowong +proxies +eroge +aristocrat +Samanids +Amirak +dehqan +spahbed +Khosrau +Pishdadian +Muhtajid +Chaghani +Imad +Damghan +pardoned +Mamari +invite +Mansuri +Mannington +Parliamentarian +Purge +Yarmouth +Spelman +Furendo +Uchimura +Nitobe +Quakerism +transliterate +Mongolodiscus +Dzhagdy +Egyngolskaya +obtusa +Agnostida +headshield +tailshield +Yukoniidae +glabella +palpebral +lobes +librigenae +pygidium +rhachis +Crocodiles +Krokodillen +Alfons +FOBISIA +educates +enrols +IGCSEs +HCMC +Voskresenskoye +Kameshkovsky +Harstad +onshore +Loudermilk +Delmore +mandolin +Punic +Beylical +beylical +Dunant +Mahakoshal +Berar +giganteus +Haydarli +Aclytia +klagesi +Rothschild +Palette +Brush +Delectables +imaginative +Messthetics +Fugazi +clicked +MacKaye +eponymously +Anthropocosmic +Punk +Cellnet +Telikom +Lae +Bismarck +Samoan +Tuinaimato +Sogeram +Idrissa +TSV +Vitaly +Saransk +Zambia +Mabi +Mputu +Chokri +Koko +Arbouretum +Heumann +Thrill +Tazewell +NorAm +Eligo +Ambit +Bounce +Gexa +Cirro +Kona +deregulated +Blackstone +Kohlberg +Kravis +Hellman +Vectren +metered +restate +risque +Carlen +Philology +Lengua +Gallega +Lingua +Festa +palabra +Galego +nosa +Valdeleite +Cifras +inadequacy +Letseng +Nqechane +Hololo +Extraction 
+Maluti +Firestone +Plc +Kopane +Lucara +Mothae +Namakwa +Geology +kimberline +kimberlite +Explosives +Artisanal +bituminous +Charmoille +Doubs +OC +CQ +Fentock +mon +ma +Gilles +Charlebois +Yvon +fois +soleil +Tapis +showbusiness +Voix +vie +AV +SOCAN +occupancy +Decades +Rust +Demographic +Forecasting +capitalizing +lender +Mortgage +vacancies +landlords +Landlords +foreclosed +stamps +HRCPC +warehouses +publicize +Salvadorans +Jointly +OJJDP +TDPRS +Nyelene +ESL +Citizenship +teens +TAKS +Voz +Nestor +conglomeration +jaunty +incongruous +Suro +Lomax +Discroll +Dug +intersections +unimpeded +Safer +Lantern +ro +Occupancy +Peg +planner +distrustful +demographer +undercount +Charities +Gessner +Storefront +storefront +Immigrant +Superneighborhoood +TRIAD +Renwick +Annex +Vallbona +offenders +Huffman +hamper +dilute +Nicaragua +Suite +METRORail +Scarbrough +Christof +Spieler +Locally +Preferred +ADOC +Pollo +Campero +Banagricola +BancoSal +HSBC +appliance +spiced +cucumbers +popsicles +Woodlands +Farris +Builders +Oriana +eases +Mart +Shilcutt +microcosm +BBVA +Maricela +Rookin +Asakura +intimidate +Spaces +conformity +quaaludes +rapists +patrolling +Lanier +revitalization +barricades +Pocos +Pero +Nguyen +Salvatrucha +Tammy +Clergy +Chateaux +graffiti +philanthropy +admittance +Amigos +Por +Accelerated +KIPP +Robindell +Abbe +Rebuild +vacate +fliers +truancy +ShowMax +Bombardment +McRae +Carrollton +Donaldsonville +Thibodeauxville +Aboard +Bisland +Simsport +Cotile +Assaults +Iberia +veteranized +furlough +Rapides +Plantation +Retreat +Morganza +Mansura +Sailed +Halltown +Berryville +Winchester +Ordered +selves +pretense +thefts +outrages +pitied +satisfying +consciences +distressing +Jabez +Twofold +Harlow +canonic +curving +Kandinsky +Glyndebourne +Commissioned +Eyebeam +Hub +notabilis +gladiformis +decumbent +oblanceolate +Midvein +globular +Pods +papery +Seeds +funicle +Monkwick +Afrin +Kafr +Safra +Darmashkanli +Kushner +Fombrun +whiteboard +Edmundo +Aguilar 
+LXIII +Provision +Sanitation +Drinking +LX +Harassment +Journalists +Investigative +Generated +subsecretary +LII +Guadalupe +Quesnelia +testudo +ecoregion +Serra +rosy +bracts +Justices +Camps +Cottages +wits +Sanitorium +Trudeau +chalet +Ledge +rustic +Pinebrook +Adirondacks +Placid +Ardsley +Aeolochroma +rhodochlora +Geometridae +Goldfinch +Hambling +EW +Watton +Wyer +Hornsby +Bilham +millwrights +Windmills +winded +clasp +wallower +FAA +taxiways +utilitarian +plywood +Kitzes +Gersdorf +Zwickau +Gerdorf +Squash +Mamun +Nusrat +Imrose +Shuvoo +Lyricist +Mehedi +uncivilized +Kobir +Zahid +Mehadi +kamal +Priyo +upfront +Covina +Vasquez +Tabasco +condiment +Katims +sagging +waitressing +ketchup +insatiably +Secrecy +antagonists +kid +renegade +haunts +Cruces +gleaned +goons +coerced +grips +professes +peering +handpicked +Into +Shall +shockingly +Plec +Amblin +Bender +Jeanine +Cowles +OEC +Org +Converge +Redondo +ANZO +Gallop +dashing +reverses +Megray +unsteady +caracole +breastplate +bodyguards +tenuous +earls +Airlie +royalists +busied +Huntly +doubted +guerilla +Excluded +grief +Marquessate +dignities +Steinar +Polliot +Salmaniyeh +Jiroft +Wagait +Delissaville +Lissa +Delissa +shires +secretariats +Badulla +Moneragala +Dupont +NAACP +JACL +Tsui +Chennault +seminars +NO +assembles +firsthand +lunches +Interns +placements +Parcel +AXA +BAC +Fellowships +Kalmyks +Kalmyk +reclassifies +Suku +Gebfuka +anthropological +Ambon +Moluccas +Republik +Namrole +inflow +ethnogenesis +Kayeli +Lexical +Ambonese +creolized +Remnants +Opo +Hebe +Snulat +Wainibe +Eucalyptus +tuna +decor +machete +Mw +Hokuriku +JST +Oki +jishin +Honshu +Oligocene +tectonics +Amur +Okhotsk +blackout +Tsuruoka +Shindo +Octavius +Winhall +Perpetual +Horsham +Horsford +Akaroa +glaciolacustrine +silts +rhythmites +varve +Sedimentation +matchdays +ferrarii +fiends +subshrub +Lepasalu +unrecognized +Iva +remand +Alija +televised +banovina +Mostar +democratically +acceding +Largely +Milenko +Radovan 
+demarcation +Dobroslav +Drina +Bosniaks +Clashes +mediators +Thorvald +Stoltenberg +Jadranko +accords +Srpska +Unrecognized +Leijac +Tomy +Tutor +intermission +scrolls +Epoch +Astro +overlays +Ahl +intro +inputting +Shori +Skramble +Whirlybird +glycosylamines +anomeric +glycosidic +purine +phosphorylated +kinases +Nucleotides +novo +nucleotidases +lumen +nucleosidases +nucleobases +dideoxynucleotides +hydroxyl +polymerases +Orissa +Whistler +busts +Rothafel +Damrosch +Burleigh +Boothbay +Mataram +Visi +Jogja +GrandPrize +SICAF +Nioda +juries +Noida +Kemenkumham +Jusuf +Kalla +SMPN +Magelang +highschool +SMAN +STMIK +Amikom +Koneswarar +Kudavasal +Tiruvarur +kumbha +pralaya +Kumbakonam +abode +snatched +anthill +Linga +Lingam +nayanmars +rebirth +Kochengkannano +saivism +Sambandar +Arunagirinathar +Paadal +Petra +glorified +Tevaram +Nayanar +hydrophones +SFR +Radoje +misappropriation +Syndicates +juntas +Lech +Ranko +Obrenovac +Nineteen +Dobrica +Mihailo +Stane +Dolanc +Censorship +swath +Boles +Extracurricular +Ringerike +Buskerud +Begna +Begnadalen +Urula +Ganesan +exacts +makeover +loosen +conspire +incurable +Babu +Thiruvengadam +lust +schoolgirl +immolates +sprout +Indrakumar +stings +swell +secluded +ayurvedic +physiotherapy +freak +sceptical +Nanban +choreographer +Baahubali +choereographer +Yuen +Anl +VFX +Muthuraj +Davina +Christien +Dominie +Thendral +Vogt +Azhagan +Aanazhagan +finalised +Deepika +Prabhu +thespian +Jiiva +Ojas +Siddiq +aspires +authentically +Sattru +Seabeach +Panjin +Sixty +Pollachi +bulky +Sreedhar +Pillai +Patchwork +Sushma +Cine +Stills +labelling +cyborg +Prajakta +Hebbar +IBNLive +Nivedita +Mishra +indigenously +overdo +ignite +magnum +opus +teasers +Behindwoods +impactful +rewinding +Bigg +Jaya +Subhash +Jha +imaginable +flamboyance +balances +gimmickry +Komal +Nahta +celluloid +Rajeev +Masand +IBN +visionary +mesmerisingly +spectacle +Rediff +greed +terrific +exhausting +shoddy +sketched +Haricharan +Sify +Barring +hummable +AR 
+Oneindia +Gautaman +Hunchback +Deepanjana +stupid +regressive +Scheib +zippy +hefty +Chiyaan +bravura +flatly +overdone +Saltz +exuberant +unselfconscious +cartoonish +onslaught +giggling +crores +Urugusu +Matter +Button +Miscellany +Tacolneston +Seventies +Roundabout +rollout +Discrimination +Cowley +bureaux +kinetoplastid +flagellate +trypano +soma +antiquua +heteroxenous +Poinar +Zoonotic +Hymenaea +protera +Burdigalian +Toca +Schizotrypanum +hematophagous +Chagas +norm +Lutherans +corpora +Philippicum +Ecumenical +Athanasian +Schism +Catechisms +Primacy +pacification +superstition +unaltered +addenda +confessors +implicitly +erring +rostered +unconditionally +normata +normed +normans +norming +authoritatively +Jacobs +Symbolical +Friederich +Bekenntnisschriften +Jaroslav +Piepkorn +Kolb +Wengert +translators +Triglotta +uncontested +Retiring +Rendered +Nantou +Nei +Lingding +Lantau +LAGOS +Ayodeji +Saibu +Adebayo +Okada +DPP +Oba +Akran +Fatinikun +gutting +HVAC +STEM +GreatSchools +AE +extracurriculars +WGBH +BioBuilders +reprogram +Envirothon +Mock +MCAS +DCL +environmentalism +Recycling +Dunraven +woollen +articulo +nerves +twitch +rigot +inquest +Unionist +Escuadrilla +Harka +Malvaloca +Deliciosamente +tontos +leona +Castilla +muchachita +Saura +Caza +Peppermint +bosque +lobo +Patrimonio +Bearn +barro +dodge +Frisbee +Scavenger +landholder +Leightonstone +ploughlands +danegeld +Danes +allotments +Huntingdon +Anarmodia +remotalis +Dognin +Stokely +Doorley +mariner +Ladislaus +unsymmetrical +Prislin +Sisak +Dolac +serfs +Gornji +Grad +Ribnjak +Centar +biosphere +archea +millilitre +hydrothermal +vents +extremophile +sulphide +bubonic +yogurt +Schizomycetes +latinisation +hyperthermophile +alphaproteobacterial +Eukarya +unaided +Thiomargarita +namibiensis +Epulopiscium +fishelsoni +ultramicrobacteria +bacilli +Neisseria +diploids +Streptococcus +aggregations +microbial +mats +protists +Biofilms +implanted +compartmentalize +cytoskeleton +localisation 
+chlorosomes +nucleoid +intensities +teichoic +lipid +lipopolysaccharides +vancomycin +Haemophilus +influenzae +Mycobacteria +arrayed +Campylobacter +Pili +Glycocalyx +disorganised +slime +polymeric +macrophages +periplasm +multilayer +detectable +anthracis +puncture +tetani +photosynthesis +redox +cyanobacteria +methanotrophic +secreting +ethanol +Facultative +acceptors +optimal +Budding +cheaply +logarithmic +metabolised +antioxidant +Carsonella +Sorangium +Borrelia +Vibrio +Mutations +Mutation +exogenous +reorient +periplasmic +motility +pilus +Motile +myxobacteria +Shigella +polymerisation +bioluminescence +communal +optimising +differentiating +excreting +autoinducers +pheromones +morphologies +Bacteriology +Eubacteria +Eukaryotes +mycobacteria +sputum +Diagnostics +metabolically +mutualism +commensal +warmth +Myxococcus +Vampirovibrio +saprophages +entrap +interspecies +butyric +propionic +methanogenic +accumulates +rhizosphere +absorbable +synthesise +folic +fermenting +undigestible +probiotic +Pathogenic +tetanus +foodborne +leprosy +Micobacterium +Helicobacter +pylori +peptic +ulcer +blight +wilts +salmonella +meningitis +vasodilation +Rickettsia +Burkholderia +opportunistic +immunosuppression +bacteriocidal +bacteriostatic +chloramphenicol +antiseptic +sterilising +indwelling +Disinfectants +Lactobacillus +yeasts +fermented +soy +digesting +hydrocarbons +Fertiliser +Exxon +bioremediation +enantiomerically +thuringiensis +Subspecies +Dipel +pollinators +Escherichia +coli +biotechnology +bioengineer +microscopist +Antonie +Leeuwenhoek +hiatuses +Bacterium +Treponema +spirochaete +Woese +Pommerville +Saltine +Salted +Unsalted +Crackers +Occasionally +Boxes +Aude +Fa +Poetics +validation +construes +polyvalence +monovalency +aloud +triviality +reductionism +irreplaceable +Wiggle +OpenVG +Ablonais +Concours +fleuris +Orly +Finances +Publiques +Essonne +Sorel +revocation +cropping +loamy +fesse +sable +escarbuncle +fructed +Successive +Brassens +Excel +Dormant 
+Bac +boredom +Newspapers +Injured +Stampede +Checkout +Receives +Sentance +Bribe +Willamson +Compete +Snopes +virial +Appoint +Forgot +Rewrite +AdSense +halved +parametrization +cmf +Gumbel +frameworks +Mirror +Wallowa +Sargeist +Behexen +Grievantee +Warmoon +Quorthon +Bathory +Impaled +Kohti +Nousua +uncommonly +Vihassa +Ja +Oi +Kallis +Kotimaa +Musta +Gurki +Malek +Arsanjan +CNCO +BTS +Chvrches +Marshmello +Kygo +musics +Nicky +GIF +caption +RIX +soaring +Vaughtrey +Copsey +bpm +criminally +underappreciated +unadulterated +Aoife +Bustle +Cedergren +Popjustice +Adejobi +trademarks +McFadden +vets +Straits +Egan +imperfections +alloted +Troubled +Trending +Heatseeker +QQ +playlist +Shoot +voicemail +Goldene +Kamera +Kinsella +Poirot +Gilmor +Bekeshevo +Bekeshevsky +Baymaksky +Product +Accessibility +Corporations +EverybodyOnline +giveaway +Trapeze +Switch +TalkTalk +Jovita +pots +Puodas +Dvi +Dubuo +Moterys +Details +molding +stoneware +angelas +Moteris +metas +Mama +vaiku +prijuoste +papais +Republicains +Emmanuelle +Gaullist +Gaulle +orientations +Bonapartism +ethic +voluntarism +MEP +Bourlanges +Berlusconi +dirigisme +Fadela +Jouyet +Hirsch +CEOs +unease +leeway +centralization +Hollande +Fillon +Devedjian +Brice +Accoyer +reemerged +Nadine +Morano +Peltier +hectare +Viale +Oberdan +Celtis +Fagus +Ilex +Pinus +Staphylea +Taxus +exotics +Ginkgo +Gleditschia +Liriodendron +Parrotia +Sequoiadendron +Asplenium +Athyrium +Phyllitis +Aceras +Aquilegia +Arctostaphylos +Aubrieta +Biarum +Cymbalaria +Drypis +Edraianthus +Ferula +Festuca +Gagea +Galium +Leontopodium +Ranunculus +Saponaria +Saxifraga +Sideritis +greenhouses +succulents +epiphytes +Cinnamomum +Kalanchoe +Nepenthes +Stanhopea +noncontiguous +Distressed +commonwealth +Disjunct +Tarpon +Colee +Hammock +Calusa +Cooley +robbing +riddled +critiques +Everglades +desegregated +spurring +Pembroke +shrank +Tamarac +corrosive +Recognition +Winters +Sensible +flurries +hurricanes +Unspecified +Trinidadian +Subsaharan 
+Speakers +gays +Stonewall +Manors +SunTrust +SouthTrust +foreclosures +foreclosure +Citrix +Commcare +DHL +Beverage +Tenet +FirstService +WalletHub +snowbirds +tens +Elbo +picnicking +Terramar +IMAX +architecturally +Swap +megachurch +mega +Orsdel +Seabreeze +Siegel +riverside +Bugsy +Owls +Catie +Seiler +Gateway +Sunland +Ashe +BCT +Metrobus +Dade +shortfall +JetBlue +airfares +Bahama +Sawgrass +paramedic +hyperbaric +HealthGrades +accomplice +saddler +sabre +scars +Sandhurst +bailed +hooting +singed +Escaping +Pentridge +Suspicion +Glenmore +squatted +Schwerdt +Convict +arbovirus +Alphavirus +gelidus +racehorses +Aedes +aerosols +edema +lymph +neutralizing +Ellyson +Harriman +excursions +recognisably +perils +SSRN +harkened +Ethelbert +stairway +flickering +Keds +Tufeld +disembodied +werewolf +leprechaun +lobster +Jory +alchemist +hydrodynamic +wreaking +havoc +Sharkey +gaslighting +Mister +Riddle +Handed +Lennie +GNP +Crescendo +intercom +Revelation +Madman +Kirkham +annulling +Laval +Translators +Interpreters +Glassco +QWF +Masiyiwa +Tokwe +Mukosi +Ebola +Trusts +Chronic +merit +epidemics +empowering +Undersecretary +Integral +Senokos +Scraping +Thirsty +FDL +Thirlwell +Hell +Finely +Honed +Kai +Wachs +Paderborn +Rolfe +slyly +bandana +pranking +bedlam +soapbox +overdue +speechless +dartboard +drunks +playfully +berates +stakeout +reveled +shoved +deckers +ducking +foils +humiliates +Sorbet +demoralized +apologies +bailing +sabotaging +reenactment +jousting +swipes +disarming +reassures +Reenactment +goads +exonerates +overpowers +goodbyes +prankster +Gaspard +Augustin +veterinary +immoderately +Ancien +Tuileries +Saumur +denatures +gentlest +Stofflet +Charette +Sapinaud +Rasihia +Ammann +Sieglinde +pentathlon +Vardinogiannis +Alam +Shawish +GDF +Ghazalat +TransGlobe +Provins +FRCPSG +FRCPE +pharmacologist +neurotransmissions +Poisons +standardly +Renfrewshire +Hyndland +Drymen +Toxicology +Cwm +Llanthony +Brecon +Beacons +Brough +Gobindopur +Tinsukia 
+Necklace +marnierana +houseplant +Kumail +Jenny +burger +goop +complicating +concocted +remover +dentist +adhesives +Yap +Novocain +peek +Same +Kristen +Schaal +ender +townspeople +cram +Screenrant +Altogether +TheWrap +Meftah +Blida +pent +fisherman +Brinchmann +Cala +Reise +rike +Wihbourg +morgen +drar +Verdens +cookery +Veien +consequential +Jiannan +Weizhe +Zemin +Gong +Nong +Bing +admissions +Zhao +floated +SOE +Oligarchs +Rongji +revalue +renminbi +rungs +liberalization +openness +capitalist +inadequacies +Triffin +diversification +regrettable +bancor +Bretton +astray +reappointment +Jinping +burnish +wean +embracing +pecking +Bruderer +Guatemalan +epiphorellus +Coelopogon +lichenized +Parmeliaceae +marketers +pork +Corrales +Expansion +Manuelitas +Pendaries +Mora +Troll +Expeditions +Coring +Oakenfull +Rickardo +Ways +cameos +Fagen +oxeye +jacks +tital +ops +seagrass +handlines +NRE +switcher +genset +Shensi +Longer +Gayelord +Stopped +Eboli +Guareschi +poached +Pellegrini +Catcher +Flannery +Galassi +Mandel +Chinski +Mitzi +Ileene +Crichton +colophon +Madeleine +Steig +Sachar +Roald +Chau +Tsai +Kok +UW +Fieldhouse +Naser +expenditures +Mozzaffar +encouragements +Arabians +Akkas +Bashi +concessions +Majles +Masumeh +Garter +Pahlavi +Mossadeq +exogenously +Keynesian +Dichotomous +slowdown +financialization +accrue +OECD +Has +Conceptualization +Audion +repeater +Transatlantic +Brock +triode +Rates +profited +nationalization +divestiture +confine +Telstar +PROPERTY +NOT +Telephones +fax +cordless +modems +dialup +Guess +divested +Lucent +reorganizations +Alascom +eying +wager +MediaOne +Metromedia +paging +Whitacre +Telesis +disapproved +RBOCs +RBOC +ticker +Interbrand +reassumed +Interwise +conferencing +Aloha +quell +FLO +Regulators +Lesbian +Defamation +Consumers +consolidating +Yahoo +eBay +hypocritical +divulging +fiberoptic +Iusacell +Ajit +Pai +DVR +Insider +Makan +Delrahim +Trademark +Vrio +Macaulay +Soho +stereo +OPIN +recut +rocksteady +Sings 
+Cantrell +Donny +Osmond +Boaventura +Shallow +Delivering +Kisbarnak +kisbarnaki +Ludovica +Eucharistic +Akademia +Troop +Banathy +Stag +Hungarista +Exteris +forestalled +emissary +Pest +trustworthy +Panzerfaust +trussed +brazenly +unneeded +Veesenmayer +abrogated +Veres +Dalnoki +alliegience +safekeeping +Bullion +Depository +expropriation +Prisoners +Foucarville +POW +Teleki +Harcosok +discouraging +deprivations +Knightly +nullifying +Vladimirovich +Vassilyevna +referent +blocs +CENTO +rapprochement +Extraordinary +Acquisition +degenerated +Khomeini +passport +Kuzichkin +traitor +panicked +Kabul +Keshtmand +submits +Leonov +Prilukovym +immortalized +Vadim +Bakatin +Darreh +Posht +Eslamabad +Sangar +Rasht +Gilan +Handley +Voets +medalled +Cher +Cedarhurst +Valhalla +Kithcart +AOR +Mardones +Anniela +Hitworks +SukkerChok +Pebe +Ashlie +Toyoko +Casiopea +CASIOPEA +Rinaldi +Picked +Vibe +languidly +lubricious +retrotransposon +telomeres +hepatitis +Temin +tumour +murine +leukemia +Renato +replicated +mistaking +synthesizing +polyadenylated +immunodeficiency +annealed +unwound +duplex +polynucleotide +unwinding +degrades +retrotransposons +Telomerase +Valerian +Dolja +retroviral +RNase +synthesizes +replicates +transcribes +proviral +proofreading +proofread +Promega +AMV +unannotated +retrovirus +zidovudine +lamivudine +tenofovir +nevirapine +sidestepping +Sectarian +Insurgency +crackdown +Mujahideen +Shura +Qaeda +Anbar +murderers +Began +Baqouba +ISF +Salah +detaining +IEDs +IED +liters +nitric +EFP +Khalis +Tigris +havens +Jabour +Pak +Belts +Muqtada +Jaysh +Yusufiyah +Owesat +Babil +Loy +KTLE +KTKL +announcers +Erockalypze +Zac +Romeo +Flo +Plies +KYLZ +wattage +HAAT +Vivint +airstaff +TJ +Modeling +airdate +Saxone +severally +Carnoustie +Links +Opeth +Mikael +Tiamat +keyboarder +Westholm +Moonhorse +Roadburn +percussions +Manifest +Iwers +Bassist +Keyboarder +Drillia +longitudinally +siphonal +sinus +Mariann +physiotherapist +clot +Damallsvenskan +eventful 
+niggling +Mwimbi +Ameru +tarmacked +Kathwana +Ruto +assented +earmarking +Constitutuency +Japheth +Kareke +Mbiuki +patrick +george +Murugara +Kindiki +Ikuu +Muthambi +Kajiunduthi +Kirege +Magumoni +Mukuuni +Ndagani +Mzalendo +Kithure +Erastus +Njoka +Micheni +Notably +Principals +Chevreuil +Swansonville +Pittsylvania +accord +Decision +Regions +Persons +retraining +determinate +Paid +Unemployed +Integration +Twinning +Kunther +Linz +Vishelburg +Dera +Rahija +Masori +Mondrani +Habibani +Kalpar +Mareta +Politician +aethiopicum +bunches +Calvinia +Overberg +Melo +Illicit +Narcotic +Psychotropic +ammended +Merxheim +Conservatoire +Niedermeyer +Viardot +Kapellmeister +motets +Mackey +friendships +Alba +Graatz +Sackville +Eccentric +Countach +dashboard +curvaceous +targa +Gallardo +prototypes +CNBC +fiat +Confidential +ETHs +BlockTower +Options +FIX +SMARTS +stablecoin +tokens +underwritten +excessively +Coinbase +Yevgenia +Yakovlevna +Evgeniia +Iakovlevna +Lubjanca +geodetic +SAI +enactment +fr +Background +Superb +grandly +towering +sublimity +lazily +broods +fluttered +outfitting +goldfield +canoe +practicalities +amply +avidly +castigated +mainlanders +infuriated +popped +Helmcken +Musqueam +quarantine +Coquitlam +armoury +offramps +wane +Woolworths +Chilliwack +Vancouverites +rivalling +weddings +Begbie +Salient +Holbrook +Northbank +Hamley +displacing +dockside +antiques +Rarely +Walmart +Fraserview +Labatts +condo +TransLink +Added +Woodwards +townhouse +spilled +Responding +weathered +Robson +Colonist +mainstay +Sargent +Legge +Vagabond +theatricals +Heartaches +Razz +Screaming +Burlesque +stagecraft +Mushtari +Begum +Kushwaha +Barkerville +Ladner +Bradner +Pender +anvil +schoolchildren +maypole +paramedics +Boucher +Massage +conforms +riverfront +deviates +Brunette +expressway +Grandview +Derwent +Annacis +CN +shortline +SRY +midget +Lacrosse +Denman +incarnations +Hyacks +futsal +Swimmers +Bottrill +natives +Alexz +Devin +Dasli +Sangi +Baku +pretaped +Gaither 
+Donovan +claymation +Crista +Claus +undertones +Keegan +situational +cutout +Shaq +Morbidly +Obese +glut +Rejected +MacFarlane +Efren +Prequel +Softstar +wuxia +Separating +subalgebra +Leeson +collage +Lesson +Stack +reveries +quotidian +Zuel +chucking +sunnies +Yeprem +Pilibossian +Hrayr +Luder +Shushan +Rosy +Armen +dare +Mymensingh +Dharm +ensues +Kolkatta +Aghore +Mukhopadhyaya +alia +Dispensation +Chaudhuri +untimely +Bhubanmohan +Sasibhusan +tacit +Sammillan +Nabobidhan +Sivnath +Sammilani +Bhattacharya +Dhamua +distrusted +Fundamental +Dharma +Brahmic +clarifies +Heemraadsplein +niches +renown +Sire +Coit +Leland +scramble +Onion +Dare +Borujerd +Welwyn +Bellman +Moth +Gliding +Licence +Herts +aerobatics +apprentices +subsidised +Moths +Tallis +Combattimento +Herreweghe +Mackerras +Louvre +Acis +Tamino +Gonsalve +Concertgebouw +Vignoles +Gramophone +Stradella +Deceit +Radames +botox +rejuvenation +substandard +irregularities +Nygarts +Dinenage +congratulatory +pileate +trimitic +hyphal +hymenium +hymenial +Fries +caterpillars +Triaxomera +biotechnological +laccase +Bhumibol +Adulyadej +reckoned +scare +RVCs +Mirfield +Meltham +Saddleworth +Admin +RVC +facings +Stanhope +Memorandum +mobilise +Haldane +Reforms +billets +Boulogne +Aubers +phosgene +fruitless +Poelcappelle +Casualties +Ariska +Dukeries +Havre +Ancre +repelling +Bullecourt +Hindbenburg +Cambrai +Bapaume +Creeping +bridgehead +Vaulx +Vraucourt +waded +Selle +sappers +Solesmes +bridgeheads +Clipstone +Consisting +reorganise +Garrison +reorganised +lanyard +sergeants +Plasnewydd +Adamsdown +Wellfield +Rhath +Raath +latinised +Ratae +Bowood +Llandaff +Prichard +Wyburn +Bake +Splott +Trooping +Household +chevrons +mockumentary +fewest +Feb +Myall +sunbathing +Blueys +Boomerang +naturist +Symes +Adenosepalum +subsection +Evvoia +Andros +shady +taproot +obtuse +WOKI +Pirkle +reminders +WKVL +simulcasting +SummitMedia +Kuan +Dynasties +Kingdoms +typifying +Shu +compacted +Strumaria +hardyana +shrubland 
+Magpies +wrist +Winsford +BICC +Bestselling +Organizational +YA +Hypable +skater +Yamato +Amano +Ripsaw +handsaw +Loyalist +ripsaw +shekere +Scrape +Pichab +Kuh +Mareh +Khami +Basht +Kohgiluyeh +Chisel +cheeky +Deseret +superintendents +apostle +Antelope +Norma +Vilhena +Hermannstadt +Clube +Hapoel +Kiryat +Shmona +Beitar +Brownsborough +Abberline +charlatan +complicity +beamed +Korolev +pooling +parametric +reuse +Astronautics +birdwatchers +publicise +Cotinga +Vengeance +Veils +Tremor +Marios +Dalwhinnie +Lochaber +Bathymetrical +Lochs +Kingussie +Pattack +wraps +watercourses +Labhrach +Lochan +Coire +Ardair +Creag +Temeraire +geothermal +Ardverikie +Graeber +hoogiana +Tulipa +ostrowskiana +Tubergen +Sealy +Clos +veined +margined +glossy +stony +Turkestan +zenaidae +Wikispecies +Rott +Schleifer +Pathobiology +Stahl +Fonds +Chemischen +Industrie +uncultivated +biogeochemical +phytoplankton +polysaccharides +heterotrophic +Specially +integrative +ResearcherID +resemblances +sinologist +Chyen +Mu +allergic +Sinology +Couperie +bibliographies +append +Lahaymonnais +Peixotte +philological +Essai +Mots +Langues +deciphering +philology +VIIth +lexical +syllabaries +explanatory +prix +Inscriptions +sinologists +sinological +Kangxi +comparativist +Schlegel +Orientalists +monosyllabic +equated +polysyllabic +Chaldean +Assyriological +ganzhi +fared +refutation +populist +paucity +Brahmi +Kharosthi +Hezemans +Emir +Nizamin +disrepair +Garrus +Vicenia +slurs +victimized +smeared +communicated +shrugs +gestures +Forensic +Ku +Klux +Mafia +McKinnon +niggers +epithets +schoolmates +falsifying +shoplifting +sexualizing +knowingly +Neighbors +Newsday +Markovitz +mistrust +unspeakable +aroused +Sherry +multiculturalists +minimizes +indefinitely +defamation +liable +defamatory +defaulted +Johnnie +Cochran +garnished +invent +Eliot +Spitzer +Waimate +Horseback +Leopard +Locarno +Morrisey +receptionist +Halpern +blossoms +dramatised +barmaid +Maxine +Jeany +Runtown +Headies +mic 
+sensual +Shay +Teyana +Walu +JM +Burwood +Imprisoned +Longbottom +Dapto +Polding +Gipps +exiles +Drummoyne +Farrington +COFL +TAFL +Bucaresti +DW +Utrecht +Huish +rash +marquee +Mirren +Firhill +Academical +OTIS +Illuminati +Aiden +Bira +wishbone +inboard +tub +Godiva +Kieft +Tecalemit +DOHC +crossplane +dynamometer +pannier +Cosworth +MAE +Holbay +EMC +carburetors +Jupp +Burkhardt +participations +Exell +stalks +inflorescences +receptacles +deshisce +ellipsoidal +oblong +Deposit +Guaranty +Adnan +Khashoggi +HKDG +Tetra +Ghanim +NBTE +Alhaji +convocation +hoodlums +Igala +barricaded +vandalized +considerations +seagoers +Outbuildings +chalking +impurities +wrought +resists +galvanic +Degradation +USCG +Masonry +Appropriate +Soaking +misting +weatherproofed +gutter +caulking +decking +Addressing +Proper +lantern +Fresnel +etches +porous +trough +prism +Prisms +conservators +patina +Sankaty +Launches +payloads +Avoiding +lander +InSight +Renovations +reentry +NAVSTAR +ELINT +Edouard +Yvan +syndicates +Tintin +draughtsmanship +Foursquare +Kazemachi +Tabi +Okurimono +Auditor +Arntzen +Willmott +orientated +Apples +MOBO +Apps +Hertsmere +Bushey +Harefield +Hertswood +visualize +MCMA +Tenochtitlan +dikes +recurrent +Porfirio +hinders +Moctezuma +trapping +dispersion +stricter +Metropolitano +Calidad +Monterrey +informatics +Domestic +Metepec +Izcalli +Tlalnepantla +Baz +Degollado +Jaltenco +Escobedo +peripheric +zonas +marginales +ciudades +Tecamachalco +Molinito +Chamapa +Bosque +Zona +Esmeralda +Bosques +Lago +Marginal +Ecatepec +Chalco +Ozumba +Temascalapa +Otumba +Jilotzingo +Juchitepec +Isidro +Fabela +Axapusco +Nopaltepec +Atlautla +Ecatzingo +expressways +Anillo +Mexiquense +Venta +Arco +Norte +segundo +piso +Viaducto +Observatorio +Poniente +ejes +Nezahualcoyotl +Tren +administrations +decentralize +megalopolis +McConaghey +Liaquat +Ghousabad +Maghrib +sweeped +cordoned +Counter +JD +Pepperdine +Toscher +taxing +FTB +Specialization +Probate +CPA +Mnuchin +Vets 
+putrefying +Svanvik +Orust +Myocum +striving +Classes +Hulbert +Ethical +Goes +lied +Mulhearn +Pillsbury +Winthrop +Pittman +Foglietta +Sandusky +Folwell +Scull +Harman +Audrius +Barzdukas +Paykino +Velikoustyugsky +FV +Rensing +Greuther +Overman +Dede +Complutense +Isla +Generales +Vernor +renomination +subjecting +Barbier +suffused +islanders +Downes +LeBarbier +secreto +domadora +fondo +aljibe +Cuentos +Juventud +Injuria +usufruct +Quinta +materialized +Rosacruz +Lenziemill +Luggie +Blairlinn +Abellio +ScotRail +Strathclyde +Grahamston +DMU +EMU +Springburn +Dalmuir +Sumohadiwidjojo +Subud +disseminate +Susila +Budhi +MSF +Hud +Lonny +Foote +Aydin +oglu +Ibrahimov +Erling +Brustad +Folldal +Fenny +Ollivant +Gallifrey +Esher +PTC +Writersroom +CBeebies +Jetters +Moshi +Eena +Meena +Deeka +dramaNever +primetime +Shadows +collectionDoctor +Jar +Magazines +Webtoon +Chobot +Sportkanalen +Kinnevik +Vitagraph +complain +ingenue +Fireside +scullery +maids +boxoffice +Melay +Boutros +Aotearoa +caucused +contesting +Ratepayers +Mayoral +setback +Hobson +wasteful +infighting +Northey +Denise +Insane +Dorothea +Morristown +amusement +sodding +chronically +restless +oversupply +bedded +hallways +soiled +easing +crowding +infirmaries +electroconvulsive +hydrotherapy +Britton +exerts +afflictions +hydrotherapeutic +cataloging +Annexes +Dormitory +Voorhees +assimilate +wiring +redirection +landscaper +florist +behemoth +stipulation +condos +Financing +ceremonially +Cafeteria +rinks +curative +rugs +excitable +Trenton +Buttolph +asylums +gneiss +Preservationists +Northstar +memorialize +Abkhaz +Anatoly +Kulikov +Prosecutor +Dordogne +Parc +naturel +Pardulphus +ribiera +Puy +topographically +Bos +Variscan +Massif +Neoproterozoic +micaschists +Parautochthonous +Micaschist +Chantres +Pennsylvanian +leucogranite +granodiorite +Granodiorite +leptynites +flatlying +Hettangian +sandstones +Sinemurian +cryptocrystalline +bioclastic +oolithic +Bajocian +clayey +conglomeratic +Eocene 
+colluvial +gravelly +terrasses +Mindel +staage +mantelling +hillslopes +gelifraction +paragneisses +tourmaline +Manganese +nontronite +Neuil +pyromorphite +Vaugoubert +pensioner +Shorne +roundarm +rounders +Bendigo +Transdev +Dynastic +Philopator +Heliodorus +kingship +yearned +Syrians +destabilise +Cappadocia +Eumenes +Attalus +conspiring +watchful +Cilician +Apphus +Pergamon +Zenophanes +Berytus +Sidetes +Pamphylia +claimant +Hellenized +Philistine +besieging +Accaron +Judeans +expelling +Ashdod +looming +Parthians +Parthian +triumphal +Herakles +Bisitun +Eleutheros +sooner +Ammonius +Denied +reintegration +Hierax +despairing +benevolently +Josephus +Amanus +Diodorus +Heliades +Cretans +Alexandrian +epitomised +Cretan +Larissa +Agema +Chalcis +Mallus +Coracesium +wavered +Ascalon +Zur +regal +epiphet +Macedon +Boeotian +Konos +boldness +Luring +Sarpedon +journeyed +encroachment +detested +legitimizing +Dor +Orthosia +Molon +Timarchus +Usurper +impiety +Antiochenes +Antje +Curie +Freie +Wolfson +forecasts +predictability +decadal +Contributing +Intergovernmental +Expert +Oshkosh +Airshow +Andreaskerk +Competitions +UDN +Jago +Supergroup +Milica +Govor +Zvezde +Arlinton +Millonarios +Dalys +radiochemist +isotopes +Eastbourne +Aberystwyth +Royds +Pirret +Hitchins +atom +Fajans +Radium +Transmutation +syllabus +popularising +biplanes +imagines +surpluses +macroeconomic +bureaus +exhaustible +Elders +enslave +Hebraic +Abolish +Drown +Problem +soddyite +Winifred +Ischnodoris +sigalota +Autostichidae +plical +cloudy +invariant +Buhler +Reichstein +generality +aij +supremum +Launceston +Basslink +Alinta +Bowls +interstate +Mooney +Museums +militaria +JNET +dolomitization +Widman +Thymopoiesis +inductive +immunologically +matures +reactivity +Positive +apoptosis +Lymphoid +Progenitors +ELPs +Precursors +Molecules +ETP +subcapsular +DN +potentials +incapable +maturing +downregulation +upregulation +downregulate +Aire +deletion +deletes +Thymus +Autoimmune +DiGeorge 
+oncogenic +dendritic +Mature +Erinn +Zandy +Hartig +upped +Flame +Holby +Newsreaders +premiering +Margolis +Colville +Wemyss +Hauptdolomit +Rainstick +Circles +Bovine +Cuts +Exu +Nazares +Technicians +Circo +Cellar +antecedent +Commissary +hath +offal +invalids +intolerable +stench +rededication +Doynel +Dodds +Cevallos +Reelection +Carmen +Musoma +Pontefract +Dioceses +resistivity +quantization +nonzero +Got +Leno +juggling +Guernsey +Lore +CityHopper +ageing +Deliveries +Het +Financieele +Acro +BOC +drooped +Fondly +Dolphin +tendering +winglet +aerodynamic +Recruitment +Seasonal +IATA +crewed +BBJ +refreshments +Rows +legroom +quicker +fluctuates +incognito +copilot +cityhopper +Gaiety +Shakespearian +Manhunt +Ewing +Mikel +Yom +Kippur +Muki +undetected +Palmachim +Glock +Mauser +Litani +Mole +Addis +Ababa +disguises +Duvdevan +Grapes +Yussef +militant +Sharp +Delivered +Baalbek +Sheik +Hamas +IDF +Nahal +Downhill +skiers +Shirasawa +Gifu +sherwani +khara +dupatta +salwar +burqa +hijab +Jandial +Bhir +Dharmarajika +Wali +dispensary +Pind +Dadan +UET +TITE +competency +Chakwal +CISCO +ASIC +DSP +undergrad +Cantt +Masjid +automaker +Cromwell +stressing +overreached +graying +Goldwater +Scranton +Javits +forthrightly +correcting +Germond +beset +deflected +billboard +bemused +morals +Thinks +spate +anybody +Decrying +disavowed +Hoff +connotations +Manchurian +rinse +Diggers +greeting +presaged +embittered +stalking +moderates +dismay +Spiro +memorably +Flyweight +Manuwa +Moras +Aldana +Carmouche +Antonina +Kish +Pudilova +psyched +buzzer +leaped +boos +denigrating +CKE +layup +rattled +roar +iced +fatigued +Ostertag +Longley +Buechler +fadeaway +inbound +Blackhawks +Guokas +USF +Koret +SFSU +wellness +Stebbins +redshift +OASIS +SOAP +UDP +Arendal +Trygve +Stray +Wincenty +Wronki +Muzyczne +kompozycji +Gentile +darkness +unassisted +categorisations +entrapment +finning +flutter +buoyancy +hooking +snagging +grapnel +anchorline +DSMB +ascents +reels +popularised 
+eschews +memorising +disorientation +shallower +Scuba +Instructors +TDI +ANDI +potentialities +ditigal +wrecksite +thrive +admiralty +Internationally +Unesco +Revenge +Assets +Terms +NHRA +SAHRA +contravening +Excise +Inspectors +interfered +Recife +Panhandle +navigational +Shoals +foundered +silhouette +Scilly +Lying +Frederiksted +Subic +Coolidge +Inket +Scapa +Flow +popularisation +Trimix +Lermontov +Corveta +Ipiranga +Chemel +Shelton +plenitude +Oakham +Missioner +Amicus +Rigby +Islamists +canonical +shire +Raleigh +Yeomen +Longs +Packington +gilt +contrivances +disinherited +Renfrew +Alphonsa +comarca +Osona +Olinda +Mudd +Expository +Mimno +IEEE +Areospace +Maulvi +Khasias +Mandi +baptisms +Fr +Burmacherra +paish +Voorde +Parishes +Mugaipar +oblates +Immaculate +Lokhipur +Jaflong +Borolekha +Srimongol +hospitable +Ngom +Priso +sprinting +governmentality +Gauche +unfolded +Massacring +imploded +Invited +Defert +juridical +Nietzsche +Burntisland +Arbroath +Broughty +duplicating +repay +nearer +regretted +deviate +Assent +reinstating +levelling +LMS +Manuscripts +Gibbula +loculosa +feeble +postnuclear +sublamellar +intercalated +keels +retractively +angulated +supraperipheral +stout +parietal +False +Sammo +Huang +offends +Feeling +shaky +gunpoint +saddened +riddling +Calmly +Chan +Jacky +Sheng +Shou +Gorka +Patricio +Rona +Arenas +Getxo +Aragon +Bubi +sentimental +Asghar +TWAS +Najafi +Maclovio +Abbotts +precept +evaluates +Langport +cemeteries +Carantoc +Dindraithou +hillfort +byrig +Gordano +Limebreach +Dobunni +Deposits +Archaeologist +Cunliffe +Claudius +Gothicus +Clevedon +kururt +Baltiysk +Lastochka +Nordbahnhof +Neukuhren +Warnicken +equipments +Ataul +RG +Optics +Shatma +Patharia +Chotolekha +Faujdarhat +Shamoeeki +Patrika +Biborthon +Bangladesher +Shera +Bigyani +Medhabi +Manusher +Golpo +cite +Costs +Heenan +Alarming +Challenges +Sarakhampittayakhom +Sarakham +Amphoe +Mathayom +Positaram +coeducational +Macrobrachium +agwi +Cooch +Examination 
+Deeping +Prenton +Woking +Pontones +Sistema +Artes +Psychopathology +INBAL +Frida +Rom +Legionnaire +Gale +Kinavesa +Gwendolyn +Juhas +Slash +Rambo +Stallone +Intrusion +Kinevesa +Tornado +Margheriti +Giancarlo +Prete +Ferdinando +Baldi +Warbus +Damn +Doomsdayer +Sarna +Transnational +Gallegos +Marcos +Obero +vacillated +marginality +visitng +neoliberal +firestorm +authoritarianism +Triepeolus +verbesinae +subelliptical +basally +Coloration +venter +custard +premontane +chytrid +Batrachochytrium +dendrobatidis +Chagres +Keihin +Akakusa +Yokosuka +Addicted +Cleaners +booked +Customers +fridge +chargeable +iPlayer +Farringdon +Londoners +Housekeepers +centralises +cleans +Smarta +Impressive +sitcoms +Eunhyuk +Shindong +Chuncheon +USO +panelists +Tak +partaking +Lamble +Virbia +xanthopleura +Lokomotivfabrik +StEG +luggage +compartment +Pippins +Starkel +Melter +Netzel +Rader +Suds +Mensor +Ritner +Doclean +Dukljanska +akademija +nauka +Dioclitiana +Scientiarum +CANU +dominantly +Gambler +Newfield +Margia +Aileen +moneymakers +Gritzko +Tamara +Vidor +Roster +Har +symphonic +LeAnn +Gladys +Frederica +Idina +Intimate +masterclasses +Quartets +Composers +headlining +LaKisha +arias +Chorus +Celena +Sishel +Babidge +Movies +Mythic +Connection +Songbook +Tappara +Halmay +Daniels +Dundonians +Maidens +Strathmartine +Boswells +Templeton +Dundonian +McCluskey +tenement +esteemed +Witbread +Respect +Galloway +Masalski +Mykolas +Juozapas +voivode +Mscislaw +Convocation +chipset +Programmers +diagnosing +coprocessor +compatibles +SMM +emulation +ACIA +RESET +SUN +gamers +peripherals +Pressing +screenshots +rackmount +DISCiPLE +blanking +vblank +Shui +Taijitu +Fencing +hypophyseal +pars +infundibulum +pituitary +carotid +anthropogenic +glaciology +glacials +methanotrophs +subglacial +Greenhouse +millennial +pacemaker +Milankovitch +receded +emitted +Increased +Dyke +Cycles +radiative +proglacial +bergs +abrupt +eruptions +subaerial +volcanism +ka +comparatively +proxy +Dioxide 
+sequestered +erosional +Studying +deglacialized +Abdelhak +Madinda +Harouna +Doula +Amadou +Senegal +Sellas +Lito +GEC +DIL +amplifier +headphones +Tuned +superheterodyne +pin +Diffusion +CDI +transistor +oscillator +mixer +AGC +shunt +regulator +reacquired +stats +Rushing +LisaRaye +Luguelin +Demish +Bralon +backstretch +Armagomphus +Careful +Possibly +bustard +pinkish +tarsus +leucistic +Pagosa +Magness +Heber +watchers +Introductions +attractiveness +dabbling +Feeding +incubates +clump +sedges +bustards +fledging +flightless +Mortality +Predators +raven +raccoon +wolverine +coyote +wolves +otter +snapping +gull +horned +owl +fox +mink +bobcat +coyotes +bobbing +Predation +substantiated +Photos +swanskins +quills +ingesting +Surprising +Grinnell +Banko +Lumsden +Mothe +summering +massed +Swans +reintroduce +impediment +Eurasian +Riva +Magazin +Maja +Blagdan +Hrvatska +radiotelevizija +Ballroom +Kvarner +Opatija +Ljudevit +lacunae +Opernwelt +Schauspiel +Tanztheater +MoKS +Modellversuch +Neuenfels +Margit +Marcks +Wagenfeld +Stadtbibliothek +Kunsthalle +Goetheplatz +playhouse +Reopened +Haus +foyer +Brauhauskeller +vaults +Pauli +Akteure +Macbeth +Bayerischer +Opernhaus +Jahres +Komische +Toleranzpreis +KulturForum +Ludger +Gegen +Nadi +Kartaadipoetra +Susilo +Bambang +Yudhoyono +Golkar +Erna +Obimpeh +PNDC +Meissen +Wartburg +Tuta +Burgwerben +Stendal +Spremberg +Margraviate +Ascania +Lucka +Berthold +Marburg +Angelus +Luisenfriedhof +commemoration +Catherina +Weichenhan +jedem +Sonnenuntergange +ich +verwundet +zum +Wellness +CFB +Gagetown +Iskra +Velinova +Duc +Enghien +Kinch +Manresa +Fumiko +chefs +maitre +Varenne +Globes +Cristal +uninspired +menus +menu +asparagus +celeriac +alluvia +traction +hens +Spratt +gastronomic +collages +Macron +siamenisis +carp +minnow +peat +oxygastroides +prahok +Klong +Mekong +Tonle +Tapi +discontinuing +Albertus +Webster +Elmira +Behrend +CUNYAC +GNAC +Molten +LNAH +Zhigalovsky +Irkutsk +Lena +monsoonal +Dolliver +legislators 
+Balkan +disloyal +Pondatti +Nallava +Kushboo +Kovai +Sathyapriya +Jayamala +Deva +rapes +berserk +seller +Uthamarasa +transpires +Pulamaipithan +Mallige +Hoove +Kishor +Shashikumar +Rupini +Hamsalekha +Udaya +Senthoora +Parcinski +remeasured +certifying +Schmidt +Toms +GPRA +Houari +Liamine +Henryk +Ecole +Mimodrame +sincerity +Woli +choreographed +Niedziakowski +Scena +Kameralna +choreograph +Profundity +curva +Pharsalia +Lucan +Annaeus +Taur +Matan +Fretilin +Timorese +Xanana +Rayhan +Biruni +sharh +Cisleithania +Franziska +empires +Reischrat +powerhouses +Graz +Metternich +Schoss +Herrnau +Clary +Aldringen +modernizer +Beinecke +DFC +Disparity +daunts +Mahlower +Heinersdorf +Zehlendorf +landowners +Industriebahn +Reichsbahn +quadruplication +fares +Welthauptstadt +Germania +Biesenhorst +Kaulsdorf +Tempelhof +Hilfsrangierbahnhof +thinned +resumption +Rathenow +Tlo +signalbox +precincts +Sigridshorst +reconnected +ideologies +grandchildren +Toluids +Chagatayid +Tumeds +Borokhula +Parwan +Heda +Dolqolqu +Jalut +Hulegu +Uls +preamble +edict +Yehe +Monggul +Khabul +Ambaghai +retaliated +Jurchen +parched +steppes +wettest +Toghrul +Mongolic +Khereid +Jamukha +Jadran +Merkit +Naimans +insolent +khagan +Yekhe +Ulus +Kurultai +Gur +Tayang +Zasag +Tanguts +Zabiha +Kashrut +Shehita +Referring +pacified +thereupon +Musuluman +Huihui +Zhuhu +Qiu +Chuji +Caspian +subjugate +Subutai +Kaifeng +Tangut +Chagan +Xiao +Zhala +Tianze +Tianxiang +Tumen +Submitted +shah +Khwarizmian +darughachis +Gaeseong +Ganghwa +encroaching +principality +Principality +Alania +Maghas +Transcaucasia +Plano +journeying +worsened +Buri +stationing +kheshig +Koreans +Templar +Sajo +destabilized +Arghun +Kilij +Assassins +Abbasids +recuperate +Suspicious +Khoja +Naku +Sartaq +Kodoe +Aral +throng +Kadan +Shiremun +falconer +cleverly +edicts +commoners +outflank +Iyeku +Duan +Xingzhi +Qoridai +Uryankhadai +kurultais +Chagataids +menace +Nizari +Khurshah +Girdkuh +merciless +Abbasid +Ayyubids +Armenians 
+Seljuks +Mayafarriqin +Dissension +Mughan +resupply +Wuchang +noyans +blockaded +Ariqboqe +Shangdu +Xiangyang +sinicize +Champa +statuses +Nogai +Chatagai +Khagans +Patterns +adorning +transplanted +Almaligh +saluting +holiness +distracting +Ilkhans +Struggling +Kobeleg +Zawkhan +Ayurbarwada +Buyantu +Buqa +enthroned +Chapar +Ilkhanid +Ragibagh +Kypchak +lavish +mollify +Bahatur +Qasarid +Togha +Georgians +Eretna +Anatolia +Plague +warlords +Janibeg +reasserted +Demanding +uluses +Berdibek +unbacked +hyperinflationary +Gongmin +empress +Situ +Changchub +Gyaltsen +Timur +Timurid +Yarkent +Altishahr +lances +bombardier +catapults +Skillful +couriers +inculcated +nerge +Mohi +Khwarezmshah +seapower +seaborne +seas +Crusader +Penalties +Cities +Kurultais +Khitans +curtail +Manichaeism +Nestorian +scribes +ingenious +rested +relaying +speediest +paiza +Yam +dogsled +yam +qirad +commenda +uncoined +tradable +ambassadors +authorization +Rabban +Sauma +venturing +Khanbaliq +Policies +sowing +modernize +Imperialists +Shogunists +Gatling +utilises +Warscape +Similarly +Nepela +NRW +Coomes +Lanaghan +Robynne +Richview +Cascade +Dewdney +Callaway +Tweedsmuir +Hamber +Hozameen +midsummer +Rhododendron +Pinewoods +Blackwall +chairlifts +RV +Doppelmayr +Loon +Rentals +Silverdaisy +Okanagan +Nicomen +Rull +Corts +Valencianes +Mustapha +Afandi +Bereza +Kartuska +fascists +Narodowa +Organizacja +Pawiak +Livezeanu +AGM +medaled +comproed +appoints +registrations +Merkley +Offense +Superclinic +Superclinics +Caravan +Retreats +Conductor +evaluations +Qualifier +Eliopoulos +Marys +Abner +recollections +calf +Fergie +Pitcher +Charrette +turbulent +retrospectively +isometrically +Whenever +shadowrunners +retires +retrieves +Armitage +gunned +vulpine +medics +morgue +shapeshifter +hacking +courier +malevolent +Aneki +dragon +mastermind +timeframe +Aspects +Kamenek +indignation +cesspit +creatively +staffers +uncompressed +Innovative +moody +somber +prying +underexposed +dystopian +Bandah 
+sadly +GamesRadar +rebooted +BlueSky +SNE +Shadownrun +Schemes +Kickstarter +NPC +Tunis +Odeh +Nahli +Runway +chests +Misr +garbled +Kimblery +Senecan +Kimberly +Rusden +riotous +ribald +patapyhsical +superpower +firebox +Schenectady +appliances +styling +shroud +Kantola +forte +preferential +Mohawks +Northerns +labored +Steamtown +Paragon +scullin +Niagra +windup +Prototypes +tourer +ACO +Interlagos +Sebring +IMSA +motorsport +LMP +Bei +Cowry +bioinformatics +relational +Yerong +Boree +TrainLink +XPT +grappled +Caprivi +Reichstag +swivel +Mykola +Karapyshi +Kievotdelstroy +Polygraphic +Fedorova +Bulasheva +Riverkings +Hosts +recap +Cheerleader +Interview +Ginza +Ihei +Shinjuku +Osaka +Travers +Hinds +Nascot +Duel +Rounding +Yuko +Trois +femmes +flics +Solanum +muricatum +honeydew +nightshades +tomato +eggplant +domestication +domesticated +archaeologically +pulpy +chroniclers +freeze +replant +rooting +pruned +Seedlings +intolerant +tomatillos +Longotoma +Distal +Ripe +Delicate +Flavin +adenine +dinucleotide +Coenzyme +mechanistically +metabolizing +acetyl +thioesters +homotetramer +tetramer +monomers +dimer +dimers +steroid +heterotetramic +deprotonates +Hydrogen +ribityl +pKa +hydrophobic +dimethylbenzene +oxidize +signifying +intolerance +metabolize +Intolerance +hypoglycemia +Sudden +Lysine +Glutamate +lysine +compromising +mutated +Highfield +Lilian +Girton +Bodleian +stridency +Lectures +Festschrift +Essays +medievalist +fastidious +elegance +austere +loyalties +dissociated +Ducos +Gunton +Elphin +Ardagh +Killaloe +Armagh +USPG +Yotam +afflicted +tzaraath +Albright +genealogy +archeologist +Glueck +Eilat +wadi +officered +permeate +prophets +Israelite +profuse +Assyria +Rezin +unacquainted +Nal +regnal +calendars +Scriptural +antiquities +WQVE +Iray +censor +codirectional +headstones +perspicuousness +connectors +oligarchism +depolarisation +crispiest +woofed +upstarts +waiter +unselfconfident +serfdoms +unwearyingly +surmise +nonindustrialized 
+hardheartedly +needlelike +crabweed +Curryville +embanked +teashop +queerer +yardage +potableness +unnodding +tetrapods +unamazedly +flintwork +abashment +earhole +tuts +peddles +subclassification +kinaesthesias +semimetallic +scaffoldage +Jocelin +subirrigation +grotto +megalithic +blurts +pinstriped +hyperglycemic +permuted +untwirl +fifthly +reunify +unthoughtfully +alchemic +crenelled +unimmured +diagrammatically +preception +unadjusted +quoit +lifetimes +mineralizable +recertifies +impunity +sifting +ploughboy +fatherly +perforator +nightmen +smoothen +poignantly +foolishness +questionings +acceptances +scatterbrains +unpicturable +reacquainting +unexemplified +effused +propellor +conceptualists +hares +pamphletage +insets +disaffectation +broadcasted +disabler +unmedicated +unconverged +nestles +Danielson +earthling +urbanizes +reapportions +Abyssinian +populists +honing +belligerency +industrious +entombment +nonrealistic +reprovable +hopscotch +faiths +parabolized +unsimilar +seditionists +Luthersville +federalists +puller +exalt +elocutions +Kauffman +unartistic +costars +draughtman +decently +wobbling +dejected +oscillational +nakedness +curmudgeon +unpourable +fabricators +demonetized +needlework +serialist +squeezing +chitchatting +remortgages +garishly +pollens +swordmanship +intriguer +emblaze +repealability +unrationalizing +chancing +mistrusting +Walterville +beholder +pontificates +nondeclaratively +Calhoun +duality +preadjustment +parlances +gunstocks +emaciating +interknotted +leastways +Episcopally +seagull +metropolitancy +associational +Natassia +culled +emblazes +Capulet +untinged +sustainer +northernness +disputations +semiconduction +unresponsive +aristocratically +aggravatingly +rooters +retraversing +convects +discards +reinflamed +postcordial +rejuvenations +untruthfulness +antagonizing +monetizes +sportier +thighbone +fatalness +placabilty +sleuth +condemnations +soulfully +rebate +unbiasedly +admensuration +trounced +cavelike 
+innoculate +patronisable +gusto +heralds +bipeds +estrangement +indiscriminatory +brassiere +sparkingly +groggier +nonexculpatory +agrammatical +unsorted +caravaner +MERS +slumberland +reembodying +brainwasher +nonperformer +idealities +unwrinkles +widthways +odious +dullness +tautness +ICB +resensitize +prewrapped +respites +textuality +modularizes +entailed +virginity +pinnings +agonizing +Bowden +beeping +reassemblies +rancors +repaginated +Johnsten +unvoyaging +remedying +Holdenville +satisfyingly +ferried +Jacksonburg +quaffs +coaxed +uglier +designable +Hallstadt +centralizing +transmittance +dissatisfactions +unboasting +numbest +unsurmised +insubstantiation +unapocryphal +scuffle +unforgeable +Brewerton +Byrd +hawkweed +duelling +nonsuited +preannouncement +regrouping +undersides +unconvincing +commensurability +Parkston +wittier +Clearbrook +conjectures +abysmal +boondoggled +bedeck +espouses +MSPHE +hectoringly +feedstuffs +shacklers +maulers +therewith +doggone +fazes +Stearne +disheartened +Reichsmark +untrain +repatronizing +reproposed +Corby +overheats +sanctimonial +quarreller +outserved +Crestline +ghoul +untransferred +Colburn +pseudobiological +Bevin +nontentatively +dispersers +gelatinoid +populationist +taperingly +biotherapy +Liggitt +roughen +unamiably +housesitting +sucroses +merengues +nonflexibly +reevacuation +undecadently +evaporation +Birchard +placebos +swooshes +occlusions +maloccluded +slur +nonadvocate +unabatedly +nondisruptive +maoists +Talcott +unrestorative +countercharm +Balder +subverted +recompression +untidily +corkscrewed +Leesen +Guinn +nonperjured +sweatproof +Lowder +Weatherby +unfeigning +amateurish +Dieball +seismoscope +Semite +forewarningly +obsequial +vineless +biostatic +conflation +shelterer +unborrowed +pressurizing +hotpot +burgling +undepicted +readaptive +Lilith +subfrontal +electivism +redictated +interrer +chagrining +LeeAnn +overbrush +dungaree +eclipsing +reaccrediting +refabrication +repracticing 
+stoutest +rebating +subcommander +Carolynn +grubbier +propranolol +rebathed +dualize +regroups +screener +Arabela +recallers +subnets +muesli +Anchorville +Kirtland +catholicized +nonbotanical +reglossing +menstruated +pillions +trowelful +Montgomeryshire +celebrative +strapper +Winterville +newsgroup +cryptography +confidentially +flatboats +insatiety +EUNET +distained +unwordably +translocated +revaccinating +Jordain +regauge +nontreasonable +unreliably +misadjusting +Scottsburg +Andee +flashers +annotators +apolitically +overexcites +uncharging +diazepam +buzzword +resurrects +reapportion +footbridges +elevatory +respirations +lopsidedness +reattach +martyrdoms +overgarnish +accentuating diff --git a/conf/ocr_en/en_character.csv b/conf/ocr_en/en_character.csv new file mode 100644 index 0000000..f7970ce --- /dev/null +++ b/conf/ocr_en/en_character.csv @@ -0,0 +1 @@ +0123456789!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ €ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz diff --git a/conf/pannel/labelnames.json b/conf/pannel/labelnames.json new file mode 100644 index 0000000..0f17455 --- /dev/null +++ b/conf/pannel/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":[ "光伏板","覆盖物","裂缝" ] +} \ No newline at end of file diff --git a/conf/pannel/para.json b/conf/pannel/para.json new file mode 100644 index 0000000..ba3f563 --- /dev/null +++ b/conf/pannel/para.json @@ -0,0 +1,6 @@ +{ + "post_process":{ + "name":"post_process","conf_thres":0.5,"iou_thres":0.25,"classes":3, + "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } +} \ No newline at end of file diff --git a/conf/para.json b/conf/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ 
"name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/pedestrian/labelnames.json b/conf/pedestrian/labelnames.json new file mode 100644 index 0000000..2a70b7d --- /dev/null +++ b/conf/pedestrian/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["行人"], + "labelIndexs":["SL01"] +} diff --git a/conf/pedestrian/para.json b/conf/pedestrian/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/pedestrian/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/platech.ttf b/conf/platech.ttf new file mode 100644 index 0000000..d66a970 Binary files /dev/null and b/conf/platech.ttf differ diff --git a/conf/pothole/labelnames.json b/conf/pothole/labelnames.json new file mode 100755 index 0000000..f193b60 --- /dev/null +++ b/conf/pothole/labelnames.json @@ -0,0 +1,6 @@ +{ + + "labelnames":["坑槽"], + "labelnames_实际":["坑槽"], + "labelIndexs":["SL01" ] +} diff --git a/conf/pothole/para.json b/conf/pothole/para.json new file mode 100755 index 0000000..424b747 --- /dev/null +++ b/conf/pothole/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ + "name":"post_process","conf_thres":0.25,"iou_thres":0.25,"classes":9, + "rainbows":[ 
[0,0,255],[0,255,0],[255,0,0],[255,0,0],[255,255,0],[255,0,0],[255,0,127],[255,0,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + } + + +} diff --git a/conf/river/labelnames.json b/conf/river/labelnames.json new file mode 100644 index 0000000..d9f18c0 --- /dev/null +++ b/conf/river/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames":["排口","水生植被","其它","漂浮物","污染排口","菜地","违建","岸坡垃圾"], + "labelIndexs":["SL04","SL011","SL013","SL001","SL001","SL002","SL003","SL004" ], + "labelOrders":[0,1,2,3,4,5,6,7] +} diff --git a/conf/river/para.json b/conf/river/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/river/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/river2/labelnames.json b/conf/river2/labelnames.json new file mode 100755 index 0000000..4a49a41 --- /dev/null +++ b/conf/river2/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames":[ "漂浮物","垃圾","排口","非法建筑","非法种植","水生植物","游泳人员","钓鱼人员","船只","蓝藻"] , + "labelIndexs":[ "SL04","SL05","SL06","SL07","SL08","SL09","SL10","SL11","SL12","SL13" ], + "labelOrders":[0,1,2,3,4,5,6,7,8,9] +} diff --git a/conf/river2/para.json b/conf/river2/para.json new file mode 100755 index 0000000..9e76aac --- /dev/null +++ b/conf/river2/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.3,"ovlap_thres_crossCategory":0.65,"classes":5,"rainbows":[ 
[0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/riverT/labelnames.json b/conf/riverT/labelnames.json new file mode 100755 index 0000000..4a49a41 --- /dev/null +++ b/conf/riverT/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames":[ "漂浮物","垃圾","排口","非法建筑","非法种植","水生植物","游泳人员","钓鱼人员","船只","蓝藻"] , + "labelIndexs":[ "SL04","SL05","SL06","SL07","SL08","SL09","SL10","SL11","SL12","SL13" ], + "labelOrders":[0,1,2,3,4,5,6,7,8,9] +} diff --git a/conf/riverT/para.json b/conf/riverT/para.json new file mode 100755 index 0000000..9e76aac --- /dev/null +++ b/conf/riverT/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.3,"ovlap_thres_crossCategory":0.65,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/road/labelnames.json b/conf/road/labelnames.json new file mode 100644 index 0000000..b9a57f7 --- /dev/null +++ b/conf/road/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames_实际":["纵向裂缝","横向裂缝","修补","网状裂纹","坑槽","块状裂纹","积水"], + "labelnames":["裂缝","裂缝","修补","裂缝","坑槽","裂缝","积水"], + "labelIndexs":["SL030","SL031","SL032","SL033","SL034","SL035","SL036"] +} diff --git a/conf/road/para.json b/conf/road/para.json new file mode 100644 index 0000000..dfe9cbe --- /dev/null +++ b/conf/road/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5, + 
"rainbows":[[0,0,255],[0,0,255],[255,0,0],[0,0,255],[255,255,0],[0,0,255],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + + } + + +} diff --git a/conf/rubbish/labelnames.json b/conf/rubbish/labelnames.json new file mode 100755 index 0000000..32b9631 --- /dev/null +++ b/conf/rubbish/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["建筑垃圾","白色垃圾","其他垃圾"] +} diff --git a/conf/rubbish/para.json b/conf/rubbish/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/rubbish/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/ship/labelnames.json b/conf/ship/labelnames.json new file mode 100755 index 0000000..d7b2fb2 --- /dev/null +++ b/conf/ship/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["船只"], + "labelIndexs":["SL01"] +} diff --git a/conf/ship/para.json b/conf/ship/para.json new file mode 100755 index 0000000..7808956 --- /dev/null +++ b/conf/ship/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/ship2/class_dict.csv b/conf/ship2/class_dict.csv new file mode 100644 index 0000000..829e2d7 --- /dev/null +++ b/conf/ship2/class_dict.csv @@ -0,0 +1,4 @@ +name,r,g,b,cls +0,0,0,0,bg +1,128,0,0,road +2,0,128,0,vehicle diff --git 
a/conf/ship2/labelnames.json b/conf/ship2/labelnames.json new file mode 100644 index 0000000..9081b53 --- /dev/null +++ b/conf/ship2/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames_实际":[ "0","1","2","3","4","5","6","7","8","9","10","11","12","13","boat" ], + "labelnames":[ "0","1","2","3","4","5","6","7","8","9","10","11","12","13","船只" ], + "labelIndexs":["SL050", "SL051", "SL052", "SL053", "SL054", "SL055", "SL056", "SL057", "SL058", "SL059", "SL060", "SL061", "SL062","SL063", "SL064" ] +} diff --git a/conf/ship2/para.json b/conf/ship2/para.json new file mode 100644 index 0000000..dfe9cbe --- /dev/null +++ b/conf/ship2/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5, + "rainbows":[[0,0,255],[0,0,255],[255,0,0],[0,0,255],[255,255,0],[0,0,255],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + + } + + +} diff --git a/conf/smartSite/labelnames.json b/conf/smartSite/labelnames.json new file mode 100755 index 0000000..f1a8600 --- /dev/null +++ b/conf/smartSite/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":[ "工人","塔式起重机","悬臂","起重机","压路机","推土机","挖掘机","卡车","装载机","泵车","混凝土搅拌车","打桩","其他车辆" ], + "labelIndexs":["SL041", "SL042","SL043","SL044","SL045","SL046","SL047","SL048","SL049","SL050","SL051","SL052","SL053" ] +} diff --git a/conf/smartSite/para.json b/conf/smartSite/para.json new file mode 100644 index 0000000..64ea004 --- /dev/null +++ b/conf/smartSite/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":13,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git 
a/conf/smogfire/labelnames.json b/conf/smogfire/labelnames.json new file mode 100644 index 0000000..07167c3 --- /dev/null +++ b/conf/smogfire/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["火焰","烟雾"], + "labelIndexs":["SL01","SL02"] +} diff --git a/conf/smogfire/para.json b/conf/smogfire/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/smogfire/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/conf/trafficAccident/class_dict.csv b/conf/trafficAccident/class_dict.csv new file mode 100644 index 0000000..829e2d7 --- /dev/null +++ b/conf/trafficAccident/class_dict.csv @@ -0,0 +1,4 @@ +name,r,g,b,cls +0,0,0,0,bg +1,128,0,0,road +2,0,128,0,vehicle diff --git a/conf/trafficAccident/labelnames.json b/conf/trafficAccident/labelnames.json new file mode 100644 index 0000000..2bddacf --- /dev/null +++ b/conf/trafficAccident/labelnames.json @@ -0,0 +1,5 @@ +{ + "labelnames_实际":["事故"], + "labelnames":["事故"], + "labelIndexs":["SL040"] +} diff --git a/conf/trafficAccident/para.json b/conf/trafficAccident/para.json new file mode 100644 index 0000000..dfe9cbe --- /dev/null +++ b/conf/trafficAccident/para.json @@ -0,0 +1,10 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5, + "rainbows":[[0,0,255],[0,0,255],[255,0,0],[0,0,255],[255,255,0],[0,0,255],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + + } + + +} diff --git a/conf/vehicle/labelnames.json b/conf/vehicle/labelnames.json new file mode 100644 index 0000000..0d89570 --- /dev/null 
+++ b/conf/vehicle/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["车辆"], + "labelIndexs":["SL01"] +} diff --git a/conf/vehicle/para.json b/conf/vehicle/para.json new file mode 100644 index 0000000..7808956 --- /dev/null +++ b/conf/vehicle/para.json @@ -0,0 +1,7 @@ +{ + + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] } + + +} diff --git a/crowd.py b/crowd.py new file mode 100644 index 0000000..97f0953 --- /dev/null +++ b/crowd.py @@ -0,0 +1,190 @@ +import argparse +from PIL import Image +from crowdUtils.engine import standard_transforms,preprocess,postprocess,DictToObject,AnchorPointsf +from crowdUtils.models import build_model +from segutils import trtUtils2 +import os,torch,cv2,time +import numpy as np +import warnings +import tensorrt as trt +from copy import deepcopy +warnings.filterwarnings('ignore') + +class crowdModel(object): + def __init__(self, weights=None, + par={'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225],'threshold':0.5, + 'modelPar':{'backbone':'vgg16_bn', 'gpu_id':0,'anchorFlag':False,'line':2,'width':None,'height':None , 'output_dir':'./output', 'row':2} + } + ): + print('-'*20,par['modelPar'] ) + self.mean = par['mean'] + self.std =par['std'] + + self.width = par['modelPar']['width'] + self.height = par['modelPar']['height'] + self.minShape = par['input_profile_shapes'][0] + self.maxShape = par['input_profile_shapes'][2] + self.IOShapes0,self.IOShapes1 = [ None,None,None ],[ None,None,None ] + self.Oshapes0,self.Oshapes1 = [ None,None,None ], [ None,None,None ] + + + self.modelPar = DictToObject(par['modelPar']) + self.threshold = par['threshold'] + self.device = 'cuda:0' + + + + if weights.endswith('.engine') or weights.endswith('.trt'): 
+ self.infer_type ='trt' + elif weights.endswith('.pth') or weights.endswith('.pt') : + self.infer_type ='pth' + else: + print('#########ERROR:',weights,': no registered inference type, exit') + sys.exit(0) + if self.infer_type=='trt': + logger = trt.Logger(trt.Logger.ERROR) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + self.engine=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + #self.stream=cuda.Stream() + self.bindingNames=[ self.engine.get_binding_name(ib) for ib in range(len(self.engine)) ] + print('############load seg model trt success: ',weights,self.bindingNames) + self.inputs,self.outputs,self.bindings,self.stream=None,None,None,None + self.context = self.engine.create_execution_context() + + elif self.infer_type=='pth': + #self.model = DirectionalPointDetector(3, self.par['depth_factor'], self.par['NUM_FEATURE_MAP_CHANNEL']).to(self.device) + + self.model = build_model(self.modelPar) + checkpoint = torch.load(args.weight_path, map_location='cpu') + self.model.load_state_dict(checkpoint['model']) + self.model=self.model.to(self.device) + if not self.modelPar.anchorFlag: + if self.infer_type=='trt': + self.anchors = AnchorPointsf(pyramid_levels=[3,], strides=None, row=self.modelPar.row, line=self.modelPar.line,device='cpu' ) + elif self.infer_type=='pth': + self.anchors = AnchorPointsf(pyramid_levels=[3,], strides=None, row=self.modelPar.row, line=self.modelPar.line ,device='cuda:0') + + print('#########加载模型:',weights,' 类型:',self.infer_type) + + def preprocess(self,img): + tmpImg = preprocess(img,mean=self.mean, std=self.std,minShape=self.minShape,maxShape=self.maxShape) + if self.infer_type=='pth': + tmpImg = torch.from_numpy(tmpImg) + tmpImg = torch.Tensor(tmpImg).unsqueeze(0) + + elif self.infer_type=='trt': + #if not self.height: + chs, height, width= tmpImg.shape[0:3] + self.width, self.height = width,height + self.IOShapes1 = [ (1, chs, height, width ),(1, height//4*width//4,2),(1, height//4*width//4,2) ] + 
self.Oshapes1 = [ (1, height//4*width//4,2),(1, height//4*width//4,2) ] + tmpImg = tmpImg[np.newaxis,:,:,:]#CHW->NCHW + + return tmpImg + def ms(self,t1,t0): + return '%.1f'%( (t1-t0)*1000 ) + def eval(self,img): + time0 = time.time() + img_b = img.copy() + #print('-----line54:',img.shape) + samples = self.preprocess(img) + + time1 = time.time() + if self.infer_type=='pth': + samples = samples.to(self.device) + elif self.infer_type=='trt' : + + #print('##### line83: 决定是否申请 内存 ',self.IOShapes1, self.IOShapes0,self.IOShapes1==self.IOShapes0) + #if self.IOShapes1 != self.IOShapes0: + self.inputs,self.outputs,self.bindings,self.stream = trtUtils2.allocate_buffers(self.engine,self.IOShapes1) + #print('##### line96: 开辟新内存成功 ' ,self.height,self.width) + self.IOShapes0=deepcopy(self.IOShapes1) + + + + time2 = time.time() + if not self.modelPar.anchorFlag: + self.anchor_points = self.anchors.eval(samples) + if self.infer_type=='pth': + # run inference + self.model.eval() + with torch.no_grad(): + outputs = self.model(samples) + outputs['pred_points'] = outputs['pred_points'] + self.anchor_points + #print('###line64:',outputs.keys(), outputs['pred_points'].shape, outputs['pred_logits'].shape) + elif self.infer_type=='trt': + outputs = trtUtils2.trt_inference( samples,self.height,self.width,self.context,self.inputs,self.outputs,self.bindings,self.stream,input_name = self.bindingNames[0]) + for i in range(len(self.Oshapes1)): + outputs[i] = torch.from_numpy( np.reshape(outputs[i],self.Oshapes1[i])) + + outputs={'pred_points':outputs[0], 'pred_logits':outputs[1]} + #print('###line117:',outputs.keys(), outputs['pred_points'].shape, outputs['pred_logits'].shape) + outputs['pred_points'] = outputs['pred_points'] + self.anchor_points + + + time3 = time.time() + points,scores = self.postprocess(outputs) + time4 = time.time() + infos = 'precess:%s datacopy:%s infer:%s post:%s'%( self.ms(time1,time0) , self.ms(time2,time1), self.ms(time3,time2), self.ms(time4,time3) ) + + p2 = 
self.toOBBformat(points,scores,cls=0 ) + + presults=[ img_b, points,p2 ] + + + + + return presults, infos + + def postprocess(self,outputs): + return postprocess(outputs,threshold=self.threshold) + def toOBBformat(self,points,scores,cls=0): + outs = [] + for i in range(len(points)): + pt,score = points[i],scores[i] + pts4=[pt]*4 + ret = [ pts4,score,cls] + outs.append(ret) + return outs + +def main(): + + par={'mean':[0.485, 0.456, 0.406], 'std':[0.229, 0.224, 0.225],'threshold':0.5, 'output_dir':'./output','input_profile_shapes':[(1,3,256,256),(1,3,1024,1024),(1,3,2048,2048)],'modelPar':{'backbone':'vgg16_bn', 'gpu_id':0,'anchorFlag':False, 'width':None,'height':None ,'line':2, 'row':2} + } + weights='weights/best_mae_dynamic.engine' + #weights='weights/best_mae.pth' + cmodel = crowdModel(weights,par) + + img_path = "./testImages" + File = os.listdir(img_path) + targetList = [] + for file in File[0:]: + COORlist = [] + imgPath = img_path + os.sep + file + img_raw = np.array(Image.open(imgPath).convert('RGB') ) + points, infos = cmodel.eval(img_raw) + print(file,infos,img_raw.shape) + img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR) + # 打印预测图像中人头的个数 + for p in points: + img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), 2, (0, 255, 0), -1) + COORlist.append((int(p[0]), int(p[1]))) + # 将各测试图像中的人头坐标存储在targetList中, 格式:[[(x1, y1),(x2, y2),...], [(X1, Y1),(X2, Y2),..], ...] 
+ targetList.append(COORlist) + time.sleep(2) + # 保存预测图片 + cv2.imwrite(os.path.join(par['output_dir'], file), img_to_draw) + #print(targetList ) + + + + + + +if __name__ == '__main__': + + par = {'backbone':'vgg16_bn', 'gpu_id':0, 'line':2, 'output_dir':'./output', 'row':2,'anchorFlag':False, 'weight_path':'./weights/best_mae.pth'} + args = DictToObject(par) + + targetList = main() + print("line81", targetList) diff --git a/crowdUtils/engine.py b/crowdUtils/engine.py new file mode 100644 index 0000000..f850712 --- /dev/null +++ b/crowdUtils/engine.py @@ -0,0 +1,299 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Train and eval functions used in main.py +Mostly copy-paste from DETR (https://github.com/facebookresearch/detr). +""" +import math +import os +import sys +from typing import Iterable + +import torch +#print( os.path.abspath( os.path.dirname(__file__) ) ) +sys.path.append( os.path.abspath( os.path.dirname(__file__) ) ) +import util.misc as utils +from util.misc import NestedTensor +import numpy as np +import time +import torchvision.transforms as standard_transforms +import cv2 +import PIL + +class DictToObject: + def __init__(self, dictionary): + for key, value in dictionary.items(): + if isinstance(value, dict): + setattr(self, key, DictToObject(value)) + else: + setattr(self, key, value) + +def letterImage(img,minShape,maxShape): + iH,iW = img.shape[0:2] + minH,minW = minShape[2:] + maxH,maxW = maxShape[2:] + flag=False + if iHmaxH or iW>maxW: + fy = iH/maxH; fx = iW/maxW; ff = max(fx,fy) + newH,newW = int(iH/ff), int(iW/ff);flag=True + if flag: + assert minH<=newH and newH<= maxH , 'iH%d,iW:%d , newH:%d newW:%d, fx:%.1f fy:%.1f'%(iH,iW,newH,newW,fx,fy) + assert minW<=newW and newW<= maxW, 'iH%d,iW:%d , newH:%d newW:%d, fx:%.1f fy:%.1f'%(iH,iW,newH,newW,fx,fy) + return cv2.resize(img,(newW,newH)) + else: + return img + + + +def postprocess(outputs,threshold=0.5): + + outputs_scores = 
torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0] + outputs_points = outputs['pred_points'][0] + points = outputs_points[outputs_scores > threshold].detach().cpu().numpy().tolist() + scores = outputs_scores[outputs_scores > threshold].detach().cpu().numpy().tolist() + + return points,scores + +def toOBBformat(points,scores,cls=0): + outs = [] + for i in range(len(points)): + pt,score = points[i],scores[i] + pts4=[pt]*4 + ret = [ pts4,score,cls] + outs.append(ret) + return outs + + #[ [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score, cls ], [ [ (x0,y0),(x1,y1),(x2,y2),(x3,y3) ],score ,cls ],........ ] + +def preprocess(img,mean,std,minShape,maxShape): + #img--numpy,(H,W,C) + #输入-RGB格式,(C,H,W) + if isinstance(img,PIL.Image.Image): + img = np.array(img) + + img = letterImage(img,minShape,maxShape) + height,width = img.shape[0:2] + + new_width = width // 128 * 128 + new_height = height // 128 * 128 + img = cv2.resize( img, (new_width, new_height) ) + + img = img/255. + tmpImg = np.zeros((new_height,new_width,3)) + + + tmpImg[:,:,0]=(img[:,:,0]-mean[0])/std[0] + tmpImg[:,:,1]=(img[:,:,1]-mean[1])/std[1] + tmpImg[:,:,2]=(img[:,:,2]-mean[2])/std[2] + tmpImg = tmpImg.transpose((2,0,1)).astype(np.float32)# HWC->CHW + #tmpImg = tmpImg[np.newaxis,:,:,:]#CHW->NCHW + return tmpImg + +class DeNormalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor): + for t, m, s in zip(tensor, self.mean, self.std): + t.mul_(s).add_(m) + return tensor + +# generate the reference points in grid layout +def generate_anchor_points(stride=16, row=3, line=3): + row_step = stride / row + line_step = stride / line + + shift_x = (np.arange(1, line + 1) - 0.5) * line_step - stride / 2 + shift_y = (np.arange(1, row + 1) - 0.5) * row_step - stride / 2 + + shift_x, shift_y = np.meshgrid(shift_x, shift_y) + + anchor_points = np.vstack(( + shift_x.ravel(), shift_y.ravel() + )).transpose() + + return anchor_points +def shift(shape, 
stride, anchor_points): + shift_x = (np.arange(0, shape[1]) + 0.5) * stride + shift_y = (np.arange(0, shape[0]) + 0.5) * stride + + shift_x, shift_y = np.meshgrid(shift_x, shift_y) + + shifts = np.vstack(( + shift_x.ravel(), shift_y.ravel() + )).transpose() + + A = anchor_points.shape[0] + K = shifts.shape[0] + all_anchor_points = (anchor_points.reshape((1, A, 2)) + shifts.reshape((1, K, 2)).transpose((1, 0, 2))) + all_anchor_points = all_anchor_points.reshape((K * A, 2)) + + return all_anchor_points + + +class AnchorPointsf(object): + def __init__(self, pyramid_levels=[3,], strides=None, row=3, line=3,device='cpu'): + + if pyramid_levels is None: + self.pyramid_levels = [3, 4, 5, 6, 7] + else: + self.pyramid_levels = pyramid_levels + + if strides is None: + self.strides = [2 ** x for x in self.pyramid_levels] + + self.row = row + self.line = line + self.device = device + def eval(self, image): + image_shape = image.shape[2:] + image_shape = np.array(image_shape) + image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels] + + all_anchor_points = np.zeros((0, 2)).astype(np.float32) + # get reference points for each level + for idx, p in enumerate(self.pyramid_levels): + anchor_points = generate_anchor_points(2**p, row=self.row, line=self.line) + shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points) + all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0) + + all_anchor_points = np.expand_dims(all_anchor_points, axis=0) + # send reference points to device + if torch.cuda.is_available() and self.device!='cpu': + return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda() + else: + return torch.from_numpy(all_anchor_points.astype(np.float32)) +def vis(samples, targets, pred, vis_dir, des=None): + ''' + samples -> tensor: [batch, 3, H, W] + targets -> list of dict: [{'points':[], 'image_id': str}] + pred -> list: [num_preds, 2] + ''' + gts = [t['point'].tolist() for t in targets] + 
+ pil_to_tensor = standard_transforms.ToTensor() + + restore_transform = standard_transforms.Compose([ + DeNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + standard_transforms.ToPILImage() + ]) + # draw one by one + for idx in range(samples.shape[0]): + sample = restore_transform(samples[idx]) + sample = pil_to_tensor(sample.convert('RGB')).numpy() * 255 + sample_gt = sample.transpose([1, 2, 0])[:, :, ::-1].astype(np.uint8).copy() + sample_pred = sample.transpose([1, 2, 0])[:, :, ::-1].astype(np.uint8).copy() + + max_len = np.max(sample_gt.shape) + + size = 2 + # draw gt + for t in gts[idx]: + sample_gt = cv2.circle(sample_gt, (int(t[0]), int(t[1])), size, (0, 255, 0), -1) + # draw predictions + for p in pred[idx]: + sample_pred = cv2.circle(sample_pred, (int(p[0]), int(p[1])), size, (0, 0, 255), -1) + + name = targets[idx]['image_id'] + # save the visualized images + if des is not None: + cv2.imwrite(os.path.join(vis_dir, '{}_{}_gt_{}_pred_{}_gt.jpg'.format(int(name), + des, len(gts[idx]), len(pred[idx]))), sample_gt) + cv2.imwrite(os.path.join(vis_dir, '{}_{}_gt_{}_pred_{}_pred.jpg'.format(int(name), + des, len(gts[idx]), len(pred[idx]))), sample_pred) + else: + cv2.imwrite( + os.path.join(vis_dir, '{}_gt_{}_pred_{}_gt.jpg'.format(int(name), len(gts[idx]), len(pred[idx]))), + sample_gt) + cv2.imwrite( + os.path.join(vis_dir, '{}_gt_{}_pred_{}_pred.jpg'.format(int(name), len(gts[idx]), len(pred[idx]))), + sample_pred) + + +# the training routine +def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, max_norm: float = 0): + model.train() + criterion.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + # iterate all training samples + for samples, targets in data_loader: + samples = samples.to(device) + targets = [{k: v.to(device) for k, v in 
t.items()} for t in targets] + # forward + outputs = model(samples) + # calc the losses + loss_dict = criterion(outputs, targets) + weight_dict = criterion.weight_dict + losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) + + # reduce all losses + loss_dict_reduced = utils.reduce_dict(loss_dict) + loss_dict_reduced_unscaled = {f'{k}_unscaled': v + for k, v in loss_dict_reduced.items()} + loss_dict_reduced_scaled = {k: v * weight_dict[k] + for k, v in loss_dict_reduced.items() if k in weight_dict} + losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) + + loss_value = losses_reduced_scaled.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + print(loss_dict_reduced) + sys.exit(1) + # backward + optimizer.zero_grad() + losses.backward() + if max_norm > 0: + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) + optimizer.step() + # update logger + metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) + metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + +# the inference routine +@torch.no_grad() +def evaluate_crowd_no_overlap(model, data_loader, device, vis_dir=None): + model.eval() + + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) + # run inference on all images to calc MAE + maes = [] + mses = [] + for samples, targets in data_loader: + samples = samples.to(device) + + outputs = model(samples) + outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0] + + outputs_points = outputs['pred_points'][0] + + gt_cnt = targets[0]['point'].shape[0] + # 0.5 is used by default + threshold = 
0.5 + + points = outputs_points[outputs_scores > threshold].detach().cpu().numpy().tolist() + predict_cnt = int((outputs_scores > threshold).sum()) + # if specified, save the visualized images + if vis_dir is not None: + vis(samples, targets, [points], vis_dir) + # accumulate MAE, MSE + mae = abs(predict_cnt - gt_cnt) + mse = (predict_cnt - gt_cnt) * (predict_cnt - gt_cnt) + maes.append(float(mae)) + mses.append(float(mse)) + # calc MAE, MSE + mae = np.mean(maes) + mse = np.sqrt(np.mean(mses)) + + return mae, mse diff --git a/crowdUtils/models/__init__.py b/crowdUtils/models/__init__.py new file mode 100644 index 0000000..ba2b88c --- /dev/null +++ b/crowdUtils/models/__init__.py @@ -0,0 +1,8 @@ +from .p2pnet import build + +# build the P2PNet model +# set training to 'True' during training + + +def build_model(args, training=False): + return build(args, training) diff --git a/crowdUtils/models/backbone.py b/crowdUtils/models/backbone.py new file mode 100644 index 0000000..044aa90 --- /dev/null +++ b/crowdUtils/models/backbone.py @@ -0,0 +1,73 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Backbone modules. 
+""" +from collections import OrderedDict + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn +import sys,os + +sys.path.append(os.path.abspath(os.path.dirname(__file__)) ) +import vgg_ as models + + +class BackboneBase_VGG(nn.Module): + def __init__(self, backbone: nn.Module, num_channels: int, name: str, return_interm_layers: bool): + super().__init__() + features = list(backbone.features.children()) + if return_interm_layers: + if name == 'vgg16_bn': + self.body1 = nn.Sequential(*features[:13]) + self.body2 = nn.Sequential(*features[13:23]) + self.body3 = nn.Sequential(*features[23:33]) + self.body4 = nn.Sequential(*features[33:43]) + else: + self.body1 = nn.Sequential(*features[:9]) + self.body2 = nn.Sequential(*features[9:16]) + self.body3 = nn.Sequential(*features[16:23]) + self.body4 = nn.Sequential(*features[23:30]) + else: + if name == 'vgg16_bn': + self.body = nn.Sequential(*features[:44]) # 16x down-sample + elif name == 'vgg16': + self.body = nn.Sequential(*features[:30]) # 16x down-sample + self.num_channels = num_channels + self.return_interm_layers = return_interm_layers + + def forward(self, tensor_list): + out = [] + + if self.return_interm_layers: + xs = tensor_list + for _, layer in enumerate([self.body1, self.body2, self.body3, self.body4]): + xs = layer(xs) + out.append(xs) + + else: + xs = self.body(tensor_list) + out.append(xs) + return out + + +class Backbone_VGG(BackboneBase_VGG): + """ResNet backbone with frozen BatchNorm.""" + def __init__(self, name: str, return_interm_layers: bool): + if name == 'vgg16_bn': + backbone = models.vgg16_bn(pretrained=False) + elif name == 'vgg16': + backbone = models.vgg16(pretrained=False) + num_channels = 256 + super().__init__(backbone, num_channels, name, return_interm_layers) + + +def build_backbone(args): + #backbone = Backbone_VGG(args.backbone, False) + backbone = Backbone_VGG(args.backbone, True) + return backbone + + +if __name__ == '__main__': + 
Backbone_VGG('vgg16', True) diff --git a/crowdUtils/models/matcher.py b/crowdUtils/models/matcher.py new file mode 100644 index 0000000..85b86fd --- /dev/null +++ b/crowdUtils/models/matcher.py @@ -0,0 +1,83 @@ + +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Mostly copy-paste from DETR (https://github.com/facebookresearch/detr). +""" +import torch +from scipy.optimize import linear_sum_assignment +from torch import nn + + +class HungarianMatcher_Crowd(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). + """ + + def __init__(self, cost_class: float = 1, cost_point: float = 1): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the foreground object + cost_point: This is the relative weight of the L1 error of the points coordinates in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_point = cost_point + assert cost_class != 0 or cost_point != 0, "all costs cant be 0" + + @torch.no_grad() + def forward(self, outputs, targets): + """ Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "points": Tensor of dim [batch_size, num_queries, 2] with the predicted point coordinates + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_points] (where num_target_points is the number of ground-truth + objects in the target) containing the class labels + "points": Tensor of dim [num_target_points, 2] containing the target 
point coordinates + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_points) + """ + bs, num_queries = outputs["pred_logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] + out_points = outputs["pred_points"].flatten(0, 1) # [batch_size * num_queries, 2] + + # Also concat the target labels and points + # tgt_ids = torch.cat([v["labels"] for v in targets]) + tgt_ids = torch.cat([v["labels"] for v in targets]) + tgt_points = torch.cat([v["point"] for v in targets]) + + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. 
+ cost_class = -out_prob[:, tgt_ids] + + # Compute the L2 cost between point + cost_point = torch.cdist(out_points, tgt_points, p=2) + + # Compute the giou cost between point + + # Final cost matrix + C = self.cost_point * cost_point + self.cost_class * cost_class + C = C.view(bs, num_queries, -1).cpu() + + sizes = [len(v["point"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +def build_matcher_crowd(args): + return HungarianMatcher_Crowd(cost_class=args.set_cost_class, cost_point=args.set_cost_point) diff --git a/crowdUtils/models/p2pnet.py b/crowdUtils/models/p2pnet.py new file mode 100644 index 0000000..4acfe21 --- /dev/null +++ b/crowdUtils/models/p2pnet.py @@ -0,0 +1,364 @@ +import torch +import torch.nn.functional as F +from torch import nn +import os,sys + +#print( os.path.abspath( os.path.dirname(os.path.dirname(__file__) )) ) +sys.path.append(os.path.abspath( os.path.dirname(os.path.dirname(__file__) )) ) +from util.misc import (NestedTensor, nested_tensor_from_tensor_list, + accuracy, get_world_size, interpolate, + is_dist_avail_and_initialized) + +from .backbone import build_backbone +from .matcher import build_matcher_crowd + +import numpy as np +import time + + +# the network frmawork of the regression branch +class RegressionModel(nn.Module): + def __init__(self, num_features_in, num_anchor_points=4, feature_size=256): + super(RegressionModel, self).__init__() + + self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) + self.act1 = nn.ReLU() + + self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) + self.act2 = nn.ReLU() + + self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) + self.act3 = nn.ReLU() + + self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) + self.act4 = nn.ReLU() + + 
self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1) + + # sub-branch forward + def forward(self, x): + out = self.conv1(x) + out = self.act1(out) + + out = self.conv2(out) + out = self.act2(out) + + out = self.output(out) + + out = out.permute(0, 2, 3, 1) + + return out.contiguous().view(out.shape[0], -1, 2) + + +# the network frmawork of the classification branch +class ClassificationModel(nn.Module): + def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256): + super(ClassificationModel, self).__init__() + + self.num_classes = num_classes + self.num_anchor_points = num_anchor_points + + self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) + self.act1 = nn.ReLU() + + self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) + self.act2 = nn.ReLU() + + self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) + self.act3 = nn.ReLU() + + self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) + self.act4 = nn.ReLU() + + self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1) + self.output_act = nn.Sigmoid() + + # sub-branch forward + def forward(self, x): + out = self.conv1(x) + out = self.act1(out) + + out = self.conv2(out) + out = self.act2(out) + + out = self.output(out) + + out1 = out.permute(0, 2, 3, 1) + + batch_size, width, height, _ = out1.shape + + out2 = out1.view(batch_size, width, height, self.num_anchor_points, self.num_classes) + + return out2.contiguous().view(x.shape[0], -1, self.num_classes) + + +# generate the reference points in grid layout +def generate_anchor_points(stride=16, row=3, line=3): + row_step = stride / row + line_step = stride / line + + shift_x = (np.arange(1, line + 1) - 0.5) * line_step - stride / 2 + shift_y = (np.arange(1, row + 1) - 0.5) * row_step - stride / 2 + + shift_x, shift_y = np.meshgrid(shift_x, shift_y) + + 
anchor_points = np.vstack(( + shift_x.ravel(), shift_y.ravel() + )).transpose() + + return anchor_points + + +# shift the meta-anchor to get an acnhor points +def shift(shape, stride, anchor_points): + shift_x = (np.arange(0, shape[1]) + 0.5) * stride + shift_y = (np.arange(0, shape[0]) + 0.5) * stride + + shift_x, shift_y = np.meshgrid(shift_x, shift_y) + + shifts = np.vstack(( + shift_x.ravel(), shift_y.ravel() + )).transpose() + + A = anchor_points.shape[0] + K = shifts.shape[0] + all_anchor_points = (anchor_points.reshape((1, A, 2)) + shifts.reshape((1, K, 2)).transpose((1, 0, 2))) + all_anchor_points = all_anchor_points.reshape((K * A, 2)) + + return all_anchor_points + + + + +# this class generate all reference points on all pyramid levels +class AnchorPoints(nn.Module): + def __init__(self, pyramid_levels=None, strides=None, row=3, line=3): + super(AnchorPoints, self).__init__() + + if pyramid_levels is None: + self.pyramid_levels = [3, 4, 5, 6, 7] + else: + self.pyramid_levels = pyramid_levels + + if strides is None: + self.strides = [2 ** x for x in self.pyramid_levels] + + self.row = row + self.line = line + + def forward(self, image): + image_shape = image.shape[2:] + image_shape = np.array(image_shape) + image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels] + + all_anchor_points = np.zeros((0, 2)).astype(np.float32) + # get reference points for each level + for idx, p in enumerate(self.pyramid_levels): + anchor_points = generate_anchor_points(2**p, row=self.row, line=self.line) + shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points) + all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0) + + all_anchor_points = np.expand_dims(all_anchor_points, axis=0) + # send reference points to device + if torch.cuda.is_available(): + return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda() + else: + return torch.from_numpy(all_anchor_points.astype(np.float32)) + + 
class Decoder(nn.Module):
    """FPN-style decoder: fuses backbone stages C3/C4/C5 into pyramid maps P3/P4/P5.

    Each lateral 1x1 conv projects a backbone stage to `feature_size` channels;
    the higher-level map is upsampled (nearest, x2), added element-wise, and
    smoothed by a 3x3 conv, as in the FPN paper.
    """

    def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
        super(Decoder, self).__init__()

        # upsample C5 to get P5 from the FPN paper
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        # add P5 elementwise to C4
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        # add P4 elementwise to C3
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        # NOTE(review): P3_upsampled is created but never used in forward()
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    def forward(self, inputs):
        """inputs: tuple (C3, C4, C5) of feature maps; returns [P3, P4, P5]."""
        C3, C4, C5 = inputs

        P5_x = self.P5_1(C5)
        P5_upsampled_x = self.P5_upsampled(P5_x)
        P5_x = self.P5_2(P5_x)

        P4_x = self.P4_1(C4)
        P4_x = P5_upsampled_x + P4_x
        P4_upsampled_x = self.P4_upsampled(P4_x)
        P4_x = self.P4_2(P4_x)

        P3_x = self.P3_1(C3)
        P3_x = P3_x + P4_upsampled_x
        P3_x = self.P3_2(P3_x)

        return [P3_x, P4_x, P5_x]


# the definition of the P2PNet model
class P2PNet(nn.Module):
    """P2PNet crowd-counting head: predicts per-point logits and point coordinates.

    Lays out row*line proposal points per feature-map cell. When `anchorFlag`
    is True the regressed offsets are added to fixed anchor points; otherwise
    the regression output is used directly as coordinates.
    """

    def __init__(self, backbone, row=2, line=2,anchorFlag=True):
        super().__init__()
        self.backbone = backbone
        self.num_classes = 2
        self.anchorFlag = anchorFlag
        # the number of all anchor points
        num_anchor_points = row * line

        self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
        self.classification = ClassificationModel(num_features_in=256, \
                                            num_classes=self.num_classes, \
                                            num_anchor_points=num_anchor_points)
        if self.anchorFlag:
            self.anchor_points = AnchorPoints(pyramid_levels=[3,], row=row, line=line)

        self.fpn = Decoder(256, 512, 512)

    def forward(self, samples: NestedTensor):
        # get the backbone features
        features = self.backbone(samples)
        # forward the feature pyramid
        features_fpn = self.fpn([features[1], features[2], features[3]])

        batch_size = features[0].shape[0]
        # print("line227", batch_size)
        # run the regression and classification branch on the P4-level map
        regression = self.regression(features_fpn[1]) * 100 # 8x
        classification = self.classification(features_fpn[1])

        if self.anchorFlag:
            anchor_points = self.anchor_points(samples).repeat(batch_size, 1, 1)
            #decode the points as prediction
            output_coord = regression + anchor_points
        else:
            output_coord = regression
        output_class = classification
        out = {'pred_logits': output_class, 'pred_points': output_coord}

        return out


class SetCriterion_Crowd(nn.Module):
    """Loss for P2PNet: Hungarian-matched classification (CE) + point MSE."""

    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # down-weight the background class (index 0) in the CE loss
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[0] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)

    def loss_labels(self, outputs, targets, indices, num_points):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']

        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # unmatched proposals default to class 0 (background)
        target_classes = torch.full(src_logits.shape[:2], 0,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o

        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {'loss_ce': loss_ce}

        return losses

    def loss_points(self, outputs, targets, indices, num_points):
        """MSE between matched predicted points and ground-truth points, normalized by num_points."""
        assert 'pred_points' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_points = outputs['pred_points'][idx]
        target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)

        loss_bbox = F.mse_loss(src_points, target_points, reduction='none')

        losses = {}
        losses['loss_point'] = loss_bbox.sum() / num_points

        return losses

    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx

    def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
        """Dispatch one named loss ('labels' or 'points') to its implementation."""
        loss_map = {
            'labels': self.loss_labels,
            'points': self.loss_points,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_points, **kwargs)

    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}

        indices1 = self.matcher(output1, targets)

        # average target count across distributed workers, clamped to >= 1
        num_points = sum(len(t["labels"]) for t in targets)
        num_points = torch.as_tensor([num_points], dtype=torch.float, device=next(iter(output1.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_points)
        num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()

        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))

        return losses


# create the P2PNet model
def build(args, training):
    """Build P2PNet (and its criterion when `training` is truthy)."""
    # treats persons as a single class
    num_classes = 1

    backbone = build_backbone(args)
    model = P2PNet(backbone, args.row, args.line,anchorFlag=args.anchorFlag)
    if not training:
        return model

    # NOTE(review): loss_points() emits the key 'loss_point' (singular) while this
    # dict uses 'loss_points' -- verify the point-loss weight is actually applied.
    weight_dict = {'loss_ce': 1, 'loss_points': args.point_loss_coef}
    losses = ['labels', 'points']
    matcher = build_matcher_crowd(args)
    criterion = SetCriterion_Crowd(num_classes, \
                                   matcher=matcher, weight_dict=weight_dict, \
                                   eos_coef=args.eos_coef, losses=losses)

    return model, criterion
# \ No newline at end of file
# --- patch scaffolding (unchanged) ---
# diff --git a/crowdUtils/models/vgg_.py b/crowdUtils/models/vgg_.py
# new file mode 100644
# index 0000000..1182cfe
# --- /dev/null
# +++ b/crowdUtils/models/vgg_.py
# @@ -0,0 +1,194 @@
# Copyright (c) Facebook, Inc.
# and its affiliates. All Rights Reserved   (tail of the copyright header split at the chunk boundary)
"""
Mostly copy-paste from torchvision references.
"""
import torch
import torch.nn as nn


__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]


# official torchvision download URLs (kept for reference; _vgg() below loads
# weights from the local `model_paths` instead of downloading)
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}

# local checkpoint files used when pretrained=True
# NOTE(review): only vgg16 / vgg16_bn have local paths; pretrained=True on any
# other variant raises KeyError in _vgg() -- confirm that is intended.
model_paths = {
    'vgg16_bn': './vggWeights/vgg16_bn-6c64b313.pth',
    'vgg16': './vggWeights/vgg16-397923af.pth',
}


class VGG(nn.Module):
    """Standard torchvision-style VGG: conv features -> 7x7 avgpool -> 3-layer MLP head."""

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Kaiming init for convs, unit BN, small-normal linears (torchvision defaults)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def make_layers(cfg, batch_norm=False, sync=False):
    """Build the conv stack from a cfg list ('M' = maxpool, int = conv out-channels).

    sync=True swaps BatchNorm2d for SyncBatchNorm (only meaningful with batch_norm=True).
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                if sync:
                    print('use sync backbone')
                    layers += [conv2d, nn.SyncBatchNorm(v), nn.ReLU(inplace=True)]
                else:
                    layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)


cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


def _vgg(arch, cfg, batch_norm, pretrained, progress, sync=False, **kwargs):
    """Instantiate a VGG variant; when pretrained, load weights from the local model_paths."""
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm, sync=sync), **kwargs)
    if pretrained:
        state_dict = torch.load(model_paths[arch])
        model.load_state_dict(state_dict)
    return model


def vgg11(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)


def vgg11_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)


def vgg13(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)


def vgg13_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)


def vgg16(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)


def vgg16_bn(pretrained=False, progress=True, sync=False, **kwargs):
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, sync=sync, **kwargs)


def vgg19(pretrained=False, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration "E")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)


def vgg19_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration 'E') with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
# --- patch scaffolding (unchanged) ---
# diff --git a/crowdUtils/run.sh b/crowdUtils/run.sh
# new file mode 100644
# index 0000000..4114c57
# --- /dev/null
# +++ b/crowdUtils/run.sh
# @@ -0,0 +1,4 @@
# --- file: crowdUtils/run.sh (shell script: export the crowd-counting model to a
# --- per-GPU TensorRT engine)
# gpu=2080Ti
# url=/mnt/thsw2/DSP2/weights/crowdCounting/weights
# python toTrt.py --weights ${url}/crowdCounting.pth
# mv ${url}/crowdCounting_dynamic.engine ${url}/crowdCounting_${gpu}_dynamic.engine
# diff --git a/crowdUtils/toTrt.py b/crowdUtils/toTrt.py
# new file mode 100755
# index 0000000..518d108
# --- /dev/null
# +++ b/crowdUtils/toTrt.py
# @@ -0,0 +1,57 @@
# --- file: crowdUtils/toTrt.py (pth -> onnx -> TensorRT engine exporter) ---
import sys
from models import build_model
# NOTE(review): this path extension runs AFTER `from models import build_model`
# above, so it cannot influence that import -- confirm the intended order.
sys.path.extend(['..','.' ])
from segutils.trtUtils2 import pth2onnx,onnx2engine,onnx_inference
from engine import DictToObject
from pathlib import Path
import torch
import os
import tensorrt as trt
import numpy as np
import argparse


def main(opt):
    """Load the P2PNet checkpoint, export a dynamic-shape ONNX, then build a TensorRT engine."""
    #pth_model='../weights/best_mae.pth'
    pth_model=opt.weights.strip()
    onnx_name = pth_model.replace('.pth','_dynamic.onnx')
    trt_name = onnx_name.replace('.onnx','.engine')
    # dynamic axes: batch plus input H/W; channel-like axis 1 on both outputs
    dynamic_hw ={'input':{0:'batch',2:'H',3:'W'},
                 'output0':{1:'C'},
                 'output1':{1:'C'},
                 }

    par = {'backbone':'vgg16_bn', 'gpu_id':0, 'line':2, 'output_dir':'./output', 'row':2, 'anchorFlag':False,'weight_path':'./weights/best_mae.pth'}
    args = DictToObject(par)
    model = build_model(args)

    pthFile = Path(pth_model)
    checkpoint = torch.load(pthFile, map_location='cpu')
    model.load_state_dict(checkpoint['model'])
    model = model.to('cuda:0')

    inputShape =(1, 3, 128*4,128*4)#(bs,channels,height,width)
    # min/opt/max shapes for the TensorRT optimization profile
    input_profile_shapes = [(1,3,256,256),(1,3,1024,1024),(1,3,2048,2048)]
    # NOTE(review): keyword is spelled `dynamix_axis` -- presumably matches the
    # pth2onnx() signature in segutils.trtUtils2; verify.
    pth2onnx(model,onnx_name,input_shape=(1,3,512,512),input_names=['input'],output_names=[ 'output0' ,'output1'],dynamix_axis=dynamic_hw)

    onnx2engine(onnx_name,trt_name,input_shape=[1,3,-1,-1],half=True,max_batch_size=1,input_profile_shapes=input_profile_shapes)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): default points at a DMPR (cityMangement2) checkpoint, not the
    # crowd-counting weights run.sh passes in -- confirm the default is a leftover.
    parser.add_argument('--weights', type=str, default='/mnt/thsw2/DSP2/weights/cityMangement2/weights/urbanManagement/DMPR/dp_detector_499.pth', help='model path(s)')
    opt = parser.parse_args()

    main(opt)
# \ No newline at end of file
# --- patch scaffolding (unchanged) ---
# diff --git a/README.md b/crowdUtils/util/__init__.py
# similarity index 100%
# rename from README.md
# rename to crowdUtils/util/__init__.py
# diff --git a/crowdUtils/util/misc.py b/crowdUtils/util/misc.py
# new file mode 100644
# index 0000000..7cfe7d7
# --- /dev/null
# +++ b/crowdUtils/util/misc.py
# @@ -0,0 +1,518 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# All Rights Reserved   (tail of the copyright header split at the chunk boundary)
"""
Misc functions, including distributed helpers.

Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List

import torch
import torch.distributed as dist
from torch import Tensor

import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
# if float(torchvision.__version__[:3]) < 0.7:
#     from torchvision.ops import _new_empty_tensor
#     from torchvision.ops.misc import _output_size


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # sliding window of recent values
        self.total = 0.0                        # running sum over ALL values
        self.count = 0                          # running count over ALL values
        self.fmt = fmt

    def update(self, value, n=1):
        """Record `value` observed `n` times (window stores it once)."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    # trim each gathered buffer back to its true size and unpickle
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list


def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict


class MetricLogger(object):
    """Aggregates named SmoothedValue meters and prints periodic progress lines."""

    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Update one meter per keyword; tensor values are unwrapped via .item()."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # expose meters as attributes (logger.loss etc.)
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing ETA/meters every `print_freq` steps."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))


def get_sha():
    """Return a one-line description of the git state (sha / dirty / branch) of this repo."""
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
    sha = 'N/A'
    diff = "clean"
    branch = 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = _run(['git', 'diff-index', 'HEAD'])
        diff = "has uncommited changes" if diff else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        # best-effort: fall back to the N/A defaults when git is unavailable
        pass
    message = f"sha: {sha}, status: {diff}, branch: {branch}"
    return message


def collate_fn(batch):
    # batch images into one padded NestedTensor, pass targets through
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)

def collate_fn_crowd(batch):
    # re-organize the batch: flatten per-sample image stacks into single images
    batch_new = []
    for b in batch:
        imgs, points = b
        if imgs.ndim == 3:
            imgs = imgs.unsqueeze(0)
        for i in range(len(imgs)):
            batch_new.append((imgs[i, :, :, :], points[i]))
    batch = batch_new
    batch = list(zip(*batch))
    batch[0] = nested_tensor_from_tensor_list(batch[0])
    return tuple(batch)


def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    # element-wise maximum over a list of shape lists
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes

def _max_by_axis_pad(the_list):
    # type: (List[List[int]]) -> List[int]
    # like _max_by_axis, but rounds H and W up to a multiple of 128
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)

    block = 128

    for i in range(2):
        maxes[i+1] = ((maxes[i+1] - 1) // block + 1) * block
    return maxes


def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Zero-pad a list of CHW images to a common (block-aligned) size and stack them."""
    # TODO make this more general
    if tensor_list[0].ndim == 3:

        # TODO make it support different-sized images
        max_size = _max_by_axis_pad([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        for img, pad_img in zip(tensor_list, tensor):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    else:
        raise ValueError('not supported')
    return tensor

class NestedTensor(object):
    """Pairs a batched tensor with an optional padding mask."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            assert mask is not None
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)


def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    # torch.save only on rank 0 to avoid concurrent writes
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    """Initialize torch.distributed from RANK/WORLD_SIZE or SLURM env vars; mutates `args`."""
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)

+@torch.no_grad() +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + if target.numel() == 0: + return [torch.zeros([], device=output.device)] + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor + """ + Equivalent to nn.functional.interpolate, but with support for empty batch sizes. + This will eventually be supported natively by PyTorch, and this + class can go away. + """ + if float(torchvision.__version__[:3]) < 0.7: + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + output_shape = _output_size(2, input, size, scale_factor) + output_shape = list(input.shape[:-2]) + list(output_shape) + return _new_empty_tensor(input, output_shape) + else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) + + +class FocalLoss(nn.Module): + r""" + This criterion is a implemenation of Focal Loss, which is proposed in + Focal Loss for Dense Object Detection. + + Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class]) + + The losses are averaged across observations for each minibatch. + + Args: + alpha(1D Tensor, Variable) : the scalar factor for this criterion + gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5), + putting more focus on hard, misclassified examples + size_average(bool): By default, the losses are averaged over observations for each minibatch. 
+ However, if the field size_average is set to False, the losses are + instead summed for each minibatch. + + + """ + def __init__(self, class_num, alpha=None, gamma=2, size_average=True): + super(FocalLoss, self).__init__() + if alpha is None: + self.alpha = Variable(torch.ones(class_num, 1)) + else: + if isinstance(alpha, Variable): + self.alpha = alpha + else: + self.alpha = Variable(alpha) + self.gamma = gamma + self.class_num = class_num + self.size_average = size_average + + def forward(self, inputs, targets): + N = inputs.size(0) + C = inputs.size(1) + P = F.softmax(inputs) + + class_mask = inputs.data.new(N, C).fill_(0) + class_mask = Variable(class_mask) + ids = targets.view(-1, 1) + class_mask.scatter_(1, ids.data, 1.) + + if inputs.is_cuda and not self.alpha.is_cuda: + self.alpha = self.alpha.cuda() + alpha = self.alpha[ids.data.view(-1)] + + probs = (P*class_mask).sum(1).view(-1,1) + + log_p = probs.log() + batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p + + if self.size_average: + loss = batch_loss.mean() + else: + loss = batch_loss.sum() + return loss \ No newline at end of file diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/common.py b/models/common.py new file mode 100644 index 0000000..028dedd --- /dev/null +++ b/models/common.py @@ -0,0 +1,405 @@ +# YOLOv5 common modules + +import math +from copy import copy +from pathlib import Path + +import numpy as np +import pandas as pd +import requests +import torch +import torch.nn as nn +from PIL import Image +from torch.cuda import amp + +from utils.datasets import letterbox +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh +from utils.plots import color_list, plot_one_box +from utils.torch_utils import time_synchronized + +import warnings + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, 
k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +def DWConv(c1, c2, k=1, s=1, act=True): + # Depthwise convolution + return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Conv, self).__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def 
__init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2) + p = p.unsqueeze(0) + p = p.transpose(0, 3) + p = p.squeeze(3) + e = self.linear(p) + x = p + e + + x = self.tr(x) + x = x.unsqueeze(3) + x = x.transpose(0, 3) + x = x.reshape(b, self.c2, w, h) + return x + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSP, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.LeakyReLU(0.1, inplace=True) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # 
class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
        # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])

    def forward(self, x):
        branch_a = self.m(self.cv1(x))  # bottleneck branch
        branch_b = self.cv2(x)          # plain 1x1 branch
        return self.cv3(torch.cat((branch_a, branch_b), dim=1))


class C3TR(C3):
    """C3 whose bottleneck stack is replaced by a TransformerBlock."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)
        self.m = TransformerBlock(c_, c_, 4, n)


class SPP(nn.Module):
    """Spatial Pyramid Pooling layer as used in YOLOv3-SPP."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList(nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k)

    def forward(self, x):
        x = self.cv1(x)
        pooled = [m(x) for m in self.m]
        return self.cv2(torch.cat([x] + pooled, 1))


class Focus(nn.Module):
    """Focus wh information into channel space: x(b,c,w,h) -> x(b,4c,w/2,h/2) -> Conv."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
        # self.contract = Contract(gain=2)

    def forward(self, x):
        # Stack the four pixel-parity sub-grids along channels, then convolve.
        patches = [x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]]
        return self.conv(torch.cat(patches, 1))
        # return self.conv(self.contract(x))
class Contract(nn.Module):
    """Contract width/height into channels, e.g. x(1,64,80,80) -> x(1,256,40,40)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        n, c, h, w = x.size()  # H and W must both be divisible by the gain
        s = self.gain
        x = x.view(n, c, h // s, s, w // s, s)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # move the s*s sub-grid in front of the channels
        return x.view(n, c * s * s, h // s, w // s)


class Expand(nn.Module):
    """Expand channels into width/height, e.g. x(1,64,80,80) -> x(1,16,160,160). Inverse of Contract."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        n, c, h, w = x.size()  # C must be divisible by gain**2
        s = self.gain
        x = x.view(n, s, s, c // s ** 2, h, w)
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # interleave the s*s grid back into H and W
        return x.view(n, c // s ** 2, h * s, w * s)


class Concat(nn.Module):
    """Concatenate a list of tensors along one dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class NMS(nn.Module):
    """Non-Maximum Suppression module wrapping non_max_suppression()."""

    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self):
        super(NMS, self).__init__()

    def forward(self, x):
        # x[0] is the raw prediction tensor coming out of the model head
        return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
class autoShape(nn.Module):
    """Input-robust model wrapper for cv2/np/PIL/torch inputs; runs pre-process, inference and NMS."""

    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class

    def __init__(self, model):
        super(autoShape, self).__init__()
        self.model = model.eval()

    def autoshape(self):
        print('autoShape already enabled, skipping... ')  # model already converted to model.autoshape()
        return self

    @torch.no_grad()
    def forward(self, imgs, size=640, augment=False, profile=False):
        # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
        #   filename:  imgs = 'data/images/zidane.jpg'
        #   URI:            = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
        #   OpenCV:         = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
        #   PIL:            = Image.open('image.jpg')  # HWC x(640,1280,3)
        #   numpy:          = np.zeros((640,1280,3))  # HWC
        #   torch:          = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
        #   multiple:       = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
        t = [time_synchronized()]
        p = next(self.model.parameters())  # sample parameter: provides target device and dtype
        if isinstance(imgs, torch.Tensor):  # already a batched tensor: forward directly
            with amp.autocast(enabled=p.device.type != 'cpu'):
                return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference

        # Pre-process: normalise every input into a contiguous HWC 3-channel numpy array
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1, files = [], [], []  # original shapes, inference shapes, filenames
        for i, im in enumerate(imgs):
            f = f'image{i}'  # fallback filename
            if isinstance(im, str):  # filename or uri
                im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
            elif isinstance(im, Image.Image):  # PIL Image
                im, f = np.asarray(im), getattr(im, 'filename', f) or f
            files.append(Path(f).with_suffix('.jpg').name)
            if im.shape[0] < 5:  # heuristically detect CHW layout
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HW
            shape0.append(s)  # original image shape
            g = (size / max(s))  # scale gain
            shape1.append([y * g for y in s])
            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update in place
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad every image to a common shape
        x = np.stack(x, 0) if n > 1 else x[0][None]  # build batch
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32, 0-1 range
        t.append(time_synchronized())

        with amp.autocast(enabled=p.device.type != 'cpu'):
            # Inference
            y = self.model(x, augment, profile)[0]  # forward
            t.append(time_synchronized())

            # Post-process
            y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)  # NMS
            for i in range(n):
                scale_coords(shape1, y[i][:, :4], shape0[i])  # boxes back to original image coordinates

            t.append(time_synchronized())
            return Detections(imgs, y, files, t, self.names, x.shape)


class Detections:
    """Container for YOLOv5 inference results with helpers to print/show/save/render them."""

    def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
        super(Detections, self).__init__()
        d = pred[0].device  # device
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.files = files  # image filenames
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)  # number of images (batch size)
        # NOTE(review): the times=None default raises a TypeError on the next line; callers pass a 4-element list
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3))  # per-stage times (ms)
        self.s = shape  # inference BCHW shape

    def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
        colors = color_list()
        for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
            string = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    string += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                if show or save or render:
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        plot_one_box(box, img, label=label, color=colors[int(cls) % 10])
            img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img  # from np
            if pprint:
                print(string.rstrip(', '))
            if show:
                img.show(self.files[i])  # show
            if save:
                f = self.files[i]
                img.save(Path(save_dir) / f)  # save
                print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
            if render:
                self.imgs[i] = np.asarray(img)

    def print(self):
        self.display(pprint=True)  # print results
        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)

    def show(self):
        self.display(show=True)  # show results

    def save(self, save_dir='runs/hub/exp'):
        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp')  # increment save_dir
        Path(save_dir).mkdir(parents=True, exist_ok=True)
        self.display(save=True, save_dir=save_dir)  # save results

    def render(self):
        self.display(render=True)  # render results
        return self.imgs

    def pandas(self):
        # Return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
        new = copy(self)  # shallow copy, then overwrite the box attributes with DataFrames
        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
        return new

    def tolist(self):
        # Return a list of single-image Detections objects, i.e. 'for result in results.tolist():'
        # NOTE(review): these positional args land in the files/times slots of __init__ - confirm against callers
        x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x

    def __len__(self):
        return self.n


class Classify(nn.Module):
    """Classification head: x(b,c1,20,20) -> x(b,c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        if p is None:  # 'same' padding (inlined autopad)
            p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
        self.aap = nn.AdaptiveAvgPool2d(1)  # to x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, p, groups=g)  # to x(b,c2,1,1)
        self.flat = nn.Flatten()

    def forward(self, x):
        z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)  # cat if list
        return self.flat(self.conv(z))  # flatten to x(b,c2)
class CrossConv(nn.Module):
    """Cross Convolution Downsample: a (1,k) conv followed by a (k,1) conv, with optional residual."""

    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super(CrossConv, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out


class Sum(nn.Module):
    """(Optionally weighted) sum of 2 or more inputs, https://arxiv.org/abs/1911.09070."""

    def __init__(self, n, weight=False):  # n: number of inputs
        super(Sum, self).__init__()
        self.weight = weight  # apply learnable weights?
        self.iter = range(n - 1)  # indices of the extra inputs
        if weight:
            # one learnable weight per extra input, initialised on a descending ramp
            self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True)

    def forward(self, x):
        y = x[0]  # first input is always unweighted
        if self.weight:
            w = torch.sigmoid(self.w) * 2  # squash weights into (0, 2)
            for i in self.iter:
                y = y + x[i + 1] * w[i]
        else:
            for i in self.iter:
                y = y + x[i + 1]
        return y


class GhostConv(nn.Module):
    """Ghost Convolution (https://github.com/huawei-noah/ghostnet): half real features, half cheap ones."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
        super(GhostConv, self).__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, k, s, None, g, act)  # primary conv
        self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)  # cheap depthwise 5x5 op

    def forward(self, x):
        y = self.cv1(x)
        return torch.cat([y, self.cv2(y)], 1)


class GhostBottleneck(nn.Module):
    """Ghost Bottleneck from https://github.com/huawei-noah/ghostnet."""

    def __init__(self, c1, c2, k=3, s=1):  # ch_in, ch_out, kernel, stride
        super(GhostBottleneck, self).__init__()
        c_ = c2 // 2
        self.conv = nn.Sequential(
            GhostConv(c1, c_, 1, 1),  # pointwise
            DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # depthwise (only when downsampling)
            GhostConv(c_, c2, 1, 1, act=False))  # pointwise-linear
        self.shortcut = nn.Sequential(
            DWConv(c1, c1, k, s, act=False),
            Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)
class MixConv2d(nn.Module):
    """Mixed Depthwise Conv (https://arxiv.org/abs/1907.09595): parallel convs with different kernel sizes."""

    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super(MixConv2d, self).__init__()
        groups = len(k)
        if equal_ch:  # equal channels per kernel-size group
            i = torch.linspace(0, groups - 1E-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal-weight channel split, ax = b

        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        # NOTE: the residual add requires c1 == c2
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
    """Ensemble of models whose per-model outputs are concatenated (NMS-style ensembling)."""

    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        y = [module(x, augment)[0] for module in self]
        # y = torch.stack(y).max(0)[0]  # max ensemble
        # y = torch.stack(y).mean(0)  # mean ensemble
        y = torch.cat(y, 1)  # nms ensemble
        return y, None  # inference, train output


def attempt_load(weights, map_location=None):
    """Load model weights.

    ``weights`` may be a single path or a list of paths; a single checkpoint returns the
    bare model, several checkpoints return an :class:`Ensemble`.
    Raises AssertionError (with the offending path in the message) when a file is missing.
    """
    model = Ensemble()
    for w in weights if isinstance(weights, list) else [weights]:
        # attempt_download(w)  # remote download disabled: the file must already exist locally
        assert os.path.exists(w), "%s not exists" % w  # fixed: interpolate the missing path into the message
        ckpt = torch.load(w, map_location=map_location)  # load checkpoint
        model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model

    # Compatibility updates for checkpoints saved with older torch versions
    for m in model.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif type(m) is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # single model
    else:
        print('Ensemble created with %s\n' % weights)
        for k in ['names', 'stride']:
            setattr(model, k, getattr(model[-1], k))  # expose attributes of the last model on the ensemble
        return model  # ensemble
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats

Usage:
    $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
"""

import argparse
import sys
import time

sys.path.append('./')  # to run '$ python *.py' files in subdirectories

import torch
import torch.nn as nn

import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import colorstr, check_img_size, check_requirements, set_logging
from utils.torch_utils import select_device

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size')  # height, width
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes')  # ONNX-only
    parser.add_argument('--simplify', action='store_true', help='simplify ONNX model')  # ONNX-only
    opt = parser.parse_args()
    opt.img_size *= 2 if len(opt.img_size) == 1 else 1  # expand a single value to (h, w)
    print(opt)
    set_logging()
    t = time.time()

    # Load PyTorch model
    device = select_device(opt.device)
    model = attempt_load(opt.weights, map_location=device)  # load FP32 model
    labels = model.names

    # Checks
    gs = int(max(model.stride))  # grid size (max stride)
    opt.img_size = [check_img_size(x, gs) for x in opt.img_size]  # verify img_size are gs-multiples

    # Input
    img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device)  # image size(1,3,320,192) iDetection

    # Update model
    for k, m in model.named_modules():
        m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
        if isinstance(m, models.common.Conv):  # assign export-friendly activations
            if isinstance(m.act, nn.Hardswish):
                m.act = Hardswish()
            elif isinstance(m.act, nn.SiLU):
                m.act = SiLU()
        # elif isinstance(m, models.yolo.Detect):
        #     m.forward = m.forward_export  # assign forward (optional)
    model.model[-1].export = not opt.grid  # set Detect() layer grid export
    y = model(img)  # dry run

    # TorchScript export -----------------------------------------------------------------------------------------------
    prefix = colorstr('TorchScript:')
    try:
        print(f'\n{prefix} starting export with torch {torch.__version__}...')
        f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
        ts = torch.jit.trace(model, img, strict=False)
        ts.save(f)
        print(f'{prefix} export success, saved as {f}')
    except Exception as e:
        print(f'{prefix} export failure: {e}')

    # ONNX export ------------------------------------------------------------------------------------------------------
    prefix = colorstr('ONNX:')
    try:
        import onnx

        print(f'{prefix} starting export with onnx {onnx.__version__}...')
        f = opt.weights.replace('.pt', '.onnx')  # filename
        torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                          output_names=['classes', 'boxes'] if y is None else ['output'],
                          dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # size(1,3,640,640)
                                        'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)

        # Checks
        model_onnx = onnx.load(f)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model
        # print(onnx.helper.printable_graph(model_onnx.graph))  # print

        # Simplify
        if opt.simplify:
            try:
                check_requirements(['onnx-simplifier'])
                import onnxsim

                print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
                model_onnx, check = onnxsim.simplify(model_onnx,
                                                     dynamic_input_shape=opt.dynamic,
                                                     input_shapes={'images': list(img.shape)} if opt.dynamic else None)
                assert check, 'assert check failed'
                onnx.save(model_onnx, f)
            except Exception as e:
                print(f'{prefix} simplifier failure: {e}')
        print(f'{prefix} export success, saved as {f}')
    except Exception as e:
        print(f'{prefix} export failure: {e}')

    # CoreML export ----------------------------------------------------------------------------------------------------
    prefix = colorstr('CoreML:')
    try:
        import coremltools as ct

        # fixed: report the coremltools version, not the onnx version
        print(f'{prefix} starting export with coremltools {ct.__version__}...')
        # convert model from torchscript and apply pixel scaling as per detect.py
        # NOTE(review): 'ts' is only defined when the TorchScript export above succeeded
        model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
        f = opt.weights.replace('.pt', '.mlmodel')  # filename
        model.save(f)
        print(f'{prefix} export success, saved as {f}')
    except Exception as e:
        print(f'{prefix} export failure: {e}')

    # Finish
    print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
------------------------------------------------------------------------------------------------------------------- +# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 +anchors_p7_640: + - [ 11,11, 13,30, 29,20 ] # P3/8 + - [ 30,46, 61,38, 39,92 ] # P4/16 + - [ 78,80, 146,66, 79,163 ] # P5/32 + - [ 149,150, 321,143, 157,303 ] # P6/64 + - [ 257,402, 359,290, 524,372 ] # P7/128 + +# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 +anchors_p7_1280: + - [ 19,22, 54,36, 32,77 ] # P3/8 + - [ 70,83, 138,71, 75,173 ] # P4/16 + - [ 165,159, 148,334, 375,151 ] # P5/32 + - [ 334,317, 251,626, 499,474 ] # P6/64 + - [ 750,326, 534,814, 1079,818 ] # P7/128 + +# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 +anchors_p7_1920: + - [ 29,34, 81,55, 47,115 ] # P3/8 + - [ 105,124, 207,107, 113,259 ] # P4/16 + - [ 247,238, 222,500, 563,227 ] # P5/32 + - [ 501,476, 376,939, 749,711 ] # P6/64 + - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml new file mode 100644 index 0000000..38dcc44 --- /dev/null +++ b/models/hub/yolov3-spp.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# 
darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml new file mode 100644 index 0000000..ff7638c --- /dev/null +++ b/models/hub/yolov3-tiny.yaml @@ -0,0 +1,41 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 
0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + ] diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml new file mode 100644 index 0000000..f2e7613 --- /dev/null +++ b/models/hub/yolov3.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 
1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml new file mode 100644 index 0000000..e772bff --- /dev/null +++ b/models/hub/yolov5-fpn.yaml @@ -0,0 +1,42 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 6, BottleneckCSP, [1024]], # 9 + ] + +# YOLOv5 FPN head +head: + [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml new file mode 100644 index 0000000..0633a90 --- /dev/null +++ b/models/hub/yolov5-p2.yaml @@ -0,0 +1,54 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 
backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 9 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 + [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) + + [ -1, 1, Conv, [ 128, 3, 2 ] ], + [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 + [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) + + [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml new file mode 100644 index 0000000..3728a11 --- /dev/null +++ b/models/hub/yolov5-p6.yaml @@ -0,0 +1,56 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, 
C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml new file mode 100644 index 0000000..ca8f849 --- /dev/null +++ b/models/hub/yolov5-p7.yaml @@ -0,0 +1,67 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], 
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 + [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], + [ -1, 3, C3, [ 1280, False ] ], # 13 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 + [ -1, 3, C3, [ 1024, False ] ], # 17 + + [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 21 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 25 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) + + [ -1, 1, Conv, [ 1024, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 + [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) + + [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) + ] diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml new file mode 100644 index 0000000..340f95a --- /dev/null +++ b/models/hub/yolov5-panet.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model 
depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 + ] + +# YOLOv5 PANet head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml new file mode 100644 index 0000000..11298b0 --- /dev/null +++ b/models/hub/yolov5l6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ 
-1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml new file mode 100644 index 0000000..48afc86 --- /dev/null +++ b/models/hub/yolov5m6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, 
Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml new file mode 100644 index 0000000..f2d6667 --- /dev/null +++ b/models/hub/yolov5s-transformer.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone 
+backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml new file mode 100644 index 0000000..1df577a --- /dev/null +++ b/models/hub/yolov5s6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], 
# 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml new file mode 100644 index 0000000..5ebc021 --- /dev/null +++ b/models/hub/yolov5x6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], 
# 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/yolo.py b/models/yolo.py new file mode 100644 index 0000000..39eddec --- /dev/null +++ b/models/yolo.py @@ -0,0 +1,276 @@ +# YOLOv5 YOLO-specific modules + +import argparse +import logging +import sys +from copy import deepcopy + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + 
+ +class Detect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(Detect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # convi + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super(Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.anchors /= m.stride.view(-1, 1, 1) + check_anchor_order(m) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s 
= [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if profile: + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + t = time_synchronized() + for _ in range(10): + _ = m(x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
+ m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... ') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... 
') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, + C3, C3TR]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3TR]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[x] for x in f]) + elif m is Detect: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, m_.f, m_.type, 
m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # y = model(img, profile=True) + + # Tensorboard + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter() + # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml new file mode 100644 index 0000000..71ebf86 --- /dev/null +++ b/models/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, 
[1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml new file mode 100644 index 0000000..3c749c9 --- /dev/null +++ b/models/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, 
Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml new file mode 100644 index 0000000..aca669d --- /dev/null +++ b/models/yolov5s.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml new file mode 100644 index 0000000..d3babdf --- /dev/null +++ b/models/yolov5x.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model 
depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/obbUtils/__init__.py b/obbUtils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/obbUtils/convert.sh b/obbUtils/convert.sh new file mode 100644 index 0000000..4d8817d --- /dev/null +++ b/obbUtils/convert.sh @@ -0,0 +1 @@ +python diff --git a/obbUtils/decoder.py b/obbUtils/decoder.py new file mode 100644 index 0000000..5dc7388 --- /dev/null +++ b/obbUtils/decoder.py @@ -0,0 +1,207 @@ +import torch.nn.functional as F +import torch + + +class DecDecoder_test(object): + def __init__(self, K, conf_thresh, num_classes): + self.K = K + self.conf_thresh = conf_thresh + self.num_classes = num_classes + + def _topk(self, scores): + batch, cat, height, width = scores.size() + + topk_scores, topk_inds 
= torch.topk(scores.view(batch, cat, -1), self.K) + + topk_inds = topk_inds % (height * width) + topk_ys = (topk_inds // width).int().float() + topk_xs = (topk_inds % width).int().float() + + topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), self.K) + topk_clses = (topk_ind // self.K).int() + topk_inds = self._gather_feat( topk_inds.view(batch, -1, 1), topk_ind).view(batch, self.K) + topk_ys = self._gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, self.K) + topk_xs = self._gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, self.K) + + return topk_score, topk_inds, topk_clses, topk_ys, topk_xs + + + def _nms(self, heat, kernel=3): + hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=(kernel - 1) // 2) + keep = (hmax == heat).float() + return heat * keep + + def _gather_feat(self, feat, ind, mask=None): + dim = feat.size(2) + ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) + feat = feat.gather(1, ind) + + ''' + if mask is not None: + mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + ''' + return feat + + def _tranpose_and_gather_feat(self, feat, ind): + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = self._gather_feat(feat, ind) + return feat + + def ctdet_decode(self, pr_decs): + heat = pr_decs['hm'] + wh = pr_decs['wh'] + reg = pr_decs['reg'] + cls_theta = pr_decs['cls_theta'] + + batch, c, height, width = heat.size() + heat = self._nms(heat) + + scores, inds, clses, ys, xs = self._topk(heat) + reg = self._tranpose_and_gather_feat(reg, inds) + reg = reg.view(batch, self.K, 2) + xs = xs.view(batch, self.K, 1) + reg[:, :, 0:1] + ys = ys.view(batch, self.K, 1) + reg[:, :, 1:2] + clses = clses.view(batch, self.K, 1).float() + scores = scores.view(batch, self.K, 1) + wh = self._tranpose_and_gather_feat(wh, inds) + wh = wh.view(batch, self.K, 10) + # add + cls_theta = self._tranpose_and_gather_feat(cls_theta, inds) 
+ cls_theta = cls_theta.view(batch, self.K, 1) + mask = (cls_theta>0.8).float().view(batch, self.K, 1) + # + tt_x = (xs+wh[..., 0:1])*mask + (xs)*(1.-mask) + tt_y = (ys+wh[..., 1:2])*mask + (ys-wh[..., 9:10]/2)*(1.-mask) + rr_x = (xs+wh[..., 2:3])*mask + (xs+wh[..., 8:9]/2)*(1.-mask) + rr_y = (ys+wh[..., 3:4])*mask + (ys)*(1.-mask) + bb_x = (xs+wh[..., 4:5])*mask + (xs)*(1.-mask) + bb_y = (ys+wh[..., 5:6])*mask + (ys+wh[..., 9:10]/2)*(1.-mask) + ll_x = (xs+wh[..., 6:7])*mask + (xs-wh[..., 8:9]/2)*(1.-mask) + ll_y = (ys+wh[..., 7:8])*mask + (ys)*(1.-mask) + # + detections = torch.cat([xs, # cen_x + ys, # cen_y + tt_x, + tt_y, + rr_x, + rr_y, + bb_x, + bb_y, + ll_x, + ll_y, + scores, + clses], + dim=2) + + + return detections + + + +class DecDecoder(object): + def __init__(self, K, conf_thresh, num_classes): + self.K = K + self.conf_thresh = conf_thresh + self.num_classes = num_classes + + def _topk(self, scores): + batch, cat, height, width = scores.size() + + topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), self.K) + + topk_inds = topk_inds % (height * width) + topk_ys = (topk_inds // width).int().float() + topk_xs = (topk_inds % width).int().float() + + topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), self.K) + topk_clses = (topk_ind // self.K).int() + topk_inds = self._gather_feat( topk_inds.view(batch, -1, 1), topk_ind).view(batch, self.K) + topk_ys = self._gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, self.K) + topk_xs = self._gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, self.K) + + return topk_score, topk_inds, topk_clses, topk_ys, topk_xs + + + def _nms(self, heat, kernel=3): + hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=(kernel - 1) // 2) + keep = (hmax == heat).float() + return heat * keep + + def _gather_feat(self, feat, ind, mask=None): + dim = feat.size(2) + ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) + feat = feat.gather(1, ind) + + + if mask is not None: + 
mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + + return feat + + def _tranpose_and_gather_feat(self, feat, ind): + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = self._gather_feat(feat, ind) + return feat + + def ctdet_decode(self, pr_decs): + heat = pr_decs['hm'] + wh = pr_decs['wh'] + reg = pr_decs['reg'] + cls_theta = pr_decs['cls_theta'] + + batch, c, height, width = heat.size() + heat = self._nms(heat) + + scores, inds, clses, ys, xs = self._topk(heat) + reg = self._tranpose_and_gather_feat(reg, inds) + reg = reg.view(batch, self.K, 2) + xs = xs.view(batch, self.K, 1) + reg[:, :, 0:1] + ys = ys.view(batch, self.K, 1) + reg[:, :, 1:2] + clses = clses.view(batch, self.K, 1).float() + scores = scores.view(batch, self.K, 1) + wh = self._tranpose_and_gather_feat(wh, inds) + wh = wh.view(batch, self.K, 10) + # add + cls_theta = self._tranpose_and_gather_feat(cls_theta, inds) + cls_theta = cls_theta.view(batch, self.K, 1) + mask = (cls_theta>0.8).float().view(batch, self.K, 1) + # + tt_x = (xs+wh[..., 0:1])*mask + (xs)*(1.-mask) + tt_y = (ys+wh[..., 1:2])*mask + (ys-wh[..., 9:10]/2)*(1.-mask) + rr_x = (xs+wh[..., 2:3])*mask + (xs+wh[..., 8:9]/2)*(1.-mask) + rr_y = (ys+wh[..., 3:4])*mask + (ys)*(1.-mask) + bb_x = (xs+wh[..., 4:5])*mask + (xs)*(1.-mask) + bb_y = (ys+wh[..., 5:6])*mask + (ys+wh[..., 9:10]/2)*(1.-mask) + ll_x = (xs+wh[..., 6:7])*mask + (xs-wh[..., 8:9]/2)*(1.-mask) + ll_y = (ys+wh[..., 7:8])*mask + (ys)*(1.-mask) + # + detections = torch.cat([xs, # cen_x + ys, # cen_y + tt_x, + tt_y, + rr_x, + rr_y, + bb_x, + bb_y, + ll_x, + ll_y, + scores, + clses], + dim=2) + + + #return detections + + index = (scores>self.conf_thresh).squeeze(0).squeeze(1) + detections = detections[:,index,:] + #print('####line203 decoder.py ', detections.size(),scores.size()) + return detections.data.cpu().numpy() + + + \ No newline at end of file diff --git a/obbUtils/drownUtils.py 
b/obbUtils/drownUtils.py new file mode 100644 index 0000000..4d6750f --- /dev/null +++ b/obbUtils/drownUtils.py @@ -0,0 +1,222 @@ +import numpy as np +import time,cv2 +def ms(t1,t0): + return (t1-t0)*1000.0 +def center_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框重点坐标xy + ''' + boundbxs_x1=boundbxs[0] + boundbxs_y1=boundbxs[1] + boundbxs_x2=boundbxs[2] + boundbxs_y2=boundbxs[3] + center_x=0.5*(boundbxs_x1+boundbxs_x2) + center_y=0.5*(boundbxs_y1+boundbxs_y2) + return center_x,center_y + +def fourcorner_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框四个角点坐标,以contours顺序。 + ''' + boundbxs_x1=boundbxs[0] + boundbxs_y1=boundbxs[1] + boundbxs_x2=boundbxs[2] + boundbxs_y2=boundbxs[3] + wid=boundbxs_x2-boundbxs_x1 + hei=boundbxs_y2-boundbxs_y1 + boundbxs_x3=boundbxs_x1+wid + boundbxs_y3=boundbxs_y1 + boundbxs_x4=boundbxs_x1 + boundbxs_y4 = boundbxs_y1+hei + contours_rec=[[boundbxs_x1,boundbxs_y1],[boundbxs_x3,boundbxs_y3],[boundbxs_x2,boundbxs_y2],[boundbxs_x4,boundbxs_y4]] + return contours_rec + +def remove_simivalue(list1,list2): + ''' + 将list1中属于list2的元素都删除。 + 输入:两个嵌套列表 + 返回:嵌套列表 + ''' + list33=list1.copy() + for i in range(len(list1)): + for j in range(len(list2)): + if list2[j] == list1[i]: + # list33.pop(list1[i]) + list33.remove(list1[i]) + return list33 + +def remove_sameeleme_inalist(list3): + ''' + 将list3中重复嵌套列表元素删除。 + 输入:嵌套列表 + 返回:嵌套列表 + ''' + list3=list3 + list4=[] + list4.append(list3[0]) + for dict in list3: + k=0 + for item in list4: + if dict!=item: + k=k+1 + else: + break + if k==len(list4): + list4.append(dict) + return list4 + +def order_points(pts): + ''' sort rectangle points by clockwise ''' + sort_x = pts[np.argsort(pts[:, 0]), :] + + Left = sort_x[:2, :] + Right = sort_x[2:, :] + # Left sort + Left = Left[np.argsort(Left[:, 1])[::-1], :] + # Right sort + Right = Right[np.argsort(Right[:, 1]), :] + return np.concatenate((Left, Right), axis=0) + +def mixDrowing_water_postprocess(preds,_mask_cv,pars ): + '''考虑船上人过滤''' + 
'''输入:落水人员的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断人员是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终落水人员(坐标、类别、置信度), + ''' + '''1、最大分割水域作为判断依据''' + #zoom_factor=4 #缩小因子设置为4,考虑到numpy中分别遍历xy进行缩放耗时大。 + original_height = _mask_cv.shape[0] + original_width=_mask_cv.shape[1] + + zoom_factor = original_width/480.0 + + zoom_height=int(original_height/zoom_factor) + zoom_width=int(original_width/zoom_factor) + + _mask_cv = cv2.resize(_mask_cv, (zoom_width,zoom_height)) #缩小原图,宽在前,高在后 + t4 = time.time() + img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape)==3 else _mask_cv # + t5 = time.time() + contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t6 = time.time() + + '''新增模块::如果水域为空,则返回原图、无落水人员等。''' + if contour_info==[]: + # final_img=_img_cv + final_head_person_filterwater=[] + timeInfos=0 + # return final_img, final_head_person_filterwater + return final_head_person_filterwater,timeInfos + else: + max_contour = contour_info[0] + max_contour=max_contour[0]*zoom_factor# contours恢复原图尺寸 + print(max_contour) + t7 = time.time() + + + '''2.1、preds中head+person取出,boat取出。''' + init_head_person=[] + init_boat = [] + for i in range(len(preds)): + if preds[i][4]=='head' or preds[i][4]=='person': + init_head_person.append(preds[i]) + else: + init_boat.append(preds[i]) + t8 = time.time() + + '''新增模块:2.2、preds中head+person取出,过滤掉head与person中指向同一人的部分,保留同一人的person标签。''' + init_head=[] + init_person=[] + #head与person标签分开 + for i in range(len(init_head_person)): + if init_head_person[i][4]=='head': + init_head.append(init_head_person[i]) + else: + init_person.append(init_head_person[i]) + # person的框形成contours + person_contour=[] + for i in range(len(init_person)): + 
boundbxs_temp=[init_person[i][0],init_person[i][1],init_person[i][2],init_person[i][3]] + contour_temp_person=fourcorner_coordinate(boundbxs_temp) #得到person预测框的顺序contour + contour_temp_person=np.array(contour_temp_person) + contour_temp_person=np.float32(contour_temp_person) + person_contour.append(np.array(contour_temp_person)) + # head是否在person的contours内,在说明是同一人,过滤掉。 + list_head=[] + for i in range(len(init_head)): + for j in range(len(person_contour)): + center_x, center_y=center_coordinate(init_head[i]) + flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + pass + else: + list_head.append(init_head[i]) + # person和最终head合并起来 + init_head_person_temp=init_person+list_head + + '''3、preds中head+person,通过1中水域过滤''' + init_head_person_filterwater=init_head_person_temp + final_head_person_filterwater=[] + for i in range(len(init_head_person_filterwater)): + center_x, center_y=center_coordinate(init_head_person_filterwater[i]) + flag = cv2.pointPolygonTest(max_contour, (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + final_head_person_filterwater.append(init_head_person_filterwater[i]) + else: + pass + t9 = time.time() + + '''4、水域过滤后的head+person,再通过船舶范围过滤''' + init_head_person_filterboat=final_head_person_filterwater + # final_head_person_filterboat=[] + #获取船舶范围 + boat_contour=[] + + for i in range(len(init_boat)): + boundbxs1=[init_boat[i][0],init_boat[i][1],init_boat[i][2],init_boat[i][3]] + contour_temp=fourcorner_coordinate(boundbxs1) #得到boat预测框的顺序contour + contour_temp_=np.array(contour_temp) + contour_temp_=np.float32(contour_temp_) + boat_contour.append(np.array(contour_temp_)) + t10 = time.time() + # 遍历船舶范围,取出在船舶范围内的head和person(可能有重复元素) + list_headperson_inboat=[] + for i in range(len(init_head_person_filterboat)): + for j in range(len(boat_contour)): + center_x, center_y=center_coordinate(init_head_person_filterboat[i]) + # yyyyyyyy=boat_contour[j] + 
flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + list_headperson_inboat.append(init_head_person_filterboat[i]) + else: + pass + # print('list_headperson_inboat',list_headperson_inboat) + if len(list_headperson_inboat)==0: + pass + else: + list_headperson_inboat=remove_sameeleme_inalist(list_headperson_inboat) #将重复嵌套列表元素删除 + # 过滤船舶范围内的head和person + final_head_person_filterboat=remove_simivalue(init_head_person_filterboat,list_headperson_inboat) + final_output_luoshui=final_head_person_filterboat + t11 = time.time() + + timeInfos=('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + + return final_output_luoshui,timeInfos #返回最终绘制的结果图、最终落水人员(坐标、类别、置信度) + + diff --git a/obbUtils/func_utils.py b/obbUtils/func_utils.py new file mode 100644 index 0000000..bccf819 --- /dev/null +++ b/obbUtils/func_utils.py @@ -0,0 +1,112 @@ +import os,sys +import torch +import numpy as np +sys.path.extend(['../AIlib2/obbUtils']) +#import datasets.DOTA_devkit.ResultMerge_multi_process +#from datasets.DOTA_devkit.ResultMerge_multi_process import py_cpu_nms_poly_fast, py_cpu_nms_poly +from dotadevkit.ops.ResultMerge import py_cpu_nms_poly_fast, py_cpu_nms_poly +import time + +# def decode_prediction(predictions, dsets, args, img_id, down_ratio): +def decode_prediction(predictions, category, model_size, down_ratio,ori_image): + t1=time.time() + predictions = predictions[0, :, :] + + # ttt1=time.time() + # # ori_image = dsets.load_image(dsets.img_ids.index(img_id)) #加载了原图第2次????这里耗时 改1 + # ttt2 = time.time() + # print(f'jiazaitupian. 
({(1E3 * (ttt2 - ttt1)):.1f}ms) ') + h, w, c = ori_image.shape + + pts0 = {cat: [] for cat in category} + scores0 = {cat: [] for cat in category} + for pred in predictions: + cen_pt = np.asarray([pred[0], pred[1]], np.float32) + tt = np.asarray([pred[2], pred[3]], np.float32) + rr = np.asarray([pred[4], pred[5]], np.float32) + bb = np.asarray([pred[6], pred[7]], np.float32) + ll = np.asarray([pred[8], pred[9]], np.float32) + tl = tt + ll - cen_pt + bl = bb + ll - cen_pt + tr = tt + rr - cen_pt + br = bb + rr - cen_pt + score = pred[10] + clse = pred[11] + pts = np.asarray([tr, br, bl, tl], np.float32) + pts[:, 0] = pts[:, 0] * down_ratio / model_size[0] * w + pts[:, 1] = pts[:, 1] * down_ratio / model_size[1] * h + pts0[category[int(clse)]].append(pts) + scores0[category[int(clse)]].append(score) + t2=time.time() + #print('###line40:decode_prediction time: %.1f ',(t2-t1)*1000.0) + return pts0, scores0 + + +def non_maximum_suppression(pts, scores): + nms_item = np.concatenate([pts[:, 0:1, 0], + pts[:, 0:1, 1], + pts[:, 1:2, 0], + pts[:, 1:2, 1], + pts[:, 2:3, 0], + pts[:, 2:3, 1], + pts[:, 3:4, 0], + pts[:, 3:4, 1], + scores[:, np.newaxis]], axis=1) + nms_item = np.asarray(nms_item, np.float64) + keep_index = py_cpu_nms_poly_fast(dets=nms_item, thresh=0.1) + return nms_item[keep_index] + + +def write_results(args, + model, + dsets, + down_ratio, + device, + decoder, + result_path, + print_ps=False): + results = {cat: {img_id: [] for img_id in dsets.img_ids} for cat in dsets.category} + for index in range(len(dsets)): + data_dict = dsets.__getitem__(index) + image = data_dict['image'].to(device) + img_id = data_dict['img_id'] + image_w = data_dict['image_w'] + image_h = data_dict['image_h'] + + with torch.no_grad(): + pr_decs = model(image) + + + decoded_pts = [] + decoded_scores = [] + torch.cuda.synchronize(device) + predictions = decoder.ctdet_decode(pr_decs) + pts0, scores0 = decode_prediction(predictions, dsets, args, img_id, down_ratio) + 
decoded_pts.append(pts0) + decoded_scores.append(scores0) + + # nms + for cat in dsets.category: + if cat == 'background': + continue + pts_cat = [] + scores_cat = [] + for pts0, scores0 in zip(decoded_pts, decoded_scores): + pts_cat.extend(pts0[cat]) + scores_cat.extend(scores0[cat]) + pts_cat = np.asarray(pts_cat, np.float32) + scores_cat = np.asarray(scores_cat, np.float32) + if pts_cat.shape[0]: + nms_results = non_maximum_suppression(pts_cat, scores_cat) + results[cat][img_id].extend(nms_results) + if print_ps: + print('testing {}/{} data {}'.format(index+1, len(dsets), img_id)) + + for cat in dsets.category: + if cat == 'background': + continue + with open(os.path.join(result_path, 'Task1_{}.txt'.format(cat)), 'w') as f: + for img_id in results[cat]: + for pt in results[cat][img_id]: + f.write('{} {:.12f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format( + img_id, pt[8], pt[0], pt[1], pt[2], pt[3], pt[4], pt[5], pt[6], pt[7])) diff --git a/obbUtils/load_obb_model.py b/obbUtils/load_obb_model.py new file mode 100644 index 0000000..aeddb86 --- /dev/null +++ b/obbUtils/load_obb_model.py @@ -0,0 +1,64 @@ +import torch +import numpy as np +import cv2 +import time +import os +import sys +sys.path.extend(['../AIlib2/obbUtils']) +import matplotlib.pyplot as plt +import func_utils +import time +import torchvision.transforms as transforms +from obbmodels import ctrbox_net +import decoder +import tensorrt as trt +import onnx +import onnxruntime as ort + +def load_model_decoder_OBB(par={'down_ratio':4,'num_classes':15,'weights':'weights_dota/obb.pth'}): + weights=par['weights'] + heads = par['heads'] + heads['hm']=par['num_classes'] + par['heads']=heads + if weights.endswith('.pth') or weights.endswith('.pt'): + resume=par['weights'] + down_ratio = par['down_ratio'] + model = ctrbox_net.CTRBOX(heads=heads, + pretrained=True, + down_ratio=down_ratio, + final_kernel=1, + head_conv=256) + + + checkpoint = torch.load(resume, map_location=lambda storage, loc: 
storage) + print('loaded weights from {}, epoch {}'.format(resume, checkpoint['epoch'])) + state_dict_ = checkpoint['model_state_dict'] + model.load_state_dict(state_dict_, strict=True) + model.eval() + model = model.to(par['device']) + model = model.half() if par['half'] else model + par['saveType']='pth' + elif weights.endswith('.onnx'): + onnx_model = onnx.load(weights) + onnx.checker.check_model(onnx_model) + # 设置模型session以及输入信息 + sess = ort.InferenceSession(str(weights),providers= ort.get_available_providers()) + print('len():',len( sess.get_inputs() )) + input_name = sess.get_inputs()[0].name + model = {'sess':sess,'input_name':input_name} + par['saveType']='onnx' + elif weights.endswith('.engine'): + logger = trt.Logger(trt.Logger.ERROR) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + print('#####load TRT file:',weights,'success #####') + par['saveType']='trt' + + + decoder2 = decoder.DecDecoder(K=par['K'], + conf_thresh=par['conf_thresh'], + num_classes=par['num_classes']) + + + + return model, decoder2 diff --git a/obbUtils/obbmodels/ctrbox_net.py b/obbUtils/obbmodels/ctrbox_net.py new file mode 100644 index 0000000..63632c5 --- /dev/null +++ b/obbUtils/obbmodels/ctrbox_net.py @@ -0,0 +1,90 @@ +import torch.nn as nn +import numpy as np +import torch +from .model_parts import CombinationModule +from . 
import resnet +import decoder + + +class CTRBOX(nn.Module): + def __init__(self, heads, pretrained, down_ratio, final_kernel, head_conv): + super(CTRBOX, self).__init__() + + # channels = [3, 64, 256, 512, 1024, 2048] + # assert down_ratio in [2, 4, 8, 16] + # self.l1 = int(np.log2(down_ratio)) + # self.base_network = resnet.resnet101(pretrained=pretrained) + # self.dec_c2 = CombinationModule(512, 256, batch_norm=True) + # self.dec_c3 = CombinationModule(1024, 512, batch_norm=True) + # self.dec_c4 = CombinationModule(2048, 1024, batch_norm=True) + + #channels = [3, 64, 256, 512, 1024, 2048] + #assert down_ratio in [2, 4, 8, 16] + #self.l1 = int(np.log2(down_ratio)) + #self.base_network = resnet.resnet50(pretrained=pretrained) + #self.dec_c2 = CombinationModule(512, 256, batch_norm=True) + #self.dec_c3 = CombinationModule(1024, 512, batch_norm=True) + #self.dec_c4 = CombinationModule(2048, 1024, batch_norm=True) + + + #channels = [3, 64, 64, 128, 256, 512] + #assert down_ratio in [2, 4, 8, 16] + #self.l1 = int(np.log2(down_ratio)) + #self.base_network = resnet.resnet34(pretrained=pretrained) + #self.dec_c2 = CombinationModule(128, 64, batch_norm=True) + #self.dec_c3 = CombinationModule(256, 128, batch_norm=True) + #self.dec_c4 = CombinationModule(512, 256, batch_norm=True) + + channels = [3, 64, 64, 128, 256, 512] + assert down_ratio in [2, 4, 8, 16] + self.l1 = int(np.log2(down_ratio)) + self.base_network = resnet.resnet18(pretrained=pretrained) + self.dec_c2 = CombinationModule(128, 64, batch_norm=True) + self.dec_c3 = CombinationModule(256, 128, batch_norm=True) + self.dec_c4 = CombinationModule(512, 256, batch_norm=True) + + print('#####################ctrbox_net.py ##############') + self.heads = heads + for head in self.heads: + classes = self.heads[head] + if head == 'wh': + fc = nn.Sequential(nn.Conv2d(channels[self.l1], head_conv, kernel_size=3, padding=1, bias=True), + # nn.BatchNorm2d(head_conv), # BN not used in the paper, but would help stable training 
+ nn.ReLU(inplace=True), + nn.Conv2d(head_conv, classes, kernel_size=3, padding=1, bias=True)) + else: + fc = nn.Sequential(nn.Conv2d(channels[self.l1], head_conv, kernel_size=3, padding=1, bias=True), + # nn.BatchNorm2d(head_conv), # BN not used in the paper, but would help stable training + nn.ReLU(inplace=True), + nn.Conv2d(head_conv, classes, kernel_size=final_kernel, stride=1, padding=final_kernel // 2, bias=True)) + if 'hm' in head: + fc[-1].bias.data.fill_(-2.19) + else: + self.fill_fc_weights(fc) + + self.__setattr__(head, fc) + + + def fill_fc_weights(self, m): + if isinstance(m, nn.Conv2d): + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.base_network(x) + # import matplotlib.pyplot as plt + # import os + # for idx in range(x[1].shape[1]): + # temp = x[1][0,idx,:,:] + # temp = temp.data.cpu().numpy() + # plt.imsave(os.path.join('dilation', '{}.png'.format(idx)), temp) + c4_combine = self.dec_c4(x[-1], x[-2]) + c3_combine = self.dec_c3(c4_combine, x[-3]) + c2_combine = self.dec_c2(c3_combine, x[-4]) + + dec_dict = {} + for head in self.heads: + dec_dict[head] = self.__getattr__(head)(c2_combine) + if 'hm' in head or 'cls' in head: + dec_dict[head] = torch.sigmoid(dec_dict[head]) + return dec_dict diff --git a/obbUtils/obbmodels/ctrbox_net_bak.py b/obbUtils/obbmodels/ctrbox_net_bak.py new file mode 100644 index 0000000..92e85bb --- /dev/null +++ b/obbUtils/obbmodels/ctrbox_net_bak.py @@ -0,0 +1,204 @@ +import torch.nn as nn +import numpy as np +import torch +from .model_parts import CombinationModule +from . 
import resnet +import decoder + +class CTRBOX_trt(nn.Module): + def __init__(self, heads, pretrained, down_ratio, final_kernel, head_conv,test_flag=False): + super(CTRBOX_trt, self).__init__() + + # channels = [3, 64, 256, 512, 1024, 2048] + # assert down_ratio in [2, 4, 8, 16] + # self.l1 = int(np.log2(down_ratio)) + # self.base_network = resnet.resnet101(pretrained=pretrained) + # self.dec_c2 = CombinationModule(512, 256, batch_norm=True) + # self.dec_c3 = CombinationModule(1024, 512, batch_norm=True) + # self.dec_c4 = CombinationModule(2048, 1024, batch_norm=True) + + #channels = [3, 64, 256, 512, 1024, 2048] + #assert down_ratio in [2, 4, 8, 16] + #self.l1 = int(np.log2(down_ratio)) + #self.base_network = resnet.resnet50(pretrained=pretrained) + #self.dec_c2 = CombinationModule(512, 256, batch_norm=True) + #self.dec_c3 = CombinationModule(1024, 512, batch_norm=True) + #self.dec_c4 = CombinationModule(2048, 1024, batch_norm=True) + + + #channels = [3, 64, 64, 128, 256, 512] + #assert down_ratio in [2, 4, 8, 16] + #self.l1 = int(np.log2(down_ratio)) + #self.base_network = resnet.resnet34(pretrained=pretrained) + #self.dec_c2 = CombinationModule(128, 64, batch_norm=True) + #self.dec_c3 = CombinationModule(256, 128, batch_norm=True) + #self.dec_c4 = CombinationModule(512, 256, batch_norm=True) + + self.test_flag=test_flag + channels = [3, 64, 64, 128, 256, 512] + assert down_ratio in [2, 4, 8, 16] + self.l1 = int(np.log2(down_ratio)) + self.base_network = resnet.resnet18(pretrained=pretrained) + self.dec_c2 = CombinationModule(128, 64, batch_norm=True) + self.dec_c3 = CombinationModule(256, 128, batch_norm=True) + self.dec_c4 = CombinationModule(512, 256, batch_norm=True) + + + self.heads = heads + + if self.test_flag: + self.decoder = decoder.DecDecoder_test(K=100, + conf_thresh=0.18, + num_classes=15) + + + + for head in self.heads: + classes = self.heads[head] + if head == 'wh': + fc = nn.Sequential(nn.Conv2d(channels[self.l1], head_conv, kernel_size=3, 
padding=1, bias=True), + # nn.BatchNorm2d(head_conv), # BN not used in the paper, but would help stable training + nn.ReLU(inplace=True), + nn.Conv2d(head_conv, classes, kernel_size=3, padding=1, bias=True)) + else: + fc = nn.Sequential(nn.Conv2d(channels[self.l1], head_conv, kernel_size=3, padding=1, bias=True), + # nn.BatchNorm2d(head_conv), # BN not used in the paper, but would help stable training + nn.ReLU(inplace=True), + nn.Conv2d(head_conv, classes, kernel_size=final_kernel, stride=1, padding=final_kernel // 2, bias=True)) + if 'hm' in head: + fc[-1].bias.data.fill_(-2.19) + else: + self.fill_fc_weights(fc) + + self.__setattr__(head, fc) + + + def fill_fc_weights(self, m): + if isinstance(m, nn.Conv2d): + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.base_network(x) + # import matplotlib.pyplot as plt + # import os + # for idx in range(x[1].shape[1]): + # temp = x[1][0,idx,:,:] + # temp = temp.data.cpu().numpy() + # plt.imsave(os.path.join('dilation', '{}.png'.format(idx)), temp) + c4_combine = self.dec_c4(x[-1], x[-2]) + c3_combine = self.dec_c3(c4_combine, x[-3]) + c2_combine = self.dec_c2(c3_combine, x[-4]) + + dec_dict = {} + for head in self.heads: + dec_dict[head] = self.__getattr__(head)(c2_combine) + if 'hm' in head or 'cls' in head: + dec_dict[head] = torch.sigmoid(dec_dict[head]) + + predictions = self.decoder.ctdet_decode(dec_dict) + #'hm': 'wh':'reg': 'cls_theta': + print('###############line102#############') + return predictions, dec_dict['hm'], dec_dict['wh'], dec_dict['reg'], dec_dict['cls_theta'] + #if self.test_flag: + # predictions = self.decoder.ctdet_decode(dec_dict) + # return predictions + #else: + # return dec_dict + +class CTRBOX_pth(nn.Module): + def __init__(self, heads, pretrained, down_ratio, final_kernel, head_conv,test_flag=False): + super(CTRBOX_pth, self).__init__() + + # channels = [3, 64, 256, 512, 1024, 2048] + # assert down_ratio in [2, 4, 8, 16] + # self.l1 = 
int(np.log2(down_ratio)) + # self.base_network = resnet.resnet101(pretrained=pretrained) + # self.dec_c2 = CombinationModule(512, 256, batch_norm=True) + # self.dec_c3 = CombinationModule(1024, 512, batch_norm=True) + # self.dec_c4 = CombinationModule(2048, 1024, batch_norm=True) + + #channels = [3, 64, 256, 512, 1024, 2048] + #assert down_ratio in [2, 4, 8, 16] + #self.l1 = int(np.log2(down_ratio)) + #self.base_network = resnet.resnet50(pretrained=pretrained) + #self.dec_c2 = CombinationModule(512, 256, batch_norm=True) + #self.dec_c3 = CombinationModule(1024, 512, batch_norm=True) + #self.dec_c4 = CombinationModule(2048, 1024, batch_norm=True) + + + #channels = [3, 64, 64, 128, 256, 512] + #assert down_ratio in [2, 4, 8, 16] + #self.l1 = int(np.log2(down_ratio)) + #self.base_network = resnet.resnet34(pretrained=pretrained) + #self.dec_c2 = CombinationModule(128, 64, batch_norm=True) + #self.dec_c3 = CombinationModule(256, 128, batch_norm=True) + #self.dec_c4 = CombinationModule(512, 256, batch_norm=True) + + self.test_flag=test_flag + channels = [3, 64, 64, 128, 256, 512] + assert down_ratio in [2, 4, 8, 16] + self.l1 = int(np.log2(down_ratio)) + self.base_network = resnet.resnet18(pretrained=pretrained) + self.dec_c2 = CombinationModule(128, 64, batch_norm=True) + self.dec_c3 = CombinationModule(256, 128, batch_norm=True) + self.dec_c4 = CombinationModule(512, 256, batch_norm=True) + + + self.heads = heads + if self.test_flag: + self.decoder = decoder.DecDecoder_test(K=100, + conf_thresh=0.18, + num_classes=15) + + + for head in self.heads: + classes = self.heads[head] + if head == 'wh': + fc = nn.Sequential(nn.Conv2d(channels[self.l1], head_conv, kernel_size=3, padding=1, bias=True), + # nn.BatchNorm2d(head_conv), # BN not used in the paper, but would help stable training + nn.ReLU(inplace=True), + nn.Conv2d(head_conv, classes, kernel_size=3, padding=1, bias=True)) + else: + fc = nn.Sequential(nn.Conv2d(channels[self.l1], head_conv, kernel_size=3, padding=1, 
bias=True), + # nn.BatchNorm2d(head_conv), # BN not used in the paper, but would help stable training + nn.ReLU(inplace=True), + nn.Conv2d(head_conv, classes, kernel_size=final_kernel, stride=1, padding=final_kernel // 2, bias=True)) + if 'hm' in head: + fc[-1].bias.data.fill_(-2.19) + else: + self.fill_fc_weights(fc) + + self.__setattr__(head, fc) + + + def fill_fc_weights(self, m): + if isinstance(m, nn.Conv2d): + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + x = self.base_network(x) + # import matplotlib.pyplot as plt + # import os + # for idx in range(x[1].shape[1]): + # temp = x[1][0,idx,:,:] + # temp = temp.data.cpu().numpy() + # plt.imsave(os.path.join('dilation', '{}.png'.format(idx)), temp) + c4_combine = self.dec_c4(x[-1], x[-2]) + c3_combine = self.dec_c3(c4_combine, x[-3]) + c2_combine = self.dec_c2(c3_combine, x[-4]) + + dec_dict = {} + for head in self.heads: + dec_dict[head] = self.__getattr__(head)(c2_combine) + if 'hm' in head or 'cls' in head: + dec_dict[head] = torch.sigmoid(dec_dict[head]) + + + if self.test_flag: + predictions = self.decoder.ctdet_decode(dec_dict) + print('##line301:',predictions ) + return predictions, dec_dict['hm'], dec_dict['wh'], dec_dict['reg'], dec_dict['cls_theta'] + else: + return dec_dict diff --git a/obbUtils/obbmodels/model_parts.py b/obbUtils/obbmodels/model_parts.py new file mode 100644 index 0000000..a9e6d97 --- /dev/null +++ b/obbUtils/obbmodels/model_parts.py @@ -0,0 +1,37 @@ +import torch.nn.functional as F +import torch.nn as nn +import torch + +class CombinationModule(nn.Module): + def __init__(self, c_low, c_up, batch_norm=False, group_norm=False, instance_norm=False): + super(CombinationModule, self).__init__() + if batch_norm: + self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1), + nn.BatchNorm2d(c_up), + nn.ReLU(inplace=True)) + self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1), + nn.BatchNorm2d(c_up), + 
nn.ReLU(inplace=True)) + elif group_norm: + self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1), + nn.GroupNorm(num_groups=32, num_channels=c_up), + nn.ReLU(inplace=True)) + self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1), + nn.GroupNorm(num_groups=32, num_channels=c_up), + nn.ReLU(inplace=True)) + elif instance_norm: + self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1), + nn.InstanceNorm2d(num_features=c_up), + nn.ReLU(inplace=True)) + self.cat_conv = nn.Sequential(nn.Conv2d(c_up * 2, c_up, kernel_size=1, stride=1), + nn.InstanceNorm2d(num_features=c_up), + nn.ReLU(inplace=True)) + else: + self.up = nn.Sequential(nn.Conv2d(c_low, c_up, kernel_size=3, padding=1, stride=1), + nn.ReLU(inplace=True)) + self.cat_conv = nn.Sequential(nn.Conv2d(c_up*2, c_up, kernel_size=1, stride=1), + nn.ReLU(inplace=True)) + + def forward(self, x_low, x_up): + x_low = self.up(F.interpolate(x_low, x_up.shape[2:], mode='bilinear', align_corners=False)) + return self.cat_conv(torch.cat((x_up, x_low), 1)) \ No newline at end of file diff --git a/obbUtils/obbmodels/resnet.py b/obbUtils/obbmodels/resnet.py new file mode 100644 index 0000000..e21e2bf --- /dev/null +++ b/obbUtils/obbmodels/resnet.py @@ -0,0 +1,356 @@ +import torch +import torch.nn as nn + +try: + from torch.hub import load_state_dict_from_url +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', + 'wide_resnet50_2', 'wide_resnet101_2'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 
'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + __constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + 
__constants__ = ['downsample'] + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + 
bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + # self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + # self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + feat = [] + feat.append(x) # C0 + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + feat.append(x) # C1 + x = self.maxpool(x) + + x = self.layer1(x) + feat.append(x) # C2 + x = self.layer2(x) + feat.append(x) # C3 + x = self.layer3(x) + feat.append(x) # C4 + x = self.layer4(x) + feat.append(x) # C5 + + + # x = self.avgpool(x) + # x = torch.flatten(x, 1) + # x = self.fc(x) + # + return feat + + +def _resnet(arch, block, layers, pretrained, progress, **kwargs): + model = ResNet(block, layers, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) + model.load_state_dict(state_dict, strict=False) + return model + + +def resnet18(pretrained=False, progress=True, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on 
ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained=False, progress=True, **kwargs): + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet50(pretrained=False, progress=True, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained=False, progress=True, **kwargs): + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained=False, progress=True, **kwargs): + r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs) + + +def resnext50_32x4d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-50 32x4d model from + `"Aggregated Residual Transformation for Deep Neural Networks" `_ + + Args: + pretrained (bool): If True, returns a model 
pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def resnext101_32x8d(pretrained=False, progress=True, **kwargs): + r"""ResNeXt-101 32x8d model from + `"Aggregated Residual Transformation for Deep Neural Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) + + +def wide_resnet50_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-50-2 model from + `"Wide Residual Networks" `_ + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def wide_resnet101_2(pretrained=False, progress=True, **kwargs): + r"""Wide ResNet-101-2 model from + `"Wide Residual Networks" `_ + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) \ No newline at end of file diff --git a/obbUtils/ocrTrt.py b/obbUtils/ocrTrt.py new file mode 100644 index 0000000..cbebb8b --- /dev/null +++ b/obbUtils/ocrTrt.py @@ -0,0 +1,448 @@ +import torch +import argparse +import sys,os + +from torchvision import transforms +import cv2,glob +import numpy as np +import matplotlib.pyplot as plt +import time +from pathlib import Path +from concurrent.futures import ThreadPoolExecutor +import tensorrt as trt + +#import pycuda.driver as cuda + + +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id + +def infer_usage(): + image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + #weights = '../weights/BiSeNet/checkpoint.pth' + #segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights) + + weights = '../weights/BiSeNet/checkpoint_640X360_epo33.pth' + segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights,modelsize=(640,360)) + + image_urls=glob.glob('../../../../data/无人机起飞测试图像/*') + out_dir ='results/'; + os.makedirs(out_dir,exist_ok=True) + for im,image_url in enumerate(image_urls[0:]): + #image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG' + image_array0 = cv2.imread(image_url) + H,W,C = image_array0.shape + time_1=time.time() + pred,outstr = segmodel.eval(image_array0 ) + + #plt.figure(1);plt.imshow(pred); + #plt.show() + binary0 = pred.copy() + + + time0 = time.time() + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + if len(contours)>0: + max_id = 
get_largest_contours(contours) + binary0[:,:] = 0 + cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + + time1 = time.time() + + + time2 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time3 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + time4 = time.time() + + print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) ) + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] +def file_size(path): + # Return file/dir size (MB) + path = Path(path) + if path.is_file(): + return path.stat().st_size / 1E6 + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + else: + return 0.0 + + +def toONNX(seg_model,onnxFile,inputShape=(1,3,360,640),device=torch.device('cuda:0')): + print('####begin to export to onnx') + import onnx + + im = torch.rand(inputShape).to(device) + seg_model.eval() + text_for_pred = torch.LongTensor(1, 90).fill_(0).to(device) + + + out=seg_model(im) + print('###test model infer 
example####') + train=False + dynamic = False + opset=11 + torch.onnx.export(seg_model, (im),onnxFile, opset_version=opset, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) + + #torch.onnx.export(model, (dummy_input, dummy_text), "vitstr.onnx", verbose=True) + + + print('output onnx file:',onnxFile) +def ONNXtoTrt(onnxFile,trtFile,half=True): + import tensorrt as trt + #onnx = Path('../weights/BiSeNet/checkpoint.onnx') + #onnxFile = Path('../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx') + time0=time.time() + #half=True; + verbose=True;workspace=4;prefix=colorstr('TensorRT:') + #f = onnx.with_suffix('.engine') # TensorRT engine file + f=trtFile + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnxFile)): + raise RuntimeError('failed to load ONNX file: %s'%( onnxFile )) + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + print(f'{prefix} Network Description:') + for inp in inputs: + print(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + print(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + half &= builder.platform_has_fast_fp16 + print(f'{prefix} building FP{16 if half else 32} engine in {f}') + if half: + 
config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + time1=time.time() + print('output trtfile from ONNX, time:%.4f s, half: ,'%(time1-time0),trtFile,half) +def ONNX_eval(): + import onnx + import numpy as np + import onnxruntime as ort + import cv2 + + #model_path = '../weights/BiSeNet/checkpoint.onnx';modelSize=(512,512);mean=(0.335, 0.358, 0.332),std = (0.141, 0.138, 0.143) + model_path = '../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx';modelSize=(640,360);mean = (0.485, 0.456, 0.406);std = (0.229, 0.224, 0.225) + # 验证模型合法性 + onnx_model = onnx.load(model_path) + onnx.checker.check_model(onnx_model) + # 读入图像并调整为输入维度 + img = cv2.imread("../../river_demo/images/slope/菜地_20220713_青年河8_4335_1578.jpg") + H,W,C=img.shape + img = cv2.resize(img,modelSize).transpose(2,0,1) + img = np.array(img)[np.newaxis, :, :, :].astype(np.float32) + # 设置模型session以及输入信息 + sess = ort.InferenceSession(model_path,providers= ort.get_available_providers()) + print('len():',len( sess.get_inputs() )) + input_name1 = sess.get_inputs()[0].name + #input_name2 = sess.get_inputs()[1].name + #input_name3 = sess.get_inputs()[2].name + + #output = sess.run(None, {input_name1: img, input_name2: img, input_name3: img}) + output = sess.run(None, {input_name1: img}) + pred = np.argmax(output[0], axis=1)[0]#得到每行 + pred = cv2.resize(pred.astype(np.uint8),(W,H)) + #plt.imshow(pred);plt.show() + print( 'type:',type(output) , output[0].shape, output[0].dtype ) + + #weights = Path('../weights/BiSeNet/checkpoint.engine') + + half = False;device = 'cuda:0' + image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG' + #image_urls=glob.glob('../../river_demo/images/slope/*') + image_urls=glob.glob('../../../../data/无人机起飞测试图像/*') + #out_dir ='../../river_demo/images/results/' + out_dir ='results' + os.makedirs(out_dir,exist_ok=True) + + 
for im,image_url in enumerate(image_urls[0:]): + image_array0 = cv2.imread(image_url) + #img=segPreProcess_image(image_array0).to(device) + img=segPreProcess_image(image_array0,modelSize=modelSize,mean=mean,std=std,numpy=True) + + #img = cv2.resize(img,(512,512)).transpose(2,0,1) + img = np.array(img)[np.newaxis, :, :, :].astype(np.float32) + + + H,W,C = image_array0.shape + time_1=time.time() + #pred,outstr = segmodel.eval(image_array0 ) + + + output = sess.run(None, {input_name1: img}) + pred =output[0] + + + + #pred = model(img, augment=False, visualize=False) + + #pred = pred.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + pred = cv2.resize(pred.astype(np.uint8),(W,H)) + + outstr='###---###' + + binary0 = pred.copy() + + + time0 = time.time() + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + if len(contours)>0: + max_id = get_largest_contours(contours) + binary0[:,:] = 0 + cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + + time1 = time.time() + + + time2 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time3 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + time4 = time.time() + + print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) ) + print('outimage:',out_url) + + + +def EngineInfer_onePic_thread(pars_thread): + + + + + engine,image_array0,out_dir,image_url,im = pars_thread[0:6] + + + H,W,C = image_array0.shape + time0=time.time() + + time1=time.time() + # 运行模型 + + + pred,segInfoStr=segtrtEval(engine,image_array0,par={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True}) + pred = 1 - pred + time2=time.time() + + outstr='###---###' + binary0 = pred.copy() + time3 = 
time.time() + + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + #if len(contours)>0: + # max_id = get_largest_contours(contours) + # binary0[:,:] = 0 + # cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + time4 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time5 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + time6 = time.time() + + print('image:%d,%s ,%d*%d, %s,,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,segInfoStr, get_ms(time4,time3),get_ms(time5,time4),get_ms(time5,time0) )) + + + return 'success' +def trt_version(): + return trt.__version__ +def torch_device_from_trt(device): + if device == trt.TensorLocation.DEVICE: + return torch.device("cuda") + elif device == trt.TensorLocation.HOST: + return torch.device("cpu") + else: + return TypeError("%s is not supported by torch" % device) + +def torch_dtype_from_trt(dtype): + if dtype == trt.int8: + return torch.int8 + elif trt_version() >= '7.0' and dtype == trt.bool: + return torch.bool + elif dtype == trt.int32: + return torch.int32 + elif dtype == trt.float16: + return torch.float16 + elif dtype == trt.float32: + return torch.float32 + else: + raise TypeError("%s is not supported by torch" % dtype) +def TrtForward(engine,inputs,contextFlag=False): + + t0=time.time() + #with engine.create_execution_context() as context: + if not contextFlag: context = engine.create_execution_context() + else: context=contextFlag + + input_names=['images'];output_names=['output'] + batch_size = inputs[0].shape[0] + bindings = [None] * (len(input_names) + len(output_names)) + t1=time.time() + # 创建输出tensor,并分配内存 + outputs = [None] * len(output_names) + for i, output_name in enumerate(output_names): + idx = engine.get_binding_index(output_name)#通过binding_name找到对应的input_id + dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))#找到对应的数据类型 
+ shape = (batch_size,) + tuple(engine.get_binding_shape(idx))#找到对应的形状大小 + device = torch_device_from_trt(engine.get_location(idx)) + output = torch.empty(size=shape, dtype=dtype, device=device) + #print('&'*10,'device:',device,'idx:',idx,'shape:',shape,'dtype:',dtype,' device:',output.get_device()) + outputs[i] = output + #print('###line65:',output_name,i,idx,dtype,shape) + bindings[idx] = output.data_ptr()#绑定输出数据指针 + t2=time.time() + + for i, input_name in enumerate(input_names): + idx =engine.get_binding_index(input_name) + bindings[idx] = inputs[0].contiguous().data_ptr()#应当为inputs[i],对应3个输入。但由于我们使用的是单张图片,所以将3个输入全设置为相同的图片。 + #print('#'*10,'input_names:,', input_name,'idx:',idx, inputs[0].dtype,', inputs[0] device:',inputs[0].get_device()) + t3=time.time() + context.execute_v2(bindings) # 执行推理 + t4=time.time() + + + if len(outputs) == 1: + outputs = outputs[0] + outstr='create Context:%.2f alloc memory:%.2f prepare input:%.2f conext infer:%.2f, total:%.2f'%((t1-t0 )*1000 , (t2-t1)*1000,(t3-t2)*1000,(t4-t3)*1000, (t4-t0)*1000 ) + return outputs[0],outstr + +def EngineInfer(par): + + modelSize=par['modelSize'];mean = par['mean'] ;std = par['std'] ;RGB_convert_first=par['RGB_convert_first'];device=par['device'] + weights=par['weights']; image_dir=par['image_dir'] + max_threads=par['max_threads'] + image_urls=glob.glob('%s/*'%(image_dir)) + out_dir =par['out_dir'] + + os.makedirs(out_dir,exist_ok=True) + + #trt_model = SegModel_STDC_trt(weights=weights,modelsize=modelSize,std=std,mean=mean,device=device) + logger = trt.Logger(trt.Logger.ERROR) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + engine=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + print('#####load TRT file:',weights,'success #####') + + pars_thread=[] + pars_threads=[] + for im,image_url in enumerate(image_urls[0:]): + image_array0 = cv2.imread(image_url) + pars_thread=[engine,image_array0,out_dir,image_url,im] + pars_threads.append(pars_thread) + 
#EngineInfer_onePic_thread(pars_thread) + t1=time.time() + if max_threads==1: + for i in range(len(pars_threads[0:])): + EngineInfer_onePic_thread(pars_threads[i]) + else: + with ThreadPoolExecutor(max_workers=max_threads) as t: + for result in t.map(EngineInfer_onePic_thread, pars_threads): + tt=result + + t2=time.time() + print('All %d images time:%.1f ms, each:%.1f ms , with %d threads'%(len(image_urls),(t2-t1)*1000, (t2-t1)*1000.0/len(image_urls), max_threads) ) + + + +if __name__=='__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='stdc_360X640.pth', help='model path(s)') + opt = parser.parse_args() + print( opt.weights ) + #pthFile = Path('../../../yolov5TRT/weights/river/stdc_360X640.pth') + pthFile = Path(opt.weights) + onnxFile = pthFile.with_suffix('.onnx') + trtFile = onnxFile.with_suffix('.engine') + + nclass = 2; device=torch.device('cuda:0'); + + '''###BiSeNet + weights = '../weights/BiSeNet/checkpoint.pth';;inputShape =(1, 3, 512,512) + segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights) + seg_model=segmodel.model + ''' + + ##STDC net + weights = pthFile + segmodel = SegModel_STDC(nclass=nclass,weights=weights);inputShape =(1, 3, 360,640)#(bs,channels,height,width) + seg_model=segmodel.model + + + + + par={'modelSize':(inputShape[3],inputShape[2]),'mean':(0.485, 0.456, 0.406),'std':(0.229, 0.224, 0.225),'RGB_convert_first':True, + 'weights':trtFile,'device':device,'max_threads':1, + 'image_dir':'../../river_demo/images/road','out_dir' :'results'} + + + #infer_usage() + toONNX(seg_model,onnxFile,inputShape=inputShape,device=device) + ONNXtoTrt(onnxFile,trtFile) + #EngineInfer(par) + #ONNX_eval() + + + + + + + + diff --git a/obbUtils/pth2onnx.py b/obbUtils/pth2onnx.py new file mode 100644 index 0000000..047d604 --- /dev/null +++ b/obbUtils/pth2onnx.py @@ -0,0 +1,64 @@ + +import sys +#sys.path.extend(['..','../AIlib2' ]) +from ocrTrt import toONNX,ONNXtoTrt +from collections import OrderedDict 
+import torch +import argparse +from load_obb_model import load_model_decoder_OBB + + +def getModel(opt): + + + ###倾斜框(OBB)的ship目标检测 + par={ + 'model_size':(608,608), #width,height + 'K':100, #Maximum of objects' + 'conf_thresh':0.18,##Confidence threshold, 0.1 for general evaluation + 'device':"cuda:0", + + 'down_ratio':4,'num_classes':15, + 'weights':opt.weights, + 'dataset':'dota', + + 'test_dir': 'images/ship/', + 'result_dir': 'images/results', + 'half': False, + 'mean':(0.5, 0.5, 0.5), + 'std':(1, 1, 1), + 'category':['0','1','2','3','4','5','6','7','8','9','10','11','12','13','boat'], + 'model_size':(608,608),##width,height + 'decoder':None, + 'test_flag':True, + 'heads': {'hm': None,'wh': 10,'reg': 2,'cls_theta': 1}, + + + } + + ####加载模型 + model,decoder2=load_model_decoder_OBB(par) + par['decoder']=decoder2 + model = model.to(par['device']) + return model + + +if __name__=='__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='/mnt/thsw2/DSP2/weights/ship2/obb_608X608.pth', help='model path(s)') + parser.add_argument('--mWidth', type=int, default=608, help='segmodel mWdith') + parser.add_argument('--mHeight', type=int, default=608, help='segmodel mHeight') + opt = parser.parse_args() + + pthmodel = getModel(opt) + + ###转换TRT模型 + onnxFile=opt.weights.replace('.pth','.onnx') + trtFile=opt.weights.replace('.pth','.engine') + + print('#'*20, ' begin to toONNX') + toONNX(pthmodel,onnxFile,inputShape=(1,3,opt.mHeight, opt.mWidth),device='cuda:0') + print('#'*20, ' begin to TRT') + ONNXtoTrt(onnxFile,trtFile,half=False) + diff --git a/obbUtils/pth2onnx.sh b/obbUtils/pth2onnx.sh new file mode 100644 index 0000000..b7f40d6 --- /dev/null +++ b/obbUtils/pth2onnx.sh @@ -0,0 +1,5 @@ +gpu=2080Ti +weights=/mnt/thsw2/DSP2/weights/ship2/obb_608X608 +#python pth2onnx.py --weights ${weights}.pth --mWidth 608 --mHeight 608 +mv ${weights}.engine ${weights}_${gpu}_fp16.engine + diff --git a/obbUtils/shipUtils.py 
b/obbUtils/shipUtils.py new file mode 100644 index 0000000..4241f3f --- /dev/null +++ b/obbUtils/shipUtils.py @@ -0,0 +1,526 @@ +import torch +import numpy as np +import cv2 +import time +import os +import sys +sys.path.extend(['../AIlib2/obbUtils']) +import matplotlib.pyplot as plt +import func_utils +import time +import torchvision.transforms as transforms +from obbmodels import ctrbox_net +import decoder +import tensorrt as trt +import onnx +import onnxruntime as ort +sys.path.extend(['../AIlib2/utils']) +#sys.path.extend(['../AIlib2/utils']) +from plots import draw_painting_joint +from copy import deepcopy +from scipy import interpolate +def obbTohbb(obb): + obbarray=np.array(obb) + x0=np.min(obbarray[:,0]) + x1=np.max(obbarray[:,0]) + y0=np.min(obbarray[:,1]) + y1=np.max(obbarray[:,1]) + return [x0,y0,x1,y1] +def trt_version(): + return trt.__version__ + +def torch_device_from_trt(device): + if device == trt.TensorLocation.DEVICE: + return torch.device("cuda") + elif device == trt.TensorLocation.HOST: + return torch.device("cpu") + else: + return TypeError("%s is not supported by torch" % device) + + +def torch_dtype_from_trt(dtype): + if dtype == trt.int8: + return torch.int8 + elif trt_version() >= '7.0' and dtype == trt.bool: + return torch.bool + elif dtype == trt.int32: + return torch.int32 + elif dtype == trt.float16: + return torch.float16 + elif dtype == trt.float32: + return torch.float32 + else: + raise TypeError("%s is not supported by torch" % dtype) +def segTrtForward(engine,inputs,contextFlag=False): + + if not contextFlag: context = engine.create_execution_context() + else: context=contextFlag + + #with engine.create_execution_context() as context: + #input_names=['images'];output_names=['output'] + + namess=[ engine.get_binding_name(index) for index in range(engine.num_bindings) ] + input_names = [namess[0]];output_names=namess[1:] + + batch_size = inputs[0].shape[0] + bindings = [None] * (len(input_names) + len(output_names)) + + # 
创建输出tensor,并分配内存 + outputs = [None] * len(output_names) + for i, output_name in enumerate(output_names): + idx = engine.get_binding_index(output_name)#通过binding_name找到对应的input_id + dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))#找到对应的数据类型 + shape = (batch_size,) + tuple(engine.get_binding_shape(idx))#找到对应的形状大小 + device = torch_device_from_trt(engine.get_location(idx)) + output = torch.empty(size=shape, dtype=dtype, device=device) + #print('&'*10,'batch_size:',batch_size , 'device:',device,'idx:',idx,'shape:',shape,'dtype:',dtype,' device:',output.get_device()) + outputs[i] = output + #print('###line65:',output_name,i,idx,dtype,shape) + bindings[idx] = output.data_ptr()#绑定输出数据指针 + + for i, input_name in enumerate(input_names): + idx =engine.get_binding_index(input_name) + bindings[idx] = inputs[0].contiguous().data_ptr()#应当为inputs[i],对应3个输入。但由于我们使用的是单张图片,所以将3个输入全设置为相同的图片。 + #print('#'*10,'input_names:,', input_name,'idx:',idx, inputs[0].dtype,', inputs[0] device:',inputs[0].get_device()) + context.execute_v2(bindings) # 执行推理 + + + + if len(outputs) == 1: + outputs = outputs[0] + return outputs[0] + else: + return outputs +def apply_mask(image, mask, alpha=0.5): + """Apply the given mask to the image. 
def apply_mask(image, mask, alpha=0.5):
    """Blend a randomly-coloured overlay into `image` wherever mask == 1.

    The image is modified in place (and also returned); pixels where the
    mask is 0 are left untouched.
    """
    overlay_rgb = np.random.rand(3)
    for channel in range(3):
        blended = image[:, :, channel] * (1 - alpha) + alpha * overlay_rgb[channel] * 255
        image[:, :, channel] = np.where(mask == 1, blended, image[:, :, channel])
    return image


# Default directory for rendered results, created on import if missing.
if not os.path.exists('output'):
    os.mkdir('output')
saveDir = 'output'


def get_ms(t2, t1):
    """Return the span t2 - t1 (seconds) expressed in milliseconds."""
    return (t2 - t1) * 1000.0


def draw_painting_joint_2(box, img, label_array, score=0.5, color=None,
                          font={'line_thickness': None, 'boxLine_thickness': None, 'fontSize': None},
                          socre_location="leftTop"):
    """Draw one oriented detection on `img`: paste the pre-rendered class
    label patch, outline the quadrilateral, and print the score next to the
    label patch. Returns the annotated image."""
    lh, lw, lc = label_array.shape
    imh, imw, imc = img.shape

    # Anchor corner for the label patch.
    if socre_location == 'leftTop':
        x0, y1 = box[0][0], box[0][1]
    elif socre_location == 'leftBottom':
        x0, y1 = box[3][0], box[3][1]
    else:
        print('plot.py line217 ,label_location:%s not implemented ' % (socre_location))
        sys.exit(0)

    # Clamp the label patch so it stays fully inside the image.
    x1, y0 = x0 + lw, y1 - lh
    if y0 < 0:
        y0 = 0
        y1 = y0 + lh
    if y1 > imh:
        y1 = imh
        y0 = y1 - lh
    if x0 < 0:
        x0 = 0
        x1 = x0 + lw
    if x1 > imw:
        x1 = imw
        x0 = x1 - lw
    img[y0:y1, x0:x1, :] = label_array
    pts_cls = [(x0, y0), (x1, y1)]

    # Outline the (possibly rotated) quadrilateral.
    box_tl = font['boxLine_thickness'] or round(0.002 * (imh + imw) / 2) + 1
    cv2.polylines(img, [box], True, color, box_tl)

    # Score text, placed just right of the label patch.
    tl = font['line_thickness'] or round(0.002 * (imh + imw) / 2) + 1
    label = ' %.2f' % (score)
    tf = max(tl, 1)  # font thickness
    fontScale = font['fontSize'] or tl * 0.33
    t_size = cv2.getTextSize(label, 0, fontScale=fontScale, thickness=tf)[0]

    p1, p2 = (pts_cls[1][0], pts_cls[0][1]), (pts_cls[1][0] + t_size[0], pts_cls[1][1])
    cv2.rectangle(img, p1, p2, color, -1, cv2.LINE_AA)
    p3 = pts_cls[1][0], pts_cls[1][1] - (lh - t_size[1]) // 2

    cv2.putText(img, label, p3, 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
    return img
def OBB_infer(model,ori_image,par):
    '''
    Run one oriented-box detection pass on a single image.

    Returns: [img_origin, ori_image, out_box, 9999], infos
      img_origin -- copy of the input image
      ori_image  -- the input image (box drawing is disabled below)
      out_box    -- detected oriented boxes, formatted as
                    [ [ [(x0,y0),(x1,y1),(x2,y2),(x3,y3)], score, cls ], ... ]
                    e.g. [ [ [(1159,297),[922,615],[817,591],[1054,272]], 0.8656, 14 ], ... ]
                    (output format changed 2023.08.03)
      9999       -- placeholder, reserved / no meaning
      infos      -- timing string for each stage
    '''
    t1 = time.time()
    t2 = time.time()
    img= cv2.resize(ori_image, (par['model_size']))
    img_origin = ori_image.copy()
    t3 = time.time()

    transf2 = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=par['mean'], std=par['std'])])
    img_tensor = transf2(img)
    img_tensor1=img_tensor.unsqueeze(0)  # NCHW tensor, normalized, channels matched
    t4=time.time()
    img_tensor1 = img_tensor1.cuda()     # move to GPU
    t5 =time.time()
    img_tensor1 = img_tensor1.half() if par['half'] else img_tensor1

    if par['saveType']=='trt':
        # TensorRT engine: outputs come back in the key order of par['heads'].
        preds= segTrtForward(model,[img_tensor1])
        preds=[x[0] for x in preds ]
        pr_decs={}
        heads=list(par['heads'].keys())
        pr_decs={ heads[i]: preds[i] for i in range(len(heads)) }
    elif par['saveType']=='pth':
        with torch.no_grad():  # inference only, no autograd graph
            pr_decs = model(img_tensor1)
    elif par['saveType']=='onnx':
        img=img_tensor1.cpu().numpy().astype(np.float32)
        preds = model['sess'].run(None, {model['input_name']: img})
        pr_decs={}
        heads=list(par['heads'].keys())
        pr_decs={ heads[i]: torch.from_numpy(preds[i]) for i in range(len(heads)) }

    t6 = time.time()
    category=par['labelnames']
    decoded_pts = []
    decoded_scores = []

    predictions = par['decoder'].ctdet_decode(pr_decs)  # decode head maps into raw boxes
    t6_1=time.time()
    pts0, scores0 = func_utils.decode_prediction(predictions, category,par['model_size'], par['down_ratio'],ori_image)
    decoded_pts.append(pts0)
    decoded_scores.append(scores0)
    t7 = time.time()

    # per-category non-maximum suppression
    results = {cat: [] for cat in category}
    for cat in category:
        if cat == 'background':
            continue
        pts_cat = []
        scores_cat = []
        for pts0, scores0 in zip(decoded_pts, decoded_scores):
            pts_cat.extend(pts0[cat])
            scores_cat.extend(scores0[cat])
        pts_cat = np.asarray(pts_cat, np.float32)
        scores_cat = np.asarray(scores_cat, np.float32)
        if pts_cat.shape[0]:
            nms_results = func_utils.non_maximum_suppression(pts_cat, scores_cat)
            results[cat].extend(nms_results)

    t8 = time.time()
    height, width, _ = ori_image.shape

    # flatten the per-category NMS results into out_box
    out_box=[]
    for cat in category:
        if cat == 'background':
            continue
        result = results[cat]
        for pred in result:
            score = pred[-1]
            cls = category.index(cat)
            boxF=[ max(int(x),0) for x in pred[0:8]]  # clamp corners to the image
            box_out=[ [ ( boxF[0], boxF[1]),([boxF[2], boxF[3]]), ([boxF[4], boxF[5]]), ([boxF[6], boxF[7]]) ],score,cls]
            '''
            if par['drawBox']:
                tl = np.asarray([pred[0], pred[1]], np.float32)
                tr = np.asarray([pred[2], pred[3]], np.float32)
                br = np.asarray([pred[4], pred[5]], np.float32)
                bl = np.asarray([pred[6], pred[7]], np.float32)
                box = np.asarray([tl, tr, br, bl], np.int32)
                bgColor=par['rainbows'][cls%len( par['rainbows'])]
                label_array =par['label_array'][cls]
                font=par['digitWordFont']
                label_location=font['label_location']
                ori_image=draw_painting_joint(box,ori_image,label_array,score=score,color=bgColor,font=font,socre_location=label_location)
            '''
            out_box.append(box_out)

    t9 = time.time()
    t10 = time.time()
    infos=' preProcess:%.1f ToGPU:%.1f infer:%.1f decoder:%.1f, corr_change:%.1f nms:%.1f postProcess:%.1f, total process:%.1f '%( get_ms(t4,t2), get_ms(t5,t4),get_ms(t6,t5),get_ms(t6_1,t6),get_ms(t7,t6_1),get_ms(t8,t7) ,get_ms(t9,t8) ,get_ms(t9,t2) )

    if len(out_box) > 0:
        # snap every detected quadrilateral towards a true rectangle
        ret_4pts = np.array([ x[0] for x in out_box ] )
        ret_4pts = rectangle_quadrangle_batch (ret_4pts)
        cnt = len(out_box )
        for ii in range(cnt):
            out_box[ii][0] = ret_4pts[ii]

    return [img_origin,ori_image, out_box,9999],infos

def draw_obb(preds,ori_image,par):
    """Render every prediction [[4 pts], score, cls] onto ori_image and return it."""
    for pred in preds:
        box = np.asarray(pred[0][0:4],np.int32)
        cls = int(pred[2]);score = pred[1]
        bgColor=par['rainbows'][cls%len( par['rainbows'])]   # per-class color
        label_array =par['label_array'][cls]                 # pre-rendered class label patch
        font=par['digitWordFont']
        label_location=font['label_location']
        ori_image=draw_painting_joint(box,ori_image,label_array,score=score,color=bgColor,font=font,socre_location=label_location)
    return ori_image
def OBB_tracker(sort_tracker,hbbs,obbs,iframe):
    # sort_tracker -- the SORT tracker instance
    # hbbs -- horizontal boxes [x0,y0,x1,y1,conf,cls] per detection
    # obbs -- matching oriented boxes np.asarray([tl, tr, br, bl], np.int32)
    # returns: a deep copy of the tracker update result
    dets_to_sort = np.empty((0,7), dtype=np.float32)
    # NOTE: the detected object class and frame index are sent in as well
    for x1,y1,x2,y2,conf, detclass in hbbs:
        dets_to_sort = np.vstack((dets_to_sort,
                       np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))

    # Run SORT
    tracked_dets = deepcopy(sort_tracker.update(dets_to_sort,obbs) )
    return tracked_dets
def rectangle_quadrangle(vectors):
    """Regularize quadrilaterals given as corner offsets from their centre.

    Args:
        vectors: (M, 4, 2) offsets of the four corners from each box centre.

    Returns:
        vectors: (M, 4, 2) int32 offsets rescaled so all four corners sit at
            the mean corner distance (pushes the shape towards a rectangle).
        wh_thetas: (M, 3) [w, h, theta] of cv2.minAreaRect fitted on each
            corrected quadrilateral.
    """
    distans = np.sqrt(np.sum(vectors**2, axis=2))           # (M,4) corner radii
    mean_dis = np.mean(distans, axis=1).reshape(-1, 1)      # (M,1)
    mean_dis = np.tile(mean_dis, (1, 4))                    # (M,4)
    # NOTE(review): a corner exactly at the centre gives a zero radius and a
    # division by zero here -- unchanged from the original, TODO confirm.
    scale_factors = mean_dis / distans                      # (M,4)
    scale_factors = np.expand_dims(scale_factors, axis=2)   # (M,4,1)
    scale_factors = np.tile(scale_factors, (1, 1, 2))       # (M,4,2)
    vectors = vectors * scale_factors
    vectors = vectors.astype(np.int32)
    cnt = vectors.shape[0]
    boxes = [cv2.minAreaRect(vectors[i]) for i in range(cnt)]
    wh_thetas = [[x[1][0], x[1][1], x[2]] for x in boxes]   # [w, h, theta]
    wh_thetas = np.array(wh_thetas)                         # (M,3)
    return vectors, wh_thetas


def adjust_pts_orders(vectors):
    """Make corner ordering consistent along a sequence of quadrilaterals.

    Args:
        vectors: (M, 4, 2) corner points.

    Returns:
        (M, 4, 2) corner points where each quadrilateral's corners have been
        cyclically shifted to best match the previous one.
        (Fixed doc: the second line of the original comment wrongly said
        "input" for the return value.)
    """
    cnt = vectors.shape[0]
    if cnt <= 1:
        return vectors
    out = [vectors[0]]
    for i in range(1, cnt):
        pts1 = out[-1]
        pts2 = vectors[i]
        diss, min_dis, min_index, pts2_adjust = pts_setDistance(pts1, pts2)
        out.append(pts2_adjust)
    return np.array(out)


def pts_setDistance(pts1, pts2):
    """Find the cyclic shift of quadrilateral pts2 that best matches pts1.

    Args:
        pts1, pts2: (4, 2) corner points; pts1 stays fixed.

    Returns:
        (original distance, best distance, best shift index, shifted pts2),
        where distance is the summed squared corner difference.
    """
    pts3 = np.vstack((pts2, pts2))
    diss = [np.sum((pts1 - pts3[i:i + 4])**2) for i in range(4)]
    min_dis = min(diss)
    min_index = diss.index(min_dis)
    return diss[0], min_dis, min_index, pts3[min_index:min_index + 4]


def obbPointsConvert(obbs):
    """Convert (M, 4, 2) oriented boxes into flat (M, 10) rows
    [4 regularized centre offsets ..., centre point] for interpolation."""
    obbArray = np.array(obbs)                        # (M,4,2)
    middlePts = np.mean(obbArray, axis=1)            # (M,2) box centres
    middlePts = np.expand_dims(middlePts, axis=1)    # (M,1,2)
    vectors = np.tile(middlePts, (1, 4, 1))          # (M,4,2)
    vectors = obbArray - vectors                     # corner offsets from centre
    vectors, wh_thetas = rectangle_quadrangle(vectors)  # regularize the offsets
    vectors = adjust_pts_orders(vectors)             # consistent corner order
    vectors = np.concatenate((vectors, middlePts), axis=1)  # (M,5,2)
    return vectors.reshape(-1, 10)                   # (M,10)


def rectangle_quadrangle_batch(obbs):
    """Regularize (M, 4, 2) absolute quadrilateral corners towards rectangles.

    (Fixed doc: input is the four absolute corners, output the
    rectangularized four corners -- the original comment was garbled.)
    """
    obbArray = np.array(obbs)                        # (M,4,2)
    middlePts = np.mean(obbArray, axis=1)            # (M,2)
    middlePts = np.expand_dims(middlePts, axis=1)    # (M,1,2)
    middlePts = np.tile(middlePts, (1, 4, 1))        # (M,4,2)
    vectors = obbArray - middlePts                   # offsets from centre
    vectors, wh_thetas = rectangle_quadrangle(vectors)  # wh_thetas intentionally unused
    vectors = vectors + middlePts                    # back to absolute coordinates
    return vectors


def obbPointsConvert_reverse(vectors):
    """Inverse of obbPointsConvert: (M, 10) rows [offsets ..., centre] back
    to (M, 8) absolute corner coordinates."""
    vectors = np.array(vectors)          # (M,10) -- copy, caller's array untouched
    _vectors = vectors[:, :8]            # corner offsets
    middlePts = vectors[:, 8:10]         # centre point
    middlePts = np.tile(middlePts, (1, 4))  # (M,8)
    _vectors += middlePts
    return _vectors


def OBB_tracker_batch(imgarray_list, iframe_list, modelPar, obbModelPar, sort_tracker, trackPar, segPar=None):
    '''
    Detect oriented boxes on a batch of frames, track them with SORT, and
    linearly interpolate every track over the frames between detections.

    Args:
        imgarray_list -- list of images (one per frame)
        iframe_list   -- matching list of frame indices
        modelPar      -- dict of models, e.g. {'obbmodel': ..., 'seg_Model': ...}
        obbModelPar   -- detection parameters ('half', 'device', 'conf_thres',
                         'iou_thres', 'trtFlag_det', ...)
        sort_tracker  -- initialized tracker object (required even for a
                         single frame, for interface consistency)
        trackPar      -- tracking parameters: det_cnt (detection stride),
                         windowsize (odd smoothing window)
        segPar        -- optional segmentation parameters; None when unused

    Returns:
        [imgarray_list, track_det_result, detResults], timeInfos
        track_det_result -- (M, 14) rows:
            (x0,y0,x1,y1,x2,y2,x3,y3,xc,yc,conf,detclass,iframe,trackId)
        detResults -- one list per frame; each box is
            [[(x0,y0),(x1,y1),(x2,y2),(x3,y3)], conf, cls, iframe, trackId]
            (output format changed 2023.08.03)
    '''
    det_cnt, windowsize = trackPar['det_cnt'], trackPar['windowsize']
    trackers_dic = {}
    # Run the detector on every det_cnt-th frame, always including the last.
    index_list = list(range(0, len(iframe_list), det_cnt))
    if len(index_list) > 1 and index_list[-1] != iframe_list[-1]:
        index_list.append(len(iframe_list) - 1)

    if len(imgarray_list) == 1:
        # Single image: plain detection, no tracking needed.
        ori_image_list, infos = OBB_infer(modelPar['obbmodel'], imgarray_list[0], obbModelPar)
        return ori_image_list, infos

    t1 = time.time()
    for iframe_index, index_frame in enumerate(index_list):
        ori_image_list, infos = OBB_infer(modelPar['obbmodel'], imgarray_list[index_frame], obbModelPar)
        obbs = [x[0] for x in ori_image_list[2]]
        hbbs = []
        for i in range(len(ori_image_list[2])):
            hbb = obbTohbb(ori_image_list[2][i][0])
            hbbs.append([*hbb, ori_image_list[2][i][1], ori_image_list[2][i][2]])
        tracked_dets = OBB_tracker(sort_tracker, hbbs, obbs, iframe_list[index_frame])
        # Snapshot every live track after each update.
        for tracker in sort_tracker.getTrackers():
            trackers_dic[tracker.id] = deepcopy(tracker)
    t2 = time.time()

    track_det_result = np.empty((0, 14))
    # column layout: ( 0..7: x0,y0..x3,y3, 8: xc, 9: yc, 10: conf,
    #                  11: detclass, 12: iframe, 13: trackId )
    trackIdIndex = 13
    frameIndex = 12

    for trackId in trackers_dic.keys():
        tracker = trackers_dic[trackId]
        obb_history = np.array(tracker.obb_history)
        hbb_history = np.array(tracker.bbox_history)
        if len(obb_history) < 2:
            continue  # need at least two samples to interpolate

        # [tl,tr,br,bl] corners -> (M,10) centre offsets + centre point,
        # rectangularized and with consistent corner order.
        obb_history = obbPointsConvert(obb_history)
        arrays_box = np.concatenate((obb_history, hbb_history[:, 4:7]), axis=1)
        arrays_box = arrays_box.transpose()
        frames = hbb_history[:, 6]

        # Interpolate over the track's whole lifetime [first frame, last
        # frame] (the frames may span several detection batches).
        inter_frame_min = int(frames[0])
        inter_frame_max = int(frames[-1])
        new_frames = np.linspace(inter_frame_min, inter_frame_max, inter_frame_max - inter_frame_min + 1)
        f_linear = interpolate.interp1d(frames, arrays_box)
        interpolation_x0s = (f_linear(new_frames)).transpose()

        # Largest odd window not above `windowsize` (for the moving-average
        # smoothing below, currently disabled).
        # NOTE(review): this statement was corrupted in the original source;
        # reconstructed as the likely intent -- the value is unused while the
        # smoothing loop stays commented out. TODO confirm.
        move_cnt_use = (len(interpolation_x0s) + 1) // 2 * 2 - 1 if len(interpolation_x0s) < windowsize else windowsize

        # centre offsets + centre point -> absolute [tl, tr, br, bl] corners
        interpolation_x0s[:, 0:8] = obbPointsConvert_reverse(interpolation_x0s[:, 0:10])
        # for im in range(10):
        #     interpolation_x0s[:, im] = moving_average_wang(interpolation_x0s[:, im], move_cnt_use)

        cnt = inter_frame_max - inter_frame_min + 1
        trackIds = np.zeros((cnt, 1)) + trackId
        interpolation_x0s = np.hstack((interpolation_x0s, trackIds))
        track_det_result = np.vstack((track_det_result, interpolation_x0s))

    # Regroup the flat track table per input frame for the caller (DSP).
    detResults = []
    for iiframe in iframe_list:
        boxes_oneFrame = track_det_result[track_det_result[:, frameIndex] == iiframe]
        res = [[[(b[0], b[1]), (b[2], b[3]), (b[4], b[5]), (b[6], b[7])], b[10], b[11], b[12], b[13]]
               for b in boxes_oneFrame]
        detResults.append(res)

    t3 = time.time()
    timeInfos = '%d frames,detect and track:%.1f ,interpolation:%.1f ' % (len(index_list), get_ms(t2, t1), get_ms(t3, t2))
    retResults = [imgarray_list, track_det_result, detResults]
    return retResults, timeInfos
# ---- ocr.py ----
import tensorrt as trt
import sys,os
import cv2,glob,time
import torch
import utils
import numpy as np
import torch.nn.functional as F
from ocrUtils2.ocrUtils import strLabelConverter , OcrTrtForward,np_resize_keepRation

class ocrModel(object):
    """CRNN-based OCR wrapper supporting TensorRT (.engine) and PyTorch
    (.pth/.pt) weights; decodes with CTC greedy decoding."""
    def __init__(self, weights=None,
                 par={
                     'char_file':'../AIlib2/weights/conf/OCR_Ch/Ch.txt',
                     'mode':'ch',
                     'nc':3,
                     'imgH':32,
                     'imgW':256,
                     'hidden':256,
                     'mean':[0.5,0.5,0.5],
                     'std':[0.5,0.5,0.5],
                     'dynamic':False,
                 }
                 ):
        self.par = par
        self.device = 'cuda:0'
        self.half =True
        self.dynamic = par['dynamic']
        self.par['modelSize'] = (par['imgW'], par['imgH'])  # (W, H)
        # Character set: one symbol per entry; one extra class for the CTC blank.
        with open(par['char_file'], 'r') as fp:
            alphabet = fp.read()
        self.converter = strLabelConverter(alphabet)
        self.nclass = len(alphabet) + 1

        # Pick the inference backend from the weight-file extension.
        if weights.endswith('.engine'):
            self.infer_type ='trt'
        elif weights.endswith('.pth') or weights.endswith('.pt') :
            self.infer_type ='pth'
        else:
            print('#########ERROR:',weights,': no registered inference type, exit')
            sys.exit(0)

        if self.infer_type=='trt':
            logger = trt.Logger(trt.Logger.ERROR)
            with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
                self.model=runtime.deserialize_cuda_engine(f.read())  # ICudaEngine from the local .engine file
        elif self.infer_type=='pth':
            # Chinese and English recognizers use different CRNN variants
            # with different constructor argument orders.
            if par['mode']=='ch':
                import ocrUtils2.crnnCh as crnn
                self.model = crnn.CRNN(par['nc'], par['hidden'], self.nclass, par['imgH'])
            else:
                import ocrUtils2.crnn_model as crnn
                self.model = crnn.CRNN(par['imgH'], par['nc'], self.nclass,par['hidden'] )
            self.load_model_weights(weights)
            self.model = self.model.to(self.device)
            print('#######load pt model:%s success '%(weights))
        self.par['modelType']=self.infer_type
        print('#########加载模型:',weights,' 类型:',self.infer_type)
    def eval(self,image):
        """Run OCR on one BGR image; returns (decoded_text, timing_info)."""
        t0 = time.time()
        image = self.preprocess_image(image)
        t1 = time.time()
        if self.infer_type=='pth':
            self.model.eval()
            preds = self.model(image)
        else:
            preds,trtstr=OcrTrtForward(self.model,[image],False)

        t2 = time.time()
        preds_size = torch.IntTensor([preds.size(0)]*1)  # sequence length for a batch of 1
        preds = F.softmax(preds, dim=2)
        preds_score, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        # CTC greedy decode: collapse repeats and strip blanks (raw=False).
        res_real = self.converter.decode(preds, preds_size, raw=False)
        t3 = time.time()
        timeInfos = 'total:%.1f (preProcess:%.1f ,inference:%.1f, postProcess:%.1f) '%( self.get_ms(t3,t0), self.get_ms(t1,t0), self.get_ms(t2,t1), self.get_ms(t3,t2), )

        return res_real,timeInfos

    def preprocess_image(self,image):
        """BGR image -> normalized (1, C, H, W) float tensor on self.device."""
        if self.par['nc']==1:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else: image = image[:,:,::-1]  # BGR --> RGB

        if self.dynamic:
            # Keep aspect ratio: scale so the height matches the model height.
            H,W = image.shape[0:2]
            image = cv2.resize(image, (0, 0), fx=self.par['modelSize'][1] / H, fy=self.par['modelSize'][1] / H, interpolation=cv2.INTER_CUBIC)
        else:
            re_size = self.par['modelSize']
            image = cv2.resize(image,re_size, interpolation=cv2.INTER_LINEAR)

        if self.infer_type=='trt':
            # TRT engines need the fixed model size: pad/crop keeping the ratio.
            image = np_resize_keepRation(image,self.par['modelSize'][1] ,self.par['modelSize'][0] )

        image = image.astype(np.float32)
        image /= 255.0
        if self.par['nc']==1:
            image = (image-self.par['mean'][0])/self.par['std'][0]
            image = np.expand_dims(image,0)
        else:
            image[:, :, 0] -= self.par['mean'][0]
            image[:, :, 1] -= self.par['mean'][1]
            image[:, :, 2] -= self.par['mean'][2]

            image[:, :, 0] /= self.par['std'][0]
            image[:, :, 1] /= self.par['std'][1]
            image[:, :, 2] /= self.par['std'][2]

            image = np.transpose(image, (2, 0, 1))  # HWC -> CHW
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)
        if self.device != 'cpu':
            image = image.to(self.device)

        return image

    def get_ms(self,t1,t0):
        """Millisecond difference t1 - t0 (seconds in, milliseconds out)."""
        return (t1-t0)*1000.0
    def load_model_weights(self,weight):
        """Load a checkpoint; tolerates both raw state_dicts and
        {'state_dict': ...} wrappers, and strips DataParallel 'module.' prefixes."""
        checkpoint = torch.load(weight)
        if 'state_dict' in checkpoint.keys():
            self.model.load_state_dict(checkpoint['state_dict'])
        else:
            try:
                self.model.load_state_dict(checkpoint)
            except:
                # Keys were saved from nn.DataParallel: drop the 'module.' prefix.
                state_dict = torch.load(weight)
                from collections import OrderedDict
                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    name = k[7:]  # remove `module.`
                    new_state_dict[name] = v
                # load params
                self.model.load_state_dict(new_state_dict)

if __name__== "__main__":
    # Demo: Chinese CRNN TensorRT engine on a folder of ship-name crops.
    weights = '/mnt/thsw2/DSP2/weights/ocr2/crnn_ch_2080Ti_fp16_192X32.engine'
    par={
        'char_file':'/home/thsw2/WJ/src/OCR/benchmarking-chinese-text-recognition/src/models/CRNN/data/benchmark.txt',
        'mode':'ch',
        'nc':3,
        'imgH':32,
        'imgW':192,
        'hidden':256,
        'mean':[0.5,0.5,0.5],
        'std':[0.5,0.5,0.5],
        'dynamic':False
    }
    inputDir = '/home/thsw2/WJ/src/OCR/shipNames'

    '''
    weights = '/home/thsw2/WJ/src/DSP2/AIlib2/weights/conf/ocr2/crnn_448X32.pth'
    #weights = '/mnt/thsw2/DSP2/weights/ocr2/crnn_en_2080Ti_fp16_448X32.engine'
    par={
        'char_file':'/home/thsw2/WJ/src/DSP2/AIlib2/weights/conf/ocr2/chars2.txt',
        'mode':'en',
        'nc':1,
        'imgH':32,
        'imgW':448,
        'hidden':256,
        'mean':[0.588,0.588,0.588],
        'std':[0.193,0.193,0.193 ],
        'dynamic':True
    }
    inputDir='/home/thsw2/WJ/src/DSP2/AIdemo2/images/ocr_en'
    '''

    model = ocrModel(weights=weights,par=par )

    imgUrls = glob.glob('%s/*.jpg'%(inputDir))

    for imgUrl in imgUrls[0:]:
        img = cv2.imread(imgUrl)
        res_real,timeInfos = model.eval(img)
        # Keep only CJK characters and digits for display.
        res_real="".join( list(filter(lambda x:(ord(x) >19968 and ord(x)<63865 ) or (ord(x) >47 and ord(x)<58 ),res_real)))
        print(res_real,os.path.basename(imgUrl),timeInfos )
# ---- ocrUtils/crnn_model/__init__.py (empty) ----
# ---- ocrUtils/crnn_model/model.py ----
import torch.nn as nn
from .modules import ResNet_FeatureExtractor, BidirectionalLSTM

class Model(nn.Module):
    """CRNN text recognizer: ResNet feature extractor, a two-layer BiLSTM
    sequence model, and a linear per-timestep prediction head."""

    def __init__(self, input_channel, output_channel, hidden_size, num_class):
        super(Model, self).__init__()
        """ FeatureExtraction """
        self.FeatureExtraction = ResNet_FeatureExtractor(input_channel, output_channel)
        self.FeatureExtraction_output = output_channel  # int(imgH/16-1) * 512
        self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1))  # Transform final (imgH/16-1) -> 1

        """ Sequence modeling"""
        self.SequenceModeling = nn.Sequential(
            BidirectionalLSTM(self.FeatureExtraction_output, hidden_size, hidden_size),
            BidirectionalLSTM(hidden_size, hidden_size, hidden_size))
        self.SequenceModeling_output = hidden_size

        """ Prediction """
        self.Prediction = nn.Linear(self.SequenceModeling_output, num_class)


    def forward(self, input, text):
        # NOTE(review): `text` is unused here; presumably kept for interface
        # parity with attention-based heads -- confirm against callers.
        """ Feature extraction stage """
        visual_feature = self.FeatureExtraction(input)
        visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2))  # [b, c, h, w] -> [b, w, c, h]
        visual_feature = visual_feature.squeeze(3)

        """ Sequence modeling stage """
        contextual_feature = self.SequenceModeling(visual_feature)

        """ Prediction stage """
        prediction = self.Prediction(contextual_feature.contiguous())

        return prediction
# ---- ocrUtils/crnn_model/modules.py ----
import torch
import torch.nn as nn
import torch.nn.init as init
import torchvision
from torchvision import models
from collections import namedtuple
from packaging import version


def init_weights(modules):
    """Xavier-init convolutions, unit/zero-init batch norms, and
    N(0, 0.01)-init linear layers."""
    for m in modules:
        if isinstance(m, nn.Conv2d):
            init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.zero_()

class vgg16_bn(torch.nn.Module):
    """VGG16-BN backbone split into five slices (conv2_2 / conv3_3 / conv4_3 /
    conv5_3 / fc6-fc7); forward returns all five feature maps."""
    def __init__(self, pretrained=True, freeze=True):
        super(vgg16_bn, self).__init__()
        # torchvision >= 0.13 replaced the `pretrained=` flag with `weights=`.
        if version.parse(torchvision.__version__) >= version.parse('0.13'):
            vgg_pretrained_features = models.vgg16_bn(
                weights=models.VGG16_BN_Weights.DEFAULT if pretrained else None
            ).features
        else:  # torchvision.__version__ < 0.13
            # NOTE(review): downgrades the weight URL to http for old
            # torchvision releases -- confirm this workaround is still wanted.
            models.vgg.model_urls['vgg16_bn'] = models.vgg.model_urls['vgg16_bn'].replace('https://', 'http://')
            vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features

        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(12):  # conv2_2
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 19):  # conv3_3
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(19, 29):  # conv4_3
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(29, 39):  # conv5_3
            self.slice4.add_module(str(x), vgg_pretrained_features[x])

        # fc6, fc7 without atrous conv
        self.slice5 = torch.nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
            nn.Conv2d(1024, 1024, kernel_size=1)
        )

        if not pretrained:
            init_weights(self.slice1.modules())
            init_weights(self.slice2.modules())
            init_weights(self.slice3.modules())
            init_weights(self.slice4.modules())

        init_weights(self.slice5.modules())  # no pretrained model for fc6 and fc7

        if freeze:
            for param in self.slice1.parameters():  # only first conv
                param.requires_grad= False

    def forward(self, X):
        h = self.slice1(X)
        h_relu2_2 = h
        h = self.slice2(h)
        h_relu3_2 = h
        h = self.slice3(h)
        h_relu4_3 = h
        h = self.slice4(h)
        h_relu5_3 = h
        h = self.slice5(h)
        h_fc7 = h
        vgg_outputs = namedtuple("VggOutputs", ['fc7', 'relu5_3', 'relu4_3', 'relu3_2', 'relu2_2'])
        out = vgg_outputs(h_fc7, h_relu5_3, h_relu4_3, h_relu3_2, h_relu2_2)
        return out

class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM followed by a linear projection back to output_size."""

    def __init__(self, input_size, hidden_size, output_size):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)
        self.linear = nn.Linear(hidden_size * 2, output_size)

    def forward(self, input):
        """
        input : visual feature [batch_size x T x input_size]
        output : contextual feature [batch_size x T x output_size]
        """
        try:  # multi gpu needs this
            self.rnn.flatten_parameters()
        except:  # quantization doesn't work with this
            pass
        recurrent, _ = self.rnn(input)  # batch_size x T x input_size -> batch_size x T x (2*hidden_size)
        output = self.linear(recurrent)  # batch_size x T x output_size
        return output

class VGG_FeatureExtractor(nn.Module):
    """VGG-style CRNN feature extractor; later pools use (2,1) strides so the
    width (time) axis keeps its resolution."""

    def __init__(self, input_channel, output_channel=256):
        super(VGG_FeatureExtractor, self).__init__()
        self.output_channel = [int(output_channel / 8), int(output_channel / 4),
                               int(output_channel / 2), output_channel]
        self.ConvNet = nn.Sequential(
            nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
            nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
            nn.MaxPool2d((2, 1), (2, 1)),
            nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
            nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
            nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
            nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
            nn.MaxPool2d((2, 1), (2, 1)),
            nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True))

    def forward(self, input):
        return self.ConvNet(input)

class ResNet_FeatureExtractor(nn.Module):
    """ FeatureExtractor of FAN (http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf) """

    def __init__(self, input_channel, output_channel=512):
        super(ResNet_FeatureExtractor, self).__init__()
        self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3])

    def forward(self, input):
        return self.ConvNet(input)

class BasicBlock(nn.Module):
    """Standard two-conv residual block (expansion 1)."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = self._conv3x3(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = self._conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def _conv3x3(self, in_planes, out_planes, stride=1):
        "3x3 convolution with padding"
        return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                         padding=1, bias=False)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)

        return out

class ResNet(nn.Module):
    """Text-recognition ResNet: later stages use stride 1 and (2,1) pooling
    so the width (time) axis stays high-resolution while height collapses."""

    def __init__(self, input_channel, output_channel, block, layers):
        super(ResNet, self).__init__()

        self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]

        self.inplanes = int(output_channel / 8)
        self.conv0_1 = nn.Conv2d(input_channel, int(output_channel / 16),
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
        self.conv0_2 = nn.Conv2d(int(output_channel / 16), self.inplanes,
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn0_2 = nn.BatchNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)

        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
        self.conv1 = nn.Conv2d(self.output_channel_block[0], self.output_channel_block[
            0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])

        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
        self.conv2 = nn.Conv2d(self.output_channel_block[1], self.output_channel_block[
            1], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])

        self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
        self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
        self.conv3 = nn.Conv2d(self.output_channel_block[2], self.output_channel_block[
            2], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])

        self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
        self.conv4_1 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
            3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
        self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
        self.conv4_2 = nn.Conv2d(self.output_channel_block[3], self.output_channel_block[
            3], kernel_size=2, stride=1, padding=0, bias=False)
        self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])

    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 conv downsample on the shortcut when shape/stride changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv0_1(x)
        x = self.bn0_1(x)
        x = self.relu(x)
        x = self.conv0_2(x)
        x = self.bn0_2(x)
        x = self.relu(x)

        x = self.maxpool1(x)
        x = self.layer1(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.maxpool2(x)
        x = self.layer2(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.maxpool3(x)
        x = self.layer3(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.layer4(x)
        x = self.conv4_1(x)
        x = self.bn4_1(x)
        x = self.relu(x)
        x = self.conv4_2(x)
        x = self.bn4_2(x)
        x = self.relu(x)

        return x
BidirectionalLSTM(self.FeatureExtraction_output, hidden_size, hidden_size), + BidirectionalLSTM(hidden_size, hidden_size, hidden_size)) + self.SequenceModeling_output = hidden_size + + """ Prediction """ + self.Prediction = nn.Linear(self.SequenceModeling_output, num_class) + + + def forward(self, input, text): + """ Feature extraction stage """ + #print('####vgg_model.py line27:',input.size(), 'input[0,0,0:2,0:2] :',input[0,0,0:2,0:2]) + visual_feature = self.FeatureExtraction(input) + #print('###line26:',visual_feature.size() ) + visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2)) + #print('###line29:',visual_feature.size()) + visual_feature = visual_feature.squeeze(3) + + """ Sequence modeling stage """ + contextual_feature = self.SequenceModeling(visual_feature) + + """ Prediction stage """ + prediction = self.Prediction(contextual_feature.contiguous()) + #print('###line39 vgg_model:',prediction.size()) + return prediction diff --git a/ocrUtils/ocrTrt.py b/ocrUtils/ocrTrt.py new file mode 100644 index 0000000..25babc5 --- /dev/null +++ b/ocrUtils/ocrTrt.py @@ -0,0 +1,448 @@ +import torch +import argparse +import sys,os + +from torchvision import transforms +import cv2,glob +import numpy as np +import matplotlib.pyplot as plt +import time +from pathlib import Path +from concurrent.futures import ThreadPoolExecutor +import tensorrt as trt + +#import pycuda.driver as cuda + + +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id + +def infer_usage(): + image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + #weights = '../weights/BiSeNet/checkpoint.pth' + #segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights) + + weights = '../weights/BiSeNet/checkpoint_640X360_epo33.pth' + segmodel = 
SegModel_BiSeNet(nclass=nclass,weights=weights,modelsize=(640,360)) + + image_urls=glob.glob('../../../../data/无人机起飞测试图像/*') + out_dir ='results/'; + os.makedirs(out_dir,exist_ok=True) + for im,image_url in enumerate(image_urls[0:]): + #image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG' + image_array0 = cv2.imread(image_url) + H,W,C = image_array0.shape + time_1=time.time() + pred,outstr = segmodel.eval(image_array0 ) + + #plt.figure(1);plt.imshow(pred); + #plt.show() + binary0 = pred.copy() + + + time0 = time.time() + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + if len(contours)>0: + max_id = get_largest_contours(contours) + binary0[:,:] = 0 + cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + + time1 = time.time() + + + time2 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time3 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + time4 = time.time() + + print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) ) + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] +def file_size(path): + # Return file/dir size (MB) + path = Path(path) + if path.is_file(): + return path.stat().st_size / 1E6 + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + else: + return 0.0 + + +def toONNX(seg_model,onnxFile,inputShape=(1,3,360,640),device=torch.device('cuda:0')): + print('####begin to export to onnx') + import onnx + + im = torch.rand(inputShape).to(device) + seg_model.eval() + text_for_pred = torch.LongTensor(1, 90).fill_(0).to(device) + + + out=seg_model(im,text_for_pred) + print('###test model infer example####') + train=False + dynamic = False + opset=11 + torch.onnx.export(seg_model, (im,text_for_pred),onnxFile, opset_version=opset, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) + + #torch.onnx.export(model, (dummy_input, dummy_text), "vitstr.onnx", verbose=True) + + + print('output onnx file:',onnxFile) +def ONNXtoTrt(onnxFile,trtFile,half=True): + import tensorrt 
as trt + #onnx = Path('../weights/BiSeNet/checkpoint.onnx') + #onnxFile = Path('../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx') + time0=time.time() + #half=True; + verbose=True;workspace=4;prefix=colorstr('TensorRT:') + #f = onnx.with_suffix('.engine') # TensorRT engine file + f=trtFile + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnxFile)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + print(f'{prefix} Network Description:') + for inp in inputs: + print(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + print(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + half &= builder.platform_has_fast_fp16 + print(f'{prefix} building FP{16 if half else 32} engine in {f}') + if half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + time1=time.time() + print('output trtfile from ONNX, time:%.4f s, half: ,'%(time1-time0),trtFile,half) +def ONNX_eval(): + import onnx + import numpy as np + import onnxruntime as ort + import cv2 + + #model_path = '../weights/BiSeNet/checkpoint.onnx';modelSize=(512,512);mean=(0.335, 0.358, 0.332),std = (0.141, 0.138, 0.143) + model_path = '../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx';modelSize=(640,360);mean = (0.485, 0.456, 0.406);std = (0.229, 
0.224, 0.225) + # 验证模型合法性 + onnx_model = onnx.load(model_path) + onnx.checker.check_model(onnx_model) + # 读入图像并调整为输入维度 + img = cv2.imread("../../river_demo/images/slope/菜地_20220713_青年河8_4335_1578.jpg") + H,W,C=img.shape + img = cv2.resize(img,modelSize).transpose(2,0,1) + img = np.array(img)[np.newaxis, :, :, :].astype(np.float32) + # 设置模型session以及输入信息 + sess = ort.InferenceSession(model_path,providers= ort.get_available_providers()) + print('len():',len( sess.get_inputs() )) + input_name1 = sess.get_inputs()[0].name + #input_name2 = sess.get_inputs()[1].name + #input_name3 = sess.get_inputs()[2].name + + #output = sess.run(None, {input_name1: img, input_name2: img, input_name3: img}) + output = sess.run(None, {input_name1: img}) + pred = np.argmax(output[0], axis=1)[0]#得到每行 + pred = cv2.resize(pred.astype(np.uint8),(W,H)) + #plt.imshow(pred);plt.show() + print( 'type:',type(output) , output[0].shape, output[0].dtype ) + + #weights = Path('../weights/BiSeNet/checkpoint.engine') + + half = False;device = 'cuda:0' + image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG' + #image_urls=glob.glob('../../river_demo/images/slope/*') + image_urls=glob.glob('../../../../data/无人机起飞测试图像/*') + #out_dir ='../../river_demo/images/results/' + out_dir ='results' + os.makedirs(out_dir,exist_ok=True) + + for im,image_url in enumerate(image_urls[0:]): + image_array0 = cv2.imread(image_url) + #img=segPreProcess_image(image_array0).to(device) + img=segPreProcess_image(image_array0,modelSize=modelSize,mean=mean,std=std,numpy=True) + + #img = cv2.resize(img,(512,512)).transpose(2,0,1) + img = np.array(img)[np.newaxis, :, :, :].astype(np.float32) + + + H,W,C = image_array0.shape + time_1=time.time() + #pred,outstr = segmodel.eval(image_array0 ) + + + output = sess.run(None, {input_name1: img}) + pred =output[0] + + + + #pred = model(img, augment=False, visualize=False) + + #pred = pred.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + pred = 
cv2.resize(pred.astype(np.uint8),(W,H)) + + outstr='###---###' + + binary0 = pred.copy() + + + time0 = time.time() + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + if len(contours)>0: + max_id = get_largest_contours(contours) + binary0[:,:] = 0 + cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + + time1 = time.time() + + + time2 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time3 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + time4 = time.time() + + print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) ) + print('outimage:',out_url) + + + +def EngineInfer_onePic_thread(pars_thread): + + + + + engine,image_array0,out_dir,image_url,im = pars_thread[0:6] + + + H,W,C = image_array0.shape + time0=time.time() + + time1=time.time() + # 运行模型 + + + pred,segInfoStr=segtrtEval(engine,image_array0,par={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True}) + pred = 1 - pred + time2=time.time() + + outstr='###---###' + binary0 = pred.copy() + time3 = time.time() + + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + #if len(contours)>0: + # max_id = get_largest_contours(contours) + # binary0[:,:] = 0 + # cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + time4 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time5 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + time6 = time.time() + + print('image:%d,%s ,%d*%d, %s,,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,segInfoStr, 
get_ms(time4,time3),get_ms(time5,time4),get_ms(time5,time0) )) + + + return 'success' +def trt_version(): + return trt.__version__ +def torch_device_from_trt(device): + if device == trt.TensorLocation.DEVICE: + return torch.device("cuda") + elif device == trt.TensorLocation.HOST: + return torch.device("cpu") + else: + return TypeError("%s is not supported by torch" % device) + +def torch_dtype_from_trt(dtype): + if dtype == trt.int8: + return torch.int8 + elif trt_version() >= '7.0' and dtype == trt.bool: + return torch.bool + elif dtype == trt.int32: + return torch.int32 + elif dtype == trt.float16: + return torch.float16 + elif dtype == trt.float32: + return torch.float32 + else: + raise TypeError("%s is not supported by torch" % dtype) +def TrtForward(engine,inputs,contextFlag=False): + + t0=time.time() + #with engine.create_execution_context() as context: + if not contextFlag: context = engine.create_execution_context() + else: context=contextFlag + + input_names=['images'];output_names=['output'] + batch_size = inputs[0].shape[0] + bindings = [None] * (len(input_names) + len(output_names)) + t1=time.time() + # 创建输出tensor,并分配内存 + outputs = [None] * len(output_names) + for i, output_name in enumerate(output_names): + idx = engine.get_binding_index(output_name)#通过binding_name找到对应的input_id + dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))#找到对应的数据类型 + shape = (batch_size,) + tuple(engine.get_binding_shape(idx))#找到对应的形状大小 + device = torch_device_from_trt(engine.get_location(idx)) + output = torch.empty(size=shape, dtype=dtype, device=device) + #print('&'*10,'device:',device,'idx:',idx,'shape:',shape,'dtype:',dtype,' device:',output.get_device()) + outputs[i] = output + #print('###line65:',output_name,i,idx,dtype,shape) + bindings[idx] = output.data_ptr()#绑定输出数据指针 + t2=time.time() + + for i, input_name in enumerate(input_names): + idx =engine.get_binding_index(input_name) + bindings[idx] = 
inputs[0].contiguous().data_ptr()#应当为inputs[i],对应3个输入。但由于我们使用的是单张图片,所以将3个输入全设置为相同的图片。 + #print('#'*10,'input_names:,', input_name,'idx:',idx, inputs[0].dtype,', inputs[0] device:',inputs[0].get_device()) + t3=time.time() + context.execute_v2(bindings) # 执行推理 + t4=time.time() + + + if len(outputs) == 1: + outputs = outputs[0] + outstr='create Context:%.2f alloc memory:%.2f prepare input:%.2f conext infer:%.2f, total:%.2f'%((t1-t0 )*1000 , (t2-t1)*1000,(t3-t2)*1000,(t4-t3)*1000, (t4-t0)*1000 ) + return outputs[0],outstr + +def EngineInfer(par): + + modelSize=par['modelSize'];mean = par['mean'] ;std = par['std'] ;RGB_convert_first=par['RGB_convert_first'];device=par['device'] + weights=par['weights']; image_dir=par['image_dir'] + max_threads=par['max_threads'] + image_urls=glob.glob('%s/*'%(image_dir)) + out_dir =par['out_dir'] + + os.makedirs(out_dir,exist_ok=True) + + #trt_model = SegModel_STDC_trt(weights=weights,modelsize=modelSize,std=std,mean=mean,device=device) + logger = trt.Logger(trt.Logger.ERROR) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + engine=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + print('#####load TRT file:',weights,'success #####') + + pars_thread=[] + pars_threads=[] + for im,image_url in enumerate(image_urls[0:]): + image_array0 = cv2.imread(image_url) + pars_thread=[engine,image_array0,out_dir,image_url,im] + pars_threads.append(pars_thread) + #EngineInfer_onePic_thread(pars_thread) + t1=time.time() + if max_threads==1: + for i in range(len(pars_threads[0:])): + EngineInfer_onePic_thread(pars_threads[i]) + else: + with ThreadPoolExecutor(max_workers=max_threads) as t: + for result in t.map(EngineInfer_onePic_thread, pars_threads): + tt=result + + t2=time.time() + print('All %d images time:%.1f ms, each:%.1f ms , with %d threads'%(len(image_urls),(t2-t1)*1000, (t2-t1)*1000.0/len(image_urls), max_threads) ) + + + +if __name__=='__main__': + parser = argparse.ArgumentParser() + 
parser.add_argument('--weights', type=str, default='stdc_360X640.pth', help='model path(s)') + opt = parser.parse_args() + print( opt.weights ) + #pthFile = Path('../../../yolov5TRT/weights/river/stdc_360X640.pth') + pthFile = Path(opt.weights) + onnxFile = pthFile.with_suffix('.onnx') + trtFile = onnxFile.with_suffix('.engine') + + nclass = 2; device=torch.device('cuda:0'); + + '''###BiSeNet + weights = '../weights/BiSeNet/checkpoint.pth';;inputShape =(1, 3, 512,512) + segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights) + seg_model=segmodel.model + ''' + + ##STDC net + weights = pthFile + segmodel = SegModel_STDC(nclass=nclass,weights=weights);inputShape =(1, 3, 360,640)#(bs,channels,height,width) + seg_model=segmodel.model + + + + + par={'modelSize':(inputShape[3],inputShape[2]),'mean':(0.485, 0.456, 0.406),'std':(0.229, 0.224, 0.225),'RGB_convert_first':True, + 'weights':trtFile,'device':device,'max_threads':1, + 'image_dir':'../../river_demo/images/road','out_dir' :'results'} + + + #infer_usage() + toONNX(seg_model,onnxFile,inputShape=inputShape,device=device) + ONNXtoTrt(onnxFile,trtFile) + #EngineInfer(par) + #ONNX_eval() + + + + + + + + diff --git a/ocrUtils/ocrUtils.py b/ocrUtils/ocrUtils.py new file mode 100644 index 0000000..e6bec40 --- /dev/null +++ b/ocrUtils/ocrUtils.py @@ -0,0 +1,192 @@ +import torch +import numpy as np +import torchvision.transforms as transforms +import math +from PIL import Image +def custom_mean(x): + return x.prod()**(2.0/np.sqrt(len(x))) + +def contrast_grey(img): + high = np.percentile(img, 90) + low = np.percentile(img, 10) + return (high-low)/np.maximum(10, high+low), high, low + +def adjust_contrast_grey(img, target = 0.4): + contrast, high, low = contrast_grey(img) + if contrast < target: + img = img.astype(int) + ratio = 200./np.maximum(10, high-low) + img = (img - low + 25)*ratio + img = np.maximum(np.full(img.shape, 0) ,np.minimum(np.full(img.shape, 255), img)).astype(np.uint8) + return img + +class 
NormalizePAD(object): + + def __init__(self, max_size, PAD_type='right'): + self.toTensor = transforms.ToTensor() + self.max_size = max_size + self.max_width_half = math.floor(max_size[2] / 2) + self.PAD_type = PAD_type + + def __call__(self, img): + img = self.toTensor(img) + img.sub_(0.5).div_(0.5) + c, h, w = img.size() + Pad_img = torch.FloatTensor(*self.max_size).fill_(0) + Pad_img[:, :, :w] = img # right pad + if self.max_size[2] != w: # add border Pad + Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w) + + return Pad_img + +class AlignCollate(object): + + def __init__(self, imgH=32, imgW=100, keep_ratio_with_pad=False, adjust_contrast = 0.): + self.imgH = imgH + self.imgW = imgW + self.keep_ratio_with_pad = keep_ratio_with_pad + self.adjust_contrast = adjust_contrast + + def __call__(self, batch): + #print('##recongnition.py line72: type(batch[0]):',type(batch[0]),batch[0], ) + batch = filter(lambda x: x is not None, batch) + images = batch + + resized_max_w = self.imgW + input_channel = 1 + transform = NormalizePAD((input_channel, self.imgH, resized_max_w)) + + resized_images = [] + for image in images: + w, h = image.size + #### augmentation here - change contrast + if self.adjust_contrast > 0: + image = np.array(image.convert("L")) + image = adjust_contrast_grey(image, target = self.adjust_contrast) + image = Image.fromarray(image, 'L') + + ratio = w / float(h) + if math.ceil(self.imgH * ratio) > self.imgW: + resized_w = self.imgW + else: + resized_w = math.ceil(self.imgH * ratio) + + resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC) + resized_images.append(transform(resized_image)) + + image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0) + return image_tensors + + +class CTCLabelConverter(object): + """ Convert between text-label and text-index """ + + def __init__(self, character, separator_list = {}, dict_pathlist = {}): + # character (str): set of the possible characters. 
+ dict_character = list(character) + + self.dict = {} + for i, char in enumerate(dict_character): + self.dict[char] = i + 1 + + self.character = ['[blank]'] + dict_character # dummy '[blank]' token for CTCLoss (index 0) + + self.separator_list = separator_list + separator_char = [] + for lang, sep in separator_list.items(): + separator_char += sep + self.ignore_idx = [0] + [i+1 for i,item in enumerate(separator_char)] + + ####### latin dict + if len(separator_list) == 0: + dict_list = [] + for lang, dict_path in dict_pathlist.items(): + try: + with open(dict_path, "r", encoding = "utf-8-sig") as input_file: + word_count = input_file.read().splitlines() + dict_list += word_count + except: + pass + else: + dict_list = {} + for lang, dict_path in dict_pathlist.items(): + with open(dict_path, "r", encoding = "utf-8-sig") as input_file: + word_count = input_file.read().splitlines() + dict_list[lang] = word_count + + self.dict_list = dict_list + + def encode(self, text, batch_max_length=25): + """convert text-label into text-index. + input: + text: text labels of each image. [batch_size] + + output: + text: concatenated text index for CTCLoss. + [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)] + length: length of each text. [batch_size] + """ + length = [len(s) for s in text] + text = ''.join(text) + text = [self.dict[char] for char in text] + + return (torch.IntTensor(text), torch.IntTensor(length)) + + def decode_greedy(self, text_index, length): + """ convert text-index into text-label. 
""" + texts = [] + index = 0 + for l in length: + t = text_index[index:index + l] + # Returns a boolean array where true is when the value is not repeated + a = np.insert(~((t[1:]==t[:-1])),0,True) + # Returns a boolean array where true is when the value is not in the ignore_idx list + b = ~np.isin(t,np.array(self.ignore_idx)) + # Combine the two boolean array + c = a & b + # Gets the corresponding character according to the saved indexes + text = ''.join(np.array(self.character)[t[c.nonzero()]]) + texts.append(text) + index += l + return texts + + def decode_beamsearch(self, mat, beamWidth=5): + texts = [] + for i in range(mat.shape[0]): + t = ctcBeamSearch(mat[i], self.character, self.ignore_idx, None, beamWidth=beamWidth) + texts.append(t) + return texts + + def decode_wordbeamsearch(self, mat, beamWidth=5): + texts = [] + argmax = np.argmax(mat, axis = 2) + + for i in range(mat.shape[0]): + string = '' + # without separators - use space as separator + if len(self.separator_list) == 0: + space_idx = self.dict[' '] + + data = np.argwhere(argmax[i]!=space_idx).flatten() + group = np.split(data, np.where(np.diff(data) != 1)[0]+1) + group = [ list(item) for item in group if len(item)>0] + + for j, list_idx in enumerate(group): + matrix = mat[i, list_idx,:] + t = ctcBeamSearch(matrix, self.character, self.ignore_idx, None,\ + beamWidth=beamWidth, dict_list=self.dict_list) + if j == 0: string += t + else: string += ' '+t + + # with separators + else: + words = word_segmentation(argmax[i]) + + for word in words: + matrix = mat[i, word[1][0]:word[1][1]+1,:] + if word[0] == '': dict_list = [] + else: dict_list = self.dict_list[word[0]] + t = ctcBeamSearch(matrix, self.character, self.ignore_idx, None, beamWidth=beamWidth, dict_list=dict_list) + string += t + texts.append(string) + return texts \ No newline at end of file diff --git a/ocrUtils/pth2onnx.py b/ocrUtils/pth2onnx.py new file mode 100644 index 0000000..4891740 --- /dev/null +++ b/ocrUtils/pth2onnx.py @@ -0,0 
+1,48 @@ +import crnn_model.vgg_model as vgg +import sys +from ocrTrt import toONNX,ONNXtoTrt +from collections import OrderedDict +import torch +import argparse +def crnnModel(opt): + input_height=opt.mHeight + input_width=opt.mWidth + ##生成识别模型 + device='cuda:0' + model_path = opt.weights + recog_network, network_params = 'generation2', {'input_channel': 1, 'output_channel': 256, 'hidden_size': 256,'input_height':input_height} + num_class= 97 + model = vgg.Model(num_class=num_class, **network_params) + ##修正模型参数的名字 + state_dict = torch.load(model_path,map_location=device) + + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + # load params + model.load_state_dict(new_state_dict) + model = model.to(device) + model.load_state_dict(new_state_dict) + return model + + +if __name__=='__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='english_g2.onnx', help='model path(s)') + parser.add_argument('--mWidth', type=int, default=640, help='segmodel mWdith') + parser.add_argument('--mHeight', type=int, default=360, help='segmodel mHeight') + opt = parser.parse_args() + + pthmodel = crnnModel(opt) + + ###转换TRT模型 + onnxFile=opt.weights.replace('.pth','_%dX%d.onnx'%(opt.mWidth,opt.mHeight)) + trtFile=opt.weights.replace('.pth','_%dX%d.engine'%(opt.mWidth,opt.mHeight)) + + print('#'*20, ' begin to toONNX') + toONNX(pthmodel,onnxFile,inputShape=(1,1,opt.mHeight, opt.mWidth),device='cuda:0') + print('#'*20, ' begin to TRT') + ONNXtoTrt(onnxFile,trtFile,half=False) + diff --git a/ocrUtils/run.sh b/ocrUtils/run.sh new file mode 100644 index 0000000..56f46ae --- /dev/null +++ b/ocrUtils/run.sh @@ -0,0 +1,4 @@ +gpu=2080Ti mWidth=448 mHeight=32 +pth=/mnt/thsw2/DSP2/weights/ocr_en/english_g2 +python pth2onnx.py --weights ${pth}.pth --mWidth ${mWidth} --mHeight ${mHeight} +mv ${pth}_${mWidth}X${mHeight}.engine $pth_${gpu}_fp16_${mWidth}X${mHeight}.engine diff 
--git a/ocrUtils2/chars.txt b/ocrUtils2/chars.txt new file mode 100644 index 0000000..4ee10a3 --- /dev/null +++ b/ocrUtils2/chars.txt @@ -0,0 +1,92 @@ +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +A +B +C +D +E +F +G +H +I +J +K +L +M +N +O +P +Q +R +S +T +U +V +W +X +Y +Z +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +° +! +" +# +$ +% +& +' +( +) +* ++ +, +- +. +/ +: +; +? +@ +[ +\ +] +^ +_ +` +{ +| +} +~ diff --git a/ocrUtils2/crnnCh.py b/ocrUtils2/crnnCh.py new file mode 100644 index 0000000..10d656f --- /dev/null +++ b/ocrUtils2/crnnCh.py @@ -0,0 +1,86 @@ +import torch.nn as nn +import torch + +class BiLSTM(nn.Module): + def __init__(self, nIn, nHidden, nOut): + super(BiLSTM, self).__init__() + self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True) + self.embedding = nn.Linear(nHidden*2, nOut) + + def forward(self, input): + if not hasattr(self, '_flattened'): + self.rnn.flatten_parameters() + setattr(self, '_flattened', True) + + rnnOut, _ = self.rnn(input) + T, b, c = rnnOut.size() + rnnOut = rnnOut.view(T*b, c) + + output = self.embedding(rnnOut) + output = output.view(T, b, -1) + + return output + + +class CRNN(nn.Module): + def __init__(self, nc, nh, nclass, height, LeakyRelu=False): + super(CRNN, self).__init__() + + kernal_size = [3, 3, 3, 3, 3, 3, 3] + padding_size = [1, 1, 1, 1, 1, 1, 1] + stride_size = [1, 1, 1, 1, 1, 1, 1] + channels = [64, 128, 256, 256, 512, 512, 512] + + cnn = nn.Sequential() + + def convRelu(i, BatchNormalize=False): + if i == 0: + nIn = nc + else: + nIn = channels[i-1] + nOut = channels[i] + cnn.add_module('conv{0}'.format(i), + nn.Conv2d(nIn, nOut, kernal_size[i], stride_size[i], padding_size[i])) + if BatchNormalize: + cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut)) + if LeakyRelu: + cnn.add_module('relu{0}'.format(i), nn.LeakyReLU(0.2, inplace=True)) + else: + cnn.add_module('relu{0}'.format(i), nn.ReLU(True)) + convRelu(0) + cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d((2, 2), (1, 2), (1, 
0))) + convRelu(1) + cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d((2, 2), (1, 2), (1, 0))) + convRelu(2, True) + convRelu(3) + cnn.add_module('pooling{0}'.format(2), + nn.MaxPool2d((2,2), (2,1), (0,1))) + convRelu(4, True) + convRelu(5) + cnn.add_module('pooling{0}'.format(3), + nn.MaxPool2d((2,2), (2,1), (0,1))) + convRelu(6, True) + + self.cnn = cnn + + self.avg_pooling = nn.AvgPool2d(kernel_size=(height//4, 1), stride=(height//4, 1)) + + self.rnn = nn.Sequential( + BiLSTM(512, nh, nh), + BiLSTM(nh, nh, nclass) + ) + + + def forward(self, input): + conv = self.cnn(input) + conv = self.avg_pooling(conv) + conv = conv.squeeze(2) + conv = conv.permute(2, 0, 1) + output = self.rnn(conv) + return output + +if __name__=="__main__": + img = torch.randn(60, 3, 64, 100).cuda(1) + crnn = CRNN(3,256, 36, 64).cuda(1) + res = crnn(img) + print(res.size()) diff --git a/ocrUtils2/crnn_model.py b/ocrUtils2/crnn_model.py new file mode 100644 index 0000000..fbe70c7 --- /dev/null +++ b/ocrUtils2/crnn_model.py @@ -0,0 +1,125 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +class BidirectionalLSTM(nn.Module): + # Inputs hidden units Out + def __init__(self, nIn, nHidden, nOut): + super(BidirectionalLSTM, self).__init__() + + self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True) + self.embedding = nn.Linear(nHidden * 2, nOut) + + def forward(self, input): + recurrent, _ = self.rnn(input) + T, b, h = recurrent.size() + t_rec = recurrent.view(T * b, h) + + output = self.embedding(t_rec) # [T * b, nOut] + output = output.view(T, b, -1) + + return output + +class CRNN(nn.Module): + def __init__(self, imgH, nc, nclass, nh, n_rnn=2, leakyRelu=False): + super(CRNN, self).__init__() + + assert imgH % 16 == 0, 'imgH has to be a multiple of 16' + + ks = [3, 3, 3, 3, 3, 3, 2] + ps = [1, 1, 1, 1, 1, 1, 0] + ss = [1, 1, 1, 1, 1, 1, 1] + nm = [64, 128, 256, 256, 512, 512, 512] + + cnn = nn.Sequential() + + def convRelu(i, batchNormalization=False): + nIn = nc if i 
== 0 else nm[i - 1] + nOut = nm[i] + cnn.add_module('conv{0}'.format(i), + nn.Conv2d(nIn, nOut, ks[i], ss[i], ps[i])) + if batchNormalization: + cnn.add_module('batchnorm{0}'.format(i), nn.BatchNorm2d(nOut)) + if leakyRelu: + cnn.add_module('relu{0}'.format(i), + nn.LeakyReLU(0.2, inplace=True)) + else: + cnn.add_module('relu{0}'.format(i), nn.ReLU(True)) + + convRelu(0) + cnn.add_module('pooling{0}'.format(0), nn.MaxPool2d(2, 2)) # 64x16x64 + convRelu(1) + cnn.add_module('pooling{0}'.format(1), nn.MaxPool2d(2, 2)) # 128x8x32 + convRelu(2, True) + convRelu(3) + cnn.add_module('pooling{0}'.format(2), + nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 256x4x16 + convRelu(4, True) + convRelu(5) + cnn.add_module('pooling{0}'.format(3), + nn.MaxPool2d((2, 2), (2, 1), (0, 1))) # 512x2x16 + convRelu(6, True) # 512x1x16 + + self.cnn = cnn + self.rnn = nn.Sequential( + BidirectionalLSTM(512, nh, nh), + BidirectionalLSTM(nh, nh, nclass)) + + def forward(self, input): + + # conv features + conv = self.cnn(input) + b, c, h, w = conv.size() + #print(conv.size()) + assert h == 1, "the height of conv must be 1" + conv = conv.squeeze(2) # b *512 * width + conv = conv.permute(2, 0, 1) # [w, b, c] + output = F.log_softmax(self.rnn(conv), dim=2) + + return output + +def weights_init(m): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + m.weight.data.normal_(0.0, 0.02) + elif classname.find('BatchNorm') != -1: + m.weight.data.normal_(1.0, 0.02) + m.bias.data.fill_(0) + +def load_model_weights(model,weight): + + checkpoint = torch.load(weight) + if 'state_dict' in checkpoint.keys(): + model.load_state_dict(checkpoint['state_dict']) + else: + try: + model.load_state_dict(checkpoint) + except: + ##修正模型参数的名字 + state_dict = torch.load(weight) + # create new OrderedDict that does not contain `module.` + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + name = k[7:] # remove `module.` + new_state_dict[name] = v + # load 
params + model.load_state_dict(new_state_dict) + +def get_crnn(config,weights=None): + + model = CRNN(config.MODEL.IMAGE_SIZE.H, 1, config.MODEL.NUM_CLASSES + 1, config.MODEL.NUM_HIDDEN) + + if weights: + load_model_weights(model,weights) + ''' + checkpoint = torch.load(weights) + if 'state_dict' in checkpoint.keys(): + model.load_state_dict(checkpoint['state_dict']) + else: + model.load_state_dict(checkpoint) + ''' + else: + model.apply(weights_init) + + return model diff --git a/ocrUtils2/ocrTrt.py b/ocrUtils2/ocrTrt.py new file mode 100644 index 0000000..e4feeef --- /dev/null +++ b/ocrUtils2/ocrTrt.py @@ -0,0 +1,452 @@ +import torch +import argparse +import sys,os + +from torchvision import transforms +import cv2,glob +import numpy as np +import matplotlib.pyplot as plt +import time +from pathlib import Path +from concurrent.futures import ThreadPoolExecutor +import tensorrt as trt + +#import pycuda.driver as cuda + + +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id + +def infer_usage(): + image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + #weights = '../weights/BiSeNet/checkpoint.pth' + #segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights) + + weights = '../weights/BiSeNet/checkpoint_640X360_epo33.pth' + segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights,modelsize=(640,360)) + + image_urls=glob.glob('../../../../data/无人机起飞测试图像/*') + out_dir ='results/'; + os.makedirs(out_dir,exist_ok=True) + for im,image_url in enumerate(image_urls[0:]): + #image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG' + image_array0 = cv2.imread(image_url) + H,W,C = image_array0.shape + time_1=time.time() + pred,outstr = segmodel.eval(image_array0 ) + + #plt.figure(1);plt.imshow(pred); + #plt.show() + binary0 = pred.copy() + + + time0 = time.time() + contours, 
def colorstr(*input):
    """Wrap a string in ANSI color/style escape codes.

    The last positional argument is the text; any preceding ones are
    color/style names (default 'blue', 'bold' when only the text is given).
    See https://en.wikipedia.org/wiki/ANSI_escape_code.
    """
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']

def file_size(path):
    """Return the size of a file, or the recursive size of a directory, in MB.

    Nonexistent paths yield 0.0.
    """
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / 1E6
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
    else:
        return 0.0

def toONNX(seg_model, onnxFile, inputShape=(1, 3, 360, 640), device=torch.device('cuda:0')):
    """Export ``seg_model`` to ONNX at ``onnxFile``.

    A dummy image of ``inputShape`` is traced through the model.  Some
    recognizers also take a text tensor; that two-input signature is probed
    first, falling back to the single-input form.
    """
    print('####begin to export to onnx')
    import onnx

    im = torch.rand(inputShape).to(device)
    seg_model.eval()
    text_for_pred = torch.LongTensor(1, 90).fill_(0).to(device)
    try:
        seg_model(im, text_for_pred)   # two-input (image + text) models
        input2 = (im, text_for_pred)
    except Exception:
        seg_model(im)                  # plain single-input models
        input2 = (im)
    print('###test model infer example####')
    train = False
    dynamic = False
    opset = 11
    torch.onnx.export(seg_model, input2, onnxFile, opset_version=opset,
                      training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
                      do_constant_folding=not train,
                      input_names=['images'],
                      output_names=['output'],
                      dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'},  # shape(1,3,640,640)
                                    'output': {0: 'batch', 1: 'anchors'}  # shape(1,25200,85)
                                    } if dynamic else None)
    print('output onnx file:', onnxFile)

def ONNXtoTrt(onnxFile, trtFile, half=True):
    """Build a TensorRT engine from ``onnxFile`` and serialize it to ``trtFile``.

    ``half`` requests an FP16 engine when the platform supports fast FP16.
    """
    import tensorrt as trt
    time0 = time.time()
    verbose = True; workspace = 4; prefix = colorstr('TensorRT:')
    f = trtFile
    logger = trt.Logger(trt.Logger.INFO)
    if verbose:
        logger.min_severity = trt.Logger.Severity.VERBOSE

    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    config.max_workspace_size = workspace * 1 << 30

    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(flag)
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(str(onnxFile)):
        # BUGFIX: the message previously interpolated the undefined name
        # `onnx`, raising NameError instead of this RuntimeError.
        raise RuntimeError(f'failed to load ONNX file: {onnxFile}')

    inputs = [network.get_input(i) for i in range(network.num_inputs)]
    outputs = [network.get_output(i) for i in range(network.num_outputs)]
    print(f'{prefix} Network Description:')
    for inp in inputs:
        print(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
    for out in outputs:
        print(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')

    half &= builder.platform_has_fast_fp16
    print(f'{prefix} building FP{16 if half else 32} engine in {f}')
    if half:
        config.set_flag(trt.BuilderFlag.FP16)
    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
        t.write(engine.serialize())
    print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    time1 = time.time()
    print('output trtfile from ONNX, time:%.4f s, half: ,' % (time1 - time0), trtFile, half)
def EngineInfer_onePic_thread(pars_thread):
    """Segment one image with a TensorRT engine and save the visualization.

    Args:
        pars_thread: [engine, image_array0 (BGR ndarray), out_dir, image_url, im(index)].

    Writes the contour-annotated image into ``out_dir`` and returns 'success'.
    """
    engine, image_array0, out_dir, image_url, im = pars_thread[0:6]
    H, W, C = image_array0.shape
    time0 = time.time()
    time1 = time.time()
    # run the engine; segtrtEval returns a binary mask plus a timing string
    pred, segInfoStr = segtrtEval(engine, image_array0,
                                  par={'modelSize': (640, 360),
                                       'mean': (0.485, 0.456, 0.406),
                                       'std': (0.229, 0.224, 0.225),
                                       'numpy': False, 'RGB_convert_first': True})
    pred = 1 - pred  # invert the raw mask (foreground/background swap)
    time2 = time.time()

    outstr = '###---###'
    binary0 = pred.copy()
    time3 = time.time()

    contours, hierarchy = cv2.findContours(binary0, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # NOTE: -1 makes drawContours render ALL contours; the largest-only
    # selection that used to set max_id is intentionally disabled here.
    max_id = -1
    time4 = time.time()

    cv2.drawContours(image_array0, contours, max_id, (0, 255, 255), 3)
    time5 = time.time()
    out_url = '%s/%s' % (out_dir, os.path.basename(image_url))
    ret = cv2.imwrite(out_url, image_array0)
    time6 = time.time()

    print('image:%d,%s ,%d*%d, %s,,findcontours:%.1f ms,draw:%.1f total:%.1f' % (
        im, os.path.basename(image_url), H, W, segInfoStr,
        get_ms(time4, time3), get_ms(time5, time4), get_ms(time5, time0)))
    return 'success'

def trt_version():
    """Return the installed TensorRT version string."""
    return trt.__version__

def torch_device_from_trt(device):
    """Map a TensorRT ``TensorLocation`` to the matching ``torch.device``.

    Raises:
        TypeError: for locations torch cannot represent.
        (BUGFIX: this used to *return* the TypeError instance instead of
        raising it, so callers received an exception object as a device.)
    """
    if device == trt.TensorLocation.DEVICE:
        return torch.device("cuda")
    elif device == trt.TensorLocation.HOST:
        return torch.device("cpu")
    else:
        raise TypeError("%s is not supported by torch" % device)

def torch_dtype_from_trt(dtype):
    """Map a TensorRT dtype to the matching torch dtype (raises on unknown)."""
    if dtype == trt.int8:
        return torch.int8
    elif trt_version() >= '7.0' and dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError("%s is not supported by torch" % dtype)
def EngineInfer(par):
    """Run TRT segmentation over every image in ``par['image_dir']``.

    Expected keys: modelSize, mean, std, RGB_convert_first, device,
    weights (serialized engine path), image_dir, out_dir, max_threads.
    Per-image work is delegated to ``EngineInfer_onePic_thread``,
    optionally fanned out over a thread pool.
    """
    modelSize = par['modelSize']; mean = par['mean']; std = par['std']
    RGB_convert_first = par['RGB_convert_first']; device = par['device']
    weights = par['weights']; image_dir = par['image_dir']
    max_threads = par['max_threads']
    image_urls = glob.glob('%s/*' % (image_dir))
    out_dir = par['out_dir']
    os.makedirs(out_dir, exist_ok=True)

    # Deserialize the engine once and share it across all workers.
    logger = trt.Logger(trt.Logger.ERROR)
    with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    print('#####load TRT file:', weights, 'success #####')

    tasks = [[engine, cv2.imread(url), out_dir, url, idx]
             for idx, url in enumerate(image_urls)]

    t1 = time.time()
    if max_threads == 1:
        for task in tasks:
            EngineInfer_onePic_thread(task)
    else:
        with ThreadPoolExecutor(max_workers=max_threads) as pool:
            for _ in pool.map(EngineInfer_onePic_thread, tasks):
                pass
    t2 = time.time()
    print('All %d images time:%.1f ms, each:%.1f ms , with %d threads' % (
        len(image_urls), (t2 - t1) * 1000, (t2 - t1) * 1000.0 / len(image_urls), max_threads))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='stdc_360X640.pth', help='model path(s)')
    opt = parser.parse_args()
    print(opt.weights)
    pthFile = Path(opt.weights)
    onnxFile = pthFile.with_suffix('.onnx')
    trtFile = onnxFile.with_suffix('.engine')

    nclass = 2; device = torch.device('cuda:0')

    ##STDC net
    # NOTE(review): SegModel_STDC is not imported anywhere in this module, so
    # this entry point raises NameError as written -- confirm the intended import.
    weights = pthFile
    segmodel = SegModel_STDC(nclass=nclass, weights=weights); inputShape = (1, 3, 360, 640)  # (bs,channels,height,width)
    seg_model = segmodel.model

    par = {'modelSize': (inputShape[3], inputShape[2]), 'mean': (0.485, 0.456, 0.406),
           'std': (0.229, 0.224, 0.225), 'RGB_convert_first': True,
           'weights': trtFile, 'device': device, 'max_threads': 1,
           'image_dir': '../../river_demo/images/road', 'out_dir': 'results'}

    #infer_usage()
    toONNX(seg_model, onnxFile, inputShape=inputShape, device=device)
    ONNXtoTrt(onnxFile, trtFile)
    #EngineInfer(par)
    #ONNX_eval()
def trt_version():
    """Return the installed TensorRT version string."""
    return trt.__version__

def torch_device_from_trt(device):
    """Map a TensorRT ``TensorLocation`` to the matching ``torch.device``.

    Raises:
        TypeError: for locations torch cannot represent.
        (BUGFIX: this used to *return* the TypeError instance instead of
        raising it, so callers received an exception object as a device.)
    """
    if device == trt.TensorLocation.DEVICE:
        return torch.device("cuda")
    elif device == trt.TensorLocation.HOST:
        return torch.device("cpu")
    else:
        raise TypeError("%s is not supported by torch" % device)


def torch_dtype_from_trt(dtype):
    """Map a TensorRT dtype to the matching torch dtype (raises on unknown)."""
    if dtype == trt.int8:
        return torch.int8
    elif trt_version() >= '7.0' and dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError("%s is not supported by torch" % dtype)

def OcrTrtForward(engine, inputs, contextFlag=False):
    """Run one forward pass of an OCR TensorRT engine.

    Args:
        engine: deserialized ICudaEngine; binding 0 is the image input and
            the remaining bindings are outputs.
        inputs: list with a single GPU torch tensor (only inputs[0] is used,
            even if the engine declares several inputs).
        contextFlag: a pre-created execution context to reuse, or False to
            create a fresh one per call.

    Returns:
        (output tensor, timing-summary string)
    """
    t0 = time.time()
    context = contextFlag if contextFlag else engine.create_execution_context()

    namess = [engine.get_tensor_name(index) for index in range(engine.num_bindings)]
    input_names = [namess[0]]; output_names = namess[1:]
    batch_size = inputs[0].shape[0]
    bindings = [None] * (len(input_names) + len(output_names))
    t1 = time.time()

    # Allocate a torch tensor for each output and bind its data pointer.
    outputs = [None] * len(output_names)
    for i, output_name in enumerate(output_names):
        idx = engine.get_binding_index(output_name)
        dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))
        shape = (batch_size,) + tuple(engine.get_binding_shape(idx))
        device = torch_device_from_trt(engine.get_location(idx))
        output = torch.empty(size=shape, dtype=dtype, device=device)
        outputs[i] = output
        bindings[idx] = output.data_ptr()
    t2 = time.time()

    # Bind the (single) input pointer.
    for i, input_name in enumerate(input_names):
        idx = engine.get_binding_index(input_name)
        bindings[idx] = inputs[0].contiguous().data_ptr()
    t3 = time.time()
    context.execute_v2(bindings)  # synchronous inference
    t4 = time.time()

    if len(outputs) == 1:
        outputs = outputs[0]
    outstr = 'create Context:%.2f alloc memory:%.2f prepare input:%.2f conext infer:%.2f, total:%.2f' % (
        (t1 - t0) * 1000, (t2 - t1) * 1000, (t3 - t2) * 1000, (t4 - t3) * 1000, (t4 - t0) * 1000)
    return outputs[0], outstr

def np_resize_keepRation(img, inp_h, inp_w):
    """Scale ``img``'s width by inp_h/img_h, then pad/resize to width inp_w.

    If the scaled width is narrower than ``inp_w`` the right side is padded
    by repeating the last column; otherwise the image is resized to
    (inp_w, inp_h).  Works for grayscale (H,W) and color (H,W,3) inputs.
    """
    img_h, img_w = img.shape[0:2]
    fy = inp_h / img_h
    keep_w = int(img_w * fy)
    # NOTE(review): the intermediate resize keeps the ORIGINAL height img_h,
    # not inp_h; callers appear to pre-scale the height to inp_h -- confirm.
    Rsize = (keep_w, img_h)
    img = cv2.resize(img, Rsize)
    if keep_w < inp_w:
        # pad on the right with copies of the last column
        if len(img.shape) == 3:
            img_out = np.zeros((inp_h, inp_w, 3), dtype=np.uint8)
            img_out[:, :keep_w] = img[:, :]
            for j in range(3):
                img_out[:, keep_w:, j] = np.tile(img[:, keep_w - 1:, j], inp_w - keep_w)
        else:
            img_out = np.zeros((inp_h, inp_w), dtype=np.uint8)
            img_out[:, :keep_w] = img[:, :]
            img_out[:, keep_w:] = np.tile(img[:, keep_w - 1:], inp_w - keep_w)
    else:
        img_out = cv2.resize(img, (inp_w, inp_h))
    return img_out
class strLabelConverter(object):
    """Convert between text strings and CTC label sequences.

    Label 0 is reserved for the CTC blank; character i of ``alphabet``
    maps to label i+1.  A trailing '-' is appended to the stored alphabet
    so the blank decodes to '-' in raw mode (the `-1` index).

    Args:
        alphabet (str): set of the possible characters.
        ignore_case (bool): lower-case the alphabet first (default False).
    """

    def __init__(self, alphabet, ignore_case=False):
        self._ignore_case = ignore_case
        if self._ignore_case:
            alphabet = alphabet.lower()
        self.alphabet = alphabet + '-'  # for `-1` index
        # 0 is reserved for the CTC blank, so characters start at 1
        self.dict = {char: idx + 1 for idx, char in enumerate(alphabet)}

    def encode(self, text):
        """Encode a str or list of str into one flat label tensor.

        Returns:
            (torch.IntTensor of all labels concatenated,
             torch.IntTensor of per-item lengths)
        """
        lengths = []
        labels = []
        decode_flag = True if type(text[0]) == bytes else False
        for item in text:
            if decode_flag:
                item = item.decode('utf-8', 'strict')
            lengths.append(len(item))
            labels.extend(self.dict[ch] for ch in item)
        return (torch.IntTensor(labels), torch.IntTensor(lengths))

    def decode(self, t, length, raw=False):
        """Decode encoded label tensor(s) back into str(s).

        A scalar ``length`` decodes one sequence; a length vector decodes a
        batch and returns a list of str.  With ``raw=False`` CTC collapsing
        is applied: blanks (0) and consecutive repeats are dropped.

        Raises:
            AssertionError: when ``t`` and ``length`` disagree.
        """
        if length.numel() == 1:
            length = length[0]
            assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
            if raw:
                return ''.join([self.alphabet[i - 1] for i in t])
            chars = []
            for i in range(length):
                if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
                    chars.append(self.alphabet[t[i] - 1])
            return ''.join(chars)
        # batch mode: split the flat tensor by lengths and recurse
        assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
        texts = []
        index = 0
        for i in range(length.numel()):
            l = length[i]
            texts.append(self.decode(t[index:index + l], torch.IntTensor([l]), raw=raw))
            index += l
        return texts


def get_alphabets(txtfile):
    """Read one character per line from ``txtfile``; join into one alphabet string."""
    print(txtfile)
    with open(txtfile, 'r') as fp:
        chars = [line.strip() for line in fp.readlines()]
    return "".join(chars)

def get_cfg(cfg, char_file):
    """Load a YAML training config and attach the alphabet from ``char_file``."""
    with open(cfg, 'r') as f:
        config = edict(yaml.load(f, Loader=yaml.FullLoader))
    config.DATASET.ALPHABETS = get_alphabets(char_file.strip())
    config.MODEL.NUM_CLASSES = len(config.DATASET.ALPHABETS)
    return config

def custom_mean(x):
    """Confidence pooling used for OCR scores: prod(x) ** (2 / sqrt(len(x)))."""
    return x.prod() ** (2.0 / np.sqrt(len(x)))
def contrast_grey(img):
    """Return (contrast, p90, p10) of a grayscale image.

    Contrast is (p90 - p10) / max(10, p90 + p10): the normalized spread of
    the 10th/90th intensity percentiles.
    """
    high = np.percentile(img, 90)
    low = np.percentile(img, 10)
    return (high - low) / np.maximum(10, high + low), high, low

def adjust_contrast_grey(img, target = 0.4):
    """Stretch a low-contrast grayscale image until its contrast reaches ~``target``.

    Images already at or above ``target`` contrast are returned unchanged;
    stretched results are clipped to [0, 255] and cast to uint8.
    """
    contrast, high, low = contrast_grey(img)
    if contrast < target:
        ratio = 200. / np.maximum(10, high - low)
        stretched = (img.astype(int) - low + 25) * ratio
        img = np.maximum(np.full(stretched.shape, 0),
                         np.minimum(np.full(stretched.shape, 255), stretched)).astype(np.uint8)
    return img

class NormalizePAD(object):
    """ToTensor + [-1, 1] normalization + right-pad of a (C, H, W) image to max_size.

    The padded region repeats the image's last pixel column (border padding).
    """

    def __init__(self, max_size, PAD_type='right'):
        self.toTensor = transforms.ToTensor()
        self.max_size = max_size
        self.max_width_half = math.floor(max_size[2] / 2)
        self.PAD_type = PAD_type

    def __call__(self, img):
        img = self.toTensor(img)
        img.sub_(0.5).div_(0.5)  # map [0, 1] -> [-1, 1]
        c, h, w = img.size()
        padded = torch.FloatTensor(*self.max_size).fill_(0)
        padded[:, :, :w] = img  # content is left-aligned
        if self.max_size[2] != w:
            # extend the last pixel column across the padded area
            padded[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)
        return padded
def crnnModel(opt):
    """Build and load the CRNN recognizer selected by ``opt.mode``.

    mode 'en' -> grayscale English CRNN (93 classes); anything else -> the
    3-channel Chinese CRNN (7935 classes).  Checkpoints saved raw, under
    'state_dict', or with DataParallel 'module.' prefixes are all accepted.

    Returns:
        the loaded model moved to cuda:0.
    """
    mode = opt.mode.strip()
    device = 'cuda:0'
    model_path = opt.weights

    if mode == 'en':
        import crnn_model
        model = crnn_model.CRNN(32, 1, 93, 256)
    else:
        import crnnCh as crnn
        model = crnn.CRNN(3, 256, 7935, 32)
    print('####line24:', mode)

    checkpoint = torch.load(model_path)
    if 'state_dict' in checkpoint.keys():
        model.load_state_dict(checkpoint['state_dict'])
    else:
        try:
            model.load_state_dict(checkpoint)
        except Exception:
            # Keys probably carry DataParallel's 'module.' prefix; strip and
            # retry, reusing the already loaded checkpoint instead of reading
            # the file a second time.
            new_state_dict = OrderedDict()
            for k, v in checkpoint.items():
                new_state_dict[k[7:]] = v  # remove `module.`
            model.load_state_dict(new_state_dict)

    return model.to(device)
class p2NnetModel(object):
    """P2PNet crowd-counting wrapper: loads weights and runs single-image inference."""

    def __init__(self, weights=None, par={}):
        # NOTE(review): mutable default kept for interface compatibility; par
        # is only read, never mutated.
        self.par = par
        self.device = torch.device(par['device'])
        # BUGFIX: the assert message previously never interpolated the path
        # ("%s not exists" was a bare literal).
        assert os.path.exists(weights), "%s not exists" % weights
        self.model = build(par)
        self.model.to(self.device)
        checkpoint = torch.load(weights, map_location=self.device)
        self.model.load_state_dict(checkpoint['model'])
        self.model.eval()
        self.transform = standard_transforms.Compose([
            standard_transforms.ToTensor(),
            standard_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225]),
        ])

    def eval(self, image):
        """Run the network on a BGR image (np.ndarray) and return raw preds.

        The image is converted to RGB and snapped down to a multiple of 128
        in each dimension before the forward pass.
        """
        t0 = time.time()
        img_raw = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        img_raw = Image.fromarray(img_raw)
        width, height = img_raw.size
        new_width = width // 128 * 128
        new_height = height // 128 * 128
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; switch to
        # Image.Resampling.LANCZOS (the same filter) if Pillow is upgraded.
        img_raw = img_raw.resize((new_width, new_height), Image.ANTIALIAS)
        img = self.transform(img_raw)
        samples = torch.Tensor(img).unsqueeze(0)
        samples = samples.to(self.device)

        preds = self.model(samples)
        t3 = time.time()
        timeOut = 'p2pnet :%.1f (pre-process:%.1f, ) ' % (self.get_ms(t3, t0), self.get_ms(t3, t0))
        return preds

    def get_ms(self, t1, t0):
        """Elapsed milliseconds between two time.time() stamps."""
        return (t1 - t0) * 1000.0
+""" +from collections import OrderedDict + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn + +import p2pnetUtils.vgg_ as models + +class BackboneBase_VGG(nn.Module): + def __init__(self, backbone: nn.Module, num_channels: int, name: str, return_interm_layers: bool): + super().__init__() + features = list(backbone.features.children()) + if return_interm_layers: + if name == 'vgg16_bn': + self.body1 = nn.Sequential(*features[:13]) + self.body2 = nn.Sequential(*features[13:23]) + self.body3 = nn.Sequential(*features[23:33]) + self.body4 = nn.Sequential(*features[33:43]) + else: + self.body1 = nn.Sequential(*features[:9]) + self.body2 = nn.Sequential(*features[9:16]) + self.body3 = nn.Sequential(*features[16:23]) + self.body4 = nn.Sequential(*features[23:30]) + else: + if name == 'vgg16_bn': + self.body = nn.Sequential(*features[:44]) # 16x down-sample + elif name == 'vgg16': + self.body = nn.Sequential(*features[:30]) # 16x down-sample + self.num_channels = num_channels + self.return_interm_layers = return_interm_layers + + def forward(self, tensor_list): + out = [] + + if self.return_interm_layers: + xs = tensor_list + for _, layer in enumerate([self.body1, self.body2, self.body3, self.body4]): + xs = layer(xs) + out.append(xs) + + else: + xs = self.body(tensor_list) + out.append(xs) + return out + + +class Backbone_VGG(BackboneBase_VGG): + """ResNet backbone with frozen BatchNorm.""" + def __init__(self, name: str, return_interm_layers: bool): + if name == 'vgg16_bn': + backbone = models.vgg16_bn(pretrained=True) + elif name == 'vgg16': + backbone = models.vgg16(pretrained=True) + num_channels = 256 + super().__init__(backbone, num_channels, name, return_interm_layers) + + +def build_backbone(args): + backbone = Backbone_VGG(args['backbone'], True) + return backbone + + +if __name__ == '__main__': + Backbone_VGG('vgg16', True) diff --git a/p2pnetUtils/matcher.py b/p2pnetUtils/matcher.py new file mode 100644 index 
class HungarianMatcher_Crowd(nn.Module):
    """Bipartite matcher between predicted points and ground-truth points.

    Builds a cost matrix mixing classification confidence and L2 point
    distance, then solves the assignment with the Hungarian algorithm.
    Predictions left unmatched are treated as background (non-objects).
    """

    def __init__(self, cost_class: float = 1, cost_point: float = 1):
        """cost_class / cost_point weight the two terms of the matching cost."""
        super().__init__()
        self.cost_class = cost_class
        self.cost_point = cost_point
        assert cost_class != 0 or cost_point != 0, "all costs cant be 0"

    @torch.no_grad()
    def forward(self, outputs, targets):
        """Match predictions to targets.

        outputs: dict with "pred_logits" [B, Q, C] and "pred_points" [B, Q, 2].
        targets: per-image dicts with "labels" [T] and "point" [T, 2].
        Returns, per image, a (pred_indices, target_indices) tensor pair with
        len == min(Q, T).
        """
        batch, queries = outputs["pred_logits"].shape[:2]

        # Flatten the batch so one big cost matrix covers every image at once.
        prob = outputs["pred_logits"].flatten(0, 1).softmax(-1)  # [B*Q, C]
        points = outputs["pred_points"].flatten(0, 1)            # [B*Q, 2]

        gt_labels = torch.cat([t["labels"] for t in targets])
        gt_points = torch.cat([t["point"] for t in targets])

        # Classification term: approximates 1 - p(target class); the constant 1
        # is dropped because it does not change the assignment.
        class_cost = -prob[:, gt_labels]
        # Localisation term: Euclidean distance to every ground-truth point.
        point_cost = torch.cdist(points, gt_points, p=2)

        cost = self.cost_point * point_cost + self.cost_class * class_cost
        cost = cost.view(batch, queries, -1).cpu()

        # Solve each image's sub-matrix independently.
        counts = [len(t["point"]) for t in targets]
        pairs = [linear_sum_assignment(c[i]) for i, c in enumerate(cost.split(counts, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))
                for i, j in pairs]


def build_matcher_crowd(args):
    """Build the matcher from a config dict (set_cost_class / set_cost_point)."""
    return HungarianMatcher_Crowd(cost_class=args['set_cost_class'], cost_point=args['set_cost_point'])
+""" +import os +import subprocess +import time +from collections import defaultdict, deque +import datetime +import pickle +from typing import Optional, List + +import torch +import torch.distributed as dist +from torch import Tensor + +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable + +# needed due to empty tensor bug in pytorch and torchvision 0.5 +import torchvision +# if float(torchvision.__version__[:3]) < 0.7: +# from torchvision.ops import _new_empty_tensor +# from torchvision.ops.misc import _output_size + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device="cuda") + size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + 
data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + 
start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() + sha = 'N/A' + diff = "clean" + branch = 'N/A' + try: + sha = _run(['git', 'rev-parse', 'HEAD']) + subprocess.check_output(['git', 'diff'], cwd=cwd) + diff = _run(['git', 'diff-index', 'HEAD']) + diff = "has uncommited changes" if diff else "clean" + branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: 
{branch}" + return message + + +def collate_fn(batch): + batch = list(zip(*batch)) + batch[0] = nested_tensor_from_tensor_list(batch[0]) + return tuple(batch) + +def collate_fn_crowd(batch): + # re-organize the batch + batch_new = [] + for b in batch: + imgs, points = b + if imgs.ndim == 3: + imgs = imgs.unsqueeze(0) + for i in range(len(imgs)): + batch_new.append((imgs[i, :, :, :], points[i])) + batch = batch_new + batch = list(zip(*batch)) + batch[0] = nested_tensor_from_tensor_list(batch[0]) + return tuple(batch) + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + +def _max_by_axis_pad(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + + block = 128 + + for i in range(2): + maxes[i+1] = ((maxes[i+1] - 1) // block + 1) * block + return maxes + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + + # TODO make it support different-sized images + max_size = _max_by_axis_pad([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + for img, pad_img in zip(tensor_list, tensor): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + else: + raise ValueError('not supported') + return tensor + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = 
self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + 
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Compute precision@k for each k in `topk`.

    output: [N, C] scores; target: [N] class indices.  Returns a list of
    0-dim tensors holding percentages; an empty target yields a single 0.
    """
    if target.numel() == 0:
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUG FIX: `correct[:k]` is a slice of a transposed (non-contiguous)
        # tensor, so .view(-1) raises on modern torch; reshape copies as needed.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch
    sizes on torchvision < 0.7 (a workaround kept from DETR).
    """
    # BUG FIX: the original used float(torchvision.__version__[:3]), which maps
    # "0.15.2" to 0.1 and wrongly takes the legacy branch whose helper imports
    # are commented out above.  Compare numeric version components instead.
    version = tuple(int(p) for p in torchvision.__version__.split('.')[:2] if p.isdigit())
    if version < (0, 7):
        if input.numel() > 0:
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners
            )

        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        # NOTE(review): torchvision.ops.misc.interpolate is deprecated in recent
        # releases -- confirm against the torchvision version actually deployed.
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)


class FocalLoss(nn.Module):
    r"""Focal Loss (Lin et al., "Focal Loss for Dense Object Detection").

    Loss(x, class) = - \alpha (1 - softmax(x)[class])^gamma \log(softmax(x)[class])

    Losses are averaged over the minibatch, or summed when size_average=False.

    Args:
        class_num: number of classes C.
        alpha: per-class weights of shape (C, 1); defaults to all ones.
        gamma: focusing parameter (> 0 down-weights easy, well-classified examples).
        size_average: mean over the batch when True, sum otherwise.
    """

    def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        if alpha is None:
            self.alpha = Variable(torch.ones(class_num, 1))
        else:
            self.alpha = alpha if isinstance(alpha, Variable) else Variable(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets):
        """inputs: [N, C] logits; targets: [N] class indices."""
        N = inputs.size(0)
        C = inputs.size(1)
        # Explicit dim: the implicit-dim softmax form is deprecated and warns.
        P = F.softmax(inputs, dim=1)

        # One-hot mask selecting each sample's target-class probability.
        class_mask = inputs.data.new(N, C).fill_(0)
        class_mask = Variable(class_mask)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids.data, 1.)

        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        alpha = self.alpha[ids.data.view(-1)]

        probs = (P * class_mask).sum(1).view(-1, 1)
        log_p = probs.log()

        # Modulating factor (1 - p)^gamma focuses training on hard examples.
        batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p

        return batch_loss.mean() if self.size_average else batch_loss.sum()
class RegressionModel(nn.Module):
    """Point-regression head: predicts (dx, dy) offsets for every anchor point.

    NOTE: conv3/conv4 are defined but (as in the reference implementation)
    not used in forward(); they are kept so pretrained checkpoints load.
    """

    def __init__(self, num_features_in, num_anchor_points=4, feature_size=256):
        super(RegressionModel, self).__init__()
        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)

    def forward(self, x):
        """x: [B, C, H, W] -> offsets [B, H*W*A, 2]."""
        out = self.act1(self.conv1(x))
        out = self.act2(self.conv2(out))
        out = self.output(out)
        out = out.permute(0, 2, 3, 1)
        return out.contiguous().view(out.shape[0], -1, 2)


class ClassificationModel(nn.Module):
    """Classification head: per-anchor class scores.

    The sigmoid module is defined but, matching the reference implementation,
    not applied in forward() -- the raw logits are returned.
    """

    def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256):
        super(ClassificationModel, self).__init__()
        self.num_classes = num_classes
        self.num_anchor_points = num_anchor_points
        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()
        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()
        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()
        self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1)
        self.output_act = nn.Sigmoid()

    def forward(self, x):
        """x: [B, C, H, W] -> scores [B, H*W*A, num_classes]."""
        out = self.act1(self.conv1(x))
        out = self.act2(self.conv2(out))
        out = self.output(out)
        out1 = out.permute(0, 2, 3, 1)
        batch_size, width, height, _ = out1.shape
        out2 = out1.view(batch_size, width, height, self.num_anchor_points, self.num_classes)
        return out2.contiguous().view(x.shape[0], -1, self.num_classes)


def generate_anchor_points(stride=16, row=3, line=3):
    """Return a (row*line, 2) grid of reference offsets centred in one stride cell."""
    row_step = stride / row
    line_step = stride / line

    shift_x = (np.arange(1, line + 1) - 0.5) * line_step - stride / 2
    shift_y = (np.arange(1, row + 1) - 0.5) * row_step - stride / 2
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)

    return np.vstack((shift_x.ravel(), shift_y.ravel())).transpose()


def shift(shape, stride, anchor_points):
    """Tile the meta anchor points over an (H, W) feature map -> (H*W*A, 2)."""
    shift_x = (np.arange(0, shape[1]) + 0.5) * stride
    shift_y = (np.arange(0, shape[0]) + 0.5) * stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel())).transpose()

    A = anchor_points.shape[0]
    K = shifts.shape[0]
    all_anchor_points = (anchor_points.reshape((1, A, 2)) + shifts.reshape((1, K, 2)).transpose((1, 0, 2)))
    return all_anchor_points.reshape((K * A, 2))


class AnchorPoints(nn.Module):
    """Generate all reference points for the requested pyramid levels."""

    def __init__(self, pyramid_levels=None, strides=None, row=3, line=3):
        super(AnchorPoints, self).__init__()
        self.pyramid_levels = [3, 4, 5, 6, 7] if pyramid_levels is None else pyramid_levels
        # BUG FIX: the original only assigned self.strides when strides was
        # None, so passing explicit strides crashed forward() with an
        # AttributeError.  Always set it.
        if strides is None:
            self.strides = [2 ** x for x in self.pyramid_levels]
        else:
            self.strides = strides
        self.row = row
        self.line = line

    def forward(self, image):
        """image: [B, C, H, W] -> anchor points [1, K, 2] (CUDA when available)."""
        image_shape = np.array(image.shape[2:])
        image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]

        all_anchor_points = np.zeros((0, 2)).astype(np.float32)
        for idx, p in enumerate(self.pyramid_levels):
            anchor_points = generate_anchor_points(2 ** p, row=self.row, line=self.line)
            shifted = shift(image_shapes[idx], self.strides[idx], anchor_points)
            all_anchor_points = np.append(all_anchor_points, shifted, axis=0)

        all_anchor_points = np.expand_dims(all_anchor_points, axis=0)
        points = torch.from_numpy(all_anchor_points.astype(np.float32))
        return points.cuda() if torch.cuda.is_available() else points


class Decoder(nn.Module):
    """Tiny FPN: lateral 1x1 convs plus top-down nearest upsampling (P3..P5)."""

    def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
        super(Decoder, self).__init__()
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    def forward(self, inputs):
        C3, C4, C5 = inputs

        P5_x = self.P5_1(C5)
        P5_upsampled_x = self.P5_upsampled(P5_x)
        P5_x = self.P5_2(P5_x)

        P4_x = self.P4_1(C4)
        P4_x = P5_upsampled_x + P4_x
        P4_upsampled_x = self.P4_upsampled(P4_x)
        P4_x = self.P4_2(P4_x)

        P3_x = self.P3_1(C3)
        P3_x = P3_x + P4_upsampled_x
        P3_x = self.P3_2(P3_x)

        return [P3_x, P4_x, P5_x]


class P2PNet(nn.Module):
    """P2PNet: backbone -> FPN -> point regression + classification heads."""

    def __init__(self, backbone, row=2, line=2):
        super().__init__()
        self.backbone = backbone
        self.num_classes = 2  # person / background
        num_anchor_points = row * line  # anchors per feature-map location

        self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
        self.classification = ClassificationModel(num_features_in=256,
                                                  num_classes=self.num_classes,
                                                  num_anchor_points=num_anchor_points)
        self.anchor_points = AnchorPoints(pyramid_levels=[3, ], row=row, line=line)
        self.fpn = Decoder(256, 512, 512)

    def forward(self, samples):
        """samples: padded image batch (see misc.nested_tensor_from_tensor_list)."""
        # Backbone must return intermediate layers (see build_backbone).
        features = self.backbone(samples)
        features_fpn = self.fpn([features[1], features[2], features[3]])

        batch_size = features[0].shape[0]
        # Both heads operate on the 8x-downsampled FPN level.
        regression = self.regression(features_fpn[1]) * 100  # 8x
        classification = self.classification(features_fpn[1])
        anchor_points = self.anchor_points(samples).repeat(batch_size, 1, 1)
        # Decode: predicted offsets are relative to the fixed anchor points.
        output_coord = regression + anchor_points
        output_class = classification
        return {'pred_logits': output_class, 'pred_points': output_coord}


class SetCriterion_Crowd(nn.Module):
    """Loss for P2PNet: Hungarian matching + CE label loss + MSE point loss."""

    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """
        num_classes: object categories, omitting the special no-object class.
        matcher: computes a matching between targets and proposals.
        weight_dict: loss-name -> relative weight.
        eos_coef: classification weight for the no-object (background) class.
        losses: names of the losses to apply (see get_loss).
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[0] = self.eos_coef  # down-weight the background class
        self.register_buffer('empty_weight', empty_weight)

    def loss_labels(self, outputs, targets, indices, num_points):
        """Cross-entropy over matched predictions (background everywhere else)."""
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']

        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        target_classes = torch.full(src_logits.shape[:2], 0,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o

        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        return {'loss_ce': loss_ce}

    def loss_points(self, outputs, targets, indices, num_points):
        """MSE between matched predicted points and their ground-truth points."""
        assert 'pred_points' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_points = outputs['pred_points'][idx]
        target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)

        loss_bbox = F.mse_loss(src_points, target_points, reduction='none')
        return {'loss_point': loss_bbox.sum() / num_points}

    def _get_src_permutation_idx(self, indices):
        # Flatten per-image prediction indices into (batch_idx, src_idx) pairs.
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        # Flatten per-image target indices into (batch_idx, tgt_idx) pairs.
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx

    def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
        loss_map = {
            'labels': self.loss_labels,
            'points': self.loss_points,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_points, **kwargs)

    def forward(self, outputs, targets):
        """Match predictions to targets, then accumulate every configured loss."""
        output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}

        indices1 = self.matcher(output1, targets)

        # Normalise by the (world-averaged) number of ground-truth points.
        num_points = sum(len(t["labels"]) for t in targets)
        num_points = torch.as_tensor([num_points], dtype=torch.float,
                                     device=next(iter(output1.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_points)
        num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()

        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))
        return losses


def build(args, training=False):
    """Build the P2PNet model (and, when training, its criterion)."""
    # treats persons as a single class
    num_classes = 1

    backbone = build_backbone(args)
    model = P2PNet(backbone, args['row'], args['line'])
    if not training:
        return model

    weight_dict = {'loss_ce': 1, 'loss_points': args['point_loss_coef']}
    losses = ['labels', 'points']
    matcher = build_matcher_crowd(args)
    criterion = SetCriterion_Crowd(num_classes,
                                   matcher=matcher, weight_dict=weight_dict,
                                   eos_coef=args['eos_coef'], losses=losses)
    return model, criterion
All Rights Reserved +""" +Mostly copy-paste from torchvision references. +""" +import torch +import torch.nn as nn + + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +model_urls = { + 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth', + 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth', + 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', + 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', + 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth', + 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth', + 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth', + 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth', +} + +model_paths = { + 'vgg16_bn': '../weights/pth/AIlib2/DenseCrowd/vgg16_bn-6c64b313.pth', +} + + +class VGG(nn.Module): + + def __init__(self, features, num_classes=1000, init_weights=True): + super(VGG, self).__init__() + self.features = features + self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + if init_weights: + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def make_layers(cfg, batch_norm=False, sync=False): + layers = [] + 
in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + if sync: + print('use sync backbone') + layers += [conv2d, nn.SyncBatchNorm(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + +cfgs = { + 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +def _vgg(arch, cfg, batch_norm, pretrained, progress, sync=False, **kwargs): + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm, sync=sync), **kwargs) + if pretrained: + state_dict = torch.load(model_paths[arch]) + model.load_state_dict(state_dict) + return model + + +def vgg11(pretrained=False, progress=True, **kwargs): + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs) + + +def vgg11_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11_bn', 'A', True, 
pretrained, progress, **kwargs) + + +def vgg13(pretrained=False, progress=True, **kwargs): + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs) + + +def vgg13_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs) + + +def vgg16(pretrained=False, progress=True, **kwargs): + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs) + + +def vgg16_bn(pretrained=False, progress=True, sync=False, **kwargs): + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16_bn', 'D', True, pretrained, progress, sync=sync, **kwargs) + + +def vgg19(pretrained=False, progress=True, **kwargs): + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model 
pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs) + + +def vgg19_bn(pretrained=False, progress=True, **kwargs): + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs) diff --git a/readme.md b/readme.md new file mode 100644 index 0000000..95a237b --- /dev/null +++ b/readme.md @@ -0,0 +1,129 @@ +2.0--变化如下: + 每个模型都要采用TRT模型 +2023.1.17 + 1.增加检测模型(countryRoad)--乡间道路检测---类别包括:违法种植 + 2.增加钓鱼游泳模型(AnglerSwimmer)--类别包括:钓鱼、游泳 + 3.更新车辆检测模型--增加网络公开卡口数据 + 4.更新高速路里的公路检测分割模型--增加乡间小路的数据 +2023.1.29 + 1.增加模型对4080的TRT兼容 +2023.1.31 + 1.增加forest2业务,单独模型,类别包括:"林斑","病死树","人","火焰","烟雾" +2023.1.20 + 1.高速公路模型,增加车辆、行人检测。命名highWay2,只检测路上的目标,非路上的目标排除 +2023.3.07 + 1.把所有模型都增A10显卡版本一份 +2023.4.02 + 1.增加高速事故的检测 + +2023.4.11 + 1.增加OCR en模型 + 2.修改把事故检测V2添加到highWay2 + +2023.4.22-23 + 1.更新事故检测模型,修复BUG。事故不能单独检测,一定要和车辆一起检测。 + 2.增加倾斜船只检测模型,ship2。 + 3.开放分割结果的轮廓画框接口。用户决定是否画道路、水轮廓。line169:'segLineShow':True,来控制。 + +2023.5.5 + 1.增加“chanelEmerency”河道落水人员检测 + 2.增加OCR2模型,换了新的CRNN识别模型。 +2023.6.9 + 1.更新张建川新的河道分割模型,放在业务“river2” +2023.7.3 + 1.添加城管项目,纯检测“车辆”、“垃圾”,业务名称"cityMangement" + +2023.7.10 + 1.demo.py中添加drowing,落水人员检测。 + 2.demo.py中添加noParking 1.0版本,不成熟,后面还要更新。 + 3.demo3.0.py中对'river', 'highWay2','noParking','drowning','forest2','vehicle','pedestrian','smogfire' , 'AnglerSwimmer','channelEmergency', 'countryRoad','cityMangement',增加跟踪功能。 + +2023.7.17 + 1.demo.py,demo3.0.py 的cityMangement模型增加“流动商贩”类别,修改后的模型检测类别是:“车辆”、“垃圾”、“流动商贩” + +2023.7.28 + 1.demo.py 增加了illParking,城市违停车辆检测,检测模型输入三类(车、T角点,L角点),后处理变成“违停”.综合起来,在检测模型中有四类输出(车、T角点,L角点,违停 ) + 但是最终输出只有“违停”,类别好"3",其它车辆等不会输出。 
+2023.08.07 + 1.调整输出DSP的数据格式,demo.py和demo3.0.py都变。 + 矩形框(cls,x0,y0,x1,y1,score)-->(x0,y0,x1,y1,score,cls),倾斜框变为 [ (x0,y0),(x1,y1),(x2,y2),(x3,y3),score,cls] + 2.完成切斜目标的跟踪。 + +2023.08.09 + 1.更新highWay2代码,修改输入参数,增加vehicleFlag,distanceFlag ,二者都为False时检测和分割混合的后处理最快。 + 实测2.png,检测和分割混合预处理时间如下: + vehicleFlag distanceFlag time(ms) + True True 2.55 + True False 2.09 + False True 1.56 + False False 1.09 + 2.更新了Noparking(高速路车辆检测,0-常规车辆,1-应急车道上的车),规范了代码编写。 +2023.08.14 + 1.修复highway2 vehicleFlag,distanceFlag 都为false时的bug +2023.08.20 + 1.增加cityMangement2 模型,包括了替代原来的的cityMangement和illParking,实现“车辆”、“垃圾”、“流动商贩”、“违停”,四个类别。 + 2.修改架构,segmodel由原来的demo中加载,改为在demo加载segmodel类,实现trt,pth统一加载,用segmodel.eval()实现统一预测。 + 该类别不仅适用于分类,还适用于dmpr模型,方便后续适用其它类别模型。 +2023.08.22 + 1.增加cityRoad模型,单一检测模型,"护栏","交通标志","非交通标志","锥桶","水马",其中"施工“第4,第5类别合并都叫"锥桶","水马",名称相同. + 2.对cityMangement2中yolo和dmpr融合处理debug。 +2023.08.23 + 1.优化highWay2.0中yolo和stdc融合处理,使得分割模型输入的(640,360)-->(1920,1080)时候,时间增长不明显约为4.0ms + 同时优化stdc的预处理部分改为torch处理,取代原来的numpy处理,现在两种尺寸的时间如下: + 分割预处理 推理 分割检测融合处理 + 640-360 2.0 1.9 1.29 + 1920-1080 6.5 2.6 0.86 +2023.08.29 + 1.更新drowning中的分割模型。 + 2.highWay2中添加了1920X1080的分割trt模型 +2023.09.07 + 1.更新了cityRoad的检测模型 +2023.09.15 + 1.demo3.0.py中增加了illparking的跟踪模型。这个模型不怎么用,只检测“违停”,和cityManget2里的功能重叠。 + 2.cityMangement2中违停dmpr模型更新,换成了backbone为yolov5s的模型,且检测yolov5模型权重也更新了。 +2023.09.21 + 1.增加分类别置信度过滤参数。分成两次过滤,第一采用原来的conf_threshold 对所有类别过滤。第二次采用score_byClass分类别过滤。 + score_byClass={‘0’:0.1,'0':0.2},是字典形式,“类别”:得分阈值 +2023.10.16 + 1.更新cityRoad的模型权重 +2023.10.20 + 1.增加crowdCounting模型,统计图片中的人群数量,返回的是人群的坐标。 + 2.增进trtUtils2 ,里面包括了动态onnx,trt模型推理使用方法。 +2023.11.06 + 1.增加cityMangement3,和cityMangement2一样的检测目标。但用了三个模型。 + 2.所有的demo3.0.py采用模型列表的方式输入参数 + 3.从cityMangement3 没有更新A100的trt文件和权重,因为云端A100服务器不存在了 +2023.11.17 + 1.修改stdc网络,增加了pth模式下,动态输入模型。Trt等其他方式不支持动态。 + 2.增加crackMeasure模型,返回值ret,timeInfos,其中ret为:[[ x0,y0,x1,y1,score,class,裂缝长度,平均宽度,最大宽度,最小宽度],[...],[...]。 + 3.Debug 跟踪模型中一条跟踪链上,历史框上有不同的类别, 修正为同一类别 + +2023.11.27 + 
1.更新cityMangement3中的dmpr,stdc模型权重 +2023.11.30 + 1.更新highWay2的yolov5权重,增加了类别“影子”,原来的事故调到最后一个。现在的顺序是:"行人","车辆","裂缝","裂缝","修补","裂缝","坑槽","裂缝","积水",“影子”,"事故" +2023.12.02 + 1.更新更新cityMangement3中的dmpr,yolov5模型权重 + 2.DMPR模型阈值改为0.1d(mpr_thresh) +2023.12.13 + 1.添加单独的“坑槽”业务,名称"pothole",只有一个检测模型, +2023.12.27 + 1.增加ocr2里面的中文单独的识别模型。 + 2.调整crackMeasure模型中增加两个参数。 + 3.增加channel2业务,采用检测模型识别"国旗","浮标","船名","船只",并对"船名"调用ocr2。 + 除返回格式和过去一样,"船名"对应目标的list末尾加上了ocr结果(原始是[x0,y0,x1,y1,score,cls]--->[x0,y0,x1,y1,score,cls,ocr) +2024.1.26 + 1.增加“riverT”业务,和“river2”所有参数都相同,出去yolov5.pt是定制的。 +2024.2.27 + 1.临时修改smogfire,改变了labelnames.json,及模型权重文件pt,trt文件。i +2024.03.14 + 1.channel2业务增加“未悬挂国旗船只类别”,序号为4,意为:检测到船只,但没有悬挂国旗。同时该船只不再被标为“船只”,其实现过程是通过channel2postUtils.py实现。 + 2.channel2在demo.py的样例里,增加了参数,注意对应修改。 + 3.channel2的检测权重也更新了。 +2024.06.04 + 1.修改了城管模型“cityMangement3”中的yolov5检测模型,增加了对摊贩的识别 +2024.06.25 + 1.修改了forest2模型的yolov5权重,增加了类别“人群”,现在的类别是 "林斑", "病死树", "行人", "火焰", "烟雾","人群" + 2.在yolov5模型中增加了“云朵”类别,减少”烟雾“的误识别,但”云朵“并未输出,在后处理的时候就已经过滤了。 + 3.增加了后处理函数,在“行人”的基础上,判断他们之间的距离,群定是否是人群。主要有两个参数: + 'crowdThreshold':判断是否是人群时人的数量,'distancePersonScale':人与人之间的距离/人的身高 diff --git a/segutils/GPUtils.py b/segutils/GPUtils.py new file mode 100644 index 0000000..72d8088 --- /dev/null +++ b/segutils/GPUtils.py @@ -0,0 +1,501 @@ +#@@ -1,43 +1,43 @@ +# GPUtil - GPU utilization +# +# A Python module for programmically getting the GPU utilization from NVIDA GPUs using nvidia-smi +# +# Author: Anders Krogh Mortensen (anderskm) +# Date: 16 January 2017 +# Web: https://github.com/anderskm/gputil +# +# LICENSE +# +# MIT License +# +# Copyright (c) 2017 anderskm +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to 
whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from subprocess import Popen, PIPE +from distutils import spawn +import os +import math +import random +import time +import sys +import platform +import subprocess +import numpy as np + + +__version__ = '1.4.0' +class GPU: + def __init__(self, ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu): + self.id = ID + self.uuid = uuid + self.load = load + self.memoryUtil = float(memoryUsed)/float(memoryTotal) + self.memoryTotal = memoryTotal + self.memoryUsed = memoryUsed + self.memoryFree = memoryFree + self.driver = driver + self.name = gpu_name + self.serial = serial + self.display_mode = display_mode + self.display_active = display_active + self.temperature = temp_gpu + + def __str__(self): + return str(self.__dict__) + + +class GPUProcess: + def __init__(self, pid, processName, gpuId, gpuUuid, gpuName, usedMemory, + uid, uname): + self.pid = pid + self.processName = processName + self.gpuId = gpuId + self.gpuUuid = gpuUuid + self.gpuName = gpuName + self.usedMemory = usedMemory + self.uid = uid + self.uname = uname + + def __str__(self): + return str(self.__dict__) + +def safeFloatCast(strNumber): + try: + number = float(strNumber) + except ValueError: + number = float('nan') + return number + +#def 
def getNvidiaSmiCmd():
    """Return the nvidia-smi command/path for the current platform."""
    if platform.system() == "Windows":
        # If the platform is Windows and nvidia-smi could not be found from
        # the environment path, fall back to the default install location.
        nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive']
    else:
        nvidia_smi = "nvidia-smi"
    return nvidia_smi


def getGPUs():
    """Query nvidia-smi and return a list of GPU objects (empty on failure).

    BUG FIXES vs. the original:
    - nvidia-smi was launched twice (a dead Popen followed by subprocess.run);
      one invocation is enough.
    - a dead 12-iteration parsing loop was immediately overwritten by direct
      assignments; removed.
    - output was split on os.linesep, but subprocess text output uses '\n'
      even on Windows (universal newlines); use splitlines().
    - the bare `except:` is narrowed to subprocess launch failures.
    """
    nvidia_smi = getNvidiaSmiCmd()
    try:
        p = subprocess.run([
            nvidia_smi,
            "--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,"
            "driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu",
            "--format=csv,noheader,nounits"
        ], stdout=subprocess.PIPE, encoding='utf8')
        stdout = p.stdout
    except (OSError, subprocess.SubprocessError):
        return []
    GPUs = []
    for line in stdout.splitlines():
        if not line.strip():
            continue
        vals = line.split(', ')
        deviceIds = int(vals[0])
        uuid = vals[1]
        gpuUtil = safeFloatCast(vals[2]) / 100
        memTotal = safeFloatCast(vals[3])
        memUsed = safeFloatCast(vals[4])
        memFree = safeFloatCast(vals[5])
        driver = vals[6]
        gpu_name = vals[7]
        serial = vals[8]
        display_active = vals[9]
        display_mode = vals[10]
        temp_gpu = safeFloatCast(vals[11])
        GPUs.append(GPU(deviceIds, uuid, gpuUtil, memTotal, memUsed, memFree,
                        driver, gpu_name, serial, display_mode, display_active, temp_gpu))
    return GPUs  # (deviceIds, gpuUtil, memUtil)


def getGPUProcesses():
    """Get all gpu compute processes."""
    # Rebuild the uuid -> id map (best effort; tolerate missing nvidia-smi).
    global gpuUuidToIdMap
    gpuUuidToIdMap = {}
    try:
        gpus = getGPUs()
        for gpu in gpus:
            gpuUuidToIdMap[gpu.uuid] = gpu.id
        del gpus
    except Exception:
        pass

    nvidia_smi = getNvidiaSmiCmd()
    try:
        p = subprocess.run([
            nvidia_smi,
            "--query-compute-apps=pid,process_name,gpu_uuid,gpu_name,used_memory",
            "--format=csv,noheader,nounits"
        ], stdout=subprocess.PIPE, encoding='utf8')
        stdout = p.stdout
    except (OSError, subprocess.SubprocessError):
        return []
    processes = []
    for line in stdout.splitlines():
        if not line.strip():
            continue
        vals = line.split(', ')
        pid = int(vals[0])
        processName = vals[1]
        gpuUuid = vals[2]
        gpuName = vals[3]
        usedMemory = safeFloatCast(vals[4])
        # BUG FIX: plain indexing raised KeyError for unknown UUIDs, which
        # made the old `if gpuId is None: gpuId = -1` fallback unreachable.
        gpuId = gpuUuidToIdMap.get(gpuUuid)
        if gpuId is None:
            gpuId = -1

        # Resolve uid/uname of the pid's owner (POSIX `ps`; best effort).
        try:
            p = subprocess.run(['ps', f'-p{pid}', '-oruid=,ruser='],
                               stdout=subprocess.PIPE, encoding='utf8')
            uid, uname = p.stdout.split()
            uid = int(uid)
        except Exception:
            uid, uname = -1, ''

        processes.append(GPUProcess(pid, processName, gpuId, gpuUuid,
                                    gpuName, usedMemory, uid, uname))
    return processes
def getAvailable(order='first', limit=1, maxLoad=0.5, maxMemory=0.5, memoryFree=0,
                 includeNan=False, excludeID=None, excludeUUID=None):
    """Return up to `limit` available GPU ids.

    order: 'first' (lowest id, default) | 'last' | 'random' | 'load'
           (lowest load) | 'memory' (most free memory fraction).
    BUG FIX: excludeID/excludeUUID used mutable [] defaults shared across
    calls; they now default to None and are normalized locally (same
    observable behavior for every caller).
    """
    excludeID = [] if excludeID is None else excludeID
    excludeUUID = [] if excludeUUID is None else excludeUUID
    # Get device IDs, load and memory usage, then filter to available ones.
    GPUs = getGPUs()
    GPUavailability = getAvailability(GPUs, maxLoad=maxLoad, maxMemory=maxMemory,
                                      memoryFree=memoryFree, includeNan=includeNan,
                                      excludeID=excludeID, excludeUUID=excludeUUID)
    availAbleGPUindex = [idx for idx in range(0, len(GPUavailability)) if (GPUavailability[idx] == 1)]
    GPUs = [GPUs[g] for g in availAbleGPUindex]
    # Sort available GPUs according to the order argument.
    if order == 'first':
        GPUs.sort(key=lambda x: float('inf') if math.isnan(x.id) else x.id, reverse=False)
    elif order == 'last':
        GPUs.sort(key=lambda x: float('-inf') if math.isnan(x.id) else x.id, reverse=True)
    elif order == 'random':
        GPUs = [GPUs[g] for g in random.sample(range(0, len(GPUs)), len(GPUs))]
    elif order == 'load':
        GPUs.sort(key=lambda x: float('inf') if math.isnan(x.load) else x.load, reverse=False)
    elif order == 'memory':
        GPUs.sort(key=lambda x: float('inf') if math.isnan(x.memoryUtil) else x.memoryUtil, reverse=False)
    # Cap at `limit` and return the device ids.
    GPUs = GPUs[0:min(limit, len(GPUs))]
    return [gpu.id for gpu in GPUs]


def getAvailability(GPUs, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False,
                    excludeID=None, excludeUUID=None):
    """Return a parallel 0/1 list marking which GPUs satisfy all thresholds.

    A GPU counts as available when it has at least `memoryFree` MB free,
    load < maxLoad, memory utilization < maxMemory (NaN values pass only
    when includeNan=True), and it is not excluded by id or uuid.
    """
    excludeID = [] if excludeID is None else excludeID
    excludeUUID = [] if excludeUUID is None else excludeUUID
    GPUavailability = [
        1 if (gpu.memoryFree >= memoryFree)
             and (gpu.load < maxLoad or (includeNan and math.isnan(gpu.load)))
             and (gpu.memoryUtil < maxMemory or (includeNan and math.isnan(gpu.memoryUtil)))
             and (gpu.id not in excludeID) and (gpu.uuid not in excludeUUID)
        else 0
        for gpu in GPUs
    ]
    return GPUavailability


def getFirstAvailable(order='first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900,
                      verbose=False, includeNan=False, excludeID=None, excludeUUID=None):
    """Poll up to `attempts` times (sleeping `interval` seconds between tries)
    for an available GPU; raise RuntimeError if none is found.

    BUG FIX: `available` used to be unbound (NameError) when attempts < 1;
    it is now initialized so the RuntimeError path is reached instead.
    """
    available = []
    for i in range(attempts):
        if verbose:
            print('Attempting (' + str(i + 1) + '/' + str(attempts) + ') to locate available GPU.')
        # Get first available GPU.
        available = getAvailable(order=order, limit=1, maxLoad=maxLoad, maxMemory=maxMemory,
                                 includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID)
        # If an available GPU was found, stop polling.
        if available:
            if verbose:
                print('GPU ' + str(available) + ' located!')
            break
        # If this is not the last attempt, sleep for 'interval' seconds.
        if i != attempts - 1:
            time.sleep(interval)
    if not available:
        raise RuntimeError('Could not find an available GPU after ' + str(attempts)
                           + ' attempts with ' + str(interval) + ' seconds interval.')
    return available
def showUtilization(all=False, attrList=None, useOldCode=False):
    """Print a utilization table for all GPUs.

    all=True prints the full attribute set; attrList may override the
    displayed attribute groups; useOldCode=True prints the legacy layout.

    BUG FIX: the attrList parameter was unconditionally overwritten, so a
    caller-supplied attrList was silently ignored; it is now honored and the
    defaults apply only when attrList is None. Python-2 `unicode` handling
    was removed (this module already uses f-strings and is Python-3 only).
    """
    GPUs = getGPUs()
    if useOldCode:
        if all:
            print(' ID | Name | Serial | UUID || GPU util. | Memory util. || Memory total | Memory used | Memory free || Display mode | Display active |')
            print('------------------------------------------------------------------------------------------------------------------------------')
            for gpu in GPUs:
                print(' {0:2d} | {1:s} | {2:s} | {3:s} || {4:3.0f}% | {5:3.0f}% || {6:.0f}MB | {7:.0f}MB | {8:.0f}MB || {9:s} | {10:s}'.format(
                    gpu.id, gpu.name, gpu.serial, gpu.uuid, gpu.load * 100, gpu.memoryUtil * 100,
                    gpu.memoryTotal, gpu.memoryUsed, gpu.memoryFree, gpu.display_mode, gpu.display_active))
        else:
            print(' ID GPU MEM')
            print('--------------')
            for gpu in GPUs:
                print(' {0:2d} {1:3.0f}% {2:3.0f}%'.format(gpu.id, gpu.load * 100, gpu.memoryUtil * 100))
        return

    if attrList is None:
        if all:
            attrList = [[{'attr': 'id', 'name': 'ID'},
                         {'attr': 'name', 'name': 'Name'},
                         {'attr': 'serial', 'name': 'Serial'},
                         {'attr': 'uuid', 'name': 'UUID'}],
                        [{'attr': 'temperature', 'name': 'GPU temp.', 'suffix': 'C', 'transform': lambda x: x, 'precision': 0},
                         {'attr': 'load', 'name': 'GPU util.', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0},
                         {'attr': 'memoryUtil', 'name': 'Memory util.', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0}],
                        [{'attr': 'memoryTotal', 'name': 'Memory total', 'suffix': 'MB', 'precision': 0},
                         {'attr': 'memoryUsed', 'name': 'Memory used', 'suffix': 'MB', 'precision': 0},
                         {'attr': 'memoryFree', 'name': 'Memory free', 'suffix': 'MB', 'precision': 0}],
                        [{'attr': 'display_mode', 'name': 'Display mode'},
                         {'attr': 'display_active', 'name': 'Display active'}]]
        else:
            attrList = [[{'attr': 'id', 'name': 'ID'},
                         {'attr': 'load', 'name': 'GPU', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0},
                         {'attr': 'memoryUtil', 'name': 'MEM', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0}]]

    headerString = ''
    GPUstrings = [''] * len(GPUs)
    for attrGroup in attrList:
        for attrDict in attrGroup:
            headerString += '| ' + attrDict['name'] + ' '
            headerWidth = len(attrDict['name'])
            minWidth = headerWidth

            attrPrecision = '.' + str(attrDict['precision']) if 'precision' in attrDict else ''
            attrSuffix = str(attrDict['suffix']) if 'suffix' in attrDict else ''
            attrTransform = attrDict['transform'] if 'transform' in attrDict else lambda x: x
            # First pass: find the widest rendered value for this column.
            for gpu in GPUs:
                attr = attrTransform(getattr(gpu, attrDict['attr']))
                if isinstance(attr, float):
                    attrStr = ('{0:' + attrPrecision + 'f}').format(attr)
                elif isinstance(attr, int):
                    attrStr = '{0:d}'.format(attr)
                elif isinstance(attr, str):
                    attrStr = attr
                else:
                    raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
                attrStr += attrSuffix
                minWidth = max(minWidth, len(attrStr))

            headerString += ' ' * max(0, minWidth - headerWidth)
            minWidthStr = str(minWidth - len(attrSuffix))
            # Second pass: render each GPU's value padded to the column width.
            for gpuIdx, gpu in enumerate(GPUs):
                attr = attrTransform(getattr(gpu, attrDict['attr']))
                if isinstance(attr, float):
                    attrStr = ('{0:' + minWidthStr + attrPrecision + 'f}').format(attr)
                elif isinstance(attr, int):
                    attrStr = ('{0:' + minWidthStr + 'd}').format(attr)
                elif isinstance(attr, str):
                    attrStr = ('{0:' + minWidthStr + 's}').format(attr)
                else:
                    raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
                attrStr += attrSuffix
                GPUstrings[gpuIdx] += '| ' + attrStr + ' '

    headerString += '|'
    GPUstrings = [s + '|' for s in GPUstrings]
    print(headerString)
    print('-' * len(headerString))
    for GPUstring in GPUstrings:
        print(GPUstring)


# Generate gpu uuid -> id map at import time (best effort; a machine without
# nvidia-smi simply leaves the map empty).
gpuUuidToIdMap = {}
try:
    gpus = getGPUs()
    for gpu in gpus:
        gpuUuidToIdMap[gpu.uuid] = gpu.id
    del gpus
except Exception:
    pass


def getGPUInfos():
    """Return the list of GPU objects, each augmented with a `.process` list.

    Each GPU has attributes: 'id', 'load', 'memoryFree', 'memoryTotal',
    'memoryUsed', 'memoryUtil', 'name', 'serial', 'temperature', 'uuid',
    'process'. Each entry of `process` is a GPUProcess with 'gpuId',
    'gpuName', 'gpuUuid', 'gpuid', 'pid', 'processName', 'uid', 'uname',
    'usedMemory'.
    """
    gpus = getGPUs()
    gpuUuidToIdMap = {}
    for gpu in gpus:
        gpuUuidToIdMap[gpu.uuid] = gpu.id
        gpu.process = []
    indexx = [x.id for x in gpus]

    process = getGPUProcesses()
    for pre in process:
        pre.gpuid = gpuUuidToIdMap[pre.gpuUuid]
        gpuId = indexx.index(pre.gpuid)
        gpus[gpuId].process.append(pre)
    return gpus


def get_available_gpu(gpuStatus):
    """Return the id of the first GPU with no running process, else None."""
    cuda = None
    for gpus in gpuStatus:
        if len(gpus.process) == 0:
            cuda = gpus.id
            return cuda
    return cuda


def get_whether_gpuProcess():
    """Return True iff no GPU currently has any compute process.

    (The original docstring was copy-pasted from get_available_gpu and
    described a different return value.)
    """
    gpuStatus = getGPUInfos()
    gpuProcess = True
    for gpus in gpuStatus:
        if len(gpus.process) != 0:
            gpuProcess = False
    return gpuProcess


def get_offlineProcess_gpu(gpuStatus, pidInfos):
    """Return the GPUs that host no 'onLine'-typed process per pidInfos."""
    gpu_onLine = []
    for gpu in gpuStatus:
        for gpuProcess in gpu.process:
            pid = gpuProcess.pid
            if pid in pidInfos.keys():
                pidType = pidInfos[pid]['type']
                if pidType == 'onLine':
                    gpu_onLine.append(gpu)
    gpu_offLine = set(gpuStatus) - set(gpu_onLine)
    return list(gpu_offLine)
+def arrange_offlineProcess(gpuStatus,pidInfos,modelMemory=1500): + cudaArrange=[] + gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos) + for gpu in gpu_offLine: + leftMemory = gpu.memoryTotal*0.9 - gpu.memoryUsed + modelCnt = int(leftMemory// modelMemory) + + cudaArrange.extend( [gpu.id] * modelCnt ) + return cudaArrange +def get_potential_gpu(gpuStatus,pidInfos): + ###所有GPU上都有计算。需要为“在线任务”空出一块显卡。 + ###step1:查看所有显卡上是否有“在线任务” + + gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos) + if len(gpu_offLine) == 0 : + return False + + ###step2,找出每张显卡上离线进程的数目 + offLineCnt = [ len(gpu.process) for gpu in gpu_offLine ] + minCntIndex =offLineCnt.index( min(offLineCnt)) + + pids = [x.pid for x in gpu_offLine[minCntIndex].process] + return {'cuda':gpu_offLine[minCntIndex].id,'pids':pids } +if __name__=='__main__': + #pres = getGPUProcesses() + #print('###line404:',pres) + gpus = getGPUs() + for gpu in gpus: + gpuUuidToIdMap[gpu.uuid] = gpu.id + print(gpu) + print(gpuUuidToIdMap) + pres = getGPUProcesses() + print('###line404:',pres) + for pre in pres: + print('#'*20) + for ken in ['gpuName','gpuUuid','pid','processName','uid','uname','usedMemory' ]: + print(ken,' ',pre.__getattribute__(ken )) + print(' ') + + diff --git a/segutils/core/__init__.py b/segutils/core/__init__.py new file mode 100644 index 0000000..453f410 --- /dev/null +++ b/segutils/core/__init__.py @@ -0,0 +1 @@ +from . import nn, models, utils, data \ No newline at end of file diff --git a/segutils/core/data/__init__.py b/segutils/core/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/data/dataloader/__init__.py b/segutils/core/data/dataloader/__init__.py new file mode 100644 index 0000000..b22f962 --- /dev/null +++ b/segutils/core/data/dataloader/__init__.py @@ -0,0 +1,23 @@ +""" +This module provides data loaders and transformers for popular vision datasets. 
+""" +from .mscoco import COCOSegmentation +from .cityscapes import CitySegmentation +from .ade import ADE20KSegmentation +from .pascal_voc import VOCSegmentation +from .pascal_aug import VOCAugSegmentation +from .sbu_shadow import SBUSegmentation + +datasets = { + 'ade20k': ADE20KSegmentation, + 'pascal_voc': VOCSegmentation, + 'pascal_aug': VOCAugSegmentation, + 'coco': COCOSegmentation, + 'citys': CitySegmentation, + 'sbu': SBUSegmentation, +} + + +def get_segmentation_dataset(name, **kwargs): + """Segmentation Datasets""" + return datasets[name.lower()](**kwargs) diff --git a/segutils/core/data/dataloader/ade.py b/segutils/core/data/dataloader/ade.py new file mode 100644 index 0000000..522ecbd --- /dev/null +++ b/segutils/core/data/dataloader/ade.py @@ -0,0 +1,172 @@ +"""Pascal ADE20K Semantic Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class ADE20KSegmentation(SegmentationDataset): + """ADE20K Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to ADE20K folder. 
class ADE20KSegmentation(SegmentationDataset):
    """ADE20K Semantic Segmentation Dataset.

    Parameters
    ----------
    root : string
        Path to ADE20K folder. Default is '../datasets/ade'
        (the docstring previously advertised './datasets/ade', which did not
        match the actual default below).
    split: string
        'train', 'val' or 'test'
    transform : callable, optional
        A function that transforms the image

    Examples
    --------
    >>> from torchvision import transforms
    >>> import torch.utils.data as data
    >>> # Transforms for Normalization
    >>> input_transform = transforms.Compose([
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
    >>> ])
    >>> # Create Dataset
    >>> trainset = ADE20KSegmentation(split='train', transform=input_transform)
    >>> # Create Training Loader
    >>> train_data = data.DataLoader(trainset, 4, shuffle=True, num_workers=4)
    """
    BASE_DIR = 'ADEChallengeData2016'
    NUM_CLASS = 150

    def __init__(self, root='../datasets/ade', split='test', mode=None, transform=None, **kwargs):
        super(ADE20KSegmentation, self).__init__(root, split, mode, transform, **kwargs)
        root = os.path.join(root, self.BASE_DIR)
        assert os.path.exists(root), "Please setup the dataset using ../datasets/ade20k.py"
        self.images, self.masks = _get_ade20k_pairs(root, split)
        assert (len(self.images) == len(self.masks))
        if len(self.images) == 0:
            raise RuntimeError("Found 0 images in subfolders of:" + root + "\n")
        print('Found {} images in the folder {}'.format(len(self.images), root))

    def __getitem__(self, index):
        """Return (img, filename) in 'test' mode, else (img, mask, filename)."""
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            img = self._img_transform(img)
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = Image.open(self.masks[index])
        # Synchronized transform: image and mask are augmented together.
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # General resize, normalize and to Tensor.
        if self.transform is not None:
            img = self.transform(img)
        return img, mask, os.path.basename(self.images[index])

    def _mask_transform(self, mask):
        # ADE20K labels are 1-based on disk; shift to 0-based class indices.
        return torch.LongTensor(np.array(mask).astype('int32') - 1)

    def __len__(self):
        return len(self.images)

    @property
    def pred_offset(self):
        return 1

    @property
    def classes(self):
        """Category names."""
        return ("wall", "building, edifice", "sky", "floor, flooring", "tree",
                "ceiling", "road, route", "bed", "windowpane, window", "grass",
                "cabinet", "sidewalk, pavement",
                "person, individual, someone, somebody, mortal, soul",
                "earth, ground", "door, double door", "table", "mountain, mount",
                "plant, flora, plant life", "curtain, drape, drapery, mantle, pall",
                "chair", "car, auto, automobile, machine, motorcar",
                "water", "painting, picture", "sofa, couch, lounge", "shelf",
                "house", "sea", "mirror", "rug, carpet, carpeting", "field", "armchair",
                "seat", "fence, fencing", "desk", "rock, stone", "wardrobe, closet, press",
                "lamp", "bathtub, bathing tub, bath, tub", "railing, rail", "cushion",
                "base, pedestal, stand", "box", "column, pillar", "signboard, sign",
                "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink",
                "skyscraper", "fireplace, hearth, open fireplace", "refrigerator, icebox",
                "grandstand, covered stand", "path", "stairs, steps", "runway",
                "case, display case, showcase, vitrine",
                "pool table, billiard table, snooker table", "pillow",
                "screen door, screen", "stairway, staircase", "river", "bridge, span",
                "bookcase", "blind, screen", "coffee table, cocktail table",
                "toilet, can, commode, crapper, pot, potty, stool, throne",
                "flower", "book", "hill", "bench", "countertop",
                "stove, kitchen stove, range, kitchen range, cooking stove",
                "palm, palm tree", "kitchen island",
                "computer, computing machine, computing device, data processor, "
                "electronic computer, information processing system",
                "swivel chair", "boat", "bar", "arcade machine",
                "hovel, hut, hutch, shack, shanty",
                "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, "
                "motorcoach, omnibus, passenger vehicle",
                "towel", "light, light source", "truck, motortruck", "tower",
                "chandelier, pendant, pendent", "awning, sunshade, sunblind",
                "streetlight, street lamp", "booth, cubicle, stall, kiosk",
                "television receiver, television, television set, tv, tv set, idiot "
                "box, boob tube, telly, goggle box",
                "airplane, aeroplane, plane", "dirt track",
                "apparel, wearing apparel, dress, clothes",
                "pole", "land, ground, soil",
                "bannister, banister, balustrade, balusters, handrail",
                "escalator, moving staircase, moving stairway",
                "ottoman, pouf, pouffe, puff, hassock",
                "bottle", "buffet, counter, sideboard",
                "poster, posting, placard, notice, bill, card",
                "stage", "van", "ship", "fountain",
                "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
                "canopy", "washer, automatic washer, washing machine",
                "plaything, toy", "swimming pool, swimming bath, natatorium",
                "stool", "barrel, cask", "basket, handbasket", "waterfall, falls",
                "tent, collapsible shelter", "bag", "minibike, motorbike", "cradle",
                "oven", "ball", "food, solid food", "step, stair", "tank, storage tank",
                "trade name, brand name, brand, marque", "microwave, microwave oven",
                "pot, flowerpot", "animal, animate being, beast, brute, creature, fauna",
                "bicycle, bike, wheel, cycle", "lake",
                "dishwasher, dish washer, dishwashing machine",
                "screen, silver screen, projection screen",
                "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase",
                "traffic light, traffic signal, stoplight", "tray",
                "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, "
                "dustbin, trash barrel, trash bin",
                "fan", "pier, wharf, wharfage, dock", "crt screen",
                "plate", "monitor, monitoring device", "bulletin board, notice board",
                "shower", "radiator", "glass, drinking glass", "clock", "flag")


# NOTE(review): _get_ade20k_pairs continues past the end of this chunk; only
# its visible opening is preserved so the remaining lines still attach.
def _get_ade20k_pairs(folder, mode='train'):
    img_paths = []
mask_paths = [] + if mode == 'train': + img_folder = os.path.join(folder, 'images/training') + mask_folder = os.path.join(folder, 'annotations/training') + else: + img_folder = os.path.join(folder, 'images/validation') + mask_folder = os.path.join(folder, 'annotations/validation') + for filename in os.listdir(img_folder): + basename, _ = os.path.splitext(filename) + if filename.endswith(".jpg"): + imgpath = os.path.join(img_folder, filename) + maskname = basename + '.png' + maskpath = os.path.join(mask_folder, maskname) + if os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask:', maskpath) + + return img_paths, mask_paths + + +if __name__ == '__main__': + train_dataset = ADE20KSegmentation() diff --git a/segutils/core/data/dataloader/cityscapes.py b/segutils/core/data/dataloader/cityscapes.py new file mode 100644 index 0000000..7d5de71 --- /dev/null +++ b/segutils/core/data/dataloader/cityscapes.py @@ -0,0 +1,137 @@ +"""Prepare Cityscapes dataset""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class CitySegmentation(SegmentationDataset): + """Cityscapes Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to Cityscapes folder. 
Default is './datasets/citys' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = CitySegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'cityscapes' + NUM_CLASS = 19 + + def __init__(self, root='../datasets/citys', split='train', mode=None, transform=None, **kwargs): + super(CitySegmentation, self).__init__(root, split, mode, transform, **kwargs) + # self.root = os.path.join(root, self.BASE_DIR) + assert os.path.exists(self.root), "Please setup the dataset using ../datasets/cityscapes.py" + self.images, self.mask_paths = _get_city_pairs(self.root, self.split) + assert (len(self.images) == len(self.mask_paths)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + self._key = np.array([-1, -1, -1, -1, -1, -1, + -1, -1, 0, 1, -1, -1, + 2, 3, 4, -1, -1, -1, + 5, -1, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, + -1, -1, 16, 17, 18]) + self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32') + + def _class_to_index(self, mask): + # assert the value + values = np.unique(mask) + for value in values: + assert (value in self._mapping) + index = np.digitize(mask.ravel(), self._mapping, right=True) + return self._key[index].reshape(mask.shape) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + if self.transform is not None: + img = 
self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.mask_paths[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + target = self._class_to_index(np.array(mask).astype('int32')) + return torch.LongTensor(np.array(target).astype('int32')) + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 0 + + +def _get_city_pairs(folder, split='train'): + def get_path_pairs(img_folder, mask_folder): + img_paths = [] + mask_paths = [] + for root, _, files in os.walk(img_folder): + for filename in files: + if filename.endswith('.png'): + imgpath = os.path.join(root, filename) + foldername = os.path.basename(os.path.dirname(imgpath)) + maskname = filename.replace('leftImg8bit', 'gtFine_labelIds') + maskpath = os.path.join(mask_folder, foldername, maskname) + if os.path.isfile(imgpath) and os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask or image:', imgpath, maskpath) + print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) + return img_paths, mask_paths + + if split in ('train', 'val'): + img_folder = os.path.join(folder, 'leftImg8bit/' + split) + mask_folder = os.path.join(folder, 'gtFine/' + split) + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + return img_paths, mask_paths + else: + assert split == 'trainval' + print('trainval set') + train_img_folder = os.path.join(folder, 'leftImg8bit/train') + train_mask_folder = os.path.join(folder, 
'gtFine/train') + val_img_folder = os.path.join(folder, 'leftImg8bit/val') + val_mask_folder = os.path.join(folder, 'gtFine/val') + train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder) + val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder) + img_paths = train_img_paths + val_img_paths + mask_paths = train_mask_paths + val_mask_paths + return img_paths, mask_paths + + +if __name__ == '__main__': + dataset = CitySegmentation() diff --git a/segutils/core/data/dataloader/lip_parsing.py b/segutils/core/data/dataloader/lip_parsing.py new file mode 100644 index 0000000..245beda --- /dev/null +++ b/segutils/core/data/dataloader/lip_parsing.py @@ -0,0 +1,90 @@ +"""Look into Person Dataset""" +import os +import torch +import numpy as np + +from PIL import Image +from core.data.dataloader.segbase import SegmentationDataset + + +class LIPSegmentation(SegmentationDataset): + """Look into person parsing dataset """ + + BASE_DIR = 'LIP' + NUM_CLASS = 20 + + def __init__(self, root='../datasets/LIP', split='train', mode=None, transform=None, **kwargs): + super(LIPSegmentation, self).__init__(root, split, mode, transform, **kwargs) + _trainval_image_dir = os.path.join(root, 'TrainVal_images') + _testing_image_dir = os.path.join(root, 'Testing_images') + _trainval_mask_dir = os.path.join(root, 'TrainVal_parsing_annotations') + if split == 'train': + _image_dir = os.path.join(_trainval_image_dir, 'train_images') + _mask_dir = os.path.join(_trainval_mask_dir, 'train_segmentations') + _split_f = os.path.join(_trainval_image_dir, 'train_id.txt') + elif split == 'val': + _image_dir = os.path.join(_trainval_image_dir, 'val_images') + _mask_dir = os.path.join(_trainval_mask_dir, 'val_segmentations') + _split_f = os.path.join(_trainval_image_dir, 'val_id.txt') + elif split == 'test': + _image_dir = os.path.join(_testing_image_dir, 'testing_images') + _split_f = os.path.join(_testing_image_dir, 'test_id.txt') + else: + raise 
RuntimeError('Unknown dataset split.') + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), 'r') as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + '.jpg') + assert os.path.isfile(_image) + self.images.append(_image) + if split != 'test': + _mask = os.path.join(_mask_dir, line.rstrip('\n') + '.png') + assert os.path.isfile(_mask) + self.masks.append(_mask) + + if split != 'test': + assert (len(self.images) == len(self.masks)) + print('Found {} {} images in the folder {}'.format(len(self.images), split, root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchronized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + + return img, mask, os.path.basename(self.images[index]) + + def __len__(self): + return len(self.images) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + return torch.from_numpy(target).long() + + @property + def classes(self): + """Category name.""" + return ('background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes', + 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt', + 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', + 'rightShoe') + + +if __name__ == '__main__': + dataset = LIPSegmentation(base_size=280, crop_size=256) \ No newline at end of file diff --git a/segutils/core/data/dataloader/mscoco.py b/segutils/core/data/dataloader/mscoco.py new file 
mode 100644 index 0000000..6e280c8 --- /dev/null +++ b/segutils/core/data/dataloader/mscoco.py @@ -0,0 +1,136 @@ +"""MSCOCO Semantic Segmentation pretraining for VOC.""" +import os +import pickle +import torch +import numpy as np + +from tqdm import trange +from PIL import Image +from .segbase import SegmentationDataset + + +class COCOSegmentation(SegmentationDataset): + """COCO Semantic Segmentation Dataset for VOC Pre-training. + + Parameters + ---------- + root : string + Path to ADE20K folder. Default is './datasets/coco' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = COCOSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, + 1, 64, 20, 63, 7, 72] + NUM_CLASS = 21 + + def __init__(self, root='../datasets/coco', split='train', mode=None, transform=None, **kwargs): + super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs) + # lazy import pycocotools + from pycocotools.coco import COCO + from pycocotools import mask + if split == 'train': + print('train set') + ann_file = os.path.join(root, 'annotations/instances_train2017.json') + ids_file = os.path.join(root, 'annotations/train_ids.mx') + self.root = os.path.join(root, 'train2017') + else: + print('val set') + ann_file = os.path.join(root, 'annotations/instances_val2017.json') + ids_file = os.path.join(root, 'annotations/val_ids.mx') + self.root = os.path.join(root, 'val2017') + self.coco = COCO(ann_file) + 
self.coco_mask = mask + if os.path.exists(ids_file): + with open(ids_file, 'rb') as f: + self.ids = pickle.load(f) + else: + ids = list(self.coco.imgs.keys()) + self.ids = self._preprocess(ids, ids_file) + self.transform = transform + + def __getitem__(self, index): + coco = self.coco + img_id = self.ids[index] + img_metadata = coco.loadImgs(img_id)[0] + path = img_metadata['file_name'] + img = Image.open(os.path.join(self.root, path)).convert('RGB') + cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id)) + mask = Image.fromarray(self._gen_seg_mask( + cocotarget, img_metadata['height'], img_metadata['width'])) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.ids[index]) + + def _mask_transform(self, mask): + return torch.LongTensor(np.array(mask).astype('int32')) + + def _gen_seg_mask(self, target, h, w): + mask = np.zeros((h, w), dtype=np.uint8) + coco_mask = self.coco_mask + for instance in target: + rle = coco_mask.frPyObjects(instance['Segmentation'], h, w) + m = coco_mask.decode(rle) + cat = instance['category_id'] + if cat in self.CAT_LIST: + c = self.CAT_LIST.index(cat) + else: + continue + if len(m.shape) < 3: + mask[:, :] += (mask == 0) * (m * c) + else: + mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8) + return mask + + def _preprocess(self, ids, ids_file): + print("Preprocessing mask, this will take a while." 
+ \ + "But don't worry, it only run once for each split.") + tbar = trange(len(ids)) + new_ids = [] + for i in tbar: + img_id = ids[i] + cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id)) + img_metadata = self.coco.loadImgs(img_id)[0] + mask = self._gen_seg_mask(cocotarget, img_metadata['height'], img_metadata['width']) + # more than 1k pixels + if (mask > 0).sum() > 1000: + new_ids.append(img_id) + tbar.set_description('Doing: {}/{}, got {} qualified images'. \ + format(i, len(ids), len(new_ids))) + print('Found number of qualified images: ', len(new_ids)) + with open(ids_file, 'wb') as f: + pickle.dump(new_ids, f) + return new_ids + + @property + def classes(self): + """Category names.""" + return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train', + 'tv') diff --git a/segutils/core/data/dataloader/pascal_aug.py b/segutils/core/data/dataloader/pascal_aug.py new file mode 100644 index 0000000..1cbe238 --- /dev/null +++ b/segutils/core/data/dataloader/pascal_aug.py @@ -0,0 +1,104 @@ +"""Pascal Augmented VOC Semantic Segmentation Dataset.""" +import os +import torch +import scipy.io as sio +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class VOCAugSegmentation(SegmentationDataset): + """Pascal VOC Augmented Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to VOCdevkit folder. 
Default is './datasets/voc' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]), + >>> ]) + >>> # Create Dataset + >>> trainset = VOCAugSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'VOCaug/dataset/' + NUM_CLASS = 21 + + def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs): + super(VOCAugSegmentation, self).__init__(root, split, mode, transform, **kwargs) + # train/val/test splits are pre-cut + _voc_root = os.path.join(root, self.BASE_DIR) + _mask_dir = os.path.join(_voc_root, 'cls') + _image_dir = os.path.join(_voc_root, 'img') + if split == 'train': + _split_f = os.path.join(_voc_root, 'trainval.txt') + elif split == 'val': + _split_f = os.path.join(_voc_root, 'val.txt') + else: + raise RuntimeError('Unknown dataset split: {}'.format(split)) + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), "r") as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg") + assert os.path.isfile(_image) + self.images.append(_image) + _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".mat") + assert os.path.isfile(_mask) + self.masks.append(_mask) + + assert (len(self.images) == len(self.masks)) + print('Found {} images in the folder {}'.format(len(self.images), _voc_root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + target = self._load_mat(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, target = 
self._sync_transform(img, target) + elif self.mode == 'val': + img, target = self._val_sync_transform(img, target) + else: + raise RuntimeError('unknown mode for dataloader: {}'.format(self.mode)) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, target, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + return torch.LongTensor(np.array(mask).astype('int32')) + + def _load_mat(self, filename): + mat = sio.loadmat(filename, mat_dtype=True, squeeze_me=True, struct_as_record=False) + mask = mat['GTcls'].Segmentation + return Image.fromarray(mask) + + def __len__(self): + return len(self.images) + + @property + def classes(self): + """Category names.""" + return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train', + 'tv') + + +if __name__ == '__main__': + dataset = VOCAugSegmentation() \ No newline at end of file diff --git a/segutils/core/data/dataloader/pascal_voc.py b/segutils/core/data/dataloader/pascal_voc.py new file mode 100644 index 0000000..94db82c --- /dev/null +++ b/segutils/core/data/dataloader/pascal_voc.py @@ -0,0 +1,112 @@ +"""Pascal VOC Semantic Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class VOCSegmentation(SegmentationDataset): + """Pascal VOC Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to VOCdevkit folder. 
Default is './datasets/VOCdevkit' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]), + >>> ]) + >>> # Create Dataset + >>> trainset = VOCSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'VOC2012' + NUM_CLASS = 21 + + def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs): + super(VOCSegmentation, self).__init__(root, split, mode, transform, **kwargs) + _voc_root = os.path.join(root, self.BASE_DIR) + _mask_dir = os.path.join(_voc_root, 'SegmentationClass') + _image_dir = os.path.join(_voc_root, 'JPEGImages') + # train/val/test splits are pre-cut + _splits_dir = os.path.join(_voc_root, 'ImageSets/Segmentation') + if split == 'train': + _split_f = os.path.join(_splits_dir, 'train.txt') + elif split == 'val': + _split_f = os.path.join(_splits_dir, 'val.txt') + elif split == 'test': + _split_f = os.path.join(_splits_dir, 'test.txt') + else: + raise RuntimeError('Unknown dataset split.') + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), "r") as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg") + assert os.path.isfile(_image) + self.images.append(_image) + if split != 'test': + _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".png") + assert os.path.isfile(_mask) + self.masks.append(_mask) + + if split != 'test': + assert (len(self.images) == len(self.masks)) + print('Found {} images in the folder {}'.format(len(self.images), _voc_root)) + + def __getitem__(self, index): + img = 
Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchronized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + + return img, mask, os.path.basename(self.images[index]) + + def __len__(self): + return len(self.images) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + target[target == 255] = -1 + return torch.from_numpy(target).long() + + @property + def classes(self): + """Category names.""" + return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train', + 'tv') + + +if __name__ == '__main__': + dataset = VOCSegmentation() \ No newline at end of file diff --git a/segutils/core/data/dataloader/sbu_shadow.py b/segutils/core/data/dataloader/sbu_shadow.py new file mode 100644 index 0000000..0cf4ca9 --- /dev/null +++ b/segutils/core/data/dataloader/sbu_shadow.py @@ -0,0 +1,88 @@ +"""SBU Shadow Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class SBUSegmentation(SegmentationDataset): + """SBU Shadow Segmentation Dataset + """ + NUM_CLASS = 2 + + def __init__(self, root='../datasets/sbu', split='train', mode=None, transform=None, **kwargs): + super(SBUSegmentation, self).__init__(root, split, mode, transform, **kwargs) + assert os.path.exists(self.root) + self.images, self.masks = 
_get_sbu_pairs(self.root, self.split) + assert (len(self.images) == len(self.masks)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + target[target > 0] = 1 + return torch.from_numpy(target).long() + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 0 + + +def _get_sbu_pairs(folder, split='train'): + def get_path_pairs(img_folder, mask_folder): + img_paths = [] + mask_paths = [] + for root, _, files in os.walk(img_folder): + print(root) + for filename in files: + if filename.endswith('.jpg'): + imgpath = os.path.join(root, filename) + maskname = filename.replace('.jpg', '.png') + maskpath = os.path.join(mask_folder, maskname) + if os.path.isfile(imgpath) and os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask or image:', imgpath, maskpath) + print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) + return img_paths, mask_paths + + if split == 'train': + img_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowImages') + mask_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowMasks') + 
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + else: + assert split in ('val', 'test') + img_folder = os.path.join(folder, 'SBU-Test/ShadowImages') + mask_folder = os.path.join(folder, 'SBU-Test/ShadowMasks') + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + return img_paths, mask_paths + + +if __name__ == '__main__': + dataset = SBUSegmentation(base_size=280, crop_size=256) \ No newline at end of file diff --git a/segutils/core/data/dataloader/segbase.py b/segutils/core/data/dataloader/segbase.py new file mode 100644 index 0000000..823436d --- /dev/null +++ b/segutils/core/data/dataloader/segbase.py @@ -0,0 +1,93 @@ +"""Base segmentation dataset""" +import random +import numpy as np + +from PIL import Image, ImageOps, ImageFilter + +__all__ = ['SegmentationDataset'] + + +class SegmentationDataset(object): + """Segmentation Base Dataset""" + + def __init__(self, root, split, mode, transform, base_size=520, crop_size=480): + super(SegmentationDataset, self).__init__() + self.root = root + self.transform = transform + self.split = split + self.mode = mode if mode is not None else split + self.base_size = base_size + self.crop_size = crop_size + + def _val_sync_transform(self, img, mask): + outsize = self.crop_size + short_size = outsize + w, h = img.size + if w > h: + oh = short_size + ow = int(1.0 * w * oh / h) + else: + ow = short_size + oh = int(1.0 * h * ow / w) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + # center crop + w, h = img.size + x1 = int(round((w - outsize) / 2.)) + y1 = int(round((h - outsize) / 2.)) + img = img.crop((x1, y1, x1 + outsize, y1 + outsize)) + mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize)) + # final transform + img, mask = self._img_transform(img), self._mask_transform(mask) + return img, mask + + def _sync_transform(self, img, mask): + # random mirror + if random.random() < 0.5: + img = img.transpose(Image.FLIP_LEFT_RIGHT) + mask = 
mask.transpose(Image.FLIP_LEFT_RIGHT) + crop_size = self.crop_size + # random scale (short edge) + short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0)) + w, h = img.size + if h > w: + ow = short_size + oh = int(1.0 * h * ow / w) + else: + oh = short_size + ow = int(1.0 * w * oh / h) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + # pad crop + if short_size < crop_size: + padh = crop_size - oh if oh < crop_size else 0 + padw = crop_size - ow if ow < crop_size else 0 + img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0) + mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0) + # random crop crop_size + w, h = img.size + x1 = random.randint(0, w - crop_size) + y1 = random.randint(0, h - crop_size) + img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size)) + mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size)) + # gaussian blur as in PSP + if random.random() < 0.5: + img = img.filter(ImageFilter.GaussianBlur(radius=random.random())) + # final transform + img, mask = self._img_transform(img), self._mask_transform(mask) + return img, mask + + def _img_transform(self, img): + return np.array(img) + + def _mask_transform(self, mask): + return np.array(mask).astype('int32') + + @property + def num_class(self): + """Number of categories.""" + return self.NUM_CLASS + + @property + def pred_offset(self): + return 0 diff --git a/segutils/core/data/dataloader/utils.py b/segutils/core/data/dataloader/utils.py new file mode 100644 index 0000000..c0bd1ad --- /dev/null +++ b/segutils/core/data/dataloader/utils.py @@ -0,0 +1,69 @@ +import os +import hashlib +import errno +import tarfile +from six.moves import urllib +from torch.utils.model_zoo import tqdm + +def gen_bar_updater(): + pbar = tqdm(total=None) + + def bar_update(count, block_size, total_size): + if pbar.total is None and total_size: + pbar.total = total_size + progress_bytes = count * block_size + 
pbar.update(progress_bytes - pbar.n) + + return bar_update + +def check_integrity(fpath, md5=None): + if md5 is None: + return True + if not os.path.isfile(fpath): + return False + md5o = hashlib.md5() + with open(fpath, 'rb') as f: + # read in 1MB chunks + for chunk in iter(lambda: f.read(1024 * 1024), b''): + md5o.update(chunk) + md5c = md5o.hexdigest() + if md5c != md5: + return False + return True + +def makedir_exist_ok(dirpath): + try: + os.makedirs(dirpath) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + raise + +def download_url(url, root, filename=None, md5=None): + """Download a file from a url and place it in root.""" + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + makedir_exist_ok(root) + + # downloads file + if os.path.isfile(fpath) and check_integrity(fpath, md5): + print('Using downloaded and verified file: ' + fpath) + else: + try: + print('Downloading ' + url + ' to ' + fpath) + urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater()) + except OSError: + if url[:5] == 'https': + url = url.replace('https:', 'http:') + print('Failed download. Trying https -> http instead.' 
+ ' Downloading ' + url + ' to ' + fpath) + urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater()) + +def download_extract(url, root, filename, md5): + download_url(url, root, filename, md5) + with tarfile.open(os.path.join(root, filename), "r") as tar: + tar.extractall(path=root) \ No newline at end of file diff --git a/segutils/core/data/downloader/__init__.py b/segutils/core/data/downloader/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/data/downloader/ade20k.py b/segutils/core/data/downloader/ade20k.py new file mode 100644 index 0000000..8187c48 --- /dev/null +++ b/segutils/core/data/downloader/ade20k.py @@ -0,0 +1,51 @@ +"""Prepare ADE20K dataset""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/ade') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize ADE20K dataset.', + epilog='Example: python setup_ade20k.py', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', default=None, help='dataset directory on disk') + args = parser.parse_args() + return args + + +def download_ade(path, overwrite=False): + _AUG_DOWNLOAD_URLS = [ + ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip', + '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'), + ( + 'http://data.csail.mit.edu/places/ADEchallenge/release_test.zip', + 'e05747892219d10e9243933371a497e905a4860c'), ] + download_dir = os.path.join(path, 'downloads') + makedirs(download_dir) + for url, checksum in _AUG_DOWNLOAD_URLS: + filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum) + # extract + with zipfile.ZipFile(filename, "r") as zip_ref: + 
zip_ref.extractall(path=path) + + +if __name__ == '__main__': + args = parse_args() + makedirs(os.path.expanduser('~/.torch/datasets')) + if args.download_dir is not None: + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + # make symlink + os.symlink(args.download_dir, _TARGET_DIR) + download_ade(_TARGET_DIR, overwrite=False) diff --git a/segutils/core/data/downloader/cityscapes.py b/segutils/core/data/downloader/cityscapes.py new file mode 100644 index 0000000..3b65b88 --- /dev/null +++ b/segutils/core/data/downloader/cityscapes.py @@ -0,0 +1,54 @@ +"""Prepare Cityscapes dataset""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs, check_sha1 + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/citys') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize ADE20K dataset.', + epilog='Example: python prepare_cityscapes.py', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', default=None, help='dataset directory on disk') + args = parser.parse_args() + return args + + +def download_city(path, overwrite=False): + _CITY_DOWNLOAD_URLS = [ + ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'), + ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')] + download_dir = os.path.join(path, 'downloads') + makedirs(download_dir) + for filename, checksum in _CITY_DOWNLOAD_URLS: + if not check_sha1(filename, checksum): + raise UserWarning('File {} is downloaded but the content hash does not match. ' \ + 'The repo may be outdated or download may be incomplete. 
' \ + 'If the "repo_url" is overridden, consider switching to ' \ + 'the default repo.'.format(filename)) + # extract + with zipfile.ZipFile(filename, "r") as zip_ref: + zip_ref.extractall(path=path) + print("Extracted", filename) + + +if __name__ == '__main__': + args = parse_args() + makedirs(os.path.expanduser('~/.torch/datasets')) + if args.download_dir is not None: + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + # make symlink + os.symlink(args.download_dir, _TARGET_DIR) + else: + download_city(_TARGET_DIR, overwrite=False) diff --git a/segutils/core/data/downloader/mscoco.py b/segutils/core/data/downloader/mscoco.py new file mode 100644 index 0000000..6d509b6 --- /dev/null +++ b/segutils/core/data/downloader/mscoco.py @@ -0,0 +1,69 @@ +"""Prepare MS COCO datasets""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs, try_import_pycocotools + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/coco') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize MS COCO dataset.', + epilog='Example: python mscoco.py --download-dir ~/mscoco', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', type=str, default='~/mscoco/', help='dataset directory on disk') + parser.add_argument('--no-download', action='store_true', help='disable automatic download if set') + parser.add_argument('--overwrite', action='store_true', + help='overwrite downloaded files if set, in case they are corrupted') + args = parser.parse_args() + return args + + +def download_coco(path, overwrite=False): + _DOWNLOAD_URLS = [ + ('http://images.cocodataset.org/zips/train2017.zip', + '10ad623668ab00c62c096f0ed636d6aff41faca5'), + 
('http://images.cocodataset.org/annotations/annotations_trainval2017.zip', + '8551ee4bb5860311e79dace7e79cb91e432e78b3'), + ('http://images.cocodataset.org/zips/val2017.zip', + '4950dc9d00dbe1c933ee0170f5797584351d2a41'), + # ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip', + # '46cdcf715b6b4f67e980b529534e79c2edffe084'), + # test2017.zip, for those who want to attend the competition. + # ('http://images.cocodataset.org/zips/test2017.zip', + # '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'), + ] + makedirs(path) + for url, checksum in _DOWNLOAD_URLS: + filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum) + # extract + with zipfile.ZipFile(filename) as zf: + zf.extractall(path=path) + + +if __name__ == '__main__': + args = parse_args() + path = os.path.expanduser(args.download_dir) + if not os.path.isdir(path) or not os.path.isdir(os.path.join(path, 'train2017')) \ + or not os.path.isdir(os.path.join(path, 'val2017')) \ + or not os.path.isdir(os.path.join(path, 'annotations')): + if args.no_download: + raise ValueError(('{} is not a valid directory, make sure it is present.' 
+ ' Or you should not disable "--no-download" to grab it'.format(path))) + else: + download_coco(path, overwrite=args.overwrite) + + # make symlink + makedirs(os.path.expanduser('~/.torch/datasets')) + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + os.symlink(path, _TARGET_DIR) + try_import_pycocotools() diff --git a/segutils/core/data/downloader/pascal_voc.py b/segutils/core/data/downloader/pascal_voc.py new file mode 100644 index 0000000..849c95b --- /dev/null +++ b/segutils/core/data/downloader/pascal_voc.py @@ -0,0 +1,100 @@ +"""Prepare PASCAL VOC datasets""" +import os +import sys +import shutil +import argparse +import tarfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/voc') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize PASCAL VOC dataset.', + epilog='Example: python pascal_voc.py --download-dir ~/VOCdevkit', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', type=str, default='~/VOCdevkit/', help='dataset directory on disk') + parser.add_argument('--no-download', action='store_true', help='disable automatic download if set') + parser.add_argument('--overwrite', action='store_true', + help='overwrite downloaded files if set, in case they are corrupted') + args = parser.parse_args() + return args + + +##################################################################################### +# Download and extract VOC datasets into ``path`` + +def download_voc(path, overwrite=False): + _DOWNLOAD_URLS = [ + ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', + '34ed68851bce2a36e2a223fa52c661d592c66b3c'), + ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', + 
'41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'), + ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar', + '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')] + makedirs(path) + for url, checksum in _DOWNLOAD_URLS: + filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum) + # extract + with tarfile.open(filename) as tar: + tar.extractall(path=path) + + +##################################################################################### +# Download and extract the VOC augmented segmentation dataset into ``path`` + +def download_aug(path, overwrite=False): + _AUG_DOWNLOAD_URLS = [ + ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', + '7129e0a480c2d6afb02b517bb18ac54283bfaa35')] + makedirs(path) + for url, checksum in _AUG_DOWNLOAD_URLS: + filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum) + # extract + with tarfile.open(filename) as tar: + tar.extractall(path=path) + shutil.move(os.path.join(path, 'benchmark_RELEASE'), + os.path.join(path, 'VOCaug')) + filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt'] + # generate trainval.txt + with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile: + for fname in filenames: + fname = os.path.join(path, fname) + with open(fname) as infile: + for line in infile: + outfile.write(line) + + +if __name__ == '__main__': + args = parse_args() + path = os.path.expanduser(args.download_dir) + if not os.path.isdir(path) or not os.path.isdir(os.path.join(path, 'VOC2007')) \ + or not os.path.isdir(os.path.join(path, 'VOC2012')): + if args.no_download: + raise ValueError(('{} is not a valid directory, make sure it is present.' 
+ ' Or you should not disable "--no-download" to grab it'.format(path))) + else: + download_voc(path, overwrite=args.overwrite) + shutil.move(os.path.join(path, 'VOCdevkit', 'VOC2007'), os.path.join(path, 'VOC2007')) + shutil.move(os.path.join(path, 'VOCdevkit', 'VOC2012'), os.path.join(path, 'VOC2012')) + shutil.rmtree(os.path.join(path, 'VOCdevkit')) + + if not os.path.isdir(os.path.join(path, 'VOCaug')): + if args.no_download: + raise ValueError(('{} is not a valid directory, make sure it is present.' + ' Or you should not disable "--no-download" to grab it'.format(path))) + else: + download_aug(path, overwrite=args.overwrite) + + # make symlink + makedirs(os.path.expanduser('~/.torch/datasets')) + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + os.symlink(path, _TARGET_DIR) diff --git a/segutils/core/data/downloader/sbu_shadow.py b/segutils/core/data/downloader/sbu_shadow.py new file mode 100644 index 0000000..cdcbdde --- /dev/null +++ b/segutils/core/data/downloader/sbu_shadow.py @@ -0,0 +1,56 @@ +"""Prepare SBU Shadow datasets""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/sbu') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize SBU Shadow dataset.', + epilog='Example: python sbu_shadow.py --download-dir ~/SBU-shadow', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', type=str, default=None, help='dataset directory on disk') + parser.add_argument('--no-download', action='store_true', help='disable automatic download if set') + parser.add_argument('--overwrite', action='store_true', + help='overwrite downloaded files if set, in case they are corrupted') + args = parser.parse_args() 
+ return args + + +##################################################################################### +# Download and extract SBU shadow datasets into ``path`` + +def download_sbu(path, overwrite=False): + _DOWNLOAD_URLS = [ + ('http://www3.cs.stonybrook.edu/~cvl/content/datasets/shadow_db/SBU-shadow.zip'), + ] + download_dir = os.path.join(path, 'downloads') + makedirs(download_dir) + for url in _DOWNLOAD_URLS: + filename = download(url, path=path, overwrite=overwrite) + # extract + with zipfile.ZipFile(filename, "r") as zf: + zf.extractall(path=path) + print("Extracted", filename) + + +if __name__ == '__main__': + args = parse_args() + makedirs(os.path.expanduser('~/.torch/datasets')) + if args.download_dir is not None: + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + # make symlink + os.symlink(args.download_dir, _TARGET_DIR) + else: + download_sbu(_TARGET_DIR, overwrite=False) diff --git a/segutils/core/lib/psa/functional.py b/segutils/core/lib/psa/functional.py new file mode 100644 index 0000000..8e66088 --- /dev/null +++ b/segutils/core/lib/psa/functional.py @@ -0,0 +1,5 @@ +from . import functions + + +def psa_mask(input, psa_type=0, mask_H_=None, mask_W_=None): + return functions.psa_mask(input, psa_type, mask_H_, mask_W_) diff --git a/segutils/core/lib/psa/functions/__init__.py b/segutils/core/lib/psa/functions/__init__.py new file mode 100644 index 0000000..1b4726b --- /dev/null +++ b/segutils/core/lib/psa/functions/__init__.py @@ -0,0 +1 @@ +from .psamask import * diff --git a/segutils/core/lib/psa/functions/psamask.py b/segutils/core/lib/psa/functions/psamask.py new file mode 100644 index 0000000..26f34a2 --- /dev/null +++ b/segutils/core/lib/psa/functions/psamask.py @@ -0,0 +1,39 @@ +import torch +from torch.autograd import Function +from .. 
import src + + +class PSAMask(Function): + @staticmethod + def forward(ctx, input, psa_type=0, mask_H_=None, mask_W_=None): + assert psa_type in [0, 1] # 0-col, 1-dis + assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) + num_, channels_, feature_H_, feature_W_ = input.size() + if mask_H_ is None and mask_W_ is None: + mask_H_, mask_W_ = 2 * feature_H_ - 1, 2 * feature_W_ - 1 + assert (mask_H_ % 2 == 1) and (mask_W_ % 2 == 1) + assert channels_ == mask_H_ * mask_W_ + half_mask_H_, half_mask_W_ = (mask_H_ - 1) // 2, (mask_W_ - 1) // 2 + output = torch.zeros([num_, feature_H_ * feature_W_, feature_H_, feature_W_], dtype=input.dtype, device=input.device) + if not input.is_cuda: + src.cpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + else: + output = output.cuda() + src.gpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ = psa_type, num_, channels_, feature_H_, feature_W_ + ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ = mask_H_, mask_W_, half_mask_H_, half_mask_W_ + return output + + @staticmethod + def backward(ctx, grad_output): + psa_type, num_, channels_, feature_H_, feature_W_ = ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ + mask_H_, mask_W_, half_mask_H_, half_mask_W_ = ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ + grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device) + if not grad_output.is_cuda: + src.cpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + else: + src.gpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + return 
grad_input, None, None, None + + +psa_mask = PSAMask.apply diff --git a/segutils/core/lib/psa/modules/__init__.py b/segutils/core/lib/psa/modules/__init__.py new file mode 100644 index 0000000..1b4726b --- /dev/null +++ b/segutils/core/lib/psa/modules/__init__.py @@ -0,0 +1 @@ +from .psamask import * diff --git a/segutils/core/lib/psa/modules/psamask.py b/segutils/core/lib/psa/modules/psamask.py new file mode 100644 index 0000000..58ea4d9 --- /dev/null +++ b/segutils/core/lib/psa/modules/psamask.py @@ -0,0 +1,15 @@ +from torch import nn +from .. import functional as F + + +class PSAMask(nn.Module): + def __init__(self, psa_type=0, mask_H_=None, mask_W_=None): + super(PSAMask, self).__init__() + assert psa_type in [0, 1] # 0-col, 1-dis + assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) + self.psa_type = psa_type + self.mask_H_ = mask_H_ + self.mask_W_ = mask_W_ + + def forward(self, input): + return F.psa_mask(input, self.psa_type, self.mask_H_, self.mask_W_) diff --git a/segutils/core/lib/psa/src/__init__.py b/segutils/core/lib/psa/src/__init__.py new file mode 100644 index 0000000..ead1cfe --- /dev/null +++ b/segutils/core/lib/psa/src/__init__.py @@ -0,0 +1,18 @@ +import os +import torch +from torch.utils.cpp_extension import load + +cwd = os.path.dirname(os.path.realpath(__file__)) +cpu_path = os.path.join(cwd, 'cpu') +gpu_path = os.path.join(cwd, 'gpu') +print(cpu_path,gpu_path) +cpu = load('psamask_cpu', [ + os.path.join(cpu_path, 'operator.cpp'), + os.path.join(cpu_path, 'psamask.cpp'), +], build_directory=cpu_path, verbose=False) + +if torch.cuda.is_available(): + gpu = load('psamask_gpu', [ + os.path.join(gpu_path, 'operator.cpp'), + os.path.join(gpu_path, 'psamask_cuda.cu'), + ], build_directory=gpu_path, verbose=False) \ No newline at end of file diff --git a/segutils/core/lib/psa/src/cpu/operator.cpp b/segutils/core/lib/psa/src/cpu/operator.cpp new file mode 100644 index 0000000..e7b9f6c --- /dev/null +++ 
b/segutils/core/lib/psa/src/cpu/operator.cpp @@ -0,0 +1,6 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("psamask_forward", &psamask_forward_cpu, "PSAMASK forward (CPU)"); + m.def("psamask_backward", &psamask_backward_cpu, "PSAMASK backward (CPU)"); +} diff --git a/segutils/core/lib/psa/src/cpu/operator.h b/segutils/core/lib/psa/src/cpu/operator.h new file mode 100644 index 0000000..abc43cb --- /dev/null +++ b/segutils/core/lib/psa/src/cpu/operator.h @@ -0,0 +1,4 @@ +#include + +void psamask_forward_cpu(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); +void psamask_backward_cpu(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); \ No newline at end of file diff --git a/segutils/core/lib/psa/src/cpu/psamask.cpp b/segutils/core/lib/psa/src/cpu/psamask.cpp new file mode 100644 index 0000000..eb33694 --- /dev/null +++ b/segutils/core/lib/psa/src/cpu/psamask.cpp @@ -0,0 +1,133 @@ +#include + +#ifndef min +#define min(a,b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef max +#define max(a,b) (((a) > (b)) ? 
(a) : (b)) +#endif + +void psamask_collect_forward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_distribute_forward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = 
wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_collect_backward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_distribute_backward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); 
+ const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)]; + } + } + } + } + } +} + +void psamask_forward_cpu(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + const float* input_data = input.data(); + float* output_data = output.data(); + if(psa_type == 0) + psamask_collect_forward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); + else + psamask_distribute_forward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); +} + +void psamask_backward_cpu(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + const float* grad_output_data = grad_output.data(); + float* grad_input_data = grad_input.data(); + if(psa_type == 0) + psamask_collect_backward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); + else + psamask_distribute_backward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); +} diff --git a/segutils/core/lib/psa/src/gpu/operator.cpp b/segutils/core/lib/psa/src/gpu/operator.cpp new file mode 
100644 index 0000000..5a52f4a --- /dev/null +++ b/segutils/core/lib/psa/src/gpu/operator.cpp @@ -0,0 +1,6 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("psamask_forward", &psamask_forward_cuda, "PSAMASK forward (GPU)"); + m.def("psamask_backward", &psamask_backward_cuda, "PSAMASK backward (GPU)"); +} diff --git a/segutils/core/lib/psa/src/gpu/operator.h b/segutils/core/lib/psa/src/gpu/operator.h new file mode 100644 index 0000000..235a9e1 --- /dev/null +++ b/segutils/core/lib/psa/src/gpu/operator.h @@ -0,0 +1,4 @@ +#include + +void psamask_forward_cuda(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); +void psamask_backward_cuda(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); diff --git a/segutils/core/lib/psa/src/gpu/psamask_cuda.cu b/segutils/core/lib/psa/src/gpu/psamask_cuda.cu new file mode 100644 index 0000000..f3fcb93 --- /dev/null +++ b/segutils/core/lib/psa/src/gpu/psamask_cuda.cu @@ -0,0 +1,128 @@ +#include + +// CUDA: grid stride looping +#ifndef CUDA_KERNEL_LOOP +#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) +#endif + +__global__ void psamask_collect_forward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + 
const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } +} + +__global__ void psamask_distribute_forward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } +} + +__global__ void psamask_collect_backward_cuda(const int nthreads, + 
const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w]; + } + } + } +} + +__global__ void psamask_distribute_backward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for 
(int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)]; + } + } + } +} + +void psamask_forward_cuda(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + int nthreads = num_ * feature_H_ * feature_W_; + const float* input_data = input.data(); + float* output_data = output.data(); + if(psa_type == 0) + psamask_collect_forward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); + else + psamask_distribute_forward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); +} + +void psamask_backward_cuda(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + int nthreads = num_ * feature_H_ * feature_W_; + const float* grad_output_data = grad_output.data(); + float* grad_input_data = grad_input.data(); + if(psa_type == 0) + psamask_collect_backward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); + else + psamask_distribute_backward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); +} diff --git a/segutils/core/models/__init__.py b/segutils/core/models/__init__.py new file mode 100644 index 0000000..2a8b222 --- /dev/null +++ b/segutils/core/models/__init__.py @@ -0,0 +1,2 @@ +"""Model 
Zoo""" +from .model_zoo import get_model, get_model_list \ No newline at end of file diff --git a/segutils/core/models/base_models.zip b/segutils/core/models/base_models.zip new file mode 100644 index 0000000..b7fb6b1 Binary files /dev/null and b/segutils/core/models/base_models.zip differ diff --git a/segutils/core/models/base_models/__init__.py b/segutils/core/models/base_models/__init__.py new file mode 100644 index 0000000..562aa28 --- /dev/null +++ b/segutils/core/models/base_models/__init__.py @@ -0,0 +1,6 @@ +from .densenet import * +from .resnet import * +from .resnetv1b import * +from .vgg import * +from .eespnet import * +from .xception import * diff --git a/segutils/core/models/base_models/densenet.py b/segutils/core/models/base_models/densenet.py new file mode 100644 index 0000000..733f21d --- /dev/null +++ b/segutils/core/models/base_models/densenet.py @@ -0,0 +1,237 @@ +import re +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo + +from collections import OrderedDict + +__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', + 'dilated_densenet121', 'dilated_densenet161', 'dilated_densenet169', 'dilated_densenet201'] + +model_urls = { + 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth', + 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', + 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth', + 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth', +} + + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dilation=1, norm_layer=nn.BatchNorm2d): + super(_DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('relu1', nn.ReLU(True)), + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate, 1, 1, bias=False)), + 
self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, 3, 1, dilation, dilation, bias=False)), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, + growth_rate, drop_rate, dilation=1, norm_layer=nn.BatchNorm2d): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, + growth_rate, bn_size, drop_rate, dilation, norm_layer) + self.add_module('denselayer%d' % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d): + super(_Transition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('relu', nn.ReLU(True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, 1, 1, bias=False)) + self.add_module('pool', nn.AvgPool2d(2, 2)) + + +# Net +class DenseNet(nn.Module): #这是一个全新的构建模型的方法,<先构造模块后两步传递数据:features和classifier>;另一种常见的是,<边构造边传递数据> + + def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16), num_init_features=64, + bn_size=4, drop_rate=0, num_classes=1000, norm_layer=nn.BatchNorm2d, **kwargs): + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, 7, 2, 3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('relu0', nn.ReLU(True)), + ('pool0', nn.MaxPool2d(3, 2, 1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock(num_layers, 
num_features, bn_size, growth_rate, drop_rate, norm_layer=norm_layer) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_features, num_features // 2, norm_layer=norm_layer) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + self.num_features = num_features + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x): + features = self.features(x) + print('11',features.shape) + out = F.relu(features, True) + out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1) + out = self.classifier(out) + return out + + +class DilatedDenseNet(DenseNet): + def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16), num_init_features=64, + bn_size=4, drop_rate=0, num_classes=1000, dilate_scale=8, norm_layer=nn.BatchNorm2d, **kwargs): + super(DilatedDenseNet, self).__init__(growth_rate, block_config, num_init_features, + bn_size, drop_rate, num_classes, norm_layer) + assert (dilate_scale == 8 or dilate_scale == 16), "dilate_scale can only set as 8 or 16" + from functools import partial + if dilate_scale == 8: # output_stride + self.features.denseblock3.apply(partial(self._conv_dilate, dilate=2))#利用partial函数给 + self.features.denseblock4.apply(partial(self._conv_dilate, dilate=4)) + del self.features.transition2.pool + del self.features.transition3.pool + elif dilate_scale == 16: + self.features.denseblock4.apply(partial(self._conv_dilate, dilate=2)) + del 
self.features.transition3.pool + + def _conv_dilate(self, m, dilate): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + if m.kernel_size == (3, 3): + m.padding = (dilate, dilate) + m.dilation = (dilate, dilate) + + +# Specification +densenet_spec = {121: (64, 32, [6, 12, 24, 16]), + 161: (96, 48, [6, 12, 36, 24]), + 169: (64, 32, [6, 12, 32, 32]), + 201: (64, 32, [6, 12, 48, 32])} + + +# Constructor +def get_densenet(num_layers, pretrained=False, **kwargs): + r"""Densenet-BC model from the + `"Densely Connected Convolutional Networks" `_ paper. + + Parameters + ---------- + num_layers : int + Number of layers for the variant of densenet. Options are 121, 161, 169, 201. + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default $TORCH_HOME/models + Location for keeping the model parameters. + """ + num_init_features, growth_rate, block_config = densenet_spec[num_layers] + model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) + if pretrained: + # '.'s are no longer allowed in module names, but pervious _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. + # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. 
+ pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + state_dict = model_zoo.load_url(model_urls['densenet%d' % num_layers]) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) #初始化(加载权重) + return model + + +def get_dilated_densenet(num_layers, dilate_scale, pretrained=False, **kwargs): + num_init_features, growth_rate, block_config = densenet_spec[num_layers] + model = DilatedDenseNet(growth_rate, block_config, num_init_features, dilate_scale=dilate_scale) + if pretrained: + # '.'s are no longer allowed in module names, but pervious _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. + # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + state_dict = model_zoo.load_url(model_urls['densenet%d' % num_layers]) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) + return model + + +def densenet121(**kwargs): + return get_densenet(121, **kwargs) + + +def densenet161(**kwargs): + return get_densenet(161, **kwargs) + + +def densenet169(**kwargs): + return get_densenet(169, **kwargs) + + +def densenet201(**kwargs): + return get_densenet(201, **kwargs) + + +def dilated_densenet121(dilate_scale, **kwargs): + return get_dilated_densenet(121, dilate_scale, **kwargs) + + +def dilated_densenet161(dilate_scale, **kwargs): + return get_dilated_densenet(161, dilate_scale, **kwargs) + + +def dilated_densenet169(dilate_scale, **kwargs): + return get_dilated_densenet(169, dilate_scale, **kwargs) + + 
+def dilated_densenet201(dilate_scale, **kwargs): + return get_dilated_densenet(201, dilate_scale, **kwargs) + + +if __name__ == '__main__': + img = torch.randn(2, 3, 512, 512).cuda() + model = dilated_densenet121(8).cuda() + outputs = model(img) + print(outputs.shape) + from torchsummary import summary + + summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + for name, parameters in model.named_parameters(): + print(name, ':', parameters.size()) diff --git a/segutils/core/models/base_models/eespnet.py b/segutils/core/models/base_models/eespnet.py new file mode 100644 index 0000000..7d087fd --- /dev/null +++ b/segutils/core/models/base_models/eespnet.py @@ -0,0 +1,202 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNPReLU, _ConvBN, _BNPReLU + +__all__ = ['EESP', 'EESPNet', 'eespnet'] + + +class EESP(nn.Module): + + def __init__(self, in_channels, out_channels, stride=1, k=4, r_lim=7, down_method='esp', norm_layer=nn.BatchNorm2d): + super(EESP, self).__init__() + self.stride = stride + n = int(out_channels / k) + n1 = out_channels - (k - 1) * n + assert down_method in ['avg', 'esp'], 'One of these is suppported (avg or esp)' + assert n == n1, "n(={}) and n1(={}) should be equal for Depth-wise Convolution ".format(n, n1) + self.proj_1x1 = _ConvBNPReLU(in_channels, n, 1, stride=1, groups=k, norm_layer=norm_layer) + + map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8} + self.k_sizes = list() + for i in range(k): + ksize = int(3 + 2 * i) + ksize = ksize if ksize <= r_lim else 3 + self.k_sizes.append(ksize) + self.k_sizes.sort() + self.spp_dw = nn.ModuleList() + for i in range(k): + dilation = map_receptive_ksize[self.k_sizes[i]] + self.spp_dw.append(nn.Conv2d(n, n, 3, stride, dilation, dilation=dilation, groups=n, bias=False)) + self.conv_1x1_exp = _ConvBN(out_channels, out_channels, 1, 1, groups=k, norm_layer=norm_layer) + self.br_after_cat = _BNPReLU(out_channels, norm_layer) + 
self.module_act = nn.PReLU(out_channels) + self.downAvg = True if down_method == 'avg' else False + + def forward(self, x): + output1 = self.proj_1x1(x) + output = [self.spp_dw[0](output1)] + for k in range(1, len(self.spp_dw)): + out_k = self.spp_dw[k](output1) + out_k = out_k + output[k - 1] + output.append(out_k) + expanded = self.conv_1x1_exp(self.br_after_cat(torch.cat(output, 1))) + del output + if self.stride == 2 and self.downAvg: + return expanded + + if expanded.size() == x.size(): + expanded = expanded + x + + return self.module_act(expanded) + + +class DownSampler(nn.Module): + + def __init__(self, in_channels, out_channels, k=4, r_lim=9, reinf=True, inp_reinf=3, norm_layer=None): + super(DownSampler, self).__init__() + channels_diff = out_channels - in_channels + self.eesp = EESP(in_channels, channels_diff, stride=2, k=k, + r_lim=r_lim, down_method='avg', norm_layer=norm_layer) + self.avg = nn.AvgPool2d(kernel_size=3, padding=1, stride=2) + if reinf: + self.inp_reinf = nn.Sequential( + _ConvBNPReLU(inp_reinf, inp_reinf, 3, 1, 1), + _ConvBN(inp_reinf, out_channels, 1, 1)) + self.act = nn.PReLU(out_channels) + + def forward(self, x, x2=None): + avg_out = self.avg(x) + eesp_out = self.eesp(x) + output = torch.cat([avg_out, eesp_out], 1) + if x2 is not None: + w1 = avg_out.size(2) + while True: + x2 = F.avg_pool2d(x2, kernel_size=3, padding=1, stride=2) + w2 = x2.size(2) + if w2 == w1: + break + output = output + self.inp_reinf(x2) + + return self.act(output) + + +class EESPNet(nn.Module): + def __init__(self, num_classes=1000, scale=1, reinf=True, norm_layer=nn.BatchNorm2d): + super(EESPNet, self).__init__() + inp_reinf = 3 if reinf else None + reps = [0, 3, 7, 3] + r_lim = [13, 11, 9, 7, 5] + K = [4] * len(r_lim) + + # set out_channels + base, levels, base_s = 32, 5, 0 + out_channels = [base] * levels + for i in range(levels): + if i == 0: + base_s = int(base * scale) + base_s = math.ceil(base_s / K[0]) * K[0] + out_channels[i] = base if base_s > base 
else base_s + else: + out_channels[i] = base_s * pow(2, i) + if scale <= 1.5: + out_channels.append(1024) + elif scale in [1.5, 2]: + out_channels.append(1280) + else: + raise ValueError("Unknown scale value.") + + self.level1 = _ConvBNPReLU(3, out_channels[0], 3, 2, 1, norm_layer=norm_layer) + + self.level2_0 = DownSampler(out_channels[0], out_channels[1], k=K[0], r_lim=r_lim[0], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + + self.level3_0 = DownSampler(out_channels[1], out_channels[2], k=K[1], r_lim=r_lim[1], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level3 = nn.ModuleList() + for i in range(reps[1]): + self.level3.append(EESP(out_channels[2], out_channels[2], k=K[2], r_lim=r_lim[2], + norm_layer=norm_layer)) + + self.level4_0 = DownSampler(out_channels[2], out_channels[3], k=K[2], r_lim=r_lim[2], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level4 = nn.ModuleList() + for i in range(reps[2]): + self.level4.append(EESP(out_channels[3], out_channels[3], k=K[3], r_lim=r_lim[3], + norm_layer=norm_layer)) + + self.level5_0 = DownSampler(out_channels[3], out_channels[4], k=K[3], r_lim=r_lim[3], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level5 = nn.ModuleList() + for i in range(reps[2]): + self.level5.append(EESP(out_channels[4], out_channels[4], k=K[4], r_lim=r_lim[4], + norm_layer=norm_layer)) + + self.level5.append(_ConvBNPReLU(out_channels[4], out_channels[4], 3, 1, 1, + groups=out_channels[4], norm_layer=norm_layer)) + self.level5.append(_ConvBNPReLU(out_channels[4], out_channels[5], 1, 1, 0, + groups=K[4], norm_layer=norm_layer)) + + self.fc = nn.Linear(out_channels[5], num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif 
isinstance(m, nn.Linear): + nn.init.normal_(m.weight, std=0.001) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x, seg=True): + out_l1 = self.level1(x) + + out_l2 = self.level2_0(out_l1, x) + + out_l3_0 = self.level3_0(out_l2, x) + for i, layer in enumerate(self.level3): + if i == 0: + out_l3 = layer(out_l3_0) + else: + out_l3 = layer(out_l3) + + out_l4_0 = self.level4_0(out_l3, x) + for i, layer in enumerate(self.level4): + if i == 0: + out_l4 = layer(out_l4_0) + else: + out_l4 = layer(out_l4) + + if not seg: + out_l5_0 = self.level5_0(out_l4) # down-sampled + for i, layer in enumerate(self.level5): + if i == 0: + out_l5 = layer(out_l5_0) + else: + out_l5 = layer(out_l5) + + output_g = F.adaptive_avg_pool2d(out_l5, output_size=1) + output_g = F.dropout(output_g, p=0.2, training=self.training) + output_1x1 = output_g.view(output_g.size(0), -1) + + return self.fc(output_1x1) + return out_l1, out_l2, out_l3, out_l4 + + +def eespnet(pretrained=False, **kwargs): + model = EESPNet(**kwargs) + if pretrained: + raise ValueError("Don't support pretrained") + return model + + +if __name__ == '__main__': + img = torch.randn(1, 3, 224, 224) + model = eespnet() + out = model(img) diff --git a/segutils/core/models/base_models/hrnet.py b/segutils/core/models/base_models/hrnet.py new file mode 100644 index 0000000..775b809 --- /dev/null +++ b/segutils/core/models/base_models/hrnet.py @@ -0,0 +1,371 @@ +import torch +import torch.nn as nn + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, padding=1, bias=False) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(True) + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out 
= self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = norm_layer(planes) + self.conv2 = nn.Conv2d(planes, planes, 3, stride, 1, bias=False) + self.bn2 = norm_layer(planes) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, + fuse_method, multi_scale_output=True, norm_layer=nn.BatchNorm2d): + super(HighResolutionModule, self).__init__() + assert num_branches == len(num_blocks) + assert num_branches == len(num_channels) + assert num_branches == len(num_inchannels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels, norm_layer=norm_layer) + self.fuse_layers = self._make_fuse_layers(norm_layer) + self.relu = nn.ReLU(True) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, + stride=1, 
norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, + 1, stride, bias=False), + norm_layer(num_channels[branch_index] * block.expansion)) + + layers = list() + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], + stride, downsample, norm_layer=norm_layer)) + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels, norm_layer=nn.BatchNorm2d): + branches = list() + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels, norm_layer=norm_layer)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self, norm_layer=nn.BatchNorm2d): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = list() + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, bias=False), + norm_layer(num_inchannels[i]), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = list() + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + norm_layer(num_outchannels_conv3x3))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( 
+ nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + norm_layer(num_outchannels_conv3x3), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = list() + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + + return x_fuse + + +class HighResolutionNet(nn.Module): + def __init__(self, blocks, num_channels, num_modules, num_branches, num_blocks, + fuse_method, norm_layer=nn.BatchNorm2d, **kwargs): + super(HighResolutionNet, self).__init__() + self.num_branches = num_branches + + # deep stem + self.conv1 = nn.Sequential( + nn.Conv2d(3, 64, 3, 2, 1, bias=False), + norm_layer(64), + nn.ReLU(True), + nn.Conv2d(64, 64, 3, 2, 1, bias=False), + norm_layer(64), + nn.ReLU(True)) + + self.layer1 = self._make_layer(Bottleneck, 64, 64, 4, norm_layer=norm_layer) + + # stage 2 + num_channel, block = num_channels[0], blocks[0] + channels = [channel * block.expansion for channel in num_channel] + self.transition1 = self._make_transition_layer([256], channels, norm_layer) + self.stage2, pre_stage_channels = self._make_stage(num_modules[0], num_branches[0], + num_blocks[0], channels, block, + fuse_method[0], channels, + norm_layer=norm_layer) + + # stage 3 + num_channel, block = num_channels[1], blocks[1] + channels = [channel * block.expansion for channel in num_channel] + self.transition1 = self._make_transition_layer(pre_stage_channels, channels, norm_layer) + self.stage3, pre_stage_channels = self._make_stage(num_modules[1], num_branches[1], + 
num_blocks[1], channels, block, + fuse_method[1], channels, + norm_layer=norm_layer) + + # stage 4 + num_channel, block = num_channels[2], blocks[2] + channels = [channel * block.expansion for channel in num_channel] + self.transition1 = self._make_transition_layer(pre_stage_channels, channels, norm_layer) + self.stage4, pre_stage_channels = self._make_stage(num_modules[2], num_branches[2], + num_blocks[2], channels, block, + fuse_method[2], channels, + norm_layer=norm_layer) + + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels, norm_layer) + + self.classifier = nn.Linear(2048, 1000) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, 1, stride, bias=False), + norm_layer(planes * block.expansion)) + + layers = list() + layers.append(block(inplanes, planes, stride, downsample=downsample, norm_layer=norm_layer)) + inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(inplanes, planes, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer, norm_layer=nn.BatchNorm2d): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = list() + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, padding=1, bias=False), + norm_layer(num_channels_cur_layer[i]), + nn.ReLU(True))) + else: + transition_layers.append(None) + else: + conv3x3s = list() + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] if j 
== i - num_branches_pre else in_channels + conv3x3s.append(nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_stage(self, num_modules, num_branches, num_blocks, num_channels, block, + fuse_method, num_inchannels, multi_scale_output=True, norm_layer=nn.BatchNorm2d): + modules = list() + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + + modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, + fuse_method, reset_multi_scale_output, norm_layer=norm_layer)) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def _make_head(self, pre_stage_channels, norm_layer=nn.BatchNorm2d): + head_block = Bottleneck + head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = list() + for i, channels in enumerate(pre_stage_channels): + incre_module = self._make_layer(head_block, channels, head_channels[i], 1) + incre_modules.append(incre_module) + incre_modules = nn.ModuleList(incre_modules) + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = head_channels[i] * head_block.expansion + out_channels = head_channels[i + 1] * head_block.expansion + + downsamp_module = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, 2, 1), + norm_layer(out_channels), + nn.ReLU(True)) + + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d(head_channels[3] * head_block.expansion, 2048, 1), + norm_layer(2048), + nn.ReLU(True)) + + 
return incre_modules, downsamp_modules, final_layer + + def forward(self, x): + x = self.conv1(x) + x = self.layer1(x) + + x_list = list() + for i in range(self.num_branches[0]): + if self.transition1[i] is not None: + tmp = self.transition1[i](x) + print(tmp.size()) + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.num_branches[1]): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.num_branches[2]): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + # Classification Head + y = self.incre_modules[0](y_list[0]) + for i in range(len(self.downsamp_modules)): + y = self.incre_modules[i + 1](y_list[i + 1]) + self.downsamp_modules[i](y) + + y = self.final_layer(y) + + y = F.avg_pool2d(y, kernel_size=y.size() + [2:]).view(y.size(0), -1) + + y = self.classifier(y) + + return y + + +blocks = [BasicBlock, BasicBlock, BasicBlock] +num_modules = [1, 1, 1] +num_branches = [2, 3, 4] +num_blocks = [[4, 4], [4, 4, 4], [4, 4, 4, 4]] +num_channels = [[256, 256], [32, 64, 128], [32, 64, 128, 256]] +fuse_method = ['sum', 'sum', 'sum'] + +if __name__ == '__main__': + img = torch.randn(1, 3, 256, 256) + model = HighResolutionNet(blocks, num_channels, num_modules, num_branches, num_blocks, fuse_method) + output = model(img) diff --git a/segutils/core/models/base_models/mobilenetv2.py b/segutils/core/models/base_models/mobilenetv2.py new file mode 100644 index 0000000..4e4c093 --- /dev/null +++ b/segutils/core/models/base_models/mobilenetv2.py @@ -0,0 +1,158 @@ +"""MobileNet and MobileNetV2.""" +import torch +import torch.nn as nn + +from core.nn import _ConvBNReLU, _DepthwiseConv, InvertedResidual + +__all__ = ['MobileNet', 'MobileNetV2', 'get_mobilenet', 
class MobileNet(nn.Module):
    """MobileNet v1 classification backbone built from depthwise-separable convs.

    Args:
        num_classes: size of the final linear classifier output.
        multiplier: channel width multiplier applied to every conv stage.
        norm_layer: normalization layer constructor passed to the conv blocks.
    """

    def __init__(self, num_classes=1000, multiplier=1.0, norm_layer=nn.BatchNorm2d, **kwargs):
        super(MobileNet, self).__init__()
        # Each entry is [output channels, repeat count, stride of first repeat].
        conv_dw_setting = [
            [64, 1, 1],
            [128, 2, 2],
            [256, 2, 2],
            [512, 6, 2],
            [1024, 2, 2]]
        # NOTE(review): for multiplier > 1.0 the stem still uses 32 channels
        # (it is never widened) — confirm this is intentional.
        input_channels = int(32 * multiplier) if multiplier > 1.0 else 32
        features = [_ConvBNReLU(3, input_channels, 3, 2, 1, norm_layer=norm_layer)]

        for c, n, s in conv_dw_setting:
            out_channels = int(c * multiplier)
            for i in range(n):
                # Only the first block of each group downsamples.
                stride = s if i == 0 else 1
                features.append(_DepthwiseConv(input_channels, out_channels, stride, norm_layer))
                input_channels = out_channels
        # Global average pooling collapses the spatial dims to 1x1.
        features.append(nn.AdaptiveAvgPool2d(1))
        self.features = nn.Sequential(*features)

        # int(1024 * multiplier) matches the channel count of the last conv group.
        self.classifier = nn.Linear(int(1024 * multiplier), num_classes)

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        """Extract features, flatten the pooled 1x1 map, and classify."""
        x = self.features(x)
        x = self.classifier(x.view(x.size(0), x.size(1)))
        return x
def get_mobilenet(multiplier=1.0, pretrained=False, root='~/.torch/models', **kwargs):
    """Construct a MobileNet v1 with the given width multiplier.

    Pretrained weights are not available; requesting them raises ValueError.
    """
    model = MobileNet(multiplier=multiplier, **kwargs)

    if not pretrained:
        return model
    raise ValueError("Not support pretrained")
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):
        super(Bottleneck, self).__init__()
        # conv2 (and the optional downsample) carry the spatial stride.
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = norm_layer(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = norm_layer(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass: main branch plus (optionally projected) identity."""
        identity = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out = out + identity
        return self.relu(out)
1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. 
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +class BasicBlockV1b(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, + previous_dilation=1, norm_layer=nn.BatchNorm2d): + super(BasicBlockV1b, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, + dilation, dilation, bias=False) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(True) + self.conv2 = nn.Conv2d(planes, planes, 3, 1, previous_dilation, + dilation=previous_dilation, bias=False) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class BottleneckV1b(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, + previous_dilation=1, norm_layer=nn.BatchNorm2d): + super(BottleneckV1b, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = norm_layer(planes) + self.conv2 = nn.Conv2d(planes, planes, 3, stride, + dilation, dilation, bias=False) + self.bn2 = norm_layer(planes) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = 
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d):
        """Stack ``blocks`` v1b residual blocks for one stage.

        The first block absorbs the stride and a reduced dilation (1 for
        dilation 1/2, 2 for dilation 4); the remaining blocks all run at the
        full ``dilation``. Any other dilation value is rejected.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the identity path when the spatial size or channel count changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False),
                norm_layer(planes * block.expansion),
            )

        layers = []
        if dilation in (1, 2):
            # First block of a (possibly dilated) stage keeps dilation 1.
            layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample,
                                previous_dilation=dilation, norm_layer=norm_layer))
        elif dilation == 4:
            # Deepest dilated stage: the entry block already dilates by 2.
            layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample,
                                previous_dilation=dilation, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation,
                                previous_dilation=dilation, norm_layer=norm_layer))

        return nn.Sequential(*layers)
def resnet101_v1b(pretrained=False, **kwargs):
    """Build a ResNet-101 v1b.

    When ``pretrained`` is set, vanilla ResNet-101 weights are loaded for
    every key that also exists in this model's state dict; all other keys
    keep their freshly-initialized values.
    """
    model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    pretrained_state = model_zoo.load_url(model_urls['resnet101'])
    current_state = model.state_dict()
    compatible = {key: weight for key, weight in pretrained_state.items() if key in current_state}
    current_state.update(compatible)
    model.load_state_dict(current_state)
    return model
class Bottleneck(nn.Module):
    """ResNeXt bottleneck with grouped 3x3 convolution."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, **kwargs):
        super(Bottleneck, self).__init__()
        # BUG FIX: norm_layer defaulted to None but was called unconditionally,
        # so constructing a block without an explicit norm_layer raised
        # TypeError. Fall back to BatchNorm2d like every sibling backbone file.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Grouped-conv width per the ResNeXt paper: planes * (base_width/64) * groups.
        width = int(planes * (base_width / 64.)) * groups

        self.conv1 = nn.Conv2d(inplanes, width, 1, bias=False)
        self.bn1 = norm_layer(width)
        # Positional args: kernel=3, stride, padding=dilation, dilation, groups.
        self.conv2 = nn.Conv2d(width, width, 3, stride, dilation, dilation, groups, bias=False)
        self.bn2 = norm_layer(width)
        self.conv3 = nn.Conv2d(width, planes * self.expansion, 1, bias=False)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass: three conv-bn steps plus the shortcut."""
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d):
        """Stack ``blocks`` ResNeXt bottlenecks for one stage.

        The first block absorbs the stride (at dilation 1, or 2 when the stage
        dilation is 4); subsequent blocks all run at the full ``dilation``.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Project the identity path when shape changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False),
                norm_layer(planes * block.expansion)
            )

        layers = list()
        if dilation in (1, 2):
            # NOTE(review): the entry block is built without an explicit
            # dilation (defaults to 1) even when the stage dilation is 2 —
            # confirm against the reference v1b recipe.
            layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                                self.base_width, norm_layer=norm_layer))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                                self.base_width, dilation=2, norm_layer=norm_layer))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))

        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width,
                                dilation=dilation, norm_layer=norm_layer))

        return nn.Sequential(*layers)
def make_layers(cfg, batch_norm=False):
    """Translate a VGG configuration list into a feature-extractor Sequential.

    Each integer entry adds a 3x3 conv (optionally followed by BatchNorm) plus
    ReLU; the sentinel 'M' adds a 2x2 max-pool. Input starts at 3 channels.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
        if batch_norm:
            layers.extend([conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)])
        else:
            layers.extend([conv2d, nn.ReLU(inplace=True)])
        in_channels = v
    return nn.Sequential(*layers)
def vgg13(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration "B")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    load_pretrained = bool(pretrained)
    if load_pretrained:
        # Pretrained weights make the random init pass redundant.
        kwargs['init_weights'] = False
    net = VGG(make_layers(cfg['B']), **kwargs)
    if load_pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
    return net
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution used by the Xception backbone.

    Applies TF-style "same" padding manually, then a depthwise conv, a norm
    layer, and a 1x1 pointwise conv.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None):
        super(SeparableConv2d, self).__init__()
        self.kernel_size = kernel_size
        self.dilation = dilation

        # BUG FIX: norm_layer defaulted to None but was called unconditionally,
        # so constructing without an explicit norm_layer raised TypeError.
        # Default to BatchNorm2d like the other backbone files.
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        # Depthwise conv (groups == in_channels); padding is applied manually
        # in forward(), so conv padding stays 0.
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, 0, dilation, groups=in_channels,
                               bias=bias)
        self.bn = norm_layer(in_channels)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias)

    def forward(self, x):
        """Pad, then run depthwise conv -> norm -> pointwise conv."""
        x = self.fix_padding(x, self.kernel_size, self.dilation)
        x = self.conv1(x)
        x = self.bn(x)
        x = self.pointwise(x)

        return x

    def fix_padding(self, x, kernel_size, dilation):
        """Symmetric "same"-style padding for the effective (dilated) kernel."""
        kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
        pad_total = kernel_size_effective - 1
        pad_beg = pad_total // 2
        pad_end = pad_total - pad_beg
        padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end))
        return padded_inputs
rep.append(norm_layer(out_channels)) + self.rep = nn.Sequential(*rep) + + def forward(self, x): + out = self.rep(x) + if self.skip is not None: + skip = self.skipbn(self.skip(x)) + else: + skip = x + out = out + skip + return out + + +class Xception65(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, num_classes=1000, output_stride=32, norm_layer=nn.BatchNorm2d): + super(Xception65, self).__init__() + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + else: + raise NotImplementedError + # Entry flow + self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False) + self.bn1 = norm_layer(32) + self.relu = nn.ReLU(True) + + self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False) + self.bn2 = norm_layer(64) + + self.block1 = Block(64, 128, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False) + self.block2 = Block(128, 256, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True) + self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, norm_layer=norm_layer, + start_with_relu=True, grow_first=True, is_last=True) + + # Middle flow + midflow = list() + for i in range(4, 20): + midflow.append(Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, + start_with_relu=True, grow_first=True)) + self.midflow = nn.Sequential(*midflow) + + # Exit flow + self.block20 = Block(728, 1024, reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], + norm_layer=norm_layer, start_with_relu=True, grow_first=False, is_last=True) + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + 
self.bn3 = norm_layer(1536) + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn4 = norm_layer(1536) + self.conv5 = SeparableConv2d(1536, 2048, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn5 = norm_layer(2048) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(2048, num_classes) + + def forward(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + x = self.relu(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.midflow(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.relu(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.relu(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class Xception71(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, num_classes=1000, output_stride=32, norm_layer=nn.BatchNorm2d): + super(Xception71, self).__init__() + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + else: + raise NotImplementedError + # Entry flow + self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False) + self.bn1 = norm_layer(32) + self.relu = nn.ReLU(True) + + self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False) + self.bn2 = norm_layer(64) + + self.block1 = Block(64, 128, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False) + self.block2 = 
nn.Sequential( + Block(128, 256, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True), + Block(256, 728, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True)) + self.block3 = Block(728, 728, reps=2, stride=entry_block3_stride, norm_layer=norm_layer, + start_with_relu=True, grow_first=True, is_last=True) + + # Middle flow + midflow = list() + for i in range(4, 20): + midflow.append(Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, + start_with_relu=True, grow_first=True)) + self.midflow = nn.Sequential(*midflow) + + # Exit flow + self.block20 = Block(728, 1024, reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], + norm_layer=norm_layer, start_with_relu=True, grow_first=False, is_last=True) + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn3 = norm_layer(1536) + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn4 = norm_layer(1536) + self.conv5 = SeparableConv2d(1536, 2048, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn5 = norm_layer(2048) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(2048, num_classes) + + def forward(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + x = self.relu(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.midflow(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.relu(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.relu(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +# ------------------------------------------------- +# For 
# DFANet
# -------------------------------------------------
class BlockA(nn.Module):
    """Lightweight bottleneck residual block (DFANet): 1/4-width separable
    convs with an optional strided tail and a 1x1 projection skip."""

    def __init__(self, in_channels, out_channels, stride=1, dilation=1, norm_layer=None, start_with_relu=True):
        super(BlockA, self).__init__()
        if out_channels != in_channels or stride != 1:
            self.skip = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)
            self.skipbn = norm_layer(out_channels)
        else:
            self.skip = None
        self.relu = nn.ReLU(False)
        rep = list()
        inter_channels = out_channels // 4

        if start_with_relu:
            rep.append(self.relu)
        rep.append(SeparableConv2d(in_channels, inter_channels, 3, 1, dilation, norm_layer=norm_layer))
        rep.append(norm_layer(inter_channels))

        rep.append(self.relu)
        rep.append(SeparableConv2d(inter_channels, inter_channels, 3, 1, dilation, norm_layer=norm_layer))
        rep.append(norm_layer(inter_channels))

        if stride != 1:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inter_channels, out_channels, 3, stride, norm_layer=norm_layer))
            rep.append(norm_layer(out_channels))
        else:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inter_channels, out_channels, 3, 1, norm_layer=norm_layer))
            rep.append(norm_layer(out_channels))
        self.rep = nn.Sequential(*rep)

    def forward(self, x):
        out = self.rep(x)
        if self.skip is not None:
            skip = self.skipbn(self.skip(x))
        else:
            skip = x
        out = out + skip
        return out


class Enc(nn.Module):
    """Encoder stage: one stride-2 BlockA followed by ``blocks - 1`` stride-1 BlockA."""

    def __init__(self, in_channels, out_channels, blocks, norm_layer=nn.BatchNorm2d):
        super(Enc, self).__init__()
        block = list()
        block.append(BlockA(in_channels, out_channels, 2, norm_layer=norm_layer))
        for i in range(blocks - 1):
            block.append(BlockA(out_channels, out_channels, 1, norm_layer=norm_layer))
        self.block = nn.Sequential(*block)

    def forward(self, x):
        return self.block(x)


class FCAttention(nn.Module):
    """Fully-connected channel attention: GAP -> fc to 1000 -> 1x1 conv back
    to ``in_channels`` -> per-channel rescale of the input."""

    def __init__(self, in_channels, norm_layer=nn.BatchNorm2d):
        super(FCAttention, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(in_channels, 1000)
        self.conv = nn.Sequential(
            nn.Conv2d(1000, in_channels, 1, bias=False),
            norm_layer(in_channels),
            nn.ReLU(False))

    def forward(self, x):
        n, c, _, _ = x.size()
        att = self.avgpool(x).view(n, c)
        att = self.fc(att).view(n, 1000, 1, 1)
        att = self.conv(att)
        return x * att.expand_as(x)


class XceptionA(nn.Module):
    """Xception-A backbone (DFANet): three Enc stages plus FC attention."""

    def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d):
        super(XceptionA, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(3, 8, 3, 2, 1, bias=False),
                                   norm_layer(8),
                                   nn.ReLU(True))

        self.enc2 = Enc(8, 48, 4, norm_layer=norm_layer)
        self.enc3 = Enc(48, 96, 6, norm_layer=norm_layer)
        self.enc4 = Enc(96, 192, 4, norm_layer=norm_layer)

        self.fca = FCAttention(192, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(192, num_classes)

    def forward(self, x):
        x = self.conv1(x)

        x = self.enc2(x)
        x = self.enc3(x)
        x = self.enc4(x)
        x = self.fca(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x


# Constructors
def get_xception(pretrained=False, root='~/.torch/models', **kwargs):
    """Build Xception65, optionally loading weights from the local model store."""
    model = Xception65(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(get_model_file('xception', root=root)))
    return model


def get_xception_71(pretrained=False, root='~/.torch/models', **kwargs):
    """Build Xception71, optionally loading weights from the local model store."""
    model = Xception71(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(get_model_file('xception71', root=root)))
    return model


def get_xception_a(pretrained=False, root='~/.torch/models', **kwargs):
    """Build XceptionA, optionally loading weights from the local model store."""
    model = XceptionA(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(get_model_file('xception_a', root=root)))
    return model


if __name__ == '__main__':
    model = get_xception_a()
# diff --git a/segutils/core/models/bisenet.py b/segutils/core/models/bisenet.py new
file mode 100644 index 0000000..09d335d --- /dev/null +++ b/segutils/core/models/bisenet.py @@ -0,0 +1,298 @@ +"""Bilateral Segmentation Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from core.models.base_models.resnet import resnet18,resnet50 +from core.nn import _ConvBNReLU + +__all__ = ['BiSeNet', 'get_bisenet', 'get_bisenet_resnet18_citys'] + + +class BiSeNet(nn.Module): + def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(BiSeNet, self).__init__() + self.aux = aux + self.spatial_path = SpatialPath(3, 128, **kwargs) + self.context_path = ContextPath(backbone, pretrained_base, **kwargs) + self.ffm = FeatureFusion(256, 256, 4, **kwargs) + self.head = _BiSeHead(256, 64, nclass, **kwargs) + if aux: + self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs) + self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs) + + self.__setattr__('exclusive', + ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [ + 'spatial_path', 'context_path', 'ffm', 'head']) + + def forward(self, x,outsize=None,test_flag=False): + size = x.size()[2:] + spatial_out = self.spatial_path(x) + context_out = self.context_path(x) + fusion_out = self.ffm(spatial_out, context_out[-1]) + outputs = [] + x = self.head(fusion_out) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + + + if outsize: + print('######using torch resize#######',outsize) + x = F.interpolate(x, outsize, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout1 = self.auxlayer1(context_out[0]) + auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True) + outputs.append(auxout1) + auxout2 = self.auxlayer2(context_out[1]) + auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True) + outputs.append(auxout2) + if test_flag: + outputs = [torch.argmax(outputx ,axis=1) for outputx in outputs] + #return 
tuple(outputs) + return outputs[0] +class BiSeNet_MultiOutput(nn.Module): + def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(BiSeNet_MultiOutput, self).__init__() + self.aux = aux + self.spatial_path = SpatialPath(3, 128, **kwargs) + self.context_path = ContextPath(backbone, pretrained_base, **kwargs) + self.ffm = FeatureFusion(256, 256, 4, **kwargs) + assert isinstance(nclass,list) + self.outCnt = len(nclass) + for ii,nclassii in enumerate(nclass): + setattr(self,'head%d'%(ii) , _BiSeHead(256, 64, nclassii, **kwargs)) + + if aux: + self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs) + self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs) + + self.__setattr__('exclusive', + ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [ + 'spatial_path', 'context_path', 'ffm', 'head']) + + def forward(self, x,outsize=None,test_flag=False,smooth_kernel=0): + size = x.size()[2:] + spatial_out = self.spatial_path(x) + context_out = self.context_path(x) + fusion_out = self.ffm(spatial_out, context_out[-1]) + outputs = [] + for ii in range(self.outCnt): + x = getattr(self,'head%d'%(ii))(fusion_out) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout1 = self.auxlayer1(context_out[0]) + auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True) + outputs.append(auxout1) + auxout2 = self.auxlayer2(context_out[1]) + auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True) + outputs.append(auxout2) + if test_flag: + outputs = [torch.argmax(outputx ,axis=1) for outputx in outputs] + if smooth_kernel>0: + gaussian_kernel = torch.from_numpy(np.ones((1,1,smooth_kernel,smooth_kernel)) ) + + pad = int((smooth_kernel - 1)/2) + if not gaussian_kernel.is_cuda: + gaussian_kernel = gaussian_kernel.to(x.device) + #print(gaussian_kernel.dtype,gaussian_kernel,outputs[0].dtype) + outputs = [ 
x.unsqueeze(1).double() for x in outputs] + outputs = [torch.conv2d(x, gaussian_kernel, padding=pad) for x in outputs ] + outputs = [ x.squeeze(1).long() for x in outputs] + #return tuple(outputs) + return outputs + +class _BiSeHead(nn.Module): + def __init__(self, in_channels, inter_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_BiSeHead, self).__init__() + self.block = nn.Sequential( + _ConvBNReLU(in_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + + def forward(self, x): + x = self.block(x) + return x + + +class SpatialPath(nn.Module): + """Spatial path""" + + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(SpatialPath, self).__init__() + inter_channels = 64 + self.conv7x7 = _ConvBNReLU(in_channels, inter_channels, 7, 2, 3, norm_layer=norm_layer) + self.conv3x3_1 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer) + self.conv3x3_2 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer) + self.conv1x1 = _ConvBNReLU(inter_channels, out_channels, 1, 1, 0, norm_layer=norm_layer) + + def forward(self, x): + x = self.conv7x7(x) + x = self.conv3x3_1(x) + x = self.conv3x3_2(x) + x = self.conv1x1(x) + + return x + + +class _GlobalAvgPooling(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, **kwargs): + super(_GlobalAvgPooling, self).__init__() + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True) + ) + + def forward(self, x): + size = x.size()[2:] + pool = self.gap(x) + out = F.interpolate(pool, size, mode='bilinear', align_corners=True) + return out + + +class AttentionRefinmentModule(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(AttentionRefinmentModule, self).__init__() + self.conv3x3 = 
_ConvBNReLU(in_channels, out_channels, 3, 1, 1, norm_layer=norm_layer) + self.channel_attention = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + _ConvBNReLU(out_channels, out_channels, 1, 1, 0, norm_layer=norm_layer), + nn.Sigmoid() + ) + + def forward(self, x): + x = self.conv3x3(x) + attention = self.channel_attention(x) + x = x * attention + return x + + +class ContextPath(nn.Module): + def __init__(self, backbone='resnet18', pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(ContextPath, self).__init__() + if backbone == 'resnet18': + pretrained = resnet18(pretrained=pretrained_base, **kwargs) + elif backbone=='resnet50': + pretrained = resnet50(pretrained=pretrained_base, **kwargs) + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.conv1 = pretrained.conv1 + self.bn1 = pretrained.bn1 + self.relu = pretrained.relu + self.maxpool = pretrained.maxpool + self.layer1 = pretrained.layer1 + self.layer2 = pretrained.layer2 + self.layer3 = pretrained.layer3 + self.layer4 = pretrained.layer4 + + inter_channels = 128 + self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer) + + self.arms = nn.ModuleList( + [AttentionRefinmentModule(512, inter_channels, norm_layer, **kwargs), + AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)] + ) + self.refines = nn.ModuleList( + [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer), + _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)] + ) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + + context_blocks = [] + context_blocks.append(x) + x = self.layer2(x) + context_blocks.append(x) + c3 = self.layer3(x) + context_blocks.append(c3) + c4 = self.layer4(c3) + context_blocks.append(c4) + context_blocks.reverse() + + global_context = self.global_context(c4) + last_feature = global_context + context_outputs = [] + for i, (feature, arm, refine) 
in enumerate(zip(context_blocks[:2], self.arms, self.refines)): + feature = arm(feature) + feature += last_feature + last_feature = F.interpolate(feature, size=context_blocks[i + 1].size()[2:], + mode='bilinear', align_corners=True) + last_feature = refine(last_feature) + context_outputs.append(last_feature) + + return context_outputs + + +class FeatureFusion(nn.Module): + def __init__(self, in_channels, out_channels, reduction=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(FeatureFusion, self).__init__() + self.conv1x1 = _ConvBNReLU(in_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs) + self.channel_attention = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + _ConvBNReLU(out_channels, out_channels // reduction, 1, 1, 0, norm_layer=norm_layer), + _ConvBNReLU(out_channels // reduction, out_channels, 1, 1, 0, norm_layer=norm_layer), + nn.Sigmoid() + ) + + def forward(self, x1, x2): + fusion = torch.cat([x1, x2], dim=1) + out = self.conv1x1(fusion) + attention = self.channel_attention(out) + out = out + out * attention + return out + + +def get_bisenet(dataset='citys', backbone='resnet18', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = BiSeNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('bisenet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_bisenet_resnet18_citys(**kwargs): + return get_bisenet('citys', 'resnet18', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 224, 224) + # model = BiSeNet(19, backbone='resnet18') + # print(model.exclusive) + input = torch.rand(2, 
3, 224, 224) + model = BiSeNet(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/segutils/core/models/ccnet.py b/segutils/core/models/ccnet.py new file mode 100644 index 0000000..b06ca03 --- /dev/null +++ b/segutils/core/models/ccnet.py @@ -0,0 +1,166 @@ +"""Criss-Cross Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import CrissCrossAttention +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +#失败:NameError: name '_C' is not defined + +__all__ = ['CCNet', 'get_ccnet', 'get_ccnet_resnet50_citys', 'get_ccnet_resnet101_citys', + 'get_ccnet_resnet152_citys', 'get_ccnet_resnet50_ade', 'get_ccnet_resnet101_ade', + 'get_ccnet_resnet152_ade'] + + +class CCNet(SegBaseModel): + r"""CCNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Zilong Huang, et al. "CCNet: Criss-Cross Attention for Semantic Segmentation." + arXiv preprint arXiv:1811.11721 (2018). 
+ """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(CCNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _CCHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _CCHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_CCHead, self).__init__() + self.rcca = _RCCAModule(2048, 512, norm_layer, **kwargs) + self.out = nn.Conv2d(512, nclass, 1) + + def forward(self, x): + x = self.rcca(x) + x = self.out(x) + return x + + +class _RCCAModule(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, **kwargs): + super(_RCCAModule, self).__init__() + inter_channels = in_channels // 4 + self.conva = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + self.cca = CrissCrossAttention(inter_channels) + self.convb = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.bottleneck = nn.Sequential( + nn.Conv2d(in_channels + inter_channels, out_channels, 3, padding=1, bias=False), + norm_layer(out_channels), + nn.Dropout2d(0.1)) + + def forward(self, x, recurrence=1): + out = self.conva(x) + for i in range(recurrence): + out = self.cca(out) + out = self.convb(out) + out = torch.cat([x, out], dim=1) + out = self.bottleneck(out) + + return out + + +def 
get_ccnet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = CCNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('ccnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_ccnet_resnet50_citys(**kwargs): + return get_ccnet('citys', 'resnet50', **kwargs) + + +def get_ccnet_resnet101_citys(**kwargs): + return get_ccnet('citys', 'resnet101', **kwargs) + + +def get_ccnet_resnet152_citys(**kwargs): + return get_ccnet('citys', 'resnet152', **kwargs) + + +def get_ccnet_resnet50_ade(**kwargs): + return get_ccnet('ade20k', 'resnet50', **kwargs) + + +def get_ccnet_resnet101_ade(**kwargs): + return get_ccnet('ade20k', 'resnet101', **kwargs) + + +def get_ccnet_resnet152_ade(**kwargs): + return get_ccnet('ade20k', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # model = get_ccnet_resnet50_citys() + # img = torch.randn(1, 3, 480, 480) + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = CCNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/cgnet.py b/segutils/core/models/cgnet.py 
new file mode 100644 index 0000000..85cb4e6 --- /dev/null +++ b/segutils/core/models/cgnet.py @@ -0,0 +1,228 @@ +"""Context Guided Network for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNPReLU, _BNPReLU + +__all__ = ['CGNet', 'get_cgnet', 'get_cgnet_citys'] + + +class CGNet(nn.Module): + r"""CGNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Tianyi Wu, et al. "CGNet: A Light-weight Context Guided Network for Semantic Segmentation." + arXiv preprint arXiv:1811.08201 (2018). + """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=True, M=3, N=21, **kwargs): + super(CGNet, self).__init__() + # stage 1 + self.stage1_0 = _ConvBNPReLU(3, 32, 3, 2, 1, **kwargs) + self.stage1_1 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs) + self.stage1_2 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs) + + self.sample1 = _InputInjection(1) + self.sample2 = _InputInjection(2) + self.bn_prelu1 = _BNPReLU(32 + 3, **kwargs) + + # stage 2 + self.stage2_0 = ContextGuidedBlock(32 + 3, 64, dilation=2, reduction=8, down=True, residual=False, **kwargs) + self.stage2 = nn.ModuleList() + for i in range(0, M - 1): + self.stage2.append(ContextGuidedBlock(64, 64, dilation=2, reduction=8, **kwargs)) + self.bn_prelu2 = _BNPReLU(128 + 3, **kwargs) + + # stage 3 + self.stage3_0 = ContextGuidedBlock(128 + 3, 128, dilation=4, reduction=16, down=True, residual=False, **kwargs) + self.stage3 = nn.ModuleList() + for i in range(0, N - 1): + self.stage3.append(ContextGuidedBlock(128, 128, dilation=4, reduction=16, **kwargs)) + self.bn_prelu3 = _BNPReLU(256, **kwargs) + + self.head = nn.Sequential( + nn.Dropout2d(0.1, False), + nn.Conv2d(256, nclass, 1)) + + 
self.__setattr__('exclusive', ['stage1_0', 'stage1_1', 'stage1_2', 'sample1', 'sample2', + 'bn_prelu1', 'stage2_0', 'stage2', 'bn_prelu2', 'stage3_0', + 'stage3', 'bn_prelu3', 'head']) + + def forward(self, x): + size = x.size()[2:] + # stage1 + out0 = self.stage1_0(x) + out0 = self.stage1_1(out0) + out0 = self.stage1_2(out0) + + inp1 = self.sample1(x) + inp2 = self.sample2(x) + + # stage 2 + out0_cat = self.bn_prelu1(torch.cat([out0, inp1], dim=1)) + out1_0 = self.stage2_0(out0_cat) + for i, layer in enumerate(self.stage2): + if i == 0: + out1 = layer(out1_0) + else: + out1 = layer(out1) + out1_cat = self.bn_prelu2(torch.cat([out1, out1_0, inp2], dim=1)) + + # stage 3 + out2_0 = self.stage3_0(out1_cat) + for i, layer in enumerate(self.stage3): + if i == 0: + out2 = layer(out2_0) + else: + out2 = layer(out2) + out2_cat = self.bn_prelu3(torch.cat([out2_0, out2], dim=1)) + + outputs = [] + out = self.head(out2_cat) + out = F.interpolate(out, size, mode='bilinear', align_corners=True) + outputs.append(out) + #return tuple(outputs) + return outputs[0] + + +class _ChannelWiseConv(nn.Module): + def __init__(self, in_channels, out_channels, dilation=1, **kwargs): + super(_ChannelWiseConv, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, 3, 1, dilation, dilation, groups=in_channels, bias=False) + + def forward(self, x): + x = self.conv(x) + return x + + +class _FGlo(nn.Module): + def __init__(self, in_channels, reduction=16, **kwargs): + super(_FGlo, self).__init__() + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels // reduction), + nn.ReLU(True), + nn.Linear(in_channels // reduction, in_channels), + nn.Sigmoid()) + + def forward(self, x): + n, c, _, _ = x.size() + out = self.gap(x).view(n, c) + out = self.fc(out).view(n, c, 1, 1) + return x * out + + +class _InputInjection(nn.Module): + def __init__(self, ratio): + super(_InputInjection, self).__init__() + self.pool = nn.ModuleList() + for i in range(0, 
ratio): + self.pool.append(nn.AvgPool2d(3, 2, 1)) + + def forward(self, x): + for pool in self.pool: + x = pool(x) + return x + + +class _ConcatInjection(nn.Module): + def __init__(self, in_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConcatInjection, self).__init__() + self.bn = norm_layer(in_channels) + self.prelu = nn.PReLU(in_channels) + + def forward(self, x1, x2): + out = torch.cat([x1, x2], dim=1) + out = self.bn(out) + out = self.prelu(out) + return out + + +class ContextGuidedBlock(nn.Module): + def __init__(self, in_channels, out_channels, dilation=2, reduction=16, down=False, + residual=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(ContextGuidedBlock, self).__init__() + inter_channels = out_channels // 2 if not down else out_channels + if down: + self.conv = _ConvBNPReLU(in_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer, **kwargs) + self.reduce = nn.Conv2d(inter_channels * 2, out_channels, 1, bias=False) + else: + self.conv = _ConvBNPReLU(in_channels, inter_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs) + self.f_loc = _ChannelWiseConv(inter_channels, inter_channels, **kwargs) + self.f_sur = _ChannelWiseConv(inter_channels, inter_channels, dilation, **kwargs) + self.bn = norm_layer(inter_channels * 2) + self.prelu = nn.PReLU(inter_channels * 2) + self.f_glo = _FGlo(out_channels, reduction, **kwargs) + self.down = down + self.residual = residual + + def forward(self, x): + out = self.conv(x) + loc = self.f_loc(out) + sur = self.f_sur(out) + + joi_feat = torch.cat([loc, sur], dim=1) + joi_feat = self.prelu(self.bn(joi_feat)) + if self.down: + joi_feat = self.reduce(joi_feat) + + out = self.f_glo(joi_feat) + if self.residual: + out = out + x + + return out + + +def get_cgnet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from 
core.data.dataloader import datasets + model = CGNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('cgnet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_cgnet_citys(**kwargs): + return get_cgnet('citys', '', **kwargs) + + +if __name__ == '__main__': + # model = get_cgnet_citys() + # print(model) + input = torch.rand(2, 3, 224, 224) + model = CGNet(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/segutils/core/models/danet.py b/segutils/core/models/danet.py new file mode 100644 index 0000000..7dae5d3 --- /dev/null +++ b/segutils/core/models/danet.py @@ -0,0 +1,232 @@ +"""Dual Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel + +__all__ = ['DANet', 'get_danet', 'get_danet_resnet50_citys', + 'get_danet_resnet101_citys', 'get_danet_resnet152_citys'] + + +class DANet(SegBaseModel): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). 
+ aux : bool + Auxiliary loss. + Reference: + Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu. + "Dual Attention Network for Scene Segmentation." *CVPR*, 2019 + """ + + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(DANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DAHead(2048, nclass, aux, **kwargs) + + self.__setattr__('exclusive', ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x0 = F.interpolate(x[0], size, mode='bilinear', align_corners=True) + outputs.append(x0) + + if self.aux: + x1 = F.interpolate(x[1], size, mode='bilinear', align_corners=True) + x2 = F.interpolate(x[2], size, mode='bilinear', align_corners=True) + outputs.append(x1) + outputs.append(x2) + #return outputs + return outputs[0] + +class _PositionAttentionModule(nn.Module): + """ Position attention module""" + + def __init__(self, in_channels, **kwargs): + super(_PositionAttentionModule, self).__init__() + self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1) + self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1) + self.conv_d = nn.Conv2d(in_channels, in_channels, 1) + self.alpha = nn.Parameter(torch.zeros(1)) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x): + batch_size, _, height, width = x.size() + feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1) + feat_c = self.conv_c(x).view(batch_size, -1, height * width) + attention_s = self.softmax(torch.bmm(feat_b, feat_c)) + feat_d = self.conv_d(x).view(batch_size, -1, height * width) + feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width) + out = self.alpha * feat_e + x + + return out + + +class _ChannelAttentionModule(nn.Module): + """Channel attention module""" + + def __init__(self, **kwargs): + super(_ChannelAttentionModule, 
self).__init__() + self.beta = nn.Parameter(torch.zeros(1)) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x): + batch_size, _, height, width = x.size() + feat_a = x.view(batch_size, -1, height * width) + feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1) + attention = torch.bmm(feat_a, feat_a_transpose) + attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention + attention = self.softmax(attention_new) + + feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width) + out = self.beta * feat_e + x + + return out + + +class _DAHead(nn.Module): + def __init__(self, in_channels, nclass, aux=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DAHead, self).__init__() + self.aux = aux + inter_channels = in_channels // 4 + self.conv_p1 = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.conv_c1 = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.pam = _PositionAttentionModule(inter_channels, **kwargs) + self.cam = _ChannelAttentionModule(**kwargs) + self.conv_p2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.conv_c2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.out = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + if aux: + self.conv_p3 = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + self.conv_c3 = nn.Sequential( + nn.Dropout(0.1), + 
nn.Conv2d(inter_channels, nclass, 1) + ) + + def forward(self, x): + feat_p = self.conv_p1(x) + feat_p = self.pam(feat_p) + feat_p = self.conv_p2(feat_p) + + feat_c = self.conv_c1(x) + feat_c = self.cam(feat_c) + feat_c = self.conv_c2(feat_c) + + feat_fusion = feat_p + feat_c + + outputs = [] + fusion_out = self.out(feat_fusion) + outputs.append(fusion_out) + if self.aux: + p_out = self.conv_p3(feat_p) + c_out = self.conv_c3(feat_c) + outputs.append(p_out) + outputs.append(c_out) + + return tuple(outputs) + + +def get_danet(dataset='citys', backbone='resnet50', pretrained=False, + root='~/.torch/models', pretrained_base=True, **kwargs): + r"""Dual Attention Network + + Parameters + ---------- + dataset : str, default pascal_voc + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
+ Examples + -------- + >>> model = get_danet(dataset='pascal_voc', backbone='resnet50', pretrained=False) + >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('danet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_danet_resnet50_citys(**kwargs): + return get_danet('citys', 'resnet50', **kwargs) + + +def get_danet_resnet101_citys(**kwargs): + return get_danet('citys', 'resnet101', **kwargs) + + +def get_danet_resnet152_citys(**kwargs): + return get_danet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 480, 480) + # model = get_danet_resnet50_citys() + # outputs = model(img) + input = torch.rand(2, 3,512,512) + model = DANet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/deeplabv3.py b/segutils/core/models/deeplabv3.py new file mode 100644 index 0000000..98d0c02 --- /dev/null +++ b/segutils/core/models/deeplabv3.py @@ -0,0 +1,185 @@ +"""Pyramid Scene Parsing Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel +from .fcn import 
_FCNHead + +__all__ = ['DeepLabV3', 'get_deeplabv3', 'get_deeplabv3_resnet50_voc', 'get_deeplabv3_resnet101_voc', + 'get_deeplabv3_resnet152_voc', 'get_deeplabv3_resnet50_ade', 'get_deeplabv3_resnet101_ade', + 'get_deeplabv3_resnet152_ade'] + + +class DeepLabV3(SegBaseModel): + r"""DeepLabV3 + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Chen, Liang-Chieh, et al. "Rethinking atrous convolution for semantic image segmentation." + arXiv preprint arXiv:1706.05587 (2017). + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(DeepLabV3, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DeepLabHead(nclass, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _DeepLabHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DeepLabHead, self).__init__() + self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs) + self.block = nn.Sequential( + nn.Conv2d(256, 256, 3, padding=1, bias=False), + norm_layer(256, **({} if norm_kwargs is None 
else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(256, nclass, 1) + ) + + def forward(self, x): + x = self.aspp(x) + return self.block(x) + + +class _ASPPConv(nn.Module): + def __init__(self, in_channels, out_channels, atrous_rate, norm_layer, norm_kwargs): + super(_ASPPConv, self).__init__() + self.block = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + def forward(self, x): + return self.block(x) + + +class _AsppPooling(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, norm_kwargs, **kwargs): + super(_AsppPooling, self).__init__() + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + def forward(self, x): + size = x.size()[2:] + pool = self.gap(x) + out = F.interpolate(pool, size, mode='bilinear', align_corners=True) + return out + + +class _ASPP(nn.Module): + def __init__(self, in_channels, atrous_rates, norm_layer, norm_kwargs, **kwargs): + super(_ASPP, self).__init__() + out_channels = 256 + self.b0 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + rate1, rate2, rate3 = tuple(atrous_rates) + self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer, norm_kwargs) + self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer, norm_kwargs) + self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer, norm_kwargs) + self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs) + + self.project = nn.Sequential( + nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else 
norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.5) + ) + + def forward(self, x): + feat1 = self.b0(x) + feat2 = self.b1(x) + feat3 = self.b2(x) + feat4 = self.b3(x) + feat5 = self.b4(x) + x = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + x = self.project(x) + return x + + +def get_deeplabv3(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DeepLabV3(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_deeplabv3_resnet50_voc(**kwargs): + return get_deeplabv3('pascal_voc', 'resnet50', **kwargs) + + +def get_deeplabv3_resnet101_voc(**kwargs): + return get_deeplabv3('pascal_voc', 'resnet101', **kwargs) + + +def get_deeplabv3_resnet152_voc(**kwargs): + return get_deeplabv3('pascal_voc', 'resnet152', **kwargs) + + +def get_deeplabv3_resnet50_ade(**kwargs): + return get_deeplabv3('ade20k', 'resnet50', **kwargs) + + +def get_deeplabv3_resnet101_ade(**kwargs): + return get_deeplabv3('ade20k', 'resnet101', **kwargs) + + +def get_deeplabv3_resnet152_ade(**kwargs): + return get_deeplabv3('ade20k', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_deeplabv3_resnet50_voc() + img = torch.randn(2, 3, 480, 480) + output = model(img) diff --git a/segutils/core/models/deeplabv3_plus.py b/segutils/core/models/deeplabv3_plus.py new file mode 100644 index 0000000..9b5a703 --- /dev/null +++ b/segutils/core/models/deeplabv3_plus.py @@ -0,0 +1,142 @@ +import torch +import torch.nn as nn +import 
torch.nn.functional as F + +from .base_models.xception import get_xception +from .deeplabv3 import _ASPP +from .fcn import _FCNHead +from ..nn import _ConvBNReLU + +__all__ = ['DeepLabV3Plus', 'get_deeplabv3_plus', 'get_deeplabv3_plus_xception_voc'] + + +class DeepLabV3Plus(nn.Module): + r"""DeepLabV3Plus + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'xception'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Chen, Liang-Chieh, et al. "Encoder-Decoder with Atrous Separable Convolution for Semantic + Image Segmentation." + """ + + def __init__(self, nclass, backbone='xception', aux=True, pretrained_base=True, dilated=True, **kwargs): + super(DeepLabV3Plus, self).__init__() + self.aux = aux + self.nclass = nclass + output_stride = 8 if dilated else 32 + + self.pretrained = get_xception(pretrained=pretrained_base, output_stride=output_stride, **kwargs) + + # deeplabv3 plus + self.head = _DeepLabHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(728, nclass, **kwargs) + + def base_forward(self, x): + # Entry flow + x = self.pretrained.conv1(x) + x = self.pretrained.bn1(x) + x = self.pretrained.relu(x) + + x = self.pretrained.conv2(x) + x = self.pretrained.bn2(x) + x = self.pretrained.relu(x) + + x = self.pretrained.block1(x) + # add relu here + x = self.pretrained.relu(x) + low_level_feat = x + + x = self.pretrained.block2(x) + x = self.pretrained.block3(x) + + # Middle flow + x = self.pretrained.midflow(x) + mid_level_feat = x + + # Exit flow + x = self.pretrained.block20(x) + x = self.pretrained.relu(x) + x = self.pretrained.conv3(x) + x = self.pretrained.bn3(x) + x = self.pretrained.relu(x) + + x = self.pretrained.conv4(x) + x = self.pretrained.bn4(x) + x = self.pretrained.relu(x) + + x = 
self.pretrained.conv5(x) + x = self.pretrained.bn5(x) + x = self.pretrained.relu(x) + return low_level_feat, mid_level_feat, x + + def forward(self, x): + size = x.size()[2:] + c1, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4, c1) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _DeepLabHead(nn.Module): + def __init__(self, nclass, c1_channels=128, norm_layer=nn.BatchNorm2d, **kwargs): + super(_DeepLabHead, self).__init__() + self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, **kwargs) + self.c1_block = _ConvBNReLU(c1_channels, 48, 3, padding=1, norm_layer=norm_layer) + self.block = nn.Sequential( + _ConvBNReLU(304, 256, 3, padding=1, norm_layer=norm_layer), + nn.Dropout(0.5), + _ConvBNReLU(256, 256, 3, padding=1, norm_layer=norm_layer), + nn.Dropout(0.1), + nn.Conv2d(256, nclass, 1)) + + def forward(self, x, c1): + size = c1.size()[2:] + c1 = self.c1_block(c1) + x = self.aspp(x) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + return self.block(torch.cat([x, c1], dim=1)) + + +def get_deeplabv3_plus(dataset='pascal_voc', backbone='xception', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DeepLabV3Plus(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict( + torch.load(get_model_file('deeplabv3_plus_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def 
get_deeplabv3_plus_xception_voc(**kwargs):
+    return get_deeplabv3_plus('pascal_voc', 'xception', **kwargs)
+
+
+if __name__ == '__main__':
+    model = get_deeplabv3_plus_xception_voc()
diff --git a/segutils/core/models/denseaspp.py b/segutils/core/models/denseaspp.py
new file mode 100644
index 0000000..1582375
--- /dev/null
+++ b/segutils/core/models/denseaspp.py
@@ -0,0 +1,198 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+# Relative imports for consistency with the sibling model files
+# (deeplabv3.py uses 'from .fcn import _FCNHead', deeplabv3_plus.py uses
+# 'from .base_models.xception import ...'); the absolute 'core.*' form only
+# resolves when segutils/ itself is on sys.path.
+from .base_models.densenet import *
+from .fcn import _FCNHead
+
+__all__ = ['DenseASPP', 'get_denseaspp', 'get_denseaspp_densenet121_citys',
+           'get_denseaspp_densenet161_citys', 'get_denseaspp_densenet169_citys', 'get_denseaspp_densenet201_citys']
+
+
+class DenseASPP(nn.Module):
+    def __init__(self, nclass, backbone='densenet121', aux=False, jpu=False,
+                 pretrained_base=True, dilate_scale=8, **kwargs):
+        super(DenseASPP, self).__init__()
+        self.nclass = nclass
+        self.aux = aux
+        self.dilate_scale = dilate_scale
+        if backbone == 'densenet121':
+            self.pretrained = dilated_densenet121(dilate_scale, pretrained=pretrained_base, **kwargs)
+        elif backbone == 'densenet161':
+            self.pretrained = dilated_densenet161(dilate_scale, pretrained=pretrained_base, **kwargs)
+        elif backbone == 'densenet169':
+            self.pretrained = dilated_densenet169(dilate_scale, pretrained=pretrained_base, **kwargs)
+        elif backbone == 'densenet201':
+            self.pretrained = dilated_densenet201(dilate_scale, pretrained=pretrained_base, **kwargs)
+        else:
+            raise RuntimeError('unknown backbone: {}'.format(backbone))
+        in_channels = self.pretrained.num_features
+
+        self.head = _DenseASPPHead(in_channels, nclass)
+
+        if aux:
+            self.auxlayer = _FCNHead(in_channels, nclass, **kwargs)
+
+        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])
+
+    def forward(self, x):
+        size = x.size()[2:]
+        #print('size', size) #torch.Size([512, 512])
+        features = self.pretrained.features(x)
+        #print('22',features.shape) #torch.Size([2, 1024,
64, 64]) + if self.dilate_scale > 8: + features = F.interpolate(features, scale_factor=2, mode='bilinear', align_corners=True) + outputs = [] + x = self.head(features) #torch.Size([2, 4, 64, 64]) + #print('x.shape',x.shape) + x = F.interpolate(x, size, mode='bilinear', align_corners=True)#直接64到512。。。。效果还这么好! + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(features) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _DenseASPPHead(nn.Module): + def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DenseASPPHead, self).__init__() + self.dense_aspp_block = _DenseASPPBlock(in_channels, 256, 64, norm_layer, norm_kwargs) + self.block = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(in_channels + 5 * 64, nclass, 1) + ) + + def forward(self, x): + x = self.dense_aspp_block(x) + return self.block(x) + + +class _DenseASPPConv(nn.Sequential): + def __init__(self, in_channels, inter_channels, out_channels, atrous_rate, + drop_rate=0.1, norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(_DenseASPPConv, self).__init__() + self.add_module('conv1', nn.Conv2d(in_channels, inter_channels, 1)), + self.add_module('bn1', norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs))), + self.add_module('relu1', nn.ReLU(True)), + self.add_module('conv2', nn.Conv2d(inter_channels, out_channels, 3, dilation=atrous_rate, padding=atrous_rate)), + self.add_module('bn2', norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs))), + self.add_module('relu2', nn.ReLU(True)), + self.drop_rate = drop_rate + + def forward(self, x): + features = super(_DenseASPPConv, self).forward(x) + if self.drop_rate > 0: + features = F.dropout(features, p=self.drop_rate, training=self.training) + return features + + +class _DenseASPPBlock(nn.Module): + def __init__(self, in_channels, inter_channels1, inter_channels2, 
+ norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(_DenseASPPBlock, self).__init__() + self.aspp_3 = _DenseASPPConv(in_channels, inter_channels1, inter_channels2, 3, 0.1, + norm_layer, norm_kwargs) + self.aspp_6 = _DenseASPPConv(in_channels + inter_channels2 * 1, inter_channels1, inter_channels2, 6, 0.1, + norm_layer, norm_kwargs) + self.aspp_12 = _DenseASPPConv(in_channels + inter_channels2 * 2, inter_channels1, inter_channels2, 12, 0.1, + norm_layer, norm_kwargs) + self.aspp_18 = _DenseASPPConv(in_channels + inter_channels2 * 3, inter_channels1, inter_channels2, 18, 0.1, + norm_layer, norm_kwargs) + self.aspp_24 = _DenseASPPConv(in_channels + inter_channels2 * 4, inter_channels1, inter_channels2, 24, 0.1, + norm_layer, norm_kwargs) + + def forward(self, x): + aspp3 = self.aspp_3(x) + x = torch.cat([aspp3, x], dim=1) + + aspp6 = self.aspp_6(x) + x = torch.cat([aspp6, x], dim=1) + + aspp12 = self.aspp_12(x) + x = torch.cat([aspp12, x], dim=1) + + aspp18 = self.aspp_18(x) + x = torch.cat([aspp18, x], dim=1) + + aspp24 = self.aspp_24(x) + x = torch.cat([aspp24, x], dim=1) + + return x + + +def get_denseaspp(dataset='citys', backbone='densenet121', pretrained=False, + root='~/.torch/models', pretrained_base=True, **kwargs): + r"""DenseASPP + + Parameters + ---------- + dataset : str, default citys + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
+ Examples + -------- + # >>> model = get_denseaspp(dataset='citys', backbone='densenet121', pretrained=False) + # >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DenseASPP(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('denseaspp_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_denseaspp_densenet121_citys(**kwargs): + return get_denseaspp('citys', 'densenet121', **kwargs) + + +def get_denseaspp_densenet161_citys(**kwargs): + return get_denseaspp('citys', 'densenet161', **kwargs) + + +def get_denseaspp_densenet169_citys(**kwargs): + return get_denseaspp('citys', 'densenet169', **kwargs) + + +def get_denseaspp_densenet201_citys(**kwargs): + return get_denseaspp('citys', 'densenet201', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 480, 480) + # model = get_denseaspp_densenet121_citys() + # outputs = model(img) + input = torch.rand(2, 3, 512, 512) + model = DenseASPP(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/dfanet.py b/segutils/core/models/dfanet.py new file mode 100644 index 0000000..15e3be0 --- /dev/null +++ b/segutils/core/models/dfanet.py @@ -0,0 +1,129 
@@
+""" Deep Feature Aggregation"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+# Relative imports for consistency with deeplabv3_plus.py in the same package
+# ('from ..nn import _ConvBNReLU'); absolute 'core.*' only resolves when
+# segutils/ itself is on sys.path.
+from .base_models import Enc, FCAttention, get_xception_a
+from ..nn import _ConvBNReLU
+
+__all__ = ['DFANet', 'get_dfanet', 'get_dfanet_citys']
+
+
+class DFANet(nn.Module):
+    def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=False, **kwargs):
+        super(DFANet, self).__init__()
+        self.pretrained = get_xception_a(pretrained_base, **kwargs)
+
+        self.enc2_2 = Enc(240, 48, 4, **kwargs)
+        self.enc3_2 = Enc(144, 96, 6, **kwargs)
+        self.enc4_2 = Enc(288, 192, 4, **kwargs)
+        self.fca_2 = FCAttention(192, **kwargs)
+
+        self.enc2_3 = Enc(240, 48, 4, **kwargs)
+        self.enc3_3 = Enc(144, 96, 6, **kwargs)
+        self.enc3_4 = Enc(288, 192, 4, **kwargs)
+        self.fca_3 = FCAttention(192, **kwargs)
+
+        self.enc2_1_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
+        self.enc2_2_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
+        self.enc2_3_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
+        self.conv_fusion = _ConvBNReLU(32, 32, 1, **kwargs)
+
+        self.fca_1_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
+        self.fca_2_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
+        self.fca_3_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
+        self.conv_out = nn.Conv2d(32, nclass, 1)
+
+        self.__setattr__('exclusive', ['enc2_2', 'enc3_2', 'enc4_2', 'fca_2', 'enc2_3', 'enc3_3', 'enc3_4', 'fca_3',
+                                       'enc2_1_reduce', 'enc2_2_reduce', 'enc2_3_reduce', 'conv_fusion', 'fca_1_reduce',
+                                       'fca_2_reduce', 'fca_3_reduce', 'conv_out'])
+
+    def forward(self, x):
+        # backbone
+        stage1_conv1 = self.pretrained.conv1(x)
+        stage1_enc2 = self.pretrained.enc2(stage1_conv1)
+        stage1_enc3 = self.pretrained.enc3(stage1_enc2)
+        stage1_enc4 = self.pretrained.enc4(stage1_enc3)
+        stage1_fca = self.pretrained.fca(stage1_enc4)
+        stage1_out = F.interpolate(stage1_fca, scale_factor=4, mode='bilinear', align_corners=True)
+
+        # stage2
+        stage2_enc2 = self.enc2_2(torch.cat([stage1_enc2, stage1_out], dim=1))
+        stage2_enc3 =
self.enc3_2(torch.cat([stage1_enc3, stage2_enc2], dim=1)) + stage2_enc4 = self.enc4_2(torch.cat([stage1_enc4, stage2_enc3], dim=1)) + stage2_fca = self.fca_2(stage2_enc4) + stage2_out = F.interpolate(stage2_fca, scale_factor=4, mode='bilinear', align_corners=True) + + # stage3 + stage3_enc2 = self.enc2_3(torch.cat([stage2_enc2, stage2_out], dim=1)) + stage3_enc3 = self.enc3_3(torch.cat([stage2_enc3, stage3_enc2], dim=1)) + stage3_enc4 = self.enc3_4(torch.cat([stage2_enc4, stage3_enc3], dim=1)) + stage3_fca = self.fca_3(stage3_enc4) + + stage1_enc2_decoder = self.enc2_1_reduce(stage1_enc2) + stage2_enc2_docoder = F.interpolate(self.enc2_2_reduce(stage2_enc2), scale_factor=2, + mode='bilinear', align_corners=True) + stage3_enc2_decoder = F.interpolate(self.enc2_3_reduce(stage3_enc2), scale_factor=4, + mode='bilinear', align_corners=True) + fusion = stage1_enc2_decoder + stage2_enc2_docoder + stage3_enc2_decoder + fusion1 = self.conv_fusion(fusion) + + stage1_fca_decoder = F.interpolate(self.fca_1_reduce(stage1_fca), scale_factor=4, + mode='bilinear', align_corners=True) + stage2_fca_decoder = F.interpolate(self.fca_2_reduce(stage2_fca), scale_factor=8, + mode='bilinear', align_corners=True) + stage3_fca_decoder = F.interpolate(self.fca_3_reduce(stage3_fca), scale_factor=16, + mode='bilinear', align_corners=True) + #print(fusion.shape,stage1_fca_decoder.shape,stage2_fca_decoder.shape,stage3_fca_decoder.shape) + fusion2 = fusion1 + stage1_fca_decoder + stage2_fca_decoder + stage3_fca_decoder + + outputs = list() + out = self.conv_out(fusion2) + out1 = F.interpolate(out, scale_factor=4, mode='bilinear', align_corners=True) + outputs.append(out1) + + #return tuple(outputs) + return outputs[0] + +def get_dfanet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from 
..data.dataloader import datasets + model = DFANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('dfanet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_dfanet_citys(**kwargs): + return get_dfanet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_dfanet_citys() + input = torch.rand(2, 3, 512, 512) + model = DFANet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/segutils/core/models/dinknet.py b/segutils/core/models/dinknet.py new file mode 100644 index 0000000..a36b90c --- /dev/null +++ b/segutils/core/models/dinknet.py @@ -0,0 +1,359 @@ +""" +Codes of LinkNet based on https://github.com/snakers4/spacenet-three +""" +import torch +import torch.nn as nn +from torch.autograd import Variable +from torchvision import models +import torch.nn.functional as F + +from functools import partial + +nonlinearity = partial(F.relu,inplace=True) + +class Dblock_more_dilate(nn.Module): + def __init__(self,channel): + super(Dblock_more_dilate, self).__init__() + self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1) + self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=2, padding=2) + self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=4, padding=4) + self.dilate4 = nn.Conv2d(channel, channel, kernel_size=3, dilation=8, padding=8) + self.dilate5 = 
nn.Conv2d(channel, channel, kernel_size=3, dilation=16, padding=16) + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + dilate1_out = nonlinearity(self.dilate1(x)) + dilate2_out = nonlinearity(self.dilate2(dilate1_out)) + dilate3_out = nonlinearity(self.dilate3(dilate2_out)) + dilate4_out = nonlinearity(self.dilate4(dilate3_out)) + dilate5_out = nonlinearity(self.dilate5(dilate4_out)) + out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out + dilate5_out + return out + +class Dblock(nn.Module): + def __init__(self,channel): + super(Dblock, self).__init__() + self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1) + self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=2, padding=2) + self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=4, padding=4) + self.dilate4 = nn.Conv2d(channel, channel, kernel_size=3, dilation=8, padding=8) + #self.dilate5 = nn.Conv2d(channel, channel, kernel_size=3, dilation=16, padding=16) + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): + if m.bias is not None: + m.bias.data.zero_() + + def forward(self, x): + dilate1_out = nonlinearity(self.dilate1(x)) + dilate2_out = nonlinearity(self.dilate2(dilate1_out)) + dilate3_out = nonlinearity(self.dilate3(dilate2_out)) + dilate4_out = nonlinearity(self.dilate4(dilate3_out)) + #dilate5_out = nonlinearity(self.dilate5(dilate4_out)) + out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out# + dilate5_out + return out + +class DecoderBlock(nn.Module): + def __init__(self, in_channels, n_filters): + super(DecoderBlock,self).__init__() + + self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1) + self.norm1 = nn.BatchNorm2d(in_channels // 4) + self.relu1 = nonlinearity + + self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3, stride=2, padding=1, 
class DinkNet34_less_pool(nn.Module):
    """D-LinkNet variant on a ResNet-34 encoder that stops after layer3
    (one less downsampling stage than DinkNet34) with a heavily dilated
    center block.

    BUG FIX: the original constructor called
    ``super(DinkNet34_more_dilate, self).__init__()`` — a class name that
    does not exist anywhere in this module — so instantiating the class
    raised ``NameError``.  ``super()`` must name this class itself.
    """

    def __init__(self, num_classes=1):
        super(DinkNet34_less_pool, self).__init__()

        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)

        # Encoder: reuse the ResNet-34 stem and the first three stages.
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3

        # Center: dilated block at the layer3 channel width (256).
        self.dblock = Dblock_more_dilate(256)

        # Decoder: mirror the three encoder stages.
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])

        # Final upsampling + classifier head (raw logits; sigmoid left to caller).
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1)

    def forward(self, x):
        # Encoder
        x = self.firstconv(x)
        x = self.firstbn(x)
        x = self.firstrelu(x)
        x = self.firstmaxpool(x)
        e1 = self.encoder1(x)
        e2 = self.encoder2(e1)
        e3 = self.encoder3(e2)

        # Center
        e3 = self.dblock(e3)

        # Decoder with skip connections from matching encoder stages
        d3 = self.decoder3(e3) + e2
        d2 = self.decoder2(d3) + e1
        d1 = self.decoder1(d2)

        # Final classification
        out = self.finaldeconv1(d1)
        out = self.finalrelu1(out)
        out = self.finalconv2(out)
        out = self.finalrelu2(out)
        out = self.finalconv3(out)

        # return F.sigmoid(out)
        return out
self).__init__() + + filters = [64, 128, 256, 512] + resnet = models.resnet34(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.dblock = Dblock(512) + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + e4 = self.encoder4(e3) + + # Center + e4 = self.dblock(e4) + + # Decoder + d4 = self.decoder4(e4) + e3 + d3 = self.decoder3(d4) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + + out = self.finaldeconv1(d1) + out = self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out + +class DinkNet50(nn.Module): + def __init__(self, num_classes=1): + super(DinkNet50, self).__init__() + + filters = [256, 512, 1024, 2048] + resnet = models.resnet50(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.dblock = Dblock_more_dilate(2048) + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = 
DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + e4 = self.encoder4(e3) + + # Center + e4 = self.dblock(e4) + + # Decoder + d4 = self.decoder4(e4) + e3 + d3 = self.decoder3(d4) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + out = self.finaldeconv1(d1) + out = self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out + +class DinkNet101(nn.Module): + def __init__(self, num_classes=1): + super(DinkNet101, self).__init__() + + filters = [256, 512, 1024, 2048] + resnet = models.resnet101(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.dblock = Dblock_more_dilate(2048) + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + 
class LinkNet34(nn.Module):
    """LinkNet built on a pretrained ResNet-34 encoder.

    Encoder stages are the four residual stages of ResNet-34; each decoder
    block upsamples and is summed with the matching encoder feature map
    (skip connection).  The head upsamples back towards input resolution
    and emits ``num_classes`` logit channels (no sigmoid applied).
    """

    def __init__(self, num_classes=1):
        super(LinkNet34, self).__init__()

        filters = [64, 128, 256, 512]
        resnet = models.resnet34(pretrained=True)

        # --- encoder: reuse the ResNet-34 stem and residual stages ---
        self.firstconv = resnet.conv1
        self.firstbn = resnet.bn1
        self.firstrelu = resnet.relu
        self.firstmaxpool = resnet.maxpool
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4

        # --- decoder: one block per encoder stage ---
        self.decoder4 = DecoderBlock(filters[3], filters[2])
        self.decoder3 = DecoderBlock(filters[2], filters[1])
        self.decoder2 = DecoderBlock(filters[1], filters[0])
        self.decoder1 = DecoderBlock(filters[0], filters[0])

        # --- classifier head ---
        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2)
        self.finalrelu1 = nonlinearity
        self.finalconv2 = nn.Conv2d(32, 32, 3)
        self.finalrelu2 = nonlinearity
        self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1)

    def forward(self, x):
        # ResNet stem
        stem = self.firstmaxpool(self.firstrelu(self.firstbn(self.firstconv(x))))

        # Encoder stages
        enc1 = self.encoder1(stem)
        enc2 = self.encoder2(enc1)
        enc3 = self.encoder3(enc2)
        enc4 = self.encoder4(enc3)

        # Decoder with additive skip connections
        dec4 = self.decoder4(enc4) + enc3
        dec3 = self.decoder3(dec4) + enc2
        dec2 = self.decoder2(dec3) + enc1
        dec1 = self.decoder1(dec2)

        # Head (logits; caller applies sigmoid/softmax if needed)
        out = self.finalrelu1(self.finaldeconv1(dec1))
        out = self.finalrelu2(self.finalconv2(out))
        # return F.sigmoid(out)
        return self.finalconv3(out)
diff --git a/segutils/core/models/dunet.py b/segutils/core/models/dunet.py new file mode 100644 index 0000000..affc476 --- /dev/null +++ b/segutils/core/models/dunet.py @@ -0,0 +1,172 @@ +"""Decoders Matter for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['DUNet', 'get_dunet', 'get_dunet_resnet50_pascal_voc', + 'get_dunet_resnet101_pascal_voc', 'get_dunet_resnet152_pascal_voc'] + + +# The model may be wrong because lots of details missing in paper. +class DUNet(SegBaseModel): + """Decoders Matter for Semantic Segmentation + + Reference: + Zhi Tian, Tong He, Chunhua Shen, and Youliang Yan. + "Decoders Matter for Semantic Segmentation: + Data-Dependent Decoding Enables Flexible Feature Aggregation." CVPR, 2019 + """ + + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(DUNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DUHead(2144, **kwargs) + self.dupsample = DUpsampling(256, nclass, scale_factor=8, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, 256, **kwargs) + self.aux_dupsample = DUpsampling(256, nclass, scale_factor=8, **kwargs) + + self.__setattr__('exclusive', + ['dupsample', 'head', 'auxlayer', 'aux_dupsample'] if aux else ['dupsample', 'head']) + + def forward(self, x): + c1, c2, c3, c4 = self.base_forward(x)#继承自SegBaseModel;返回的是resnet的layer1,2,3,4的输出 + outputs = [] + x = self.head(c2, c3, c4) + x = self.dupsample(x) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = self.aux_dupsample(auxout) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class FeatureFused(nn.Module): + """Module for fused features""" + + def __init__(self, inter_channels=48, norm_layer=nn.BatchNorm2d, **kwargs): + super(FeatureFused, self).__init__() + self.conv2 = nn.Sequential( + 
nn.Conv2d(512, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True) + ) + self.conv3 = nn.Sequential( + nn.Conv2d(1024, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True) + ) + + def forward(self, c2, c3, c4): + size = c4.size()[2:] + c2 = self.conv2(F.interpolate(c2, size, mode='bilinear', align_corners=True)) + c3 = self.conv3(F.interpolate(c3, size, mode='bilinear', align_corners=True)) + fused_feature = torch.cat([c4, c3, c2], dim=1) + return fused_feature + + +class _DUHead(nn.Module): + def __init__(self, in_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_DUHead, self).__init__() + self.fuse = FeatureFused(norm_layer=norm_layer, **kwargs) + self.block = nn.Sequential( + nn.Conv2d(in_channels, 256, 3, padding=1, bias=False), + norm_layer(256), + nn.ReLU(True), + nn.Conv2d(256, 256, 3, padding=1, bias=False), + norm_layer(256), + nn.ReLU(True) + ) + + def forward(self, c2, c3, c4): + fused_feature = self.fuse(c2, c3, c4) + out = self.block(fused_feature) + return out + + +class DUpsampling(nn.Module): + """DUsampling module""" + + def __init__(self, in_channels, out_channels, scale_factor=2, **kwargs): + super(DUpsampling, self).__init__() + self.scale_factor = scale_factor + self.conv_w = nn.Conv2d(in_channels, out_channels * scale_factor * scale_factor, 1, bias=False) + + def forward(self, x): + x = self.conv_w(x) + n, c, h, w = x.size() + + # N, C, H, W --> N, W, H, C + x = x.permute(0, 3, 2, 1).contiguous() + + # N, W, H, C --> N, W, H * scale, C // scale + x = x.view(n, w, h * self.scale_factor, c // self.scale_factor) + + # N, W, H * scale, C // scale --> N, H * scale, W, C // scale + x = x.permute(0, 2, 1, 3).contiguous() + + # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2) + x = x.view(n, h * self.scale_factor, w * self.scale_factor, c // (self.scale_factor * self.scale_factor)) + + # N, H * scale, W * scale, C // (scale ** 2) -- > N, C // (scale ** 2), H * scale, W 
* scale + x = x.permute(0, 3, 1, 2) + + return x + +def get_dunet(dataset='pascal_voc', backbone='resnet50', pretrained=False, + root='~/.torch/models', pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DUNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('dunet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_dunet_resnet50_pascal_voc(**kwargs): + return get_dunet('pascal_voc', 'resnet50', **kwargs) + + +def get_dunet_resnet101_pascal_voc(**kwargs): + return get_dunet('pascal_voc', 'resnet101', **kwargs) + + +def get_dunet_resnet152_pascal_voc(**kwargs): + return get_dunet('pascal_voc', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 256, 256) + # model = get_dunet_resnet50_pascal_voc() + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = DUNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + input = torch.randn(1, 3, 512, 512) + flop, params = profile(model, inputs=(input, )) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/encnet.py b/segutils/core/models/encnet.py new file mode 100644 index 0000000..585557b --- /dev/null +++ b/segutils/core/models/encnet.py @@ -0,0 +1,212 @@ +"""Context Encoding for Semantic Segmentation""" 
+import torch +import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel +from .fcn import _FCNHead + +__all__ = ['EncNet', 'EncModule', 'get_encnet', 'get_encnet_resnet50_ade', + 'get_encnet_resnet101_ade', 'get_encnet_resnet152_ade'] + + +class EncNet(SegBaseModel): + def __init__(self, nclass, backbone='resnet50', aux=True, se_loss=True, lateral=False, + pretrained_base=True, **kwargs): + super(EncNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _EncHead(2048, nclass, se_loss=se_loss, lateral=lateral, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + features = self.base_forward(x) + + x = list(self.head(*features)) + x[0] = F.interpolate(x[0], size, mode='bilinear', align_corners=True) + if self.aux: + auxout = self.auxlayer(features[2]) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + x.append(auxout) + return tuple(x) + + +class _EncHead(nn.Module): + def __init__(self, in_channels, nclass, se_loss=True, lateral=True, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_EncHead, self).__init__() + self.lateral = lateral + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + if lateral: + self.connect = nn.ModuleList([ + nn.Sequential( + nn.Conv2d(512, 512, 1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True)), + nn.Sequential( + nn.Conv2d(1024, 512, 1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True)), + ]) + self.fusion = nn.Sequential( + nn.Conv2d(3 * 512, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + 
self.encmodule = EncModule(512, nclass, ncodes=32, se_loss=se_loss, + norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs) + self.conv6 = nn.Sequential( + nn.Dropout(0.1, False), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, *inputs): + feat = self.conv5(inputs[-1]) + if self.lateral: + c2 = self.connect[0](inputs[1]) + c3 = self.connect[1](inputs[2]) + feat = self.fusion(torch.cat([feat, c2, c3], 1)) + outs = list(self.encmodule(feat)) + outs[0] = self.conv6(outs[0]) + return tuple(outs) + + +class EncModule(nn.Module): + def __init__(self, in_channels, nclass, ncodes=32, se_loss=True, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(EncModule, self).__init__() + self.se_loss = se_loss + self.encoding = nn.Sequential( + nn.Conv2d(in_channels, in_channels, 1, bias=False), + norm_layer(in_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + Encoding(D=in_channels, K=ncodes), + nn.BatchNorm1d(ncodes), + nn.ReLU(True), + Mean(dim=1) + ) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels), + nn.Sigmoid() + ) + if self.se_loss: + self.selayer = nn.Linear(in_channels, nclass) + + def forward(self, x): + en = self.encoding(x) + b, c, _, _ = x.size() + gamma = self.fc(en) + y = gamma.view(b, c, 1, 1) + outputs = [F.relu_(x + x * y)] + if self.se_loss: + outputs.append(self.selayer(en)) + return tuple(outputs) + + +class Encoding(nn.Module): + def __init__(self, D, K): + super(Encoding, self).__init__() + # init codewords and smoothing factor + self.D, self.K = D, K + self.codewords = nn.Parameter(torch.Tensor(K, D), requires_grad=True) + self.scale = nn.Parameter(torch.Tensor(K), requires_grad=True) + self.reset_params() + + def reset_params(self): + std1 = 1. 
class Mean(nn.Module):
    """Module wrapper around :meth:`torch.Tensor.mean` over a fixed dim.

    Used at the tail of the EncModule encoding pipeline to average the
    per-codeword residual encodings.
    """

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.dim = dim          # dimension(s) to reduce over
        self.keep_dim = keep_dim  # whether the reduced dim is kept as size 1

    def forward(self, input):
        return input.mean(dim=self.dim, keepdim=self.keep_dim)
model.load_state_dict(torch.load(get_model_file('encnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_encnet_resnet50_ade(**kwargs): + return get_encnet('ade20k', 'resnet50', **kwargs) + + +def get_encnet_resnet101_ade(**kwargs): + return get_encnet('ade20k', 'resnet101', **kwargs) + + +def get_encnet_resnet152_ade(**kwargs): + return get_encnet('ade20k', 'resnet152', **kwargs) + + +if __name__ == '__main__': + img = torch.randn(2, 3, 224, 224) + model = get_encnet_resnet50_ade() + outputs = model(img) diff --git a/segutils/core/models/enet.py b/segutils/core/models/enet.py new file mode 100644 index 0000000..853fc65 --- /dev/null +++ b/segutils/core/models/enet.py @@ -0,0 +1,243 @@ +"""Efficient Neural Network""" +import torch +import torch.nn as nn + +__all__ = ['ENet', 'get_enet', 'get_enet_citys'] + + +class ENet(nn.Module): + """Efficient Neural Network""" + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=None, **kwargs): + super(ENet, self).__init__() + self.initial = InitialBlock(13, **kwargs) + + self.bottleneck1_0 = Bottleneck(16, 16, 64, downsampling=True, **kwargs) + self.bottleneck1_1 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck1_2 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck1_3 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck1_4 = Bottleneck(64, 16, 64, **kwargs) + + self.bottleneck2_0 = Bottleneck(64, 32, 128, downsampling=True, **kwargs) + self.bottleneck2_1 = Bottleneck(128, 32, 128, **kwargs) + self.bottleneck2_2 = Bottleneck(128, 32, 128, dilation=2, **kwargs) + self.bottleneck2_3 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs) + self.bottleneck2_4 = Bottleneck(128, 32, 128, dilation=4, **kwargs) + self.bottleneck2_5 = Bottleneck(128, 32, 128, **kwargs) + self.bottleneck2_6 = Bottleneck(128, 32, 128, dilation=8, **kwargs) + self.bottleneck2_7 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs) + self.bottleneck2_8 = Bottleneck(128, 
32, 128, dilation=16, **kwargs) + + self.bottleneck3_1 = Bottleneck(128, 32, 128, **kwargs) + self.bottleneck3_2 = Bottleneck(128, 32, 128, dilation=2, **kwargs) + self.bottleneck3_3 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs) + self.bottleneck3_4 = Bottleneck(128, 32, 128, dilation=4, **kwargs) + self.bottleneck3_5 = Bottleneck(128, 32, 128, **kwargs) + self.bottleneck3_6 = Bottleneck(128, 32, 128, dilation=8, **kwargs) + self.bottleneck3_7 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs) + self.bottleneck3_8 = Bottleneck(128, 32, 128, dilation=16, **kwargs) + + self.bottleneck4_0 = UpsamplingBottleneck(128, 16, 64, **kwargs) + self.bottleneck4_1 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck4_2 = Bottleneck(64, 16, 64, **kwargs) + + self.bottleneck5_0 = UpsamplingBottleneck(64, 4, 16, **kwargs) + self.bottleneck5_1 = Bottleneck(16, 4, 16, **kwargs) + + self.fullconv = nn.ConvTranspose2d(16, nclass, 2, 2, bias=False) + + self.__setattr__('exclusive', ['bottleneck1_0', 'bottleneck1_1', 'bottleneck1_2', 'bottleneck1_3', + 'bottleneck1_4', 'bottleneck2_0', 'bottleneck2_1', 'bottleneck2_2', + 'bottleneck2_3', 'bottleneck2_4', 'bottleneck2_5', 'bottleneck2_6', + 'bottleneck2_7', 'bottleneck2_8', 'bottleneck3_1', 'bottleneck3_2', + 'bottleneck3_3', 'bottleneck3_4', 'bottleneck3_5', 'bottleneck3_6', + 'bottleneck3_7', 'bottleneck3_8', 'bottleneck4_0', 'bottleneck4_1', + 'bottleneck4_2', 'bottleneck5_0', 'bottleneck5_1', 'fullconv']) + + def forward(self, x): + # init + x = self.initial(x) + + # stage 1 + x, max_indices1 = self.bottleneck1_0(x) + x = self.bottleneck1_1(x) + x = self.bottleneck1_2(x) + x = self.bottleneck1_3(x) + x = self.bottleneck1_4(x) + + # stage 2 + x, max_indices2 = self.bottleneck2_0(x) + x = self.bottleneck2_1(x) + x = self.bottleneck2_2(x) + x = self.bottleneck2_3(x) + x = self.bottleneck2_4(x) + x = self.bottleneck2_5(x) + x = self.bottleneck2_6(x) + x = self.bottleneck2_7(x) + x = self.bottleneck2_8(x) + + # stage 3 + x = 
self.bottleneck3_1(x) + x = self.bottleneck3_2(x) + x = self.bottleneck3_3(x) + x = self.bottleneck3_4(x) + x = self.bottleneck3_6(x) + x = self.bottleneck3_7(x) + x = self.bottleneck3_8(x) + + # stage 4 + x = self.bottleneck4_0(x, max_indices2) + x = self.bottleneck4_1(x) + x = self.bottleneck4_2(x) + + # stage 5 + x = self.bottleneck5_0(x, max_indices1) + x = self.bottleneck5_1(x) + + # out + x = self.fullconv(x) + return tuple([x]) + + +class InitialBlock(nn.Module): + """ENet initial block""" + + def __init__(self, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(InitialBlock, self).__init__() + self.conv = nn.Conv2d(3, out_channels, 3, 2, 1, bias=False) + self.maxpool = nn.MaxPool2d(2, 2) + self.bn = norm_layer(out_channels + 3) + self.act = nn.PReLU() + + def forward(self, x): + x_conv = self.conv(x) + x_pool = self.maxpool(x) + x = torch.cat([x_conv, x_pool], dim=1) + x = self.bn(x) + x = self.act(x) + return x + + +class Bottleneck(nn.Module): + """Bottlenecks include regular, asymmetric, downsampling, dilated""" + + def __init__(self, in_channels, inter_channels, out_channels, dilation=1, asymmetric=False, + downsampling=False, norm_layer=nn.BatchNorm2d, **kwargs): + super(Bottleneck, self).__init__() + self.downsamping = downsampling + if downsampling: + self.maxpool = nn.MaxPool2d(2, 2, return_indices=True) + self.conv_down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + + self.conv1 = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + + if downsampling: + self.conv2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 2, stride=2, bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + else: + if asymmetric: + self.conv2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (5, 1), padding=(2, 0), bias=False), + nn.Conv2d(inter_channels, inter_channels, (1, 5), padding=(0, 2), bias=False), + 
norm_layer(inter_channels), + nn.PReLU() + ) + else: + self.conv2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, dilation=dilation, padding=dilation, bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + self.conv3 = nn.Sequential( + nn.Conv2d(inter_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.Dropout2d(0.1) + ) + self.act = nn.PReLU() + + def forward(self, x): + identity = x + if self.downsamping: + identity, max_indices = self.maxpool(identity) + identity = self.conv_down(identity) + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + out = self.act(out + identity) + + if self.downsamping: + return out, max_indices + else: + return out + + +class UpsamplingBottleneck(nn.Module): + """upsampling Block""" + + def __init__(self, in_channels, inter_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(UpsamplingBottleneck, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + self.upsampling = nn.MaxUnpool2d(2) + + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.PReLU(), + nn.ConvTranspose2d(inter_channels, inter_channels, 2, 2, bias=False), + norm_layer(inter_channels), + nn.PReLU(), + nn.Conv2d(inter_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.Dropout2d(0.1) + ) + self.act = nn.PReLU() + + def forward(self, x, max_indices): + out_up = self.conv(x) + out_up = self.upsampling(out_up, max_indices) + + out_ext = self.block(x) + out = self.act(out_up + out_ext) + return out + + +def get_enet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from core.data.dataloader import datasets + model = 
ENet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('enet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_enet_citys(**kwargs): + return get_enet('citys', '', **kwargs) + + +if __name__ == '__main__': + img = torch.randn(1, 3, 512, 512) + model = get_enet_citys() + output = model(img) diff --git a/segutils/core/models/espnet.py b/segutils/core/models/espnet.py new file mode 100644 index 0000000..82651f4 --- /dev/null +++ b/segutils/core/models/espnet.py @@ -0,0 +1,134 @@ +"ESPNetv2: A Light-weight, Power Efficient, and General Purpose for Semantic Segmentation" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.base_models import eespnet, EESP +from core.nn import _ConvBNPReLU, _BNPReLU + + +class ESPNetV2(nn.Module): + r"""ESPNetV2 + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Sachin Mehta, et al. "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network." + arXiv preprint arXiv:1811.11431 (2018). 
+ """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=False, **kwargs): + super(ESPNetV2, self).__init__() + self.pretrained = eespnet(pretrained=pretrained_base, **kwargs) + self.proj_L4_C = _ConvBNPReLU(256, 128, 1, **kwargs) + self.pspMod = nn.Sequential( + EESP(256, 128, stride=1, k=4, r_lim=7, **kwargs), + _PSPModule(128, 128, **kwargs)) + self.project_l3 = nn.Sequential( + nn.Dropout2d(0.1), + nn.Conv2d(128, nclass, 1, bias=False)) + self.act_l3 = _BNPReLU(nclass, **kwargs) + self.project_l2 = _ConvBNPReLU(64 + nclass, nclass, 1, **kwargs) + self.project_l1 = nn.Sequential( + nn.Dropout2d(0.1), + nn.Conv2d(32 + nclass, nclass, 1, bias=False)) + + self.aux = aux + + self.__setattr__('exclusive', ['proj_L4_C', 'pspMod', 'project_l3', 'act_l3', 'project_l2', 'project_l1']) + + def forward(self, x): + size = x.size()[2:] + out_l1, out_l2, out_l3, out_l4 = self.pretrained(x, seg=True) + out_l4_proj = self.proj_L4_C(out_l4) + up_l4_to_l3 = F.interpolate(out_l4_proj, scale_factor=2, mode='bilinear', align_corners=True) + merged_l3_upl4 = self.pspMod(torch.cat([out_l3, up_l4_to_l3], 1)) + proj_merge_l3_bef_act = self.project_l3(merged_l3_upl4) + proj_merge_l3 = self.act_l3(proj_merge_l3_bef_act) + out_up_l3 = F.interpolate(proj_merge_l3, scale_factor=2, mode='bilinear', align_corners=True) + merge_l2 = self.project_l2(torch.cat([out_l2, out_up_l3], 1)) + out_up_l2 = F.interpolate(merge_l2, scale_factor=2, mode='bilinear', align_corners=True) + merge_l1 = self.project_l1(torch.cat([out_l1, out_up_l2], 1)) + + outputs = list() + merge1_l1 = F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True) + outputs.append(merge1_l1) + if self.aux: + # different from paper + auxout = F.interpolate(proj_merge_l3_bef_act, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + + #return tuple(outputs) + return outputs[0] + +# different from PSPNet +class _PSPModule(nn.Module): + def __init__(self, in_channels, 
if __name__ == '__main__':
    # Smoke-test ESPNetV2 on a random batch, then count FLOPs/params.
    # model = get_espnet_citys()
    input = torch.rand(2, 3, 224, 224)
    model = ESPNetV2(4, pretrained_base=False)
    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)
    print(loss, loss.shape)

    # from torchsummary import summary
    #
    # summary(model, (3, 224, 224))  # print per-layer output shapes and parameter counts
    from thop import profile

    # BUG FIX: thop.profile has no `input_size=` keyword; sample inputs are
    # passed as a tuple via `inputs=` (same as dunet.py's __main__).
    flop, params = profile(model, inputs=(torch.rand(1, 3, 512, 512),))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
b/segutils/core/models/fcn.py new file mode 100644 index 0000000..bc54fb4 --- /dev/null +++ b/segutils/core/models/fcn.py @@ -0,0 +1,235 @@ +import os +import torch +import torch.nn as nn +import torch.nn.functional as F +import sys +sys.path.extend(['/home/thsw2/WJ/src/yolov5/segutils/','../..','..' ]) +from core.models.base_models.vgg import vgg16 + +__all__ = ['get_fcn32s', 'get_fcn16s', 'get_fcn8s', + 'get_fcn32s_vgg16_voc', 'get_fcn16s_vgg16_voc', 'get_fcn8s_vgg16_voc'] + + +class FCN32s(nn.Module): + """There are some difference from original fcn""" + + def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, + norm_layer=nn.BatchNorm2d, **kwargs): + super(FCN32s, self).__init__() + self.aux = aux + if backbone == 'vgg16': + self.pretrained = vgg16(pretrained=pretrained_base).features + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.head = _FCNHead(512, nclass, norm_layer) + if aux: + self.auxlayer = _FCNHead(512, nclass, norm_layer) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + pool5 = self.pretrained(x) + + outputs = [] + out = self.head(pool5) + out = F.interpolate(out, size, mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + + return tuple(outputs) + + +class FCN16s(nn.Module): + def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(FCN16s, self).__init__() + self.aux = aux + if backbone == 'vgg16': + self.pretrained = vgg16(pretrained=pretrained_base).features + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.pool4 = nn.Sequential(*self.pretrained[:24]) + self.pool5 = nn.Sequential(*self.pretrained[24:]) + self.head = _FCNHead(512, nclass, norm_layer) + self.score_pool4 = 
nn.Conv2d(512, nclass, 1) + if aux: + self.auxlayer = _FCNHead(512, nclass, norm_layer) + + self.__setattr__('exclusive', ['head', 'score_pool4', 'auxlayer'] if aux else ['head', 'score_pool4']) + + def forward(self, x): + pool4 = self.pool4(x) + pool5 = self.pool5(pool4) + + outputs = [] + score_fr = self.head(pool5) + + score_pool4 = self.score_pool4(pool4) + + upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', align_corners=True) + fuse_pool4 = upscore2 + score_pool4 + + out = F.interpolate(fuse_pool4, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(auxout) + + #return tuple(outputs) + return outputs[0] + +class FCN8s(nn.Module): + def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(FCN8s, self).__init__() + self.aux = aux + if backbone == 'vgg16': + self.pretrained = vgg16(pretrained=pretrained_base).features + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.pool3 = nn.Sequential(*self.pretrained[:17]) + self.pool4 = nn.Sequential(*self.pretrained[17:24]) + self.pool5 = nn.Sequential(*self.pretrained[24:]) + self.head = _FCNHead(512, nclass, norm_layer) + self.score_pool3 = nn.Conv2d(256, nclass, 1) + self.score_pool4 = nn.Conv2d(512, nclass, 1) + if aux: + self.auxlayer = _FCNHead(512, nclass, norm_layer) + + self.__setattr__('exclusive', + ['head', 'score_pool3', 'score_pool4', 'auxlayer'] if aux else ['head', 'score_pool3', + 'score_pool4']) + + def forward(self, x): + pool3 = self.pool3(x) + pool4 = self.pool4(pool3) + pool5 = self.pool5(pool4) + + outputs = [] + score_fr = self.head(pool5) + + score_pool4 = self.score_pool4(pool4) + score_pool3 = self.score_pool3(pool3) + + upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', 
align_corners=True) + fuse_pool4 = upscore2 + score_pool4 + + upscore_pool4 = F.interpolate(fuse_pool4, score_pool3.size()[2:], mode='bilinear', align_corners=True) + fuse_pool3 = upscore_pool4 + score_pool3 + + out = F.interpolate(fuse_pool3, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(auxout) + + return tuple(outputs) + + +class _FCNHead(nn.Module): + def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_FCNHead, self).__init__() + inter_channels = in_channels // 4 + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(inplace=True), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ) + + def forward(self, x): + return self.block(x) + + +def get_fcn32s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN32s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn32s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn16s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN16s(datasets[dataset].NUM_CLASS, 
backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn16s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn8s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN8s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn8s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn32s_vgg16_voc(**kwargs): + return get_fcn32s('pascal_voc', 'vgg16', **kwargs) + + +def get_fcn16s_vgg16_voc(**kwargs): + return get_fcn16s('pascal_voc', 'vgg16', **kwargs) + + +def get_fcn8s_vgg16_voc(**kwargs): + return get_fcn8s('pascal_voc', 'vgg16', **kwargs) + + +if __name__ == "__main__": + model = FCN16s(21) + print(model) + input = torch.rand(2, 3, 224,224) + #target = torch.zeros(4, 512, 512).cuda() + #model.eval() + #print(model) + loss = model(input) + print(loss) + print(loss.shape) + import torch + from thop import profile + from torchsummary import summary + flop,params=profile(model,input_size=(1,3,512,512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop/1e9, params/1e6)) diff --git a/segutils/core/models/fcnv2.py b/segutils/core/models/fcnv2.py new file mode 100644 index 0000000..6bc4954 --- /dev/null +++ b/segutils/core/models/fcnv2.py @@ -0,0 +1,82 @@ +"""Fully Convolutional Network with Stride of 8""" +from __future__ import division + +import torch 
+import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel + +__all__ = ['FCN', 'get_fcn', 'get_fcn_resnet50_voc', + 'get_fcn_resnet101_voc', 'get_fcn_resnet152_voc'] + + +class FCN(SegBaseModel): + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(FCN, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _FCNHead(2048, nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _FCNHead(nn.Module): + def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_FCNHead, self).__init__() + inter_channels = in_channels // 4 + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ) + + def forward(self, x): + return self.block(x) + + +def get_fcn(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = 
torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn_resnet50_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet50', **kwargs) + + +def get_fcn_resnet101_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet101', **kwargs) + + +def get_fcn_resnet152_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet152', **kwargs) diff --git a/segutils/core/models/hrnet.py b/segutils/core/models/hrnet.py new file mode 100644 index 0000000..8ad08e3 --- /dev/null +++ b/segutils/core/models/hrnet.py @@ -0,0 +1,29 @@ +"""High-Resolution Representations for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +class HRNet(nn.Module): + """HRNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + Reference: + Ke Sun. "High-Resolution Representations for Labeling Pixels and Regions." + arXiv preprint arXiv:1904.04514 (2019). 
+ """ + def __init__(self, nclass, backbone='', aux=False, pretrained_base=False, **kwargs): + super(HRNet, self).__init__() + + def forward(self, x): + pass \ No newline at end of file diff --git a/segutils/core/models/icnet.py b/segutils/core/models/icnet.py new file mode 100644 index 0000000..fed14a4 --- /dev/null +++ b/segutils/core/models/icnet.py @@ -0,0 +1,180 @@ +"""Image Cascade Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel + +__all__ = ['ICNet', 'get_icnet', 'get_icnet_resnet50_citys', + 'get_icnet_resnet101_citys', 'get_icnet_resnet152_citys'] + + +class ICNet(SegBaseModel): + """Image Cascade Network""" + + def __init__(self, nclass, backbone='resnet50', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(ICNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.conv_sub1 = nn.Sequential( + _ConvBNReLU(3, 32, 3, 2, **kwargs), + _ConvBNReLU(32, 32, 3, 2, **kwargs), + _ConvBNReLU(32, 64, 3, 2, **kwargs) + ) + + self.ppm = PyramidPoolingModule() + + self.head = _ICHead(nclass, **kwargs) + + self.__setattr__('exclusive', ['conv_sub1', 'head']) + + def forward(self, x): + # sub 1 + x_sub1 = self.conv_sub1(x) + + # sub 2 + x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=True) + _, x_sub2, _, _ = self.base_forward(x_sub2) + + # sub 4 + x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear', align_corners=True) + _, _, _, x_sub4 = self.base_forward(x_sub4) + # add PyramidPoolingModule + x_sub4 = self.ppm(x_sub4) + outputs = self.head(x_sub1, x_sub2, x_sub4) + + return tuple(outputs) + +class PyramidPoolingModule(nn.Module): + def __init__(self, pyramids=[1,2,3,6]): + super(PyramidPoolingModule, self).__init__() + self.pyramids = pyramids + + def forward(self, input): + feat = input + height, width = input.shape[2:] + for bin_size in self.pyramids: + x = F.adaptive_avg_pool2d(input, 
output_size=bin_size) + x = F.interpolate(x, size=(height, width), mode='bilinear', align_corners=True) + feat = feat + x + return feat + +class _ICHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ICHead, self).__init__() + #self.cff_12 = CascadeFeatureFusion(512, 64, 128, nclass, norm_layer, **kwargs) + self.cff_12 = CascadeFeatureFusion(128, 64, 128, nclass, norm_layer, **kwargs) + self.cff_24 = CascadeFeatureFusion(2048, 512, 128, nclass, norm_layer, **kwargs) + + self.conv_cls = nn.Conv2d(128, nclass, 1, bias=False) + + def forward(self, x_sub1, x_sub2, x_sub4): + outputs = list() + x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2) + outputs.append(x_24_cls) + #x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1) + x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1) + outputs.append(x_12_cls) + + up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear', align_corners=True) + up_x2 = self.conv_cls(up_x2) + outputs.append(up_x2) + up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear', align_corners=True) + outputs.append(up_x8) + # 1 -> 1/4 -> 1/8 -> 1/16 + outputs.reverse() + + return outputs + + +class _ConvBNReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, + groups=1, norm_layer=nn.BatchNorm2d, bias=False, **kwargs): + super(_ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) + self.bn = norm_layer(out_channels) + self.relu = nn.ReLU(True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class CascadeFeatureFusion(nn.Module): + """CFF Unit""" + + def __init__(self, low_channels, high_channels, out_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(CascadeFeatureFusion, self).__init__() + self.conv_low = nn.Sequential( + nn.Conv2d(low_channels, out_channels, 3, padding=2, dilation=2, bias=False), + 
norm_layer(out_channels) + ) + self.conv_high = nn.Sequential( + nn.Conv2d(high_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + self.conv_low_cls = nn.Conv2d(out_channels, nclass, 1, bias=False) + + def forward(self, x_low, x_high): + x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True) + x_low = self.conv_low(x_low) + x_high = self.conv_high(x_high) + x = x_low + x_high + x = F.relu(x, inplace=True) + x_low_cls = self.conv_low_cls(x_low) + + return x, x_low_cls + + +def get_icnet(dataset='citys', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = ICNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('icnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_icnet_resnet50_citys(**kwargs): + return get_icnet('citys', 'resnet50', **kwargs) + + +def get_icnet_resnet101_citys(**kwargs): + return get_icnet('citys', 'resnet101', **kwargs) + + +def get_icnet_resnet152_citys(**kwargs): + return get_icnet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(1, 3, 256, 256) + # model = get_icnet_resnet50_citys() + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = ICNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + #print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import 
summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/lednet.py b/segutils/core/models/lednet.py new file mode 100644 index 0000000..03c05bd --- /dev/null +++ b/segutils/core/models/lednet.py @@ -0,0 +1,211 @@ +"""LEDNet: A Lightweight Encoder-Decoder Network for Real-time Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNReLU + +__all__ = ['LEDNet', 'get_lednet', 'get_lednet_citys'] + +class LEDNet(nn.Module): + r"""LEDNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Yu Wang, et al. "LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation." + arXiv preprint arXiv:1905.02423 (2019). 
+ """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(LEDNet, self).__init__() + self.encoder = nn.Sequential( + Downsampling(3, 32), + SSnbt(32, **kwargs), SSnbt(32, **kwargs), SSnbt(32, **kwargs), + Downsampling(32, 64), + SSnbt(64, **kwargs), SSnbt(64, **kwargs), + Downsampling(64, 128), + SSnbt(128, **kwargs), + SSnbt(128, 2, **kwargs), + SSnbt(128, 5, **kwargs), + SSnbt(128, 9, **kwargs), + SSnbt(128, 2, **kwargs), + SSnbt(128, 5, **kwargs), + SSnbt(128, 9, **kwargs), + SSnbt(128, 17, **kwargs), + ) + self.decoder = APNModule(128, nclass) + + self.__setattr__('exclusive', ['encoder', 'decoder']) + + def forward(self, x): + size = x.size()[2:] + x = self.encoder(x) + x = self.decoder(x) + outputs = list() + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + #return tuple(outputs) + return outputs[0] + +class Downsampling(nn.Module): + def __init__(self, in_channels, out_channels, **kwargs): + super(Downsampling, self).__init__() + self.conv1 = nn.Conv2d(in_channels, out_channels // 2, 3, 2, 2, bias=False) + self.conv2 = nn.Conv2d(in_channels, out_channels // 2, 3, 2, 2, bias=False) + self.pool = nn.MaxPool2d(kernel_size=2, stride=1) + + def forward(self, x): + x1 = self.conv1(x) + x1 = self.pool(x1) + + x2 = self.conv2(x) + x2 = self.pool(x2) + + return torch.cat([x1, x2], dim=1) + + +class SSnbt(nn.Module): + def __init__(self, in_channels, dilation=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(SSnbt, self).__init__() + inter_channels = in_channels // 2 + self.branch1 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(1, 0), bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, 1), bias=False), + norm_layer(inter_channels), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1), + bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, 
inter_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation), + bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.branch2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, 1), bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(1, 0), bias=False), + norm_layer(inter_channels), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation), + bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1), + bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.relu = nn.ReLU(True) + + @staticmethod + def channel_shuffle(x, groups): + n, c, h, w = x.size() + + channels_per_group = c // groups + x = x.view(n, groups, channels_per_group, h, w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(n, -1, h, w) + + return x + + def forward(self, x): + # channels split + x1, x2 = x.split(x.size(1) // 2, 1) + + x1 = self.branch1(x1) + x2 = self.branch2(x2) + + out = torch.cat([x1, x2], dim=1) + out = self.relu(out + x) + out = self.channel_shuffle(out, groups=2) + + return out + + +class APNModule(nn.Module): + def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(APNModule, self).__init__() + self.conv1 = _ConvBNReLU(in_channels, in_channels, 3, 2, 1, norm_layer=norm_layer) + self.conv2 = _ConvBNReLU(in_channels, in_channels, 5, 2, 2, norm_layer=norm_layer) + self.conv3 = _ConvBNReLU(in_channels, in_channels, 7, 2, 3, norm_layer=norm_layer) + self.level1 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level2 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level3 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level4 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level5 = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + 
_ConvBNReLU(in_channels, nclass, 1)) + + def forward(self, x): + w, h = x.size()[2:] + branch3 = self.conv1(x) + branch2 = self.conv2(branch3) + branch1 = self.conv3(branch2) + + out = self.level1(branch1) + out = F.interpolate(out, ((w + 3) // 4, (h + 3) // 4), mode='bilinear', align_corners=True) + out = self.level2(branch2) + out + out = F.interpolate(out, ((w + 1) // 2, (h + 1) // 2), mode='bilinear', align_corners=True) + out = self.level3(branch3) + out + out = F.interpolate(out, (w, h), mode='bilinear', align_corners=True) + out = self.level4(x) * out + out = self.level5(x) + out + return out + + +def get_lednet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = LEDNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('lednet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_lednet_citys(**kwargs): + return get_lednet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_lednet_citys() + input = torch.rand(2, 3, 224, 224) + model =LEDNet(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/model_store.py 
b/segutils/core/models/model_store.py new file mode 100644 index 0000000..9e64675 --- /dev/null +++ b/segutils/core/models/model_store.py @@ -0,0 +1,68 @@ +"""Model store which provides pretrained models.""" +from __future__ import print_function + +import os +import zipfile + +from ..utils.download import download, check_sha1 + +__all__ = ['get_model_file', 'get_resnet_file'] + +_model_sha1 = {name: checksum for checksum, name in [ + ('25c4b50959ef024fcc050213a06b614899f94b3d', 'resnet50'), + ('2a57e44de9c853fa015b172309a1ee7e2d0e4e2a', 'resnet101'), + ('0d43d698c66aceaa2bc0309f55efdd7ff4b143af', 'resnet152'), +]} + +encoding_repo_url = 'https://hangzh.s3.amazonaws.com/' +_url_format = '{repo_url}encoding/models/{file_name}.zip' + + +def short_hash(name): + if name not in _model_sha1: + raise ValueError('Pretrained model for {name} is not available.'.format(name=name)) + return _model_sha1[name][:8] + + +def get_resnet_file(name, root='~/.torch/models'): + file_name = '{name}-{short_hash}'.format(name=name, short_hash=short_hash(name)) + root = os.path.expanduser(root) + + file_path = os.path.join(root, file_name + '.pth') + sha1_hash = _model_sha1[name] + if os.path.exists(file_path): + if check_sha1(file_path, sha1_hash): + return file_path + else: + print('Mismatch in the content of model file {} detected.' + + ' Downloading again.'.format(file_path)) + else: + print('Model file {} is not found. 
Downloading.'.format(file_path)) + + if not os.path.exists(root): + os.makedirs(root) + + zip_file_path = os.path.join(root, file_name + '.zip') + repo_url = os.environ.get('ENCODING_REPO', encoding_repo_url) + if repo_url[-1] != '/': + repo_url = repo_url + '/' + download(_url_format.format(repo_url=repo_url, file_name=file_name), + path=zip_file_path, + overwrite=True) + with zipfile.ZipFile(zip_file_path) as zf: + zf.extractall(root) + os.remove(zip_file_path) + + if check_sha1(file_path, sha1_hash): + return file_path + else: + raise ValueError('Downloaded file has different hash. Please try again.') + + +def get_model_file(name, root='~/.torch/models'): + root = os.path.expanduser(root) + file_path = os.path.join(root, name + '.pth') + if os.path.exists(file_path): + return file_path + else: + raise ValueError('Model file is not found. Downloading or trainning.') diff --git a/segutils/core/models/model_zoo.py b/segutils/core/models/model_zoo.py new file mode 100644 index 0000000..7f8cd11 --- /dev/null +++ b/segutils/core/models/model_zoo.py @@ -0,0 +1,122 @@ +"""Model store which handles pretrained models """ +from .fcn import * +from .fcnv2 import * +from .pspnet import * +from .deeplabv3 import * +from .deeplabv3_plus import * +from .danet import * +from .denseaspp import * +from .bisenet import * +from .encnet import * +from .dunet import * +from .icnet import * +from .enet import * +from .ocnet import * +from .ccnet import * +from .psanet import * +from .cgnet import * +from .espnet import * +from .lednet import * +from .dfanet import * + +__all__ = ['get_model', 'get_model_list', 'get_segmentation_model'] + +_models = { + 'fcn32s_vgg16_voc': get_fcn32s_vgg16_voc, + 'fcn16s_vgg16_voc': get_fcn16s_vgg16_voc, + 'fcn8s_vgg16_voc': get_fcn8s_vgg16_voc, + 'fcn_resnet50_voc': get_fcn_resnet50_voc, + 'fcn_resnet101_voc': get_fcn_resnet101_voc, + 'fcn_resnet152_voc': get_fcn_resnet152_voc, + 'psp_resnet50_voc': get_psp_resnet50_voc, + 'psp_resnet50_ade': 
get_psp_resnet50_ade, + 'psp_resnet101_voc': get_psp_resnet101_voc, + 'psp_resnet101_ade': get_psp_resnet101_ade, + 'psp_resnet101_citys': get_psp_resnet101_citys, + 'psp_resnet101_coco': get_psp_resnet101_coco, + 'deeplabv3_resnet50_voc': get_deeplabv3_resnet50_voc, + 'deeplabv3_resnet101_voc': get_deeplabv3_resnet101_voc, + 'deeplabv3_resnet152_voc': get_deeplabv3_resnet152_voc, + 'deeplabv3_resnet50_ade': get_deeplabv3_resnet50_ade, + 'deeplabv3_resnet101_ade': get_deeplabv3_resnet101_ade, + 'deeplabv3_resnet152_ade': get_deeplabv3_resnet152_ade, + 'deeplabv3_plus_xception_voc': get_deeplabv3_plus_xception_voc, + 'danet_resnet50_ciyts': get_danet_resnet50_citys, + 'danet_resnet101_citys': get_danet_resnet101_citys, + 'danet_resnet152_citys': get_danet_resnet152_citys, + 'denseaspp_densenet121_citys': get_denseaspp_densenet121_citys, + 'denseaspp_densenet161_citys': get_denseaspp_densenet161_citys, + 'denseaspp_densenet169_citys': get_denseaspp_densenet169_citys, + 'denseaspp_densenet201_citys': get_denseaspp_densenet201_citys, + 'bisenet_resnet18_citys': get_bisenet_resnet18_citys, + 'encnet_resnet50_ade': get_encnet_resnet50_ade, + 'encnet_resnet101_ade': get_encnet_resnet101_ade, + 'encnet_resnet152_ade': get_encnet_resnet152_ade, + 'dunet_resnet50_pascal_voc': get_dunet_resnet50_pascal_voc, + 'dunet_resnet101_pascal_voc': get_dunet_resnet101_pascal_voc, + 'dunet_resnet152_pascal_voc': get_dunet_resnet152_pascal_voc, + 'icnet_resnet50_citys': get_icnet_resnet50_citys, + 'icnet_resnet101_citys': get_icnet_resnet101_citys, + 'icnet_resnet152_citys': get_icnet_resnet152_citys, + 'enet_citys': get_enet_citys, + 'base_ocnet_resnet101_citys': get_base_ocnet_resnet101_citys, + 'pyramid_ocnet_resnet101_citys': get_pyramid_ocnet_resnet101_citys, + 'asp_ocnet_resnet101_citys': get_asp_ocnet_resnet101_citys, + 'ccnet_resnet50_citys': get_ccnet_resnet50_citys, + 'ccnet_resnet101_citys': get_ccnet_resnet101_citys, + 'ccnet_resnet152_citys': get_ccnet_resnet152_citys, + 
'ccnet_resnet50_ade': get_ccnet_resnet50_ade, + 'ccnet_resnet101_ade': get_ccnet_resnet101_ade, + 'ccnet_resnet152_ade': get_ccnet_resnet152_ade, + 'psanet_resnet50_voc': get_psanet_resnet50_voc, + 'psanet_resnet101_voc': get_psanet_resnet101_voc, + 'psanet_resnet152_voc': get_psanet_resnet152_voc, + 'psanet_resnet50_citys': get_psanet_resnet50_citys, + 'psanet_resnet101_citys': get_psanet_resnet101_citys, + 'psanet_resnet152_citys': get_psanet_resnet152_citys, + 'cgnet_citys': get_cgnet_citys, + 'espnet_citys': get_espnet_citys, + 'lednet_citys': get_lednet_citys, + 'dfanet_citys': get_dfanet_citys, +} + + +def get_model(name, **kwargs): + name = name.lower() + if name not in _models: + err_str = '"%s" is not among the following model list:\n\t' % (name) + err_str += '%s' % ('\n\t'.join(sorted(_models.keys()))) + raise ValueError(err_str) + net = _models[name](**kwargs) + return net + + +def get_model_list(): + return _models.keys() + + +def get_segmentation_model(model, **kwargs): + models = { + 'fcn32s': get_fcn32s, + 'fcn16s': get_fcn16s, + 'fcn8s': get_fcn8s, + 'fcn': get_fcn, + 'psp': get_psp, + 'deeplabv3': get_deeplabv3, + 'deeplabv3_plus': get_deeplabv3_plus, + 'danet': get_danet, + 'denseaspp': get_denseaspp, + 'bisenet': get_bisenet, + 'encnet': get_encnet, + 'dunet': get_dunet, + 'icnet': get_icnet, + 'enet': get_enet, + 'ocnet': get_ocnet, + 'ccnet': get_ccnet, + 'psanet': get_psanet, + 'cgnet': get_cgnet, + 'espnet': get_espnet, + 'lednet': get_lednet, + 'dfanet': get_dfanet, + } + return models[model](**kwargs) diff --git a/segutils/core/models/ocnet.py b/segutils/core/models/ocnet.py new file mode 100644 index 0000000..1e1e85c --- /dev/null +++ b/segutils/core/models/ocnet.py @@ -0,0 +1,361 @@ +""" Object Context Network for Scene Parsing""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['OCNet', 'get_ocnet', 
'get_base_ocnet_resnet101_citys',
           'get_pyramid_ocnet_resnet101_citys', 'get_asp_ocnet_resnet101_citys']


class OCNet(SegBaseModel):
    r"""OCNet

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
        for Synchronized Cross-GPU BachNormalization).
    aux : bool
        Auxiliary loss.
    Reference:
        Yuhui Yuan, Jingdong Wang. "OCNet: Object Context Network for Scene Parsing."
        arXiv preprint arXiv:1809.00916 (2018).
    """

    def __init__(self, nclass, backbone='resnet101', oc_arch='base', aux=False, pretrained_base=True, **kwargs):
        super(OCNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
        # Object-context head over the deepest backbone stage (c4).
        self.head = _OCHead(nclass, oc_arch, **kwargs)
        if self.aux:
            # Auxiliary classifier over c3 (1024 channels in this backbone family).
            self.auxlayer = _FCNHead(1024, nclass, **kwargs)

        # 'exclusive' marks modules trained from scratch (vs. pretrained backbone).
        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])

    def forward(self, x):
        size = x.size()[2:]
        _, _, c3, c4 = self.base_forward(x)
        outputs = []
        x = self.head(c4)
        # Upsample logits back to the input resolution.
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)

        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)
        # NOTE(review): only the main output is returned; the aux output,
        # even when computed, is discarded (the tuple return is commented out).
        #return tuple(outputs)
        return outputs[0]

class _OCHead(nn.Module):
    # Selects one of three object-context variants ('base', 'pyramid', 'asp')
    # and maps 2048-channel backbone features to nclass logits.
    def __init__(self, nclass, oc_arch, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_OCHead, self).__init__()
        if oc_arch == 'base':
            self.context = nn.Sequential(
                nn.Conv2d(2048, 512, 3, 1, padding=1, bias=False),
                norm_layer(512),
                nn.ReLU(True),
                BaseOCModule(512, 512, 256, 256, scales=([1]), norm_layer=norm_layer, **kwargs))
        elif oc_arch == 'pyramid':
            self.context = nn.Sequential(
                nn.Conv2d(2048, 512, 3, 1, padding=1, bias=False),
                norm_layer(512),
                nn.ReLU(True),
                PyramidOCModule(512, 512, 256, 512, scales=([1, 2, 3, 6]), norm_layer=norm_layer, **kwargs))
        elif oc_arch == 'asp':
            self.context = ASPOCModule(2048, 512, 256, 512, norm_layer=norm_layer, **kwargs)
        else:
            raise ValueError("Unknown OC architecture!")

        # 1x1 classifier on top of the 512-channel context features.
        self.out = nn.Conv2d(512, nclass, 1)

    def forward(self, x):
        x = self.context(x)
        return self.out(x)


class BaseAttentionBlock(nn.Module):
    """The basic implementation for self-attention block/non-local block."""

    def __init__(self, in_channels, out_channels, key_channels, value_channels,
                 scale=1, norm_layer=nn.BatchNorm2d, **kwargs):
        super(BaseAttentionBlock, self).__init__()
        self.scale = scale
        self.key_channels = key_channels
        self.value_channels = value_channels
        if scale > 1:
            # Downsample before attention to cut the quadratic cost.
            self.pool = nn.MaxPool2d(scale)

        self.f_value = nn.Conv2d(in_channels, value_channels, 1)
        self.f_key = nn.Sequential(
            nn.Conv2d(in_channels, key_channels, 1),
            norm_layer(key_channels),
            nn.ReLU(True)
        )
        # Query shares weights with key (same module object).
        self.f_query = self.f_key
        self.W = nn.Conv2d(value_channels, out_channels, 1)
        # Zero-init the output projection so the block starts as identity-like
        # (contributes nothing until trained).
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)

    def forward(self, x):
        # NOTE(review): x.size() is (N, C, H, W), so 'w' is actually the
        # height and 'h' the width; usage below is self-consistent, only the
        # names are swapped.
        batch_size, c, w, h = x.size()
        if self.scale > 1:
            x = self.pool(x)

        # Flatten spatial dims: value/query -> (N, HW, C'), key -> (N, C', HW).
        value = self.f_value(x).view(batch_size, self.value_channels, -1).permute(0, 2, 1)
        query = self.f_query(x).view(batch_size, self.key_channels, -1).permute(0, 2, 1)
        key = self.f_key(x).view(batch_size, self.key_channels, -1)

        # Scaled dot-product attention over all spatial positions.
        sim_map = torch.bmm(query, key) * (self.key_channels ** -.5)
        sim_map = F.softmax(sim_map, dim=-1)

        context = torch.bmm(sim_map, value).permute(0, 2, 1).contiguous()
        context = context.view(batch_size, self.value_channels, *x.size()[2:])
        context = self.W(context)
        if self.scale > 1:
            # Restore the pre-pooling spatial size.
            context = F.interpolate(context, size=(w, h), mode='bilinear', align_corners=True)

        return context


class BaseOCModule(nn.Module):
    """Base-OC"""

    def __init__(self, in_channels, out_channels,
key_channels, value_channels, + scales=([1]), norm_layer=nn.BatchNorm2d, concat=True, **kwargs): + super(BaseOCModule, self).__init__() + self.stages = nn.ModuleList([ + BaseAttentionBlock(in_channels, out_channels, key_channels, value_channels, scale, norm_layer, **kwargs) + for scale in scales]) + in_channels = in_channels * 2 if concat else in_channels + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.05) + ) + self.concat = concat + + def forward(self, x): + priors = [stage(x) for stage in self.stages] + context = priors[0] + for i in range(1, len(priors)): + context += priors[i] + if self.concat: + context = torch.cat([context, x], 1) + out = self.project(context) + return out + + +class PyramidAttentionBlock(nn.Module): + """The basic implementation for pyramid self-attention block/non-local block""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scale=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(PyramidAttentionBlock, self).__init__() + self.scale = scale + self.value_channels = value_channels + self.key_channels = key_channels + + self.f_value = nn.Conv2d(in_channels, value_channels, 1) + self.f_key = nn.Sequential( + nn.Conv2d(in_channels, key_channels, 1), + norm_layer(key_channels), + nn.ReLU(True) + ) + self.f_query = self.f_key + self.W = nn.Conv2d(value_channels, out_channels, 1) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + def forward(self, x): + batch_size, c, w, h = x.size() + + local_x = list() + local_y = list() + step_w, step_h = w // self.scale, h // self.scale + for i in range(self.scale): + for j in range(self.scale): + start_x, start_y = step_w * i, step_h * j + end_x, end_y = min(start_x + step_w, w), min(start_y + step_h, h) + if i == (self.scale - 1): + end_x = w + if j == (self.scale - 1): + end_y = h + local_x += [start_x, end_x] + local_y += [start_y, end_y] + + value = 
self.f_value(x) + query = self.f_query(x) + key = self.f_key(x) + + local_list = list() + local_block_cnt = (self.scale ** 2) * 2 + for i in range(0, local_block_cnt, 2): + value_local = value[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + query_local = query[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + key_local = key[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + + w_local, h_local = value_local.size(2), value_local.size(3) + value_local = value_local.contiguous().view(batch_size, self.value_channels, -1).permute(0, 2, 1) + query_local = query_local.contiguous().view(batch_size, self.key_channels, -1).permute(0, 2, 1) + key_local = key_local.contiguous().view(batch_size, self.key_channels, -1) + + sim_map = torch.bmm(query_local, key_local) * (self.key_channels ** -.5) + sim_map = F.softmax(sim_map, dim=-1) + + context_local = torch.bmm(sim_map, value_local).permute(0, 2, 1).contiguous() + context_local = context_local.view(batch_size, self.value_channels, w_local, h_local) + local_list.append(context_local) + + context_list = list() + for i in range(0, self.scale): + row_tmp = list() + for j in range(self.scale): + row_tmp.append(local_list[j + i * self.scale]) + context_list.append(torch.cat(row_tmp, 3)) + + context = torch.cat(context_list, 2) + context = self.W(context) + + return context + + +class PyramidOCModule(nn.Module): + """Pyramid-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scales=([1]), norm_layer=nn.BatchNorm2d, **kwargs): + super(PyramidOCModule, self).__init__() + self.stages = nn.ModuleList([ + PyramidAttentionBlock(in_channels, out_channels, key_channels, value_channels, scale, norm_layer, **kwargs) + for scale in scales]) + self.up_dr = nn.Sequential( + nn.Conv2d(in_channels, in_channels * len(scales), 1), + norm_layer(in_channels * len(scales)), + nn.ReLU(True) + ) + self.project = nn.Sequential( + nn.Conv2d(in_channels * len(scales) * 2, out_channels, 
1), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.05) + ) + + def forward(self, x): + priors = [stage(x) for stage in self.stages] + context = [self.up_dr(x)] + for i in range(len(priors)): + context += [priors[i]] + context = torch.cat(context, 1) + out = self.project(context) + return out + + +class ASPOCModule(nn.Module): + """ASP-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + atrous_rates=(12, 24, 36), norm_layer=nn.BatchNorm2d, **kwargs): + super(ASPOCModule, self).__init__() + self.context = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + norm_layer(out_channels), + nn.ReLU(True), + BaseOCModule(out_channels, out_channels, key_channels, value_channels, ([2]), norm_layer, False, **kwargs)) + + rate1, rate2, rate3 = tuple(atrous_rates) + self.b1 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate1, dilation=rate1, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b2 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate2, dilation=rate2, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b3 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate3, dilation=rate3, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b4 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + + self.project = nn.Sequential( + nn.Conv2d(out_channels * 5, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.1) + ) + + def forward(self, x): + feat1 = self.context(x) + feat2 = self.b1(x) + feat3 = self.b2(x) + feat4 = self.b3(x) + feat5 = self.b4(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + out = self.project(out) + return out + + +def get_ocnet(dataset='citys', backbone='resnet50', oc_arch='base', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 
'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = OCNet(datasets[dataset].NUM_CLASS, backbone=backbone, oc_arch=oc_arch, + pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('%s_ocnet_%s_%s' % ( + oc_arch, backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_base_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'base', **kwargs) + + +def get_pyramid_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'pyramid', **kwargs) + + +def get_asp_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'asp', **kwargs) + + +if __name__ == '__main__': + #img = torch.randn(1, 3, 256, 256) + #model = get_asp_ocnet_resnet101_citys() + # outputs = model(img) + input = torch.rand(1, 3, 224,224) + model=OCNet(4,pretrained_base=False) + #target = torch.zeros(4, 512, 512).cuda() + #model.eval() + #print(model) + loss = model(input) + print(loss,loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + flop,params=profile(model,input_size=(1,3,512,512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop/1e9, params/1e6)) \ No newline at end of file diff --git a/segutils/core/models/psanet.py b/segutils/core/models/psanet.py new file mode 100644 index 0000000..82361f3 --- /dev/null +++ b/segutils/core/models/psanet.py @@ -0,0 +1,163 @@ +"""Point-wise Spatial Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNReLU +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['PSANet', 
'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc', + 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys', + 'get_psanet_resnet152_citys'] + + +class PSANet(SegBaseModel): + r"""PSANet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network for Scene Parsing." + ECCV-2018. + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _PSAHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _PSAHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PSAHead, self).__init__() + # psa_out_channels = crop_size // 8 ** 2 + self.psa = _PointwiseSpatialAttention(2048, 3600, norm_layer) + + self.conv_post = _ConvBNReLU(1024, 2048, 1, norm_layer=norm_layer) + self.project = nn.Sequential( + _ConvBNReLU(4096, 512, 3, padding=1, norm_layer=norm_layer), + nn.Dropout2d(0.1, False), + nn.Conv2d(512, nclass, 
1)) + + def forward(self, x): + global_feature = self.psa(x) + out = self.conv_post(global_feature) + out = torch.cat([x, out], dim=1) + out = self.project(out) + + return out + + +class _PointwiseSpatialAttention(nn.Module):# + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PointwiseSpatialAttention, self).__init__() + reduced_channels = 512 + self.collect_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer) + self.distribute_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer) + + def forward(self, x): + collect_fm = self.collect_attention(x) + distribute_fm = self.distribute_attention(x) + psa_fm = torch.cat([collect_fm, distribute_fm], dim=1) + return psa_fm + + +class _AttentionGeneration(nn.Module):#-->Z:(n,C2,H,W),不是原文over-completed的做法。 + def __init__(self, in_channels, reduced_channels, out_channels, norm_layer, **kwargs): + super(_AttentionGeneration, self).__init__() + self.conv_reduce = _ConvBNReLU(in_channels, reduced_channels, 1, norm_layer=norm_layer) + self.attention = nn.Sequential( + _ConvBNReLU(reduced_channels, reduced_channels, 1, norm_layer=norm_layer), + nn.Conv2d(reduced_channels, out_channels, 1, bias=False)) + + self.reduced_channels = reduced_channels + + def forward(self, x): + reduce_x = self.conv_reduce(x) + attention = self.attention(reduce_x) + n, c, h, w = attention.size()#c=out_channels=3600, + attention = attention.view(n, c, -1)#(n,3600,H*W) + reduce_x = reduce_x.view(n, self.reduced_channels, -1)#(n,512,H*W) + print(reduce_x.shape,attention.shape) + fm = torch.bmm(reduce_x, torch.softmax(attention, dim=1)) + fm = fm.view(n, self.reduced_channels, h, w)#(n,512,60,60) + + return fm + + +def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=False, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 
'coco': 'coco', + 'citys': 'citys', + } + from core.data.dataloader import datasets + model = PSANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_psanet_resnet50_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet50', **kwargs) + + +def get_psanet_resnet101_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet101', **kwargs) + + +def get_psanet_resnet152_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet152', **kwargs) + + +def get_psanet_resnet50_citys(**kwargs): + return get_psanet('citys', 'resnet50', **kwargs) + + +def get_psanet_resnet101_citys(**kwargs): + return get_psanet('citys', 'resnet101', **kwargs) + + +def get_psanet_resnet152_citys(**kwargs): + return get_psanet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_psanet_resnet50_voc() + img = torch.randn(1, 3, 480, 480) + output = model(img) diff --git a/segutils/core/models/psanet_offical.py b/segutils/core/models/psanet_offical.py new file mode 100644 index 0000000..54531a3 --- /dev/null +++ b/segutils/core/models/psanet_offical.py @@ -0,0 +1,255 @@ +import torch +from torch import nn +import torch.nn.functional as F +import core.lib.psa.functional as PF +import modeling.backbone.resnet_real as models + +#运行失败,compact可以运行,但over-completed运行不了。也是跟psamask的实现有关:用到了自定义的torch.autograd.Function(里面用到了cpp文件,导入不了_C模块出错) +# +# from . import functions +# +# +# def psa_mask(input, psa_type=0, mask_H_=None, mask_W_=None): +# return functions.psa_mask(input, psa_type, mask_H_, mask_W_) +# +# +# import torch +# from torch.autograd import Function +# from .. 
import src + + +# class PSAMask(Function): +# @staticmethod +# def forward(ctx, input, psa_type=0, mask_H_=None, mask_W_=None): +# assert psa_type in [0, 1] # 0-col, 1-dis +# assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) +# num_, channels_, feature_H_, feature_W_ = input.size() +# if mask_H_ is None and mask_W_ is None: +# mask_H_, mask_W_ = 2 * feature_H_ - 1, 2 * feature_W_ - 1 +# assert (mask_H_ % 2 == 1) and (mask_W_ % 2 == 1) +# assert channels_ == mask_H_ * mask_W_ +# half_mask_H_, half_mask_W_ = (mask_H_ - 1) // 2, (mask_W_ - 1) // 2 +# output = torch.zeros([num_, feature_H_ * feature_W_, feature_H_, feature_W_], dtype=input.dtype, device=input.device) +# if not input.is_cuda: +# src.cpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# else: +# output = output.cuda() +# src.gpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ = psa_type, num_, channels_, feature_H_, feature_W_ +# ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ = mask_H_, mask_W_, half_mask_H_, half_mask_W_ +# return output +# +# @staticmethod +# def backward(ctx, grad_output): +# psa_type, num_, channels_, feature_H_, feature_W_ = ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ +# mask_H_, mask_W_, half_mask_H_, half_mask_W_ = ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ +# grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device) +# if not grad_output.is_cuda: +# src.cpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# else: +# src.gpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, 
half_mask_H_, half_mask_W_) +# return grad_input, None, None, None + + +# psa_mask = PSAMask.apply + + +class PSA(nn.Module): + def __init__(self, in_channels=2048, mid_channels=512, psa_type=2, compact=False, shrink_factor=2, mask_h=59, + mask_w=59, normalization_factor=1.0, psa_softmax=True): + super(PSA, self).__init__() + assert psa_type in [0, 1, 2] + self.psa_type = psa_type + self.compact = compact + self.shrink_factor = shrink_factor + self.mask_h = mask_h + self.mask_w = mask_w + self.psa_softmax = psa_softmax + if normalization_factor is None: + normalization_factor = mask_h * mask_w + self.normalization_factor = normalization_factor + + self.reduce = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + self.attention = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + nn.Conv2d(mid_channels, mask_h*mask_w, kernel_size=1, bias=False), + ) + if psa_type == 2: + self.reduce_p = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + self.attention_p = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + nn.Conv2d(mid_channels, mask_h*mask_w, kernel_size=1, bias=False), + ) + self.proj = nn.Sequential( + nn.Conv2d(mid_channels * (2 if psa_type == 2 else 1), in_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(in_channels), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + out = x + if self.psa_type in [0, 1]: + x = self.reduce(x) + n, c, h, w = x.size() + if self.shrink_factor != 1: + h = (h - 1) // self.shrink_factor + 1#可以理解为这样做的目的是向上取整。 + w = (w - 1) // self.shrink_factor + 1 + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + y = self.attention(x) + if self.compact: + 
if self.psa_type == 1: + y = y.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y = PF.psa_mask(y, self.psa_type, self.mask_h, self.mask_w) + if self.psa_softmax: + y = F.softmax(y, dim=1) + x = torch.bmm(x.view(n, c, h * w), y.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + elif self.psa_type == 2: + x_col = self.reduce(x) + x_dis = self.reduce_p(x) + n, c, h, w = x_col.size() + if self.shrink_factor != 1: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + x_col = F.interpolate(x_col, size=(h, w), mode='bilinear', align_corners=True) + x_dis = F.interpolate(x_dis, size=(h, w), mode='bilinear', align_corners=True) + y_col = self.attention(x_col) + y_dis = self.attention_p(x_dis) + if self.compact: + y_dis = y_dis.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y_col = PF.psa_mask(y_col, 0, self.mask_h, self.mask_w) + y_dis = PF.psa_mask(y_dis, 1, self.mask_h, self.mask_w) + if self.psa_softmax: + y_col = F.softmax(y_col, dim=1) + y_dis = F.softmax(y_dis, dim=1) + x_col = torch.bmm(x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + x_dis = torch.bmm(x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + x = torch.cat([x_col, x_dis], 1) + x = self.proj(x) + if self.shrink_factor != 1: + h = (h - 1) * self.shrink_factor + 1 + w = (w - 1) * self.shrink_factor + 1 + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + return torch.cat((out, x), 1) + + +class PSANet(nn.Module): + def __init__(self, layers=50, dropout=0.1, classes=2, zoom_factor=8, use_psa=True, psa_type=2, compact=False, + shrink_factor=2, mask_h=59, mask_w=59, normalization_factor=1.0, psa_softmax=True, + criterion=nn.CrossEntropyLoss(ignore_index=255), pretrained=True): + super(PSANet, self).__init__() + assert layers in [50, 101, 152] + assert classes > 1 + assert 
zoom_factor in [1, 2, 4, 8] + assert psa_type in [0, 1, 2] + self.zoom_factor = zoom_factor + self.use_psa = use_psa + self.criterion = criterion + + if layers == 50: + resnet = models.resnet50(pretrained=pretrained,deep_base=True) + elif layers == 101: + resnet = models.resnet101(pretrained=pretrained) + else: + resnet = models.resnet152(pretrained=pretrained) + self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool) + self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4 + + for n, m in self.layer3.named_modules(): + if 'conv2' in n: + m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) + elif 'downsample.0' in n: + m.stride = (1, 1) + for n, m in self.layer4.named_modules(): + if 'conv2' in n: + m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) + elif 'downsample.0' in n: + m.stride = (1, 1) + + fea_dim = 2048 + if use_psa: + self.psa = PSA(fea_dim, 512, psa_type, compact, shrink_factor, mask_h, mask_w, normalization_factor, psa_softmax) + fea_dim *= 2 + self.cls = nn.Sequential( + nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(512), + nn.ReLU(inplace=True), + nn.Dropout2d(p=dropout), + nn.Conv2d(512, classes, kernel_size=1) + ) + if self.training: + self.aux = nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(inplace=True), + nn.Dropout2d(p=dropout), + nn.Conv2d(256, classes, kernel_size=1) + ) + + def forward(self, x, y=None): + x_size = x.size() + assert (x_size[2] - 1) % 8 == 0 and (x_size[3] - 1) % 8 == 0 + h = int((x_size[2] - 1) / 8 * self.zoom_factor + 1) + w = int((x_size[3] - 1) / 8 * self.zoom_factor + 1) + + x = self.layer0(x) + x = self.layer1(x) + x = self.layer2(x) + x_tmp = self.layer3(x) + x = self.layer4(x_tmp) + if self.use_psa: + x = self.psa(x) + x = self.cls(x) + if 
self.zoom_factor != 1: + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + + if self.training: + aux = self.aux(x_tmp) + if self.zoom_factor != 1: + aux = F.interpolate(aux, size=(h, w), mode='bilinear', align_corners=True) + main_loss = self.criterion(x, y) + aux_loss = self.criterion(aux, y) + return x.max(1)[1], main_loss, aux_loss + else: + return x + + +if __name__ == '__main__': + import os + os.environ["CUDA_VISIBLE_DEVICES"] = '0' + crop_h = crop_w = 465 + input = torch.rand(4, 3, crop_h, crop_w).cuda() + compact = False + mask_h, mask_w = None, None + shrink_factor = 2 + if compact: + mask_h = (crop_h - 1) // (8 * shrink_factor) + 1 + mask_w = (crop_w - 1) // (8 * shrink_factor) + 1 + else: + assert (mask_h is None and mask_w is None) or (mask_h is not None and mask_w is not None) + if mask_h is None and mask_w is None: + mask_h = 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1 + mask_w = 2 * ((crop_w - 1) // (8 * shrink_factor) + 1) - 1 + else: + assert (mask_h % 2 == 1) and (mask_h >= 3) and (mask_h <= 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1) + assert (mask_w % 2 == 1) and (mask_w >= 3) and (mask_w <= 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1) + + model = PSANet(layers=50, dropout=0.1, classes=21, zoom_factor=8, use_psa=True, psa_type=2, compact=compact, + shrink_factor=shrink_factor, mask_h=mask_h, mask_w=mask_w, psa_softmax=True, pretrained=False).cuda() + print(model) + model.eval() + output = model(input) + print('PSANet', output.size()) diff --git a/segutils/core/models/psanet_old.py b/segutils/core/models/psanet_old.py new file mode 100644 index 0000000..71a6db7 --- /dev/null +++ b/segutils/core/models/psanet_old.py @@ -0,0 +1,208 @@ +"""Point-wise Spatial Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import CollectAttention, DistributeAttention +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + + +#运行失败,name 
'_C' is not defined。也是跟psa_block模块的实现有关:用到了自定义的torch.autograd.Function(里面用到了cpp文件,找不到文件出错) + + +__all__ = ['PSANet', 'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc', + 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys', + 'get_psanet_resnet152_citys'] + + +class PSANet(SegBaseModel): + r"""PSANet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network for Scene Parsing." + ECCV-2018. + """ + + def __init__(self, nclass, backbone='resnet', aux=False, pretrained_base=False, **kwargs): + super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base, **kwargs) + self.head = _PSAHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _PSAHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PSAHead, self).__init__() + self.collect = _CollectModule(2048, 512, 60, 60, norm_layer, **kwargs) + self.distribute = _DistributeModule(2048, 512, 60, 60, norm_layer, **kwargs) + + self.conv_post = nn.Sequential( + nn.Conv2d(1024, 2048, 1, bias=False), + norm_layer(2048), 
+ nn.ReLU(True)) + self.project = nn.Sequential( + nn.Conv2d(4096, 512, 3, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, x): + global_feature_collect = self.collect(x) + global_feature_distribute = self.distribute(x) + + global_feature = torch.cat([global_feature_collect, global_feature_distribute], dim=1) + out = self.conv_post(global_feature) + out = F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=True) + out = torch.cat([x, out], dim=1) + out = self.project(out) + + return out + + +class _CollectModule(nn.Module): + def __init__(self, in_channels, reduced_channels, feat_w, feat_h, norm_layer, **kwargs): + super(_CollectModule, self).__init__() + self.conv_reduce = nn.Sequential( + nn.Conv2d(in_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True)) + self.conv_adaption = nn.Sequential( + nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True), + nn.Conv2d(reduced_channels, (feat_w - 1) * (feat_h), 1, bias=False)) + self.collect_attention = CollectAttention() + + self.reduced_channels = reduced_channels + self.feat_w = feat_w + self.feat_h = feat_h + + def forward(self, x): + x = self.conv_reduce(x) + # shrink + x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True) + x_adaption = self.conv_adaption(x_shrink) + ca = self.collect_attention(x_adaption) + global_feature_collect_list = list() + for i in range(x_shrink.shape[0]): + x_shrink_i = x_shrink[i].view(self.reduced_channels, -1) + ca_i = ca[i].view(ca.shape[1], -1) + global_feature_collect_list.append( + torch.mm(x_shrink_i, ca_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2)) + global_feature_collect = torch.cat(global_feature_collect_list) + + return global_feature_collect + + +class _DistributeModule(nn.Module): + def __init__(self, in_channels, reduced_channels, feat_w, feat_h, 
norm_layer, **kwargs): + super(_DistributeModule, self).__init__() + self.conv_reduce = nn.Sequential( + nn.Conv2d(in_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True)) + self.conv_adaption = nn.Sequential( + nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True), + nn.Conv2d(reduced_channels, (feat_w - 1) * (feat_h), 1, bias=False)) + self.distribute_attention = DistributeAttention() + + self.reduced_channels = reduced_channels + self.feat_w = feat_w + self.feat_h = feat_h + + def forward(self, x): + x = self.conv_reduce(x) + x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True) + x_adaption = self.conv_adaption(x_shrink) + da = self.distribute_attention(x_adaption) + global_feature_distribute_list = list() + for i in range(x_shrink.shape[0]): + x_shrink_i = x_shrink[i].view(self.reduced_channels, -1) + da_i = da[i].view(da.shape[1], -1) + global_feature_distribute_list.append( + torch.mm(x_shrink_i, da_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2)) + global_feature_distribute = torch.cat(global_feature_distribute_list) + + return global_feature_distribute + + +def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=False, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + # from ..data.dataloader import datasets + model = PSANet(4, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + # if pretrained: + # from .model_store import get_model_file + # device = torch.device(kwargs['local_rank']) + # model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + # map_location=device)) + return model + + +def get_psanet_resnet50_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet50', **kwargs) + + 
+def get_psanet_resnet101_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet101', **kwargs) + + +def get_psanet_resnet152_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet152', **kwargs) + + +def get_psanet_resnet50_citys(**kwargs): + return get_psanet('citys', 'resnet50', **kwargs) + + +def get_psanet_resnet101_citys(**kwargs): + return get_psanet('citys', 'resnet101', **kwargs) + + +def get_psanet_resnet152_citys(**kwargs): + return get_psanet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_psanet_resnet50_voc() + img = torch.randn(1, 3, 480, 480) + output = model(img) diff --git a/segutils/core/models/pspnet.py b/segutils/core/models/pspnet.py new file mode 100644 index 0000000..6960e57 --- /dev/null +++ b/segutils/core/models/pspnet.py @@ -0,0 +1,185 @@ +"""Pyramid Scene Parsing Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['PSPNet', 'get_psp', 'get_psp_resnet50_voc', 'get_psp_resnet50_ade', 'get_psp_resnet101_voc', + 'get_psp_resnet101_ade', 'get_psp_resnet101_citys', 'get_psp_resnet101_coco'] + + +class PSPNet(SegBaseModel): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Zhao, Hengshuang, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia. + "Pyramid scene parsing network." 
*CVPR*, 2017 + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(PSPNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _PSPHead(nclass, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +def _PSP1x1Conv(in_channels, out_channels, norm_layer, norm_kwargs): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + +class _PyramidPooling(nn.Module): + def __init__(self, in_channels, **kwargs): + super(_PyramidPooling, self).__init__() + out_channels = int(in_channels / 4) + self.avgpool1 = nn.AdaptiveAvgPool2d(1) + self.avgpool2 = nn.AdaptiveAvgPool2d(2) + self.avgpool3 = nn.AdaptiveAvgPool2d(3) + self.avgpool4 = nn.AdaptiveAvgPool2d(6) + self.conv1 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv2 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv3 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv4 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + + def forward(self, x): + size = x.size()[2:] + feat1 = F.interpolate(self.conv1(self.avgpool1(x)), size, mode='bilinear', align_corners=True) + feat2 = F.interpolate(self.conv2(self.avgpool2(x)), size, mode='bilinear', align_corners=True) + feat3 = F.interpolate(self.conv3(self.avgpool3(x)), size, mode='bilinear', align_corners=True) + feat4 = 
F.interpolate(self.conv4(self.avgpool4(x)), size, mode='bilinear', align_corners=True) + return torch.cat([x, feat1, feat2, feat3, feat4], dim=1) + + +class _PSPHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_PSPHead, self).__init__() + self.psp = _PyramidPooling(2048, norm_layer=norm_layer, norm_kwargs=norm_kwargs) + self.block = nn.Sequential( + nn.Conv2d(4096, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, x): + x = self.psp(x) + return self.block(x) + + +def get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + dataset : str, default pascal_voc + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
+ Examples + -------- + >>> model = get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False) + >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('psp_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_psp_resnet50_voc(**kwargs): + return get_psp('pascal_voc', 'resnet50', **kwargs) + + +def get_psp_resnet50_ade(**kwargs): + return get_psp('ade20k', 'resnet50', **kwargs) + + +def get_psp_resnet101_voc(**kwargs): + return get_psp('pascal_voc', 'resnet101', **kwargs) + + +def get_psp_resnet101_ade(**kwargs): + return get_psp('ade20k', 'resnet101', **kwargs) + + +def get_psp_resnet101_citys(**kwargs): + return get_psp('citys', 'resnet101', **kwargs) + + +def get_psp_resnet101_coco(**kwargs): + return get_psp('coco', 'resnet101', **kwargs) + + +if __name__ == '__main__': + # model = get_psp_resnet50_voc() + # img = torch.randn(4, 3, 480, 480) + # output = model(img) + input = torch.rand(2, 3, 512, 512) + model = PSPNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/segbase.py b/segutils/core/models/segbase.py new file mode 100644 
index 0000000..dd06266 --- /dev/null +++ b/segutils/core/models/segbase.py @@ -0,0 +1,60 @@ +"""Base Model for Semantic Segmentation""" +import torch.nn as nn + +from ..nn import JPU +from .base_models.resnetv1b import resnet50_v1s, resnet101_v1s, resnet152_v1s + +__all__ = ['SegBaseModel'] + + +class SegBaseModel(nn.Module): + r"""Base Model for Semantic Segmentation + + Parameters + ---------- + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + """ + + def __init__(self, nclass, aux, backbone='resnet50', jpu=False, pretrained_base=True, **kwargs): + super(SegBaseModel, self).__init__() + dilated = False if jpu else True + self.aux = aux + self.nclass = nclass + if backbone == 'resnet50': + self.pretrained = resnet50_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + elif backbone == 'resnet101': + self.pretrained = resnet101_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + elif backbone == 'resnet152': + self.pretrained = resnet152_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + + self.jpu = JPU([512, 1024, 2048], width=512, **kwargs) if jpu else None + + def base_forward(self, x): + """forwarding pre-trained network""" + x = self.pretrained.conv1(x) + x = self.pretrained.bn1(x) + x = self.pretrained.relu(x) + x = self.pretrained.maxpool(x) + c1 = self.pretrained.layer1(x) + c2 = self.pretrained.layer2(c1) + c3 = self.pretrained.layer3(c2) + c4 = self.pretrained.layer4(c3) + + if self.jpu: + return self.jpu(c1, c2, c3, c4) + else: + return c1, c2, c3, c4 #返回的是layer1,2,3,4的输出 + + def evaluate(self, x): + """evaluating network with inputs and targets""" + return self.forward(x)[0] + + def demo(self, x): + pred = self.forward(x) + if self.aux: + pred = pred[0] + return pred diff --git a/segutils/core/nn.zip b/segutils/core/nn.zip new file mode 100644 index 0000000..eea3167 Binary files 
/dev/null and b/segutils/core/nn.zip differ diff --git a/segutils/core/nn/__init__.py b/segutils/core/nn/__init__.py new file mode 100644 index 0000000..218bee9 --- /dev/null +++ b/segutils/core/nn/__init__.py @@ -0,0 +1,7 @@ +"""Seg NN Modules""" +#from .sync_bn.syncbn import * +#from .syncbn import * +from .ca_block import * +from .psa_block import * +from .jpu import * +from .basic import * \ No newline at end of file diff --git a/segutils/core/nn/basic.py b/segutils/core/nn/basic.py new file mode 100644 index 0000000..3b5a186 --- /dev/null +++ b/segutils/core/nn/basic.py @@ -0,0 +1,134 @@ +"""Basic Module for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['_ConvBNPReLU', '_ConvBN', '_BNPReLU', '_ConvBNReLU', '_DepthwiseConv', 'InvertedResidual'] + + +class _ConvBNReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, + dilation=1, groups=1, relu6=False, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = norm_layer(out_channels) + self.relu = nn.ReLU6(False) if relu6 else nn.ReLU(False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class _ConvBNPReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, + dilation=1, groups=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConvBNPReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = norm_layer(out_channels) + self.prelu = nn.PReLU(out_channels) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.prelu(x) + return x + + +class _ConvBN(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, + dilation=1, groups=1, 
norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConvBN, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = norm_layer(out_channels) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class _BNPReLU(nn.Module): + def __init__(self, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_BNPReLU, self).__init__() + self.bn = norm_layer(out_channels) + self.prelu = nn.PReLU(out_channels) + + def forward(self, x): + x = self.bn(x) + x = self.prelu(x) + return x + + +# ----------------------------------------------------------------- +# For PSPNet +# ----------------------------------------------------------------- +class _PSPModule(nn.Module): + def __init__(self, in_channels, sizes=(1, 2, 3, 6), **kwargs): + super(_PSPModule, self).__init__() + out_channels = int(in_channels / 4) + self.avgpools = nn.ModuleList() + self.convs = nn.ModuleList() + for size in sizes: + self.avgpool.append(nn.AdaptiveAvgPool2d(size)) + self.convs.append(_ConvBNReLU(in_channels, out_channels, 1, **kwargs)) + + def forward(self, x): + size = x.size()[2:] + feats = [x] + for (avgpool, conv) in enumerate(zip(self.avgpools, self.convs)): + feats.append(F.interpolate(conv(avgpool(x)), size, mode='bilinear', align_corners=True)) + return torch.cat(feats, dim=1) + + +# ----------------------------------------------------------------- +# For MobileNet +# ----------------------------------------------------------------- +class _DepthwiseConv(nn.Module): + """conv_dw in MobileNet""" + + def __init__(self, in_channels, out_channels, stride, norm_layer=nn.BatchNorm2d, **kwargs): + super(_DepthwiseConv, self).__init__() + self.conv = nn.Sequential( + _ConvBNReLU(in_channels, in_channels, 3, stride, 1, groups=in_channels, norm_layer=norm_layer), + _ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer)) + + def forward(self, x): + return self.conv(x) + + +# 
----------------------------------------------------------------- +# For MobileNetV2 +# ----------------------------------------------------------------- +class InvertedResidual(nn.Module): + def __init__(self, in_channels, out_channels, stride, expand_ratio, norm_layer=nn.BatchNorm2d, **kwargs): + super(InvertedResidual, self).__init__() + assert stride in [1, 2] + self.use_res_connect = stride == 1 and in_channels == out_channels + + layers = list() + inter_channels = int(round(in_channels * expand_ratio)) + if expand_ratio != 1: + # pw + layers.append(_ConvBNReLU(in_channels, inter_channels, 1, relu6=True, norm_layer=norm_layer)) + layers.extend([ + # dw + _ConvBNReLU(inter_channels, inter_channels, 3, stride, 1, + groups=inter_channels, relu6=True, norm_layer=norm_layer), + # pw-linear + nn.Conv2d(inter_channels, out_channels, 1, bias=False), + norm_layer(out_channels)]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +if __name__ == '__main__': + x = torch.randn(1, 32, 64, 64) + model = InvertedResidual(32, 64, 2, 1) + out = model(x) diff --git a/segutils/core/nn/ca_block.py b/segutils/core/nn/ca_block.py new file mode 100644 index 0000000..954c293 --- /dev/null +++ b/segutils/core/nn/ca_block.py @@ -0,0 +1,72 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torch.autograd.function import once_differentiable +#from core.nn import _C + +__all__ = ['CrissCrossAttention', 'ca_weight', 'ca_map'] + + +class _CAWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, t, f): + weight = _C.ca_forward(t, f) + + ctx.save_for_backward(t, f) + + return weight + + @staticmethod + @once_differentiable + def backward(ctx, dw): + t, f = ctx.saved_tensors + + dt, df = _C.ca_backward(dw, t, f) + return dt, df + + +class _CAMap(torch.autograd.Function): + @staticmethod + def forward(ctx, weight, g): + out = _C.ca_map_forward(weight, g) 
+ + ctx.save_for_backward(weight, g) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + weight, g = ctx.saved_tensors + + dw, dg = _C.ca_map_backward(dout, weight, g) + + return dw, dg + + +ca_weight = _CAWeight.apply +ca_map = _CAMap.apply + + +class CrissCrossAttention(nn.Module): + """Criss-Cross Attention Module""" + + def __init__(self, in_channels): + super(CrissCrossAttention, self).__init__() + self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1) + self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1) + self.value_conv = nn.Conv2d(in_channels, in_channels, 1) + self.gamma = nn.Parameter(torch.zeros(1)) + + def forward(self, x): + proj_query = self.query_conv(x) + proj_key = self.key_conv(x) + proj_value = self.value_conv(x) + + energy = ca_weight(proj_query, proj_key) + attention = F.softmax(energy, 1) + out = ca_map(attention, proj_value) + out = self.gamma * out + x + + return out diff --git a/segutils/core/nn/csrc/ca.h b/segutils/core/nn/csrc/ca.h new file mode 100644 index 0000000..1a93b36 --- /dev/null +++ b/segutils/core/nn/csrc/ca.h @@ -0,0 +1,58 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor ca_forward(const at::Tensor& t, + const at::Tensor& f) { + if (t.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_forward_cuda(t, f); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_forward_cpu(t, f); +} + +std::tuple ca_backward(const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f) { + if (dw.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_backward_cuda(dw, t, f); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_backward_cpu(dw, t, f); +} + +at::Tensor ca_map_forward(const at::Tensor& weight, + const at::Tensor& g) { + if (weight.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_map_forward_cuda(weight, g); + #else + AT_ERROR("Not compiled with 
GPU support"); + #endif + } + return ca_map_forward_cpu(weight, g); +} + +std::tuple ca_map_backward(const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g) { + if (dout.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_map_backward_cuda(dout, weight, g); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_map_backward_cpu(dout, weight, g); +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/ca_cpu.cpp b/segutils/core/nn/csrc/cpu/ca_cpu.cpp new file mode 100644 index 0000000..6029c51 --- /dev/null +++ b/segutils/core/nn/csrc/cpu/ca_cpu.cpp @@ -0,0 +1,24 @@ +#include "cpu/vision.h" + + +at::Tensor ca_forward_cpu( + const torch::Tensor& t, + const torch::Tensor& f) { + AT_ERROR("Not implemented on the CPU");} + +std::tuple ca_backward_cpu( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f) { + AT_ERROR("Not implemented on the CPU");} + +at::Tensor ca_map_forward_cpu( + const at::Tensor& weight, + const at::Tensor& g) { + AT_ERROR("Not implemented on the CPU");} + +std::tuple ca_map_backward_cpu( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g) { + AT_ERROR("Not implemented on the CPU");} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/psa_cpu.cpp b/segutils/core/nn/csrc/cpu/psa_cpu.cpp new file mode 100644 index 0000000..9e0e765 --- /dev/null +++ b/segutils/core/nn/csrc/cpu/psa_cpu.cpp @@ -0,0 +1,13 @@ +#include "cpu/vision.h" + + +at::Tensor psa_forward_cpu( + const torch::Tensor& hc, + const int forward_type) { + AT_ERROR("Not implemented on the CPU");} + +at::Tensor psa_backward_cpu( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type) { + AT_ERROR("Not implemented on the CPU");} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/syncbn_cpu.cpp b/segutils/core/nn/csrc/cpu/syncbn_cpu.cpp new file mode 100644 index 0000000..70b5db4 --- /dev/null +++ b/segutils/core/nn/csrc/cpu/syncbn_cpu.cpp @@ -0,0 
+1,45 @@ +#include +#include +#include + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +at::Tensor batchnorm_forward_cpu( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output = (input_ - broadcast_to(ex_, input_)) / broadcast_to(exs_, input_); + output = output * broadcast_to(gamma_, input_) + broadcast_to(beta_, input_); + return output; +} + +// Not implementing CPU backward for now +std::vector batchnorm_backward_cpu( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + at::Tensor gradinput = at::zeros_like(input_); + at::Tensor gradgamma = at::zeros_like(gamma_); + at::Tensor gradbeta = at::zeros_like(beta_); + at::Tensor gradMean = at::zeros_like(ex_); + at::Tensor gradStd = at::zeros_like(exs_); + return {gradinput, gradMean, gradStd, gradgamma, gradbeta}; +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/vision.h b/segutils/core/nn/csrc/cpu/vision.h new file mode 100644 index 0000000..8a824fe --- /dev/null +++ b/segutils/core/nn/csrc/cpu/vision.h @@ -0,0 +1,47 @@ +#pragma once +#include + + +at::Tensor ca_forward_cpu( + const at::Tensor& t, + const at::Tensor& f); + +std::tuple ca_backward_cpu( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f); + +at::Tensor ca_map_forward_cpu( + const at::Tensor& weight, + const at::Tensor& g); + +std::tuple ca_map_backward_cpu( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g); + +at::Tensor psa_forward_cpu( + const at::Tensor& hc, + const int forward_type); + +at::Tensor psa_backward_cpu( + const 
at::Tensor& dout, + const at::Tensor& hc, + const int forward_type); + +at::Tensor batchnorm_forward_cpu( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector batchnorm_backward_cpu( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); \ No newline at end of file diff --git a/segutils/core/nn/csrc/cuda/ca_cuda.cu b/segutils/core/nn/csrc/cuda/ca_cuda.cu new file mode 100644 index 0000000..ba459fa --- /dev/null +++ b/segutils/core/nn/csrc/cuda/ca_cuda.cu @@ -0,0 +1,324 @@ +#include +#include + +#include +#include +#include + +template +__global__ void ca_forward_kernel(const T *t, const T *f, T *weight, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int z = blockIdx.z; + + if (x < width && y < height && z < height+width-1) { + for (int batch = 0; batch < num; ++batch) { + for (int plane = 0; plane < chn; ++plane) { + T _t = t[(batch * chn + plane) * sp + y * width + x]; + + if (z < width) { + int i = z; + T _f = f[(batch * chn + plane) * sp + y * width + i]; + weight[(batch * len + i) * sp + y*width + x] += _t*_f; + } + else { + int i = z - width; + int j = i +__global__ void ca_backward_kernel_t(const T *dw, const T *t, const T *f, T *dt, + int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dw = dw[(batch * len + i) * sp + y*width + x]; + T _f = f[(batch * chn + plane) * sp + y*width 
+ i]; + dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i +__global__ void ca_backward_kernel_f(const T *dw, const T *t, const T *f, T *df, + int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dw = dw[(batch * len + x) * sp + y*width + i]; + T _t = t[(batch * chn + plane) * sp + y*width + i]; + df[(batch * chn + plane) * sp + y*width + x] += _dw * _t; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i>y ? y : y-1; + + T _dw = dw[(batch * len + width + j) * sp + i*width + x]; + T _t = t[(batch * chn + plane) * sp + i*width + x]; + df[(batch * chn + plane) * sp + y*width + x] += _dw * _t; + } + } + } +} + +template +__global__ void ca_map_forward_kernel(const T *weight, const T *g, T *out, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _g = g[(batch * chn + plane) * sp + y*width + i]; + T _w = weight[(batch * len + i) * sp + y*width + x]; + out[(batch * chn + plane) * sp + y*width + x] += _g * _w; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + + int j = i +__global__ void ca_map_backward_kernel_w(const T *dout, const T *weight, const T *g, T *dw, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = 
height + width - 1; + int z = blockIdx.z; + + if (x < width && y < height && z < height+width-1) { + for (int batch = 0; batch < num; ++batch) { + for (int plane = 0; plane < chn; ++plane) { + T _dout = dout[(batch * chn + plane) * sp + y*width + x]; + + if (z < width) { + int i = z; + T _g = g[(batch * chn + plane) * sp + y*width + i]; + dw[(batch * len + i) * sp + y*width + x] += _dout * _g; + } + else { + int i = z - width; + int j = i +__global__ void ca_map_backward_kernel_g(const T *dout, const T *weight, const T *g, T *dg, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dout = dout[(batch * chn + plane) * sp + y*width + i]; + T _w = weight[(batch * len + x) * sp + y*width + i]; + dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i>y ? 
y : y-1; + + T _dout = dout[(batch * chn + plane) * sp + i*width + x]; + T _w = weight[(batch * len + width + j) * sp + i*width + x]; + dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w; + } + } + } +} + +/* + * Implementations + */ +at::Tensor ca_forward_cuda(const at::Tensor& t, const at::Tensor& f) { + AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = t.size(0); + auto c = t.size(1); + auto h = t.size(2); + auto w = t.size(3); + + at::Tensor weight = at::zeros({n, h + w - 1, h, w}, t.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = h + w; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_forward", [&] { + ca_forward_kernel<<>>( + t.contiguous().data(), + f.contiguous().data(), + weight.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return weight; +} + +std::tuple ca_backward_cuda(const at::Tensor& dw, const at::Tensor& t, const at::Tensor& f) { + AT_ASSERTM(dw.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = t.size(0); + auto c = t.size(1); + auto h = t.size(2); + auto w = t.size(3); + + at::Tensor dt = at::zeros_like(t); + at::Tensor df = at::zeros_like(f); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = c; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_backward_kernel_t", [&] { + ca_backward_kernel_t<<>> ( + dw.contiguous().data(), + t.contiguous().data(), + f.contiguous().data(), + dt.contiguous().data(), + n, c, h, w); + }); + + 
AT_DISPATCH_FLOATING_TYPES(f.type(), "ca_backward_kernel_f", [&] { + ca_backward_kernel_f<<>> ( + dw.contiguous().data(), + t.contiguous().data(), + f.contiguous().data(), + df.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(dt, df); +} + +at::Tensor ca_map_forward_cuda(const at::Tensor& weight, const at::Tensor& g) { + AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = g.size(0); + auto c = g.size(1); + auto h = g.size(2); + auto w = g.size(3); + + at::Tensor out = at::zeros_like(g); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = c; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_forward", [&] { + ca_map_forward_kernel<<>>( + weight.contiguous().data(), + g.contiguous().data(), + out.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return out; +} + +std::tuple ca_map_backward_cuda(const at::Tensor& dout, const at::Tensor& weight, const at::Tensor& g) { + AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = dout.size(0); + auto c = dout.size(1); + auto h = dout.size(2); + auto w = dout.size(3); + + at::Tensor dw = at::zeros_like(weight); + at::Tensor dg = at::zeros_like(g); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = h + w; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(weight.type(), "ca_map_backward_kernel_w", [&] { + ca_map_backward_kernel_w<<>> ( + dout.contiguous().data(), + 
weight.contiguous().data(), + g.contiguous().data(), + dw.contiguous().data(), + n, c, h, w); + }); + + AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_backward_kernel_g", [&] { + ca_map_backward_kernel_g<<>> ( + dout.contiguous().data(), + weight.contiguous().data(), + g.contiguous().data(), + dg.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(dw, dg); +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cuda/helper.h b/segutils/core/nn/csrc/cuda/helper.h new file mode 100644 index 0000000..cc5ea88 --- /dev/null +++ b/segutils/core/nn/csrc/cuda/helper.h @@ -0,0 +1,334 @@ +#include +#include +#include + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template +struct ScalarConvert { + static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. 
+__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +// Sum across all threads within a warp +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +struct Float2 { + Acctype v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) : v1(ScalarConvert::to(v1)), v2(ScalarConvert::to(v2)) {} + __device__ Float2(DType v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2(int v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2& operator+=(const Float2& a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ Float2 warpSum(Float2 value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +template +__device__ T reduceD( + Op op, int b, int i, int k, int D) { + T sum = 0; + for (int x = threadIdx.x; x < D; x += blockDim.x) { + sum += op(b,i,k,x); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < 
WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceN( + Op op, int b, int k, int d, int N) { + T sum = 0; + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(b,x,k,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceK( + Op op, int b, int i, int d, int K) { + T sum = 0; + for (int x = threadIdx.x; x < K; x += blockDim.x) { + sum += op(b,i,x,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // 
Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceBN( + Op op, + int k, int d, int B, int N) { + T sum = 0; + for (int batch = 0; batch < B; ++batch) { + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(batch,x,k,d); + } + } + // sum over NumThreads within a warp + sum = warpSum(sum); + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +struct DeviceTensor { + public: + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? 
size[i] : 0; + } + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i < Dim); + return size_[i]; + } + + inline __device__ __host__ int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + inline __device__ __host__ DeviceTensor select(const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ DeviceTensor operator[](const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ size_t InnerSize() const { + assert(Dim >= 3); + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + inline __device__ __host__ size_t ChannelCount() const { + assert(Dim >= 3); + return size_[1]; + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[Dim]; +}; + +template +struct DeviceTensor { + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + size_[0] = size ? 
size[0] : 0; + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i == 0); + return size_[0]; + } + + inline __device__ __host__ int numElements() const { + return size_[0]; + } + + inline __device__ __host__ DType &operator[](const size_t x) const { + return *(dptr_ + x); + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[1]; +}; + +template +static DeviceTensor devicetensor(const at::Tensor &blob) { + DType *data = blob.data(); + DeviceTensor tensor(data, nullptr); + for (int i = 0; i < Dim; ++i) { + tensor.size_[i] = blob.size(i); + } + return tensor; +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cuda/psa_cuda.cu b/segutils/core/nn/csrc/cuda/psa_cuda.cu new file mode 100644 index 0000000..c47c98a --- /dev/null +++ b/segutils/core/nn/csrc/cuda/psa_cuda.cu @@ -0,0 +1,214 @@ +#include +#include + +#include +#include +#include + +#define PSA_TYPE_COLLECT 1 +#define PSA_TYPE_DISTRIBUTE 2 + +const int CUDA_NUM_THREADS = 512; + +inline int GET_BLOCKS(const int N) { + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template +__global__ void psa_collect_forward_kernel(const T *hc, T *out, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h 
- half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + out[(n * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)) * height * width + h * width + w] = + hc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w]; + } + } + } +} + +template +__global__ void psa_distribute_forward_kernel(const T *hc, T *out, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + out[(n * height * width + h * width + w) * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)] = + hc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w]; + } + } + } +} + +template +__global__ void psa_collect_backward_kernel(const T *dout, T *dhc, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; 
i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + dhc[((h * out_h * out_w + hidx * out_w + widx) * height + h) * width + w] = + dout[(n * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)) * height * width + h * width + w]; + } + } + } +} + +template +__global__ void psa_distribute_backward_kernel(const T *dout, T *dhc, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + dhc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w] = + dout[(n * height * width + h * width + w) * height * 
width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)]; + } + } + } +} + +at::Tensor psa_forward_cuda(const at::Tensor& hc, const int forward_type) { + AT_ASSERTM(hc.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = hc.size(0); + auto c = hc.size(1); + auto h = hc.size(2); + auto w = hc.size(3); + + at::Tensor out = at::zeros({n, h * w, h * w}, hc.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + int nthreads = n * h * w; + + switch (forward_type) { + case PSA_TYPE_COLLECT: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_forward", [&] { + psa_collect_forward_kernel<<>>( + hc.contiguous().data(), + out.contiguous().data(), + n, h, w); + }); + break; + case PSA_TYPE_DISTRIBUTE: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_forward", [&] { + psa_distribute_forward_kernel<<>>( + hc.contiguous().data(), + out.contiguous().data(), + n, h, w); + }); + break; + } + THCudaCheck(cudaGetLastError()); + return out; +} + +at::Tensor psa_backward_cuda(const at::Tensor& dout, const at::Tensor& hc, const int forward_type) { + AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(hc.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = hc.size(0); + auto c = hc.size(1); + auto h = hc.size(2); + auto w = hc.size(3); + + at::Tensor dhc = at::zeros_like(hc); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + int nthreads = n * h * w; + + switch (forward_type) { + case PSA_TYPE_COLLECT: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_backward", [&] { + psa_collect_backward_kernel<<>>( + dout.contiguous().data(), + dhc.contiguous().data(), + n, h, w); + }); + break; + case PSA_TYPE_DISTRIBUTE: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_backward", [&] { + psa_distribute_backward_kernel<<>>( + dout.contiguous().data(), + dhc.contiguous().data(), + n, h, w); + }); + break; + } + THCudaCheck(cudaGetLastError()); + return dhc; +} \ No newline at end of file diff --git 
a/segutils/core/nn/csrc/cuda/syncbn_cuda.cu b/segutils/core/nn/csrc/cuda/syncbn_cuda.cu new file mode 100644 index 0000000..dcaed67 --- /dev/null +++ b/segutils/core/nn/csrc/cuda/syncbn_cuda.cu @@ -0,0 +1,488 @@ +#include +// #include +#include +#include +#include + +#include +#include + +#include "helper.h" + +namespace { + +template +struct GradOp { + __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) + : beta(m), output(i), gradOutput(g) {} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = gradOutput[batch][plane][n]; + DType c = ScalarConvert::to(output[batch][plane][n] - beta); + return Float2(g, g * c); + } + const Acctype beta; + const DeviceTensor3 output; + const DeviceTensor3 gradOutput; +}; + +template +struct SumOp { + __device__ SumOp(DeviceTensor i) : input(i){} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = input[batch][plane][n]; + return Float2(g, g * g); + } + DType mean; + DeviceTensor input; +}; + +// Sum across (batch, x/y/z) applying Op() pointwise +template +__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { + T sum = (T)0; + for (int batch = 0; batch < tensor.getSize(0); ++batch) { + for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return 
shared[0]; +} + +template +__global__ void batchnorm_forward_kernel ( + DeviceTensor output, + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + output[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void inp_batchnorm_forward_kernel ( + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + input[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void expectation_forward_kernel ( + DeviceTensor input, + DeviceTensor ex, + DeviceTensor exs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + SumOp g(input); + Float2 res = reduce, + SumOp, DeviceTensor>(g, input, c); + DType xsum = res.v1; + DType xsquare = res.v2; + if (threadIdx.x == 0) { + ex[c] = xsum * norm; + exs[c] = xsquare * norm; + } +} + +template +__global__ void batchnorm_backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor input, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(mean[c], input, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) 
{ + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; + gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP * invstd; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void inp_batchnorm_backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor output, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(beta[c], output, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; + gradExs[c] = - 0.5 * invstd * invstd * dotP; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP / gamma[c]; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void expectation_backward_kernel ( + DeviceTensor gradInput, 
+ DeviceTensor input, + DeviceTensor gradEx, + DeviceTensor gradExs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * + input[batch][c][x] * norm; + } + } +} + +template +__global__ void inp_expectation_backward_kernel ( + DeviceTensor gradInput, + DeviceTensor output, + DeviceTensor gradEx, + DeviceTensor gradExs, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * + ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; + } + } +} + +} // namespace + +at::Tensor batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output_ = at::zeros_like(input_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "batchnorm_forward_cuda", ([&] { + /* Device tensors */ + DeviceTensor output = devicetensor(output_); + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + batchnorm_forward_kernel<<>>( + output, input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return output_; +} + +at::Tensor inp_batchnorm_forward_cuda( + const at::Tensor input_, + const 
at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "inp_batchnorm_forward_cuda", ([&] { + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + inp_batchnorm_forward_kernel<<>>( + input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return input_; +} + +std::vector batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(input_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "batchnorm_backward_cuda", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + 
DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + batchnorm_backward_kernel + <<>>( + gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector inp_batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(output_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "inp_batchnorm_backward_cuda", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor output = devicetensor(output_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + inp_batchnorm_backward_kernel + <<>>( + gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector expectation_forward_cuda( + const at::Tensor input_) { + /* outputs */ + auto ex_ = 
torch::zeros({input_.size(1)}, input_.options()); + auto exs_ = torch::zeros({input_.size(1)}, input_.options()); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "expectation_forward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor exs = devicetensor(exs_); + /* kernel function */ + expectation_forward_kernel + <<>>(input, ex, exs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {ex_, exs_}; +} + +at::Tensor expectation_backward_cuda( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + /* outputs */ + at::Tensor gradInput_ = at::zeros_like(input_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "expectation_backward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + /* kernel function */ + expectation_backward_kernel + <<>>(gradInput, input, gradEx, gradExs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} + +at::Tensor inp_expectation_backward_cuda( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs */ + //auto gradInput_ = at::zeros_like(output_); + auto std_ = (exs_ - ex_ * ex_ + 
eps).sqrt(); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "inp_expectation_backward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(output_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + inp_expectation_backward_kernel + <<>>(gradInput, input, gradEx, gradExs, + ex, std, gamma, beta, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cuda/vision.h b/segutils/core/nn/csrc/cuda/vision.h new file mode 100644 index 0000000..6696840 --- /dev/null +++ b/segutils/core/nn/csrc/cuda/vision.h @@ -0,0 +1,84 @@ +#pragma once +#include +#include + + +at::Tensor ca_forward_cuda( + const at::Tensor& t, + const at::Tensor& f); + +std::tuple ca_backward_cuda( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f); + +at::Tensor ca_map_forward_cuda( + const at::Tensor& weight, + const at::Tensor& g); + +std::tuple ca_map_backward_cuda( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g); + +at::Tensor psa_forward_cuda( + const at::Tensor& hc, + const int forward_type); + +at::Tensor psa_backward_cuda( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type); + +at::Tensor batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +at::Tensor inp_batchnorm_forward_cuda( + const 
at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector inp_batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector expectation_forward_cuda( + const at::Tensor input_); + +at::Tensor expectation_backward_cuda( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_); + +at::Tensor inp_expectation_backward_cuda( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); \ No newline at end of file diff --git a/segutils/core/nn/csrc/psa.h b/segutils/core/nn/csrc/psa.h new file mode 100644 index 0000000..1702581 --- /dev/null +++ b/segutils/core/nn/csrc/psa.h @@ -0,0 +1,33 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor psa_forward(const at::Tensor& hc, + const int forward_type) { + if (hc.type().is_cuda()) { + #ifdef WITH_CUDA + return psa_forward_cuda(hc, forward_type); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return psa_forward_cpu(hc, forward_type); +} + +at::Tensor psa_backward(const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type) { + if (hc.type().is_cuda()) { + #ifdef WITH_CUDA + return psa_backward_cuda(dout, hc, forward_type); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return psa_backward_cpu(dout, hc, forward_type); +} \ No newline at end of 
file diff --git a/segutils/core/nn/csrc/syncbn.h b/segutils/core/nn/csrc/syncbn.h new file mode 100644 index 0000000..fbcf695 --- /dev/null +++ b/segutils/core/nn/csrc/syncbn.h @@ -0,0 +1,118 @@ +#pragma once + +#include +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor batchnorm_forward(const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return batchnorm_forward_cuda(input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return batchnorm_forward_cpu(input_, ex_, exs_, gamma_, beta_, eps); +} + +at::Tensor inp_batchnorm_forward(const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_batchnorm_forward_cuda(input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector batchnorm_backward(const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (gradoutput_.type().is_cuda()) { + #ifdef WITH_CUDA + return batchnorm_backward_cuda(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return batchnorm_backward_cpu(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); +} + +std::vector inp_batchnorm_backward(const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (gradoutput_.type().is_cuda()) { + #ifdef WITH_CUDA + return 
inp_batchnorm_backward_cuda(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector expectation_forward(const at::Tensor input_) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return expectation_forward_cuda(input_); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor expectation_backward(const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return expectation_backward_cuda(input_, gradEx_, gradExs_); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor inp_expectation_backward(const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (output_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_expectation_backward_cuda(gradInput_, output_, gradEx_, gradExs_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/vision.cpp b/segutils/core/nn/csrc/vision.cpp new file mode 100644 index 0000000..c369176 --- /dev/null +++ b/segutils/core/nn/csrc/vision.cpp @@ -0,0 +1,19 @@ +#include "ca.h" +#include "psa.h" +#include "syncbn.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ca_forward", &ca_forward, "ca_forward"); + m.def("ca_backward", &ca_backward, "ca_backward"); + m.def("ca_map_forward", &ca_map_forward, "ca_map_forward"); + m.def("ca_map_backward", &ca_map_backward, "ca_map_backward"); + m.def("psa_forward", &psa_forward, "psa_forward"); + m.def("psa_backward", &psa_backward, 
"psa_backward"); + m.def("batchnorm_forward", &batchnorm_forward, "batchnorm_forward"); + m.def("inp_batchnorm_forward", &inp_batchnorm_forward, "inp_batchnorm_forward"); + m.def("batchnorm_backward", &batchnorm_backward, "batchnorm_backward"); + m.def("inp_batchnorm_backward", &inp_batchnorm_backward, "inp_batchnorm_backward"); + m.def("expectation_forward", &expectation_forward, "expectation_forward"); + m.def("expectation_backward", &expectation_backward, "expectation_backward"); + m.def("inp_expectation_backward", &inp_expectation_backward, "inp_expectation_backward"); +} \ No newline at end of file diff --git a/segutils/core/nn/jpu.py b/segutils/core/nn/jpu.py new file mode 100644 index 0000000..db23bab --- /dev/null +++ b/segutils/core/nn/jpu.py @@ -0,0 +1,68 @@ +"""Joint Pyramid Upsampling""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['JPU'] + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, + dilation=1, bias=False, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.conv = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(inplanes) + self.pointwise = nn.Conv2d(inplanes, planes, 1, bias=bias) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.pointwise(x) + return x + + +# copy from: https://github.com/wuhuikai/FastFCN/blob/master/encoding/nn/customize.py +class JPU(nn.Module): + def __init__(self, in_channels, width=512, norm_layer=nn.BatchNorm2d, **kwargs): + super(JPU, self).__init__() + + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.conv4 = nn.Sequential( + nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.conv3 = nn.Sequential( + nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False), 
+ norm_layer(width), + nn.ReLU(True)) + + self.dilation1 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=1, dilation=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation2 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=2, dilation=2, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation3 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=4, dilation=4, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation4 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=8, dilation=8, bias=False), + norm_layer(width), + nn.ReLU(True)) + + def forward(self, *inputs): + feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])] + size = feats[-1].size()[2:] + feats[-2] = F.interpolate(feats[-2], size, mode='bilinear', align_corners=True) + feats[-3] = F.interpolate(feats[-3], size, mode='bilinear', align_corners=True) + feat = torch.cat(feats, dim=1) + feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], + dim=1) + + return inputs[0], inputs[1], inputs[2], feat diff --git a/segutils/core/nn/psa_block.py b/segutils/core/nn/psa_block.py new file mode 100644 index 0000000..c8ff11b --- /dev/null +++ b/segutils/core/nn/psa_block.py @@ -0,0 +1,71 @@ +import torch +import torch.nn as nn + +from torch.autograd.function import once_differentiable +#from core.nn import _C + +__all__ = ['CollectAttention', 'DistributeAttention', 'psa_collect', 'psa_distribute'] + + +class _PSACollect(torch.autograd.Function): + @staticmethod + def forward(ctx, hc): + out = _C.psa_forward(hc, 1) + + ctx.save_for_backward(hc) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + hc = ctx.saved_tensors + + dhc = _C.psa_backward(dout, hc[0], 1) + + return dhc + + +class _PSADistribute(torch.autograd.Function): + @staticmethod + def forward(ctx, hc): + out = _C.psa_forward(hc, 2) + + ctx.save_for_backward(hc) 
+ + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + hc = ctx.saved_tensors + + dhc = _C.psa_backward(dout, hc[0], 2) + + return dhc + + +psa_collect = _PSACollect.apply +psa_distribute = _PSADistribute.apply + + +class CollectAttention(nn.Module): + """Collect Attention Generation Module""" + + def __init__(self): + super(CollectAttention, self).__init__() + + def forward(self, x): + out = psa_collect(x) + return out + + +class DistributeAttention(nn.Module): + """Distribute Attention Generation Module""" + + def __init__(self): + super(DistributeAttention, self).__init__() + + def forward(self, x): + out = psa_distribute(x) + return out diff --git a/segutils/core/nn/setup.py b/segutils/core/nn/setup.py new file mode 100644 index 0000000..ec800c4 --- /dev/null +++ b/segutils/core/nn/setup.py @@ -0,0 +1,56 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# !/usr/bin/env python +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/90c226cf10e098263d1df28bda054a5f22513b4f/setup.py + +import os +import glob +import torch + +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME + +requirements = ["torch"] + + +def get_extension(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, "csrc") + + main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) + source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) + + sources = main_file + source_cpu + extension = CppExtension + + define_macros = [] + + if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1": + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + 
ext_modules = [ + extension( + "._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + ) + ] + + return ext_modules + + +setup( + name="semantic_segmentation", + version="0.1", + author="tramac", + description="semantic segmentation in pytorch", + ext_modules=get_extension(), + cmdclass={"build_ext": BuildExtension} +) \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/__init__.py b/segutils/core/nn/sync_bn/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/functions.py b/segutils/core/nn/sync_bn/functions.py new file mode 100644 index 0000000..b0102e6 --- /dev/null +++ b/segutils/core/nn/sync_bn/functions.py @@ -0,0 +1,285 @@ +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +## Created by: Hang Zhang +## Email: zhanghang0704@gmail.com +## Copyright (c) 2018 +## +## This source code is licensed under the MIT-style license found in the +## LICENSE file in the root directory of this source tree +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +"""Synchronized Cross-GPU Batch Normalization functions""" +import torch.cuda.comm as comm + +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from core.nn.sync_bn import lib + +__all__ = ['syncbatchnorm', 'inp_syncbatchnorm'] + + +class syncbatchnorm_(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-05, + activation="none", slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + assert activation == 'none' + + # continous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + if x.is_cuda: + _ex, _exs = lib.gpu.expectation_forward(x) + else: + raise 
NotImplemented + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsqueeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + + # BN forward + if x.is_cuda: + y = lib.gpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + else: + y = lib.cpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return y + + @staticmethod + @once_differentiable + def backward(ctx, dz): + x, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # BN backward + if dz.is_cuda: + dx, _dex, _dexs, dgamma, dbeta = lib.gpu.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), 
[_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + if x.is_cuda: + dx_ = lib.gpu.expectation_backward(x, _dex, _dexs) + else: + raise NotImplemented + dx = dx + dx_ + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +def _act_forward(ctx, x): + if ctx.activation.lower() == "leaky_relu": + if x.is_cuda: + lib.gpu.leaky_relu_forward(x, ctx.slope) + else: + raise NotImplemented + else: + assert ctx.activation == 'none' + + +def _act_backward(ctx, x, dx): + if ctx.activation.lower() == "leaky_relu": + if x.is_cuda: + lib.gpu.leaky_relu_backward(x, dx, ctx.slope) + else: + raise NotImplemented + else: + assert ctx.activation == 'none' + + +class inp_syncbatchnorm_(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-5, + activation='none', slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + + # continous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + if x.is_cuda: + _ex, _exs = lib.gpu.expectation_forward(x) + else: + raise NotImplemented + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + 
ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsuqeeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + ctx.mark_dirty(x) + + # BN forward + activation + if x.is_cuda: + lib.gpu.batchnorm_inp_forward(x, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + _act_forward(ctx, x) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return x + + @staticmethod + @once_differentiable + def backward(ctx, dz): + z, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + # BN backward + if dz.is_cuda: + dx, _dex, _dexs, dgamma, dbeta = lib.gpu.batchnorm_inp_backward(dz, z, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + 
ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + if z.is_cuda: + lib.gpu.expectation_inp_backward(dx, z, _dex, _dexs, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +syncbatchnorm = syncbatchnorm_.apply +inp_syncbatchnorm = inp_syncbatchnorm_.apply diff --git a/segutils/core/nn/sync_bn/lib/__init__.py b/segutils/core/nn/sync_bn/lib/__init__.py new file mode 100644 index 0000000..98c3374 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/__init__.py @@ -0,0 +1,20 @@ +import os +import torch +from torch.utils.cpp_extension import load + +cwd = os.path.dirname(os.path.realpath(__file__)) +cpu_path = os.path.join(cwd, 'cpu') +gpu_path = os.path.join(cwd, 'gpu') + +cpu = load('sync_cpu', [ + os.path.join(cpu_path, 'operator.cpp'), + os.path.join(cpu_path, 'syncbn_cpu.cpp'), +], build_directory=cpu_path, verbose=False) + +if torch.cuda.is_available(): + gpu = load('sync_gpu', [ + os.path.join(gpu_path, 'operator.cpp'), + os.path.join(gpu_path, 'activation_kernel.cu'), + os.path.join(gpu_path, 'syncbn_kernel.cu'), + ], extra_cuda_cflags=["--expt-extended-lambda"], + build_directory=gpu_path, verbose=False) diff --git a/segutils/core/nn/sync_bn/lib/cpu/.ninja_deps b/segutils/core/nn/sync_bn/lib/cpu/.ninja_deps new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/cpu/.ninja_log b/segutils/core/nn/sync_bn/lib/cpu/.ninja_log new file mode 100644 index 0000000..d4c4d9d --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/.ninja_log @@ -0,0 +1,7 @@ +# ninja log v5 
+0 6679 1555417150 syncbn_cpu.o b884354b4810778d +0 7702 1555417151 operator.o df6e270344a1d164 +7703 8115 1555417151 sync_cpu.so d148b4e40b0af67e +0 5172 1557113015 syncbn_cpu.o 9052547bb175072 +0 6447 1557113016 operator.o 209836e0b0c1e97e +6447 6613 1557113016 sync_cpu.so d148b4e40b0af67e diff --git a/segutils/core/nn/sync_bn/lib/cpu/build.ninja b/segutils/core/nn/sync_bn/lib/cpu/build.ninja new file mode 100644 index 0000000..e432f66 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/build.ninja @@ -0,0 +1,21 @@ +ninja_required_version = 1.3 +cxx = c++ + +cflags = -DTORCH_EXTENSION_NAME=sync_cpu -DTORCH_API_INCLUDE_EXTENSION_H -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/torch/csrc/api/include -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/TH -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/THC -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/include/python3.6m -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++11 +ldflags = -shared + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out + depfile = $out.d + deps = gcc + +rule link + command = $cxx $in $ldflags -o $out + +build operator.o: compile /home/tramac/PycharmProjects/awesome-semantic-segmentation-pytorch/core/nn/sync_bn/lib/cpu/operator.cpp +build syncbn_cpu.o: compile /home/tramac/PycharmProjects/awesome-semantic-segmentation-pytorch/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp + +build sync_cpu.so: link operator.o syncbn_cpu.o + +default sync_cpu.so + diff --git a/segutils/core/nn/sync_bn/lib/cpu/operator.cpp b/segutils/core/nn/sync_bn/lib/cpu/operator.cpp new file mode 100644 index 0000000..5981ffc --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/operator.cpp @@ -0,0 +1,8 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + 
m.def("batchnorm_forward", &BatchNorm_Forward_CPU, "BatchNorm forward (CPU)"); + m.def("batchnorm_backward", &BatchNorm_Backward_CPU, "BatchNorm backward (CPU)"); + m.def("sumsquare_forward", &Sum_Square_Forward_CPU, "SumSqu forward (CPU)"); + m.def("sumsquare_backward", &Sum_Square_Backward_CPU, "SumSqu backward (CPU)"); +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/cpu/operator.h b/segutils/core/nn/sync_bn/lib/cpu/operator.h new file mode 100644 index 0000000..215fd53 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/operator.h @@ -0,0 +1,26 @@ +#include +#include + +at::Tensor BatchNorm_Forward_CPU( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_); + +std::vector BatchNorm_Backward_CPU( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + bool train); + +std::vector Sum_Square_Forward_CPU( + const at::Tensor input_); + +at::Tensor Sum_Square_Backward_CPU( + const at::Tensor input_, + const at::Tensor gradSum_, + const at::Tensor gradSquare_); \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/cpu/operator.o b/segutils/core/nn/sync_bn/lib/cpu/operator.o new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/cpu/setup.py b/segutils/core/nn/sync_bn/lib/cpu/setup.py new file mode 100644 index 0000000..b0ecd6c --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CppExtension + +setup( + name='syncbn_cpu', + ext_modules=[ + CppExtension('syncbn_cpu', [ + 'operator.cpp', + 'syncbn_cpu.cpp', + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) diff --git a/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp b/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp new file mode 100644 index 
0000000..6b6bb73 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp @@ -0,0 +1,61 @@ +#include +#include +#include + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +at::Tensor BatchNorm_Forward_CPU( + const at::Tensor input, + const at::Tensor mean, + const at::Tensor std, + const at::Tensor gamma, + const at::Tensor beta) { + auto output = (input - broadcast_to(mean, input)) / broadcast_to(std, input); + output = output * broadcast_to(gamma, input) + broadcast_to(beta, input); + return output; +} + +// Not implementing CPU backward for now +std::vector BatchNorm_Backward_CPU( + const at::Tensor gradoutput, + const at::Tensor input, + const at::Tensor mean, + const at::Tensor std, + const at::Tensor gamma, + const at::Tensor beta, + bool train) { + /* outputs*/ + at::Tensor gradinput = at::zeros_like(input); + at::Tensor gradgamma = at::zeros_like(gamma); + at::Tensor gradbeta = at::zeros_like(beta); + at::Tensor gradMean = at::zeros_like(mean); + at::Tensor gradStd = at::zeros_like(std); + return {gradinput, gradMean, gradStd, gradgamma, gradbeta}; +} + +std::vector Sum_Square_Forward_CPU( + const at::Tensor input) { + /* outputs */ + at::Tensor sum = torch::zeros({input.size(1)}, input.options()); + at::Tensor square = torch::zeros({input.size(1)}, input.options()); + return {sum, square}; +} + +at::Tensor Sum_Square_Backward_CPU( + const at::Tensor input, + const at::Tensor gradSum, + const at::Tensor gradSquare) { + /* outputs */ + at::Tensor gradInput = at::zeros_like(input); + return gradInput; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.o b/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.o new file mode 100644 index 0000000..e69de29 diff --git 
a/segutils/core/nn/sync_bn/lib/gpu/__init__.py b/segutils/core/nn/sync_bn/lib/gpu/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/gpu/activation_kernel.cu b/segutils/core/nn/sync_bn/lib/gpu/activation_kernel.cu new file mode 100644 index 0000000..e696667 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/activation_kernel.cu @@ -0,0 +1,46 @@ +#include +// #include +#include +#include +#include + +#include + +#include +#include + + +namespace { + +template +inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + thrust::transform_if(th_dz, th_dz + count, th_z, th_dz, + [slope] __device__ (const T& dz) { return dz * slope; }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(th_z, th_z + count, th_z, + [slope] __device__ (const T& z) { return z / slope; }, + [] __device__ (const T& z) { return z < 0; }); +} + +} + +void LeakyRelu_Forward_CUDA(at::Tensor z, float slope) { + at::leaky_relu_(z, slope); +} + +void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope) { + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "LeakyRelu_Backward_CUDA", ([&] { + leaky_relu_backward_impl(z.data(), dz.data(), slope, count); + })); + /* + // unstable after scaling + at::leaky_relu_(z, 1.0 / slope); + at::leaky_relu_backward(dz, z, slope); + */ +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/common.h b/segutils/core/nn/sync_bn/lib/gpu/common.h new file mode 100644 index 0000000..aa38296 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/common.h @@ -0,0 +1,224 @@ +#include +#include + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template +struct ScalarConvert { + static __host__ 
__device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. +__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +// Sum across all threads within a warp +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +struct Float2 { + Acctype v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) : v1(ScalarConvert::to(v1)), v2(ScalarConvert::to(v2)) {} + __device__ Float2(DType v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2(int v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2& operator+=(const Float2& a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ Float2 warpSum(Float2 value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +template +__device__ T reduceD( + Op op, int b, int i, int k, int D) { + T sum = 
0; + for (int x = threadIdx.x; x < D; x += blockDim.x) { + sum += op(b,i,k,x); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceN( + Op op, int b, int k, int d, int N) { + T sum = 0; + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(b,x,k,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceK( + Op op, int b, int i, int d, int K) { + T sum = 0; + for (int x = threadIdx.x; x < K; x += blockDim.x) { + sum += op(b,i,x,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % 
WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceBN( + Op op, + int k, int d, int B, int N) { + T sum = 0; + for (int batch = 0; batch < B; ++batch) { + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(batch,x,k,d); + } + } + // sum over NumThreads within a warp + sum = warpSum(sum); + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/device_tensor.h b/segutils/core/nn/sync_bn/lib/gpu/device_tensor.h new file mode 100644 index 0000000..c67dfae --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/device_tensor.h @@ -0,0 +1,110 @@ +#include + +template +struct DeviceTensor { + public: + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? 
size[i] : 0; + } + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i < Dim); + return size_[i]; + } + + inline __device__ __host__ int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + inline __device__ __host__ DeviceTensor select(const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ DeviceTensor operator[](const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ size_t InnerSize() const { + assert(Dim >= 3); + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + inline __device__ __host__ size_t ChannelCount() const { + assert(Dim >= 3); + return size_[1]; + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[Dim]; +}; + +template +struct DeviceTensor { + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + size_[0] = size ? 
size[0] : 0; + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i == 0); + return size_[0]; + } + + inline __device__ __host__ int numElements() const { + return size_[0]; + } + + inline __device__ __host__ DType &operator[](const size_t x) const { + return *(dptr_ + x); + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[1]; +}; + +template +static DeviceTensor devicetensor(const at::Tensor &blob) { + DType *data = blob.data(); + DeviceTensor tensor(data, nullptr); + for (int i = 0; i < Dim; ++i) { + tensor.size_[i] = blob.size(i); + } + return tensor; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/operator.cpp b/segutils/core/nn/sync_bn/lib/gpu/operator.cpp new file mode 100644 index 0000000..48e28fe --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/operator.cpp @@ -0,0 +1,13 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("batchnorm_forward", &BatchNorm_Forward_CUDA, "BatchNorm forward (CUDA)"); + m.def("batchnorm_inp_forward", &BatchNorm_Forward_Inp_CUDA, "BatchNorm forward (CUDA)"); + m.def("batchnorm_backward", &BatchNorm_Backward_CUDA, "BatchNorm backward (CUDA)"); + m.def("batchnorm_inp_backward", &BatchNorm_Inp_Backward_CUDA, "BatchNorm backward (CUDA)"); + m.def("expectation_forward", &Expectation_Forward_CUDA, "Expectation forward (CUDA)"); + m.def("expectation_backward", &Expectation_Backward_CUDA, "Expectation backward (CUDA)"); + m.def("expectation_inp_backward", &Expectation_Inp_Backward_CUDA, "Inplace Expectation backward (CUDA)"); + m.def("leaky_relu_forward", &LeakyRelu_Forward_CUDA, "Learky ReLU forward (CUDA)"); + m.def("leaky_relu_backward", &LeakyRelu_Backward_CUDA, "Learky ReLU backward (CUDA)"); +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/operator.h b/segutils/core/nn/sync_bn/lib/gpu/operator.h new file mode 100644 index 0000000..246570d --- /dev/null +++ 
b/segutils/core/nn/sync_bn/lib/gpu/operator.h @@ -0,0 +1,59 @@ +#include +#include + +at::Tensor BatchNorm_Forward_CUDA( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +at::Tensor BatchNorm_Forward_Inp_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector BatchNorm_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector BatchNorm_Inp_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector Expectation_Forward_CUDA( + const at::Tensor input_); + +at::Tensor Expectation_Backward_CUDA( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_); + +at::Tensor Expectation_Inp_Backward_CUDA( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +void LeakyRelu_Forward_CUDA(at::Tensor z, float slope); + +void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope); \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/setup.py b/segutils/core/nn/sync_bn/lib/gpu/setup.py new file mode 100644 index 0000000..14c01f6 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='syncbn_gpu', + ext_modules=[ + CUDAExtension('sync_gpu', [ + 'operator.cpp', + 'activation_kernel.cu', + 'syncbn_kernel.cu', + ]), + ], + 
cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu b/segutils/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu new file mode 100644 index 0000000..2a7e840 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu @@ -0,0 +1,489 @@ +#include +// #include +#include +#include +#include + +#include "common.h" +#include "device_tensor.h" + +namespace { + +template +struct GradOp { + __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) + : beta(m), output(i), gradOutput(g) {} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = gradOutput[batch][plane][n]; + DType c = ScalarConvert::to(output[batch][plane][n] - beta); + return Float2(g, g * c); + } + const Acctype beta; + const DeviceTensor3 output; + const DeviceTensor3 gradOutput; +}; + +template +struct SumOp { + __device__ SumOp(DeviceTensor i) : input(i){} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = input[batch][plane][n]; + return Float2(g, g * g); + } + DType mean; + DeviceTensor input; +}; + +// Sum across (batch, x/y/z) applying Op() pointwise +template +__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { + T sum = (T)0; + for (int batch = 0; batch < tensor.getSize(0); ++batch) { + for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } 
+ } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__global__ void BatchNorm_Forward_kernel ( + DeviceTensor output, + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + output[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void BatchNorm_Forward_Inp_kernel ( + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + input[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void BatchNorm_Backward_Inp_kernel ( + DeviceTensor gradoutput, + DeviceTensor output, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(beta[c], output, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; + gradExs[c] = - 0.5 * invstd * invstd * dotP; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < 
gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP / gamma[c]; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void BatchNorm_Backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor input, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(mean[c], input, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; + gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP * invstd; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + + +template +__global__ void Expectation_Forward_kernel ( + DeviceTensor input, + DeviceTensor ex, + DeviceTensor exs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + SumOp g(input); + Float2 res = reduce, + SumOp, DeviceTensor>(g, input, c); + DType xsum = res.v1; + DType xsquare = res.v2; + if (threadIdx.x == 0) { + ex[c] = xsum * 
norm; + exs[c] = xsquare * norm; + } +} + +template +__global__ void Expectation_Backward_kernel ( + DeviceTensor gradInput, + DeviceTensor input, + DeviceTensor gradEx, + DeviceTensor gradExs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * + input[batch][c][x] * norm; + } + } +} + +template +__global__ void Expectation_Backward_Inp_kernel ( + DeviceTensor gradInput, + DeviceTensor output, + DeviceTensor gradEx, + DeviceTensor gradExs, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * + ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; + } + } +} + +} // namespace + +at::Tensor BatchNorm_Forward_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output_ = at::zeros_like(input_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor output = devicetensor(output_); + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + BatchNorm_Forward_kernel<<>>( + output, input, ex, std, gamma, beta); + })); + 
AT_ASSERT(cudaGetLastError() == cudaSuccess); + return output_; +} + +at::Tensor BatchNorm_Forward_Inp_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + BatchNorm_Forward_Inp_kernel<<>>( + input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return input_; +} + + +std::vector BatchNorm_Inp_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(output_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor output = devicetensor(output_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = 
devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + BatchNorm_Backward_Inp_kernel + <<>>( + gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + + +std::vector BatchNorm_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(input_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + BatchNorm_Backward_kernel + <<>>( + gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, 
gradExs_, gradgamma_, gradbeta_}; +} + +std::vector Expectation_Forward_CUDA( + const at::Tensor input_) { + /* outputs */ + auto ex_ = torch::zeros({input_.size(1)}, input_.options()); + auto exs_ = torch::zeros({input_.size(1)}, input_.options()); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor exs = devicetensor(exs_); + /* kernel function */ + Expectation_Forward_kernel + <<>>(input, ex, exs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {ex_, exs_}; +} + +at::Tensor Expectation_Backward_CUDA( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + /* outputs */ + at::Tensor gradInput_ = at::zeros_like(input_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + /* kernel function */ + Expectation_Backward_kernel + <<>>(gradInput, input, gradEx, gradExs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} + +at::Tensor Expectation_Inp_Backward_CUDA( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const 
at::Tensor beta_, + float eps) { + /* outputs */ + //auto gradInput_ = at::zeros_like(output_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(output_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + Expectation_Backward_Inp_kernel + <<>>(gradInput, input, gradEx, gradExs, + ex, std, gamma, beta, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/syncbn.py b/segutils/core/nn/sync_bn/syncbn.py new file mode 100644 index 0000000..f1247af --- /dev/null +++ b/segutils/core/nn/sync_bn/syncbn.py @@ -0,0 +1,124 @@ +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +## Created by: Hang Zhang +## ECE Department, Rutgers University +## Email: zhang.hang@rutgers.edu +## Copyright (c) 2017 +## +## This source code is licensed under the MIT-style license found in the +## LICENSE file in the root directory of this source tree +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +"""Synchronized Cross-GPU Batch Normalization Module""" +import warnings +import torch + +from torch.nn.modules.batchnorm import _BatchNorm +from queue import Queue +from .functions import * + +__all__ = ['SyncBatchNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d'] + + +# Adopt from 
https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/syncbn.py +class SyncBatchNorm(_BatchNorm): + """Cross-GPU Synchronized Batch normalization (SyncBN) + + Parameters: + num_features: num_features from an expected input of + size batch_size x num_features x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + sync: a boolean value that when set to ``True``, synchronize across + different gpus. Default: ``True`` + activation : str + Name of the activation functions, one of: `leaky_relu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + Reference: + .. [1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." *ICML 2015* + .. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." 
*CVPR 2018* + Examples: + >>> m = SyncBatchNorm(100) + >>> net = torch.nn.DataParallel(m) + >>> output = net(input) + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, sync=True, activation='none', slope=0.01, inplace=True): + super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=True) + self.activation = activation + self.inplace = False if activation == 'none' else inplace + self.slope = slope + self.devices = list(range(torch.cuda.device_count())) + self.sync = sync if len(self.devices) > 1 else False + # Initialize queues + self.worker_ids = self.devices[1:] + self.master_queue = Queue(len(self.worker_ids)) + self.worker_queues = [Queue(1) for _ in self.worker_ids] + + def forward(self, x): + # resize the input to (B, C, -1) + input_shape = x.size() + x = x.view(input_shape[0], self.num_features, -1) + if x.get_device() == self.devices[0]: + # Master mode + extra = { + "is_master": True, + "master_queue": self.master_queue, + "worker_queues": self.worker_queues, + "worker_ids": self.worker_ids + } + else: + # Worker mode + extra = { + "is_master": False, + "master_queue": self.master_queue, + "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())] + } + if self.inplace: + return inp_syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var, + extra, self.sync, self.training, self.momentum, self.eps, + self.activation, self.slope).view(input_shape) + else: + return syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var, + extra, self.sync, self.training, self.momentum, self.eps, + self.activation, self.slope).view(input_shape) + + def extra_repr(self): + if self.activation == 'none': + return 'sync={}'.format(self.sync) + else: + return 'sync={}, act={}, slope={}, inplace={}'.format( + self.sync, self.activation, self.slope, self.inplace) + + +class BatchNorm1d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of 
:class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." + .format('BatchNorm1d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm1d, self).__init__(*args, **kwargs) + + +class BatchNorm2d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." + .format('BatchNorm2d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm2d, self).__init__(*args, **kwargs) + + +class BatchNorm3d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." + .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm3d, self).__init__(*args, **kwargs) diff --git a/segutils/core/nn/syncbn.py b/segutils/core/nn/syncbn.py new file mode 100644 index 0000000..c52ec1a --- /dev/null +++ b/segutils/core/nn/syncbn.py @@ -0,0 +1,223 @@ +# Adopt from https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/syncbn.py +"""Synchronized Cross-GPU Batch Normalization Module""" +import warnings +import torch +import torch.cuda.comm as comm + +from queue import Queue +from torch.autograd import Function +from torch.nn.modules.batchnorm import _BatchNorm +from torch.autograd.function import once_differentiable +from core.nn import _C + +__all__ = ['SyncBatchNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d'] + + +class _SyncBatchNorm(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-05, + activation="none", slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + 
ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + assert activation == 'none' + + # continous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + _ex, _exs = _C.expectation_forward(x) + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsqueeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + + # BN forward + y = _C.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return y + + @staticmethod + @once_differentiable + def backward(ctx, dz): + x, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # BN backward + dx, _dex, _dexs, dgamma, dbeta = _C.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps) + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + 
+ _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + dx_ = _C.expectation_backward(x, _dex, _dexs) + dx = dx + dx_ + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +syncbatchnorm = _SyncBatchNorm.apply + + +class SyncBatchNorm(_BatchNorm): + """Cross-GPU Synchronized Batch normalization (SyncBN) + + Parameters: + num_features: num_features from an expected input of + size batch_size x num_features x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + sync: a boolean value that when set to ``True``, synchronize across + different gpus. Default: ``True`` + activation : str + Name of the activation functions, one of: `leaky_relu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + Reference: + .. [1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." *ICML 2015* + .. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." 
*CVPR 2018* + Examples: + >>> m = SyncBatchNorm(100) + >>> net = torch.nn.DataParallel(m) + >>> output = net(input) + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, sync=True, activation='none', slope=0.01): + super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=True) + self.activation = activation + self.slope = slope + self.devices = list(range(torch.cuda.device_count())) + self.sync = sync if len(self.devices) > 1 else False + # Initialize queues + self.worker_ids = self.devices[1:] + self.master_queue = Queue(len(self.worker_ids)) + self.worker_queues = [Queue(1) for _ in self.worker_ids] + + def forward(self, x): + # resize the input to (B, C, -1) + input_shape = x.size() + x = x.view(input_shape[0], self.num_features, -1) + if x.get_device() == self.devices[0]: + # Master mode + extra = { + "is_master": True, + "master_queue": self.master_queue, + "worker_queues": self.worker_queues, + "worker_ids": self.worker_ids + } + else: + # Worker mode + extra = { + "is_master": False, + "master_queue": self.master_queue, + "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())] + } + + return syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var, + extra, self.sync, self.training, self.momentum, self.eps, + self.activation, self.slope).view(input_shape) + + def extra_repr(self): + if self.activation == 'none': + return 'sync={}'.format(self.sync) + else: + return 'sync={}, act={}, slope={}'.format( + self.sync, self.activation, self.slope) + + +class BatchNorm1d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." 
+ .format('BatchNorm1d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm1d, self).__init__(*args, **kwargs) + + +class BatchNorm2d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." + .format('BatchNorm2d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm2d, self).__init__(*args, **kwargs) + + +class BatchNorm3d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." + .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm3d, self).__init__(*args, **kwargs) diff --git a/segutils/core/utils/__init__.py b/segutils/core/utils/__init__.py new file mode 100644 index 0000000..067a8d0 --- /dev/null +++ b/segutils/core/utils/__init__.py @@ -0,0 +1,5 @@ +"""Utility functions.""" +from __future__ import absolute_import + +from .download import download, check_sha1 +from .filesystem import makedirs, try_import_pycocotools diff --git a/segutils/core/utils/distributed.py b/segutils/core/utils/distributed.py new file mode 100644 index 0000000..257cdf9 --- /dev/null +++ b/segutils/core/utils/distributed.py @@ -0,0 +1,258 @@ +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. 
+""" +import math +import pickle +import torch +import torch.utils.data as data +import torch.distributed as dist + +from torch.utils.data.sampler import Sampler, BatchSampler + +__all__ = ['get_world_size', 'get_rank', 'synchronize', 'is_main_process', + 'all_gather', 'make_data_sampler', 'make_batch_data_sampler', + 'reduce_dict', 'reduce_loss_dict'] + + +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/comm.py +def get_world_size(): + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.IntTensor([tensor.numel()]).to("cuda") + size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in 
size_list: + tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) + if local_size != max_size: + padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that process with rank + 0 has the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +def reduce_loss_dict(loss_dict): + """ + Reduce the loss dictionary from all processes so that process with rank + 0 has the averaged results. Returns a dict with the same fields as + loss_dict, after reduction. 
+ """ + world_size = get_world_size() + if world_size < 2: + return loss_dict + with torch.no_grad(): + loss_names = [] + all_losses = [] + for k in sorted(loss_dict.keys()): + loss_names.append(k) + all_losses.append(loss_dict[k]) + all_losses = torch.stack(all_losses, dim=0) + dist.reduce(all_losses, dst=0) + if dist.get_rank() == 0: + # only main process gets accumulated, so only divide by + # world_size in this case + all_losses /= world_size + reduced_losses = {k: v for k, v in zip(loss_names, all_losses)} + return reduced_losses + + +def make_data_sampler(dataset, shuffle, distributed): + if distributed: + return DistributedSampler(dataset, shuffle=shuffle) + if shuffle: + sampler = data.sampler.RandomSampler(dataset) + else: + sampler = data.sampler.SequentialSampler(dataset) + return sampler + + +def make_batch_data_sampler(sampler, images_per_batch, num_iters=None, start_iter=0): + batch_sampler = data.sampler.BatchSampler(sampler, images_per_batch, drop_last=True) + if num_iters is not None: + batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iters, start_iter) + return batch_sampler + + +# Code is copy-pasted from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/data/samplers/distributed.py +class DistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. 
+ """ + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + if self.shuffle: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset: offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class IterationBasedBatchSampler(BatchSampler): + """ + Wraps a BatchSampler, resampling from it until + a specified number of iterations have been sampled + """ + + def __init__(self, batch_sampler, num_iterations, start_iter=0): + self.batch_sampler = batch_sampler + self.num_iterations = num_iterations + self.start_iter = start_iter + + def __iter__(self): + iteration = self.start_iter + while iteration <= self.num_iterations: + # if the underlying sampler has a set_epoch method, like + # DistributedSampler, used for making each process see + # a different split of the dataset, then set it + if hasattr(self.batch_sampler.sampler, 
"set_epoch"): + self.batch_sampler.sampler.set_epoch(iteration) + for batch in self.batch_sampler: + iteration += 1 + if iteration > self.num_iterations: + break + yield batch + + def __len__(self): + return self.num_iterations + + +if __name__ == '__main__': + pass diff --git a/segutils/core/utils/download.py b/segutils/core/utils/download.py new file mode 100644 index 0000000..fec8bb4 --- /dev/null +++ b/segutils/core/utils/download.py @@ -0,0 +1,88 @@ +"""Download files with progress bar.""" +import os +import hashlib +import requests +from tqdm import tqdm + +def check_sha1(filename, sha1_hash): + """Check whether the sha1 hash of the file content matches the expected hash. + Parameters + ---------- + filename : str + Path to the file. + sha1_hash : str + Expected sha1 hash in hexadecimal digits. + Returns + ------- + bool + Whether the file content matches the expected hash. + """ + sha1 = hashlib.sha1() + with open(filename, 'rb') as f: + while True: + data = f.read(1048576) + if not data: + break + sha1.update(data) + + sha1_file = sha1.hexdigest() + l = min(len(sha1_file), len(sha1_hash)) + return sha1.hexdigest()[0:l] == sha1_hash[0:l] + +def download(url, path=None, overwrite=False, sha1_hash=None): + """Download an given URL + Parameters + ---------- + url : str + URL to download + path : str, optional + Destination path to store downloaded file. By default stores to the + current directory with same name as in url. + overwrite : bool, optional + Whether to overwrite destination file if already exists. + sha1_hash : str, optional + Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified + but doesn't match. + Returns + ------- + str + The file path of the downloaded file. 
+ """ + if path is None: + fname = url.split('/')[-1] + else: + path = os.path.expanduser(path) + if os.path.isdir(path): + fname = os.path.join(path, url.split('/')[-1]) + else: + fname = path + + if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)): + dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) + if not os.path.exists(dirname): + os.makedirs(dirname) + + print('Downloading %s from %s...'%(fname, url)) + r = requests.get(url, stream=True) + if r.status_code != 200: + raise RuntimeError("Failed downloading url %s"%url) + total_length = r.headers.get('content-length') + with open(fname, 'wb') as f: + if total_length is None: # no content length header + for chunk in r.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + else: + total_length = int(total_length) + for chunk in tqdm(r.iter_content(chunk_size=1024), + total=int(total_length / 1024. + 0.5), + unit='KB', unit_scale=False, dynamic_ncols=True): + f.write(chunk) + + if sha1_hash and not check_sha1(fname, sha1_hash): + raise UserWarning('File {} is downloaded but the content hash does not match. ' \ + 'The repo may be outdated or download may be incomplete. ' \ + 'If the "repo_url" is overridden, consider switching to ' \ + 'the default repo.'.format(fname)) + + return fname \ No newline at end of file diff --git a/segutils/core/utils/filesystem.py b/segutils/core/utils/filesystem.py new file mode 100644 index 0000000..ab2510d --- /dev/null +++ b/segutils/core/utils/filesystem.py @@ -0,0 +1,123 @@ +"""Filesystem utility functions.""" +from __future__ import absolute_import +import os +import errno + + +def makedirs(path): + """Create directory recursively if not exists. + Similar to `makedir -p`, you can skip checking existence before this function. 
+ Parameters + ---------- + path : str + Path of the desired dir + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def try_import(package, message=None): + """Try import specified package, with custom message support. + Parameters + ---------- + package : str + The name of the targeting package. + message : str, default is None + If not None, this function will raise customized error message when import error is found. + Returns + ------- + module if found, raise ImportError otherwise + """ + try: + return __import__(package) + except ImportError as e: + if not message: + raise e + raise ImportError(message) + + +def try_import_cv2(): + """Try import cv2 at runtime. + Returns + ------- + cv2 module if found. Raise ImportError otherwise + """ + msg = "cv2 is required, you can install by package manager, e.g. 'apt-get', \ + or `pip install opencv-python --user` (note that this is unofficial PYPI package)." + return try_import('cv2', msg) + + +def import_try_install(package, extern_url=None): + """Try import the specified package. + If the package not installed, try use pip to install and import if success. + Parameters + ---------- + package : str + The name of the package trying to import. + extern_url : str or None, optional + The external url if package is not hosted on PyPI. + For example, you can install a package using: + "pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx". + In this case, you can pass the url to the extern_url. + Returns + ------- + + The imported python module. 
+ """ + try: + return __import__(package) + except ImportError: + try: + from pip import main as pipmain + except ImportError: + from pip._internal import main as pipmain + + # trying to install package + url = package if extern_url is None else extern_url + pipmain(['install', '--user', url]) # will raise SystemExit Error if fails + + # trying to load again + try: + return __import__(package) + except ImportError: + import sys + import site + user_site = site.getusersitepackages() + if user_site not in sys.path: + sys.path.append(user_site) + return __import__(package) + return __import__(package) + + +"""Import helper for pycocotools""" + + +# NOTE: for developers +# please do not import any pycocotools in __init__ because we are trying to lazy +# import pycocotools to avoid install it for other users who may not use it. +# only import when you actually use it + + +def try_import_pycocotools(): + """Tricks to optionally install and import pycocotools""" + # first we can try import pycocotools + try: + import pycocotools as _ + except ImportError: + import os + # we need to install pycootools, which is a bit tricky + # pycocotools sdist requires Cython, numpy(already met) + import_try_install('cython') + # pypi pycocotools is not compatible with windows + win_url = 'git+https://github.com/zhreshold/cocoapi.git#subdirectory=PythonAPI' + try: + if os.name == 'nt': + import_try_install('pycocotools', win_url) + else: + import_try_install('pycocotools') + except ImportError: + faq = 'cocoapi FAQ' + raise ImportError('Cannot import or install pycocotools, please refer to %s.' % faq) diff --git a/segutils/core/utils/logger.py b/segutils/core/utils/logger.py new file mode 100644 index 0000000..a2de227 --- /dev/null +++ b/segutils/core/utils/logger.py @@ -0,0 +1,30 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+import logging +import os +import sys + +__all__ = ['setup_logger'] + + +# reference from: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/logger.py +def setup_logger(name, save_dir, distributed_rank, filename="log.txt", mode='w'): + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + # don't log results for the non-master process + if distributed_rank > 0: + return logger + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") + ch.setFormatter(formatter) + logger.addHandler(ch) + + if save_dir: + if not os.path.exists(save_dir): + os.makedirs(save_dir) + fh = logging.FileHandler(os.path.join(save_dir, filename), mode=mode) # 'a+' for add, 'w' for overwrite + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logger.addHandler(fh) + + return logger diff --git a/segutils/core/utils/loss.py b/segutils/core/utils/loss.py new file mode 100644 index 0000000..aab5314 --- /dev/null +++ b/segutils/core/utils/loss.py @@ -0,0 +1,196 @@ +"""Custom losses.""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torch.autograd import Variable + +__all__ = ['MixSoftmaxCrossEntropyLoss', 'MixSoftmaxCrossEntropyOHEMLoss', + 'EncNetLoss', 'ICNetLoss', 'get_segmentation_loss'] + + +# TODO: optim function +class MixSoftmaxCrossEntropyLoss(nn.CrossEntropyLoss): + def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs): + super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_index=ignore_index) + self.aux = aux + self.aux_weight = aux_weight + + def _aux_forward(self, *inputs, **kwargs): + *preds, target = tuple(inputs) + + loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[0], target) + for i in range(1, len(preds)): + aux_loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[i], target) + loss += self.aux_weight * aux_loss + return loss + + def 
forward(self, *inputs, **kwargs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + if self.aux: + return dict(loss=self._aux_forward(*inputs)) + else: + return dict(loss=super(MixSoftmaxCrossEntropyLoss, self).forward(*inputs)) + + +# reference: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/loss.py +class EncNetLoss(nn.CrossEntropyLoss): + """2D Cross Entropy Loss with SE Loss""" + + def __init__(self, se_loss=True, se_weight=0.2, nclass=19, aux=False, + aux_weight=0.4, weight=None, ignore_index=-1, **kwargs): + super(EncNetLoss, self).__init__(weight, None, ignore_index) + self.se_loss = se_loss + self.aux = aux + self.nclass = nclass + self.se_weight = se_weight + self.aux_weight = aux_weight + self.bceloss = nn.BCELoss(weight) + + def forward(self, *inputs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + if not self.se_loss and not self.aux: + return super(EncNetLoss, self).forward(*inputs) + elif not self.se_loss: + pred1, pred2, target = tuple(inputs) + loss1 = super(EncNetLoss, self).forward(pred1, target) + loss2 = super(EncNetLoss, self).forward(pred2, target) + return dict(loss=loss1 + self.aux_weight * loss2) + elif not self.aux: + print (inputs) + pred, se_pred, target = tuple(inputs) + se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred) + loss1 = super(EncNetLoss, self).forward(pred, target) + loss2 = self.bceloss(torch.sigmoid(se_pred), se_target) + return dict(loss=loss1 + self.se_weight * loss2) + else: + pred1, se_pred, pred2, target = tuple(inputs) + se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1) + loss1 = super(EncNetLoss, self).forward(pred1, target) + loss2 = super(EncNetLoss, self).forward(pred2, target) + loss3 = self.bceloss(torch.sigmoid(se_pred), se_target) + return dict(loss=loss1 + self.aux_weight * loss2 + self.se_weight * loss3) + + @staticmethod + def _get_batch_label_vector(target, 
nclass): + # target is a 3D Variable BxHxW, output is 2D BxnClass + batch = target.size(0) + tvect = Variable(torch.zeros(batch, nclass)) + for i in range(batch): + hist = torch.histc(target[i].cpu().data.float(), + bins=nclass, min=0, + max=nclass - 1) + vect = hist > 0 + tvect[i] = vect + return tvect + + +# TODO: optim function +class ICNetLoss(nn.CrossEntropyLoss): + """Cross Entropy Loss for ICNet""" + + def __init__(self, nclass, aux_weight=0.4, ignore_index=-1, **kwargs): + super(ICNetLoss, self).__init__(ignore_index=ignore_index) + self.nclass = nclass + self.aux_weight = aux_weight + + def forward(self, *inputs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + + pred, pred_sub4, pred_sub8, pred_sub16, target = tuple(inputs) + # [batch, W, H] -> [batch, 1, W, H] + target = target.unsqueeze(1).float() + target_sub4 = F.interpolate(target, pred_sub4.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long() + target_sub8 = F.interpolate(target, pred_sub8.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long() + target_sub16 = F.interpolate(target, pred_sub16.size()[2:], mode='bilinear', align_corners=True).squeeze( + 1).long() + loss1 = super(ICNetLoss, self).forward(pred_sub4, target_sub4) + loss2 = super(ICNetLoss, self).forward(pred_sub8, target_sub8) + loss3 = super(ICNetLoss, self).forward(pred_sub16, target_sub16) + return dict(loss=loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight) + + +class OhemCrossEntropy2d(nn.Module): + def __init__(self, ignore_index=-1, thresh=0.7, min_kept=100000, use_weight=True, **kwargs): + super(OhemCrossEntropy2d, self).__init__() + self.ignore_index = ignore_index + self.thresh = float(thresh) + self.min_kept = int(min_kept) + if use_weight: + weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, + 1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955, + 1.0865, 1.1529, 1.0507]) + self.criterion = 
torch.nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index) + else: + self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index) + + def forward(self, pred, target): + n, c, h, w = pred.size() + target = target.view(-1) + valid_mask = target.ne(self.ignore_index) + target = target * valid_mask.long() + num_valid = valid_mask.sum() + + prob = F.softmax(pred, dim=1) + prob = prob.transpose(0, 1).reshape(c, -1) + + if self.min_kept > num_valid: + print("Lables: {}".format(num_valid)) + elif num_valid > 0: + prob = prob.masked_fill_(1 - valid_mask, 1) + mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)] + threshold = self.thresh + if self.min_kept > 0: + index = mask_prob.argsort() + threshold_index = index[min(len(index), self.min_kept) - 1] + if mask_prob[threshold_index] > self.thresh: + threshold = mask_prob[threshold_index] + kept_mask = mask_prob.le(threshold) + valid_mask = valid_mask * kept_mask + target = target * kept_mask.long() + + target = target.masked_fill_(1 - valid_mask, self.ignore_index) + target = target.view(n, h, w) + + return self.criterion(pred, target) + + +class MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d): + def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs): + super(MixSoftmaxCrossEntropyOHEMLoss, self).__init__(ignore_index=ignore_index) + self.aux = aux + self.aux_weight = aux_weight + self.bceloss = nn.BCELoss(weight) + + def _aux_forward(self, *inputs, **kwargs): + *preds, target = tuple(inputs) + + loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[0], target) + for i in range(1, len(preds)): + aux_loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[i], target) + loss += self.aux_weight * aux_loss + return loss + + def forward(self, *inputs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + if self.aux: + return dict(loss=self._aux_forward(*inputs)) + else: + return 
dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(*inputs)) + + +def get_segmentation_loss(model, use_ohem=False, **kwargs): + if use_ohem: + return MixSoftmaxCrossEntropyOHEMLoss(**kwargs) + + model = model.lower() + if model == 'encnet': + return EncNetLoss(**kwargs) + elif model == 'icnet': + return ICNetLoss(nclass=4, **kwargs) + else: + return MixSoftmaxCrossEntropyLoss(**kwargs) diff --git a/segutils/core/utils/lr_scheduler.py b/segutils/core/utils/lr_scheduler.py new file mode 100644 index 0000000..32b3795 --- /dev/null +++ b/segutils/core/utils/lr_scheduler.py @@ -0,0 +1,179 @@ +"""Popular Learning Rate Schedulers""" +from __future__ import division +import math +import torch + +from bisect import bisect_right + +__all__ = ['LRScheduler', 'WarmupMultiStepLR', 'WarmupPolyLR'] + + +class LRScheduler(object): + r"""Learning Rate Scheduler + + Parameters + ---------- + mode : str + Modes for learning rate scheduler. + Currently it supports 'constant', 'step', 'linear', 'poly' and 'cosine'. + base_lr : float + Base learning rate, i.e. the starting learning rate. + target_lr : float + Target learning rate, i.e. the ending learning rate. + With constant mode target_lr is ignored. + niters : int + Number of iterations to be scheduled. + nepochs : int + Number of epochs to be scheduled. + iters_per_epoch : int + Number of iterations in each epoch. + offset : int + Number of iterations before this scheduler. + power : float + Power parameter of poly scheduler. + step_iter : list + A list of iterations to decay the learning rate. + step_epoch : list + A list of epochs to decay the learning rate. + step_factor : float + Learning rate decay factor. 
+ """ + + def __init__(self, mode, base_lr=0.01, target_lr=0, niters=0, nepochs=0, iters_per_epoch=0, + offset=0, power=0.9, step_iter=None, step_epoch=None, step_factor=0.1, warmup_epochs=0): + super(LRScheduler, self).__init__() + assert (mode in ['constant', 'step', 'linear', 'poly', 'cosine']) + + if mode == 'step': + assert (step_iter is not None or step_epoch is not None) + self.niters = niters + self.step = step_iter + epoch_iters = nepochs * iters_per_epoch + if epoch_iters > 0: + self.niters = epoch_iters + if step_epoch is not None: + self.step = [s * iters_per_epoch for s in step_epoch] + + self.step_factor = step_factor + self.base_lr = base_lr + self.target_lr = base_lr if mode == 'constant' else target_lr + self.offset = offset + self.power = power + self.warmup_iters = warmup_epochs * iters_per_epoch + self.mode = mode + + def __call__(self, optimizer, num_update): + self.update(num_update) + assert self.learning_rate >= 0 + self._adjust_learning_rate(optimizer, self.learning_rate) + + def update(self, num_update): + N = self.niters - 1 + T = num_update - self.offset + T = min(max(0, T), N) + + if self.mode == 'constant': + factor = 0 + elif self.mode == 'linear': + factor = 1 - T / N + elif self.mode == 'poly': + factor = pow(1 - T / N, self.power) + elif self.mode == 'cosine': + factor = (1 + math.cos(math.pi * T / N)) / 2 + elif self.mode == 'step': + if self.step is not None: + count = sum([1 for s in self.step if s <= T]) + factor = pow(self.step_factor, count) + else: + factor = 1 + else: + raise NotImplementedError + + # warm up lr schedule + if self.warmup_iters > 0 and T < self.warmup_iters: + factor = factor * 1.0 * T / self.warmup_iters + + if self.mode == 'step': + self.learning_rate = self.base_lr * factor + else: + self.learning_rate = self.target_lr + (self.base_lr - self.target_lr) * factor + + def _adjust_learning_rate(self, optimizer, lr): + optimizer.param_groups[0]['lr'] = lr + # enlarge the lr at the head + for i in range(1, 
len(optimizer.param_groups)): + optimizer.param_groups[i]['lr'] = lr * 10 + + +# separating MultiStepLR with WarmupLR +# but the current LRScheduler design doesn't allow it +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/solver/lr_scheduler.py +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3, + warmup_iters=500, warmup_method="linear", last_epoch=-1): + super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. Got {}", milestones) + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted got {}".format(warmup_method)) + + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + + def get_lr(self): + warmup_factor = 1 + if self.last_epoch < self.warmup_iters: + if self.warmup_method == 'constant': + warmup_factor = self.warmup_factor + elif self.warmup_factor == 'linear': + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + return [base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs] + + +class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, target_lr=0, max_iters=0, power=0.9, warmup_factor=1.0 / 3, + warmup_iters=500, warmup_method='linear', last_epoch=-1): + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted " + "got {}".format(warmup_method)) + + self.target_lr = target_lr + self.max_iters = max_iters + self.power = power + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters 
+ self.warmup_method = warmup_method + + super(WarmupPolyLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + N = self.max_iters - self.warmup_iters + T = self.last_epoch - self.warmup_iters + if self.last_epoch < self.warmup_iters: + if self.warmup_method == 'constant': + warmup_factor = self.warmup_factor + elif self.warmup_method == 'linear': + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + else: + raise ValueError("Unknown warmup type.") + return [self.target_lr + (base_lr - self.target_lr) * warmup_factor for base_lr in self.base_lrs] + factor = pow(1 - T / N, self.power) + return [self.target_lr + (base_lr - self.target_lr) * factor for base_lr in self.base_lrs] + + +if __name__ == '__main__': + import torch + import torch.nn as nn + + model = nn.Conv2d(16, 16, 3, 1, 1) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + lr_scheduler = WarmupPolyLR(optimizer, niters=1000) diff --git a/segutils/core/utils/parallel.py b/segutils/core/utils/parallel.py new file mode 100644 index 0000000..cb9e896 --- /dev/null +++ b/segutils/core/utils/parallel.py @@ -0,0 +1,162 @@ +"""Utils for Semantic Segmentation""" +import threading +import torch +import torch.cuda.comm as comm +from torch.nn.parallel.data_parallel import DataParallel +from torch.nn.parallel._functions import Broadcast +from torch.autograd import Function + +__all__ = ['DataParallelModel', 'DataParallelCriterion'] + + +class Reduce(Function): + @staticmethod + def forward(ctx, *inputs): + ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))] + inputs = sorted(inputs, key=lambda i: i.get_device()) + return comm.reduce_add(inputs) + + @staticmethod + def backward(ctx, gradOutputs): + return Broadcast.apply(ctx.target_gpus, gradOutputs) + + +class DataParallelModel(DataParallel): + """Data parallelism + + Hide the difference of single/multiple GPUs to the user. 
+ In the forward pass, the module is replicated on each device, + and each replica handles a portion of the input. During the backwards + pass, gradients from each replica are summed into the original module. + + The batch size should be larger than the number of GPUs used. + + Parameters + ---------- + module : object + Network to be parallelized. + sync : bool + enable synchronization (default: False). + Inputs: + - **inputs**: list of input + Outputs: + - **outputs**: list of output + Example:: + >>> net = DataParallelModel(model, device_ids=[0, 1, 2]) + >>> output = net(input_var) # input_var can be on any device, including CPU + """ + + def gather(self, outputs, output_device): + return outputs + + def replicate(self, module, device_ids): + modules = super(DataParallelModel, self).replicate(module, device_ids) + return modules + + +# Reference: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/parallel.py +class DataParallelCriterion(DataParallel): + """ + Calculate loss in multiple-GPUs, which balance the memory usage for + Semantic Segmentation. + + The targets are splitted across the specified devices by chunking in + the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`. 
+ + Example:: + >>> net = DataParallelModel(model, device_ids=[0, 1, 2]) + >>> criterion = DataParallelCriterion(criterion, device_ids=[0, 1, 2]) + >>> y = net(x) + >>> loss = criterion(y, target) + """ + + def forward(self, inputs, *targets, **kwargs): + # the inputs should be the outputs of DataParallelModel + if not self.device_ids: + return self.module(inputs, *targets, **kwargs) + targets, kwargs = self.scatter(targets, kwargs, self.device_ids) + if len(self.device_ids) == 1: + return self.module(inputs, *targets[0], **kwargs[0]) + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = criterion_parallel_apply(replicas, inputs, targets, kwargs) + return Reduce.apply(*outputs) / len(outputs) + + +def get_a_var(obj): + if isinstance(obj, torch.Tensor): + return obj + + if isinstance(obj, list) or isinstance(obj, tuple): + for result in map(get_a_var, obj): + if isinstance(result, torch.Tensor): + return result + + if isinstance(obj, dict): + for result in map(get_a_var, obj.items()): + if isinstance(result, torch.Tensor): + return result + return None + + +def criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None): + r"""Applies each `module` in :attr:`modules` in parallel on arguments + contained in :attr:`inputs` (positional), attr:'targets' (positional) and :attr:`kwargs_tup` (keyword) + on each of :attr:`devices`. + + Args: + modules (Module): modules to be parallelized + inputs (tensor): inputs to the modules + targets (tensor): targets to the modules + devices (list of int or torch.device): CUDA devices + :attr:`modules`, :attr:`inputs`, :attr:'targets' :attr:`kwargs_tup` (if given), and + :attr:`devices` (if given) should all have same length. Moreover, each + element of :attr:`inputs` can either be a single object as the only argument + to a module, or a collection of positional arguments. 
+ """ + assert len(modules) == len(inputs) + assert len(targets) == len(inputs) + if kwargs_tup is not None: + assert len(modules) == len(kwargs_tup) + else: + kwargs_tup = ({},) * len(modules) + if devices is not None: + assert len(modules) == len(devices) + else: + devices = [None] * len(modules) + lock = threading.Lock() + results = {} + grad_enabled = torch.is_grad_enabled() + + def _worker(i, module, input, target, kwargs, device=None): + torch.set_grad_enabled(grad_enabled) + if device is None: + device = get_a_var(input).get_device() + try: + with torch.cuda.device(device): + output = module(*(list(input) + target), **kwargs) + with lock: + results[i] = output + except Exception as e: + with lock: + results[i] = e + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, + args=(i, module, input, target, kwargs, device)) + for i, (module, input, target, kwargs, device) in + enumerate(zip(modules, inputs, targets, kwargs_tup, devices))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, Exception): + raise output + outputs.append(output) + return outputs diff --git a/segutils/core/utils/score.py b/segutils/core/utils/score.py new file mode 100644 index 0000000..a037e65 --- /dev/null +++ b/segutils/core/utils/score.py @@ -0,0 +1,161 @@ +"""Evaluation Metrics for Semantic Segmentation""" +import torch +import numpy as np + +__all__ = ['SegmentationMetric', 'batch_pix_accuracy', 'batch_intersection_union', + 'pixelAccuracy', 'intersectionAndUnion', 'hist_info', 'compute_score'] + + +class SegmentationMetric(object): + """Computes pixAcc and mIoU metric scores + """ + + def __init__(self, nclass): + super(SegmentationMetric, self).__init__() + self.nclass = nclass + self.reset() + + def update(self, preds, labels): + """Updates the internal 
evaluation result. + + Parameters + ---------- + labels : 'NumpyArray' or list of `NumpyArray` + The labels of the data. + preds : 'NumpyArray' or list of `NumpyArray` + Predicted values. + """ + + def evaluate_worker(self, pred, label): + correct, labeled = batch_pix_accuracy(pred, label) + inter, union = batch_intersection_union(pred, label, self.nclass) + + self.total_correct += correct + self.total_label += labeled + if self.total_inter.device != inter.device: + self.total_inter = self.total_inter.to(inter.device) + self.total_union = self.total_union.to(union.device) + self.total_inter += inter + self.total_union += union + + if isinstance(preds, torch.Tensor): + evaluate_worker(self, preds, labels) + elif isinstance(preds, (list, tuple)): + for (pred, label) in zip(preds, labels): + evaluate_worker(self, pred, label) + + def get(self): + """Gets the current evaluation result. + + Returns + ------- + metrics : tuple of float + pixAcc and mIoU + """ + pixAcc = 1.0 * self.total_correct / (2.220446049250313e-16 + self.total_label) # remove np.spacing(1) + IoU = 1.0 * self.total_inter / (2.220446049250313e-16 + self.total_union) + mIoU = IoU.mean().item() + return pixAcc, mIoU + + def reset(self): + """Resets the internal evaluation result to initial state.""" + self.total_inter = torch.zeros(self.nclass) + self.total_union = torch.zeros(self.nclass) + self.total_correct = 0 + self.total_label = 0 + + +# pytorch version +def batch_pix_accuracy(output, target): + """PixAcc""" + # inputs are numpy array, output 4D, target 3D + predict = torch.argmax(output.long(), 1) + 1 + target = target.long() + 1 + + pixel_labeled = torch.sum(target > 0).item() + pixel_correct = torch.sum((predict == target) * (target > 0)).item() + assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled" + return pixel_correct, pixel_labeled + + +def batch_intersection_union(output, target, nclass): + """mIoU""" + # inputs are numpy array, output 4D, target 3D + mini 
= 1 + maxi = nclass + nbins = nclass + predict = torch.argmax(output, 1) + 1 + target = target.float() + 1 + + predict = predict.float() * (target > 0).float() + intersection = predict * (predict == target).float() + # areas of intersection and union + # element 0 in intersection occur the main difference from np.bincount. set boundary to -1 is necessary. + area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi) + area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi) + area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi) + area_union = area_pred + area_lab - area_inter + assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area" + return area_inter.float(), area_union.float() + + +def pixelAccuracy(imPred, imLab): + """ + This function takes the prediction and label of a single image, returns pixel-wise accuracy + To compute over many images do: + for i = range(Nimages): + (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = \ + pixelAccuracy(imPred[i], imLab[i]) + mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled)) + """ + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. 
+ pixel_labeled = np.sum(imLab >= 0) + pixel_correct = np.sum((imPred == imLab) * (imLab >= 0)) + pixel_accuracy = 1.0 * pixel_correct / pixel_labeled + return (pixel_accuracy, pixel_correct, pixel_labeled) + + +def intersectionAndUnion(imPred, imLab, numClass): + """ + This function takes the prediction and label of a single image, + returns intersection and union areas for each class + To compute over many images do: + for i in range(Nimages): + (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i]) + IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1) + """ + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. + imPred = imPred * (imLab >= 0) + + # Compute area intersection: + intersection = imPred * (imPred == imLab) + (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass)) + + # Compute area union: + (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass)) + (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass)) + area_union = area_pred + area_lab - area_intersection + return (area_intersection, area_union) + + +def hist_info(pred, label, num_cls): + assert pred.shape == label.shape + k = (label >= 0) & (label < num_cls) + labeled = np.sum(k) + correct = np.sum((pred[k] == label[k])) + + return np.bincount(num_cls * label[k].astype(int) + pred[k], minlength=num_cls ** 2).reshape(num_cls, + num_cls), labeled, correct + + +def compute_score(hist, correct, labeled): + iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) + mean_IU = np.nanmean(iu) + mean_IU_no_back = np.nanmean(iu[1:]) + freq = hist.sum(1) / hist.sum() + freq_IU = (iu[freq > 0] * freq[freq > 0]).sum() + mean_pixel_acc = correct / labeled + + return iu, mean_IU, mean_IU_no_back, mean_pixel_acc diff --git a/segutils/core/utils/visualize.py b/segutils/core/utils/visualize.py new file 
mode 100644 index 0000000..c63d6c9 --- /dev/null +++ b/segutils/core/utils/visualize.py @@ -0,0 +1,158 @@ +import os +import numpy as np +from PIL import Image + +__all__ = ['get_color_pallete', 'print_iou', 'set_img_color', + 'show_prediction', 'show_colorful_images', 'save_colorful_images'] + + +def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False): + n = iu.size + lines = [] + for i in range(n): + if class_names is None: + cls = 'Class %d:' % (i + 1) + else: + cls = '%d %s' % (i + 1, class_names[i]) + # lines.append('%-8s: %.3f%%' % (cls, iu[i] * 100)) + mean_IU = np.nanmean(iu) + mean_IU_no_back = np.nanmean(iu[1:]) + if show_no_back: + lines.append('mean_IU: %.3f%% || mean_IU_no_back: %.3f%% || mean_pixel_acc: %.3f%%' % ( + mean_IU * 100, mean_IU_no_back * 100, mean_pixel_acc * 100)) + else: + lines.append('mean_IU: %.3f%% || mean_pixel_acc: %.3f%%' % (mean_IU * 100, mean_pixel_acc * 100)) + lines.append('=================================================') + line = "\n".join(lines) + + print(line) + + +def set_img_color(img, label, colors, background=0, show255=False): + for i in range(len(colors)): + if i != background: + img[np.where(label == i)] = colors[i] + if show255: + img[np.where(label == 255)] = 255 + + return img + + +def show_prediction(img, pred, colors, background=0): + im = np.array(img, np.uint8) + set_img_color(im, pred, colors, background) + out = np.array(im) + + return out + + +def show_colorful_images(prediction, palettes): + im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()]) + im.show() + + +def save_colorful_images(prediction, filename, output_dir, palettes): + ''' + :param prediction: [B, H, W, C] + ''' + im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()]) + fn = os.path.join(output_dir, filename) + out_dir = os.path.split(fn)[0] + if not os.path.exists(out_dir): + os.mkdir(out_dir) + im.save(fn) + + +def get_color_pallete(npimg, dataset='pascal_voc'): + """Visualize image. 
+ + Parameters + ---------- + npimg : numpy.ndarray + Single channel image with shape `H, W, 1`. + dataset : str, default: 'pascal_voc' + The dataset that model pretrained on. ('pascal_voc', 'ade20k') + Returns + ------- + out_img : PIL.Image + Image with color pallete + """ + # recovery boundary + if dataset in ('pascal_voc', 'pascal_aug'): + npimg[npimg == -1] = 255 + # put colormap + if dataset == 'ade20k': + npimg = npimg + 1 + out_img = Image.fromarray(npimg.astype('uint8')) + out_img.putpalette(adepallete) + return out_img + elif dataset == 'citys': + out_img = Image.fromarray(npimg.astype('uint8')) + out_img.putpalette(cityspallete) + return out_img + out_img = Image.fromarray(npimg.astype('uint8')) + out_img.putpalette(vocpallete) + return out_img + + +def _getvocpallete(num_cls): + n = num_cls + pallete = [0] * (n * 3) + for j in range(0, n): + lab = j + pallete[j * 3 + 0] = 0 + pallete[j * 3 + 1] = 0 + pallete[j * 3 + 2] = 0 + i = 0 + while (lab > 0): + pallete[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) + pallete[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) + pallete[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + i = i + 1 + lab >>= 3 + return pallete + + +vocpallete = _getvocpallete(256) + +adepallete = [ + 0, 0, 0, 120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50, 4, 200, 3, 120, 120, 80, 140, 140, 140, 204, + 5, 255, 230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7, 150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82, + 143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3, 0, 102, 200, 61, 230, 250, 255, 6, 51, 11, 102, 255, 255, + 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220, 255, 9, 92, 112, 9, 255, 8, 255, 214, 7, 255, 224, 255, 184, 6, + 10, 255, 71, 255, 41, 10, 7, 255, 255, 224, 255, 8, 102, 8, 255, 255, 61, 6, 255, 194, 7, 255, 122, 8, 0, 255, + 20, 255, 8, 41, 255, 5, 153, 6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255, 140, 140, 140, 250, 10, 15, + 20, 255, 0, 31, 255, 0, 255, 31, 0, 255, 224, 0, 153, 255, 0, 0, 0, 255, 255, 71, 0, 0, 
235, 255, 0, 173, 255, + 31, 0, 255, 11, 200, 200, 255, 82, 0, 0, 255, 245, 0, 61, 255, 0, 255, 112, 0, 255, 133, 255, 0, 0, 255, 163, + 0, 255, 102, 0, 194, 255, 0, 0, 143, 255, 51, 255, 0, 0, 82, 255, 0, 255, 41, 0, 255, 173, 10, 0, 255, 173, 255, + 0, 0, 255, 153, 255, 92, 0, 255, 0, 255, 255, 0, 245, 255, 0, 102, 255, 173, 0, 255, 0, 20, 255, 184, 184, 0, + 31, 255, 0, 255, 61, 0, 71, 255, 255, 0, 204, 0, 255, 194, 0, 255, 82, 0, 10, 255, 0, 112, 255, 51, 0, 255, 0, + 194, 255, 0, 122, 255, 0, 255, 163, 255, 153, 0, 0, 255, 10, 255, 112, 0, 143, 255, 0, 82, 0, 255, 163, 255, + 0, 255, 235, 0, 8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255, 255, 0, 31, 0, 184, 255, 0, 214, 255, 255, + 0, 112, 92, 255, 0, 0, 224, 255, 112, 224, 255, 70, 184, 160, 163, 0, 255, 153, 0, 255, 71, 255, 0, 255, 0, + 163, 255, 204, 0, 255, 0, 143, 0, 255, 235, 133, 255, 0, 255, 0, 235, 245, 0, 255, 255, 0, 122, 255, 245, 0, + 10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255, 255, 255, 0, 0, 153, 255, 0, 41, 255, 0, 255, 204, 41, 0, + 255, 41, 255, 0, 173, 0, 255, 0, 245, 255, 71, 0, 255, 122, 0, 255, 0, 255, 184, 0, 92, 255, 184, 255, 0, 0, + 133, 255, 255, 214, 0, 25, 194, 194, 102, 255, 0, 92, 0, 255] + +cityspallete = [ + 128, 64, 128, + 244, 35, 232, + 70, 70, 70, + 102, 102, 156, + 190, 153, 153, + 153, 153, 153, + 250, 170, 30, + 220, 220, 0, + 107, 142, 35, + 152, 251, 152, + 0, 130, 180, + 220, 20, 60, + 255, 0, 0, + 0, 0, 142, + 0, 0, 70, + 0, 60, 100, + 0, 80, 100, + 0, 0, 230, + 119, 11, 32, +] diff --git a/segutils/crackUtils.py b/segutils/crackUtils.py new file mode 100644 index 0000000..ca98ba6 --- /dev/null +++ b/segutils/crackUtils.py @@ -0,0 +1,32 @@ +import numpy as np +from skimage.morphology import medial_axis +def Crack_measure(_mask_cv_gray,dsx=(123-30)*1000/35*0.004387636): + '''裂缝实际尺寸测量''' + '''输入:单个裂缝分割图像 + 过程:。 + 返回:最终绘制的结果图、最终落水人员(坐标、类别、置信度), + ''' + # 图像转化 + ###READ + img = np.array(_mask_cv_gray.astype(np.int32)) + image0 = binary = img + 
###SKELETONIZATION + img_skeletonized, distance = medial_axis(image0, return_distance=True) + #print(img_skeletonized) + img_skeletonized = np.array(img_skeletonized.astype(np.int32)) + ###COMPUTING WIDTH + dist_on_skel = distance * img_skeletonized + + width = dist_on_skel[dist_on_skel != 0] * 2 + for i in range(len(width)): + if width[i] <= 2.0: + width[i] = width[i] + else: + width[i] = width[i] - 2 + ###OUTPUT + real_length = np.count_nonzero(img_skeletonized) *dsx # Each pixel remaining after + real_mean_width = np.mean(width)*dsx + real_max_width = np.max(width)*dsx + real_min_width = np.min(width)*dsx + + return real_length,real_mean_width,real_max_width,real_min_width \ No newline at end of file diff --git a/segutils/model_stages.py b/segutils/model_stages.py new file mode 100644 index 0000000..8647699 --- /dev/null +++ b/segutils/model_stages.py @@ -0,0 +1,542 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision +import time +from stdcnet import STDCNet1446, STDCNet813 +#from models_725.bn import InPlaceABNSync as BatchNorm2d +# BatchNorm2d = nn.BatchNorm2d + +#modelSize=(360,640) ##(W,H) +#print('######Attention model input(H,W):',modelSize) +class ConvBNReLU(nn.Module): + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, + out_chan, + kernel_size = ks, + stride = stride, + padding = padding, + bias = False) + # self.bn = BatchNorm2d(out_chan) + # self.bn = BatchNorm2d(out_chan, activation='none') + self.bn = nn.BatchNorm2d(out_chan) + self.relu = nn.ReLU() + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class 
BiSeNetOutput(nn.Module):
    # Segmentation head: 3x3 ConvBNReLU followed by a 1x1 classifier conv.
    def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
        self.init_weight()

    def forward(self, x):
        x = self.conv(x)
        x = self.conv_out(x)
        return x

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        # Split parameters into weight-decayed (conv/linear weights) and
        # non-decayed (biases, batch-norm) groups for the optimizer.
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):######################1
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


class AttentionRefinementModule_static(nn.Module):
    # "static" variant: the avg-pool kernel size is fixed at construction
    # time instead of taken from the runtime feature size (TensorRT-friendly).
    def __init__(self, in_chan, out_chan,avg_pool2d_kernel_size, *args, **kwargs):
        super(AttentionRefinementModule_static, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False)
        # self.bn_atten = nn.BatchNorm2d(out_chan)
        # self.bn_atten = BatchNorm2d(out_chan, activation='none')
        self.bn_atten = nn.BatchNorm2d(out_chan)########################2

        self.sigmoid_atten = nn.Sigmoid()
        self.avg_pool2d_kernel_size = avg_pool2d_kernel_size
        self.init_weight()

    def forward(self, x):
        feat = self.conv(x)
        #atten = F.avg_pool2d(feat, feat.size()[2:])

        # Global context via a fixed-size average pool, then a 1x1-conv
        # sigmoid gate multiplied back onto the features.
        atten = F.avg_pool2d(feat, self.avg_pool2d_kernel_size)
        #print('------------------newline89:','out:',atten.size(),'in:',feat.size(), self.avg_pool2d_kernel_size)
        atten = self.conv_atten(atten)
        atten = self.bn_atten(atten)
        atten = self.sigmoid_atten(atten)
        out = torch.mul(feat, atten)
        return out

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)
class AttentionRefinementModule(nn.Module):
    # Dynamic variant: pools over the runtime feature size (global average).
    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(AttentionRefinementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False)
        # self.bn_atten = nn.BatchNorm2d(out_chan)
        # self.bn_atten = BatchNorm2d(out_chan, activation='none')
        self.bn_atten = nn.BatchNorm2d(out_chan)########################2

        self.sigmoid_atten = nn.Sigmoid()

        self.init_weight()

    def forward(self, x):
        feat = self.conv(x)
        atten = F.avg_pool2d(feat, feat.size()[2:])

        #atten = F.avg_pool2d(feat, self.avg_pool2d_kernel_size)
        #print('------------------newline89:','out:',atten.size(),'in:',feat.size(), self.avg_pool2d_kernel_size)
        atten = self.conv_atten(atten)
        atten = self.bn_atten(atten)
        atten = self.sigmoid_atten(atten)
        out = torch.mul(feat, atten)
        return out

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)


class ContextPath_static(nn.Module):
    # Context path with compile-time (modelSize-derived) pooling kernels so
    # the graph contains no data-dependent shapes.
    def __init__(self, backbone='CatNetSmall', pretrain_model='', use_conv_last=False,modelSize=(360,640), *args, **kwargs):
        super(ContextPath_static, self).__init__()

        self.backbone_name = backbone
        self.modelSize = modelSize

        # Ceil(modelSize / stride) for the 1/32 and 1/16 feature maps.
        self.avg_pool_kernel_size_32=[ int(modelSize[0]/32+0.999), int( modelSize[1]/32+0.999 ) ]
        self.avg_pool_kernel_size_16=[ int(modelSize[0]/16+0.999), int( modelSize[1]/16+0.999 ) ]

        if backbone == 'STDCNet1446':
            self.backbone = STDCNet1446(pretrain_model=pretrain_model, use_conv_last=use_conv_last)
            self.arm16 = AttentionRefinementModule(512, 128)
            inplanes = 1024
            if use_conv_last:
inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + + elif backbone == 'STDCNet813': + self.backbone = STDCNet813(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + + self.arm16 = AttentionRefinementModule_static(512, 128,self.avg_pool_kernel_size_16) + + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + + self.arm32 = AttentionRefinementModule_static(inplanes, 128,self.avg_pool_kernel_size_32) + + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + else: + print("backbone is not in backbone lists") + exit(0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + + feat2, feat4, feat8, feat16, feat32 = self.backbone(x) + print( '------------line179:', feat2.shape , feat4.shape, feat8.shape, feat16.shape, feat32.shape ) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + + #avg = F.avg_pool2d(feat32, feat32.size()[2:]) + print('line147:self.avg_pool_kernel_size_32:',self.avg_pool_kernel_size_32,feat32.shape) + avg = F.avg_pool2d(feat32, self.avg_pool_kernel_size_32) + #print('------------------newline140:','out:','out;',avg.size(),' in:',feat32.size()) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + #print('------------line143,arm32:',feat32.size()) + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + #print('------------line148,arm16:',feat16.size()) + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = 
F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat2, feat4, feat8, feat16, feat16_up, feat32_up # x8, x16 + + # return feat8, feat16_up # x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):#################3 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + +class ContextPath(nn.Module): + def __init__(self, backbone='CatNetSmall', pretrain_model='', use_conv_last=False, *args, **kwargs): + super(ContextPath, self).__init__() + + self.backbone_name = backbone + + if backbone == 'STDCNet1446': + self.backbone = STDCNet1446(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + self.arm16 = AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + + elif backbone == 'STDCNet813': + self.backbone = STDCNet813(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + + self.arm16 = AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + else: + 
print("backbone is not in backbone lists") + exit(0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + + feat2, feat4, feat8, feat16, feat32 = self.backbone(x) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + + avg = F.avg_pool2d(feat32, feat32.size()[2:]) + #print('line147:self.avg_pool_kernel_size_32:',self.avg_pool_kernel_size_32) + #avg = F.avg_pool2d(feat32, self.avg_pool_kernel_size_32) + #print('------------------newline140:','out:','out;',avg.size(),' in:',feat32.size()) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + #print('------------line143,arm32:',feat32.size()) + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + #print('------------line148,arm16:',feat16.size()) + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat2, feat4, feat8, feat16, feat16_up, feat32_up # x8, x16 + + # return feat8, feat16_up # x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):#################3 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class FeatureFusionModule_static(nn.Module): + def __init__(self, in_chan, out_chan,modelSize ,*args, **kwargs): + super(FeatureFusionModule_static, self).__init__() + self.convblk = ConvBNReLU(in_chan, 
out_chan, ks=1, stride=1, padding=0) + self.avg_pool_kernel_size=[ int(modelSize[0]/8+0.9999), int( modelSize[1]/8+0.9999 ) ] + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + + #atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = F.avg_pool2d(feat, kernel_size=self.avg_pool_kernel_size) + #print('------------------newline199:',' out:',atten.size(),'in:',feat.size()) + + + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):##################4 + nowd_params += list(module.parameters()) + return wd_params, nowd_params +class FeatureFusionModule(nn.Module): + def __init__(self, in_chan, out_chan ,*args, **kwargs): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) + + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) 
+ self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + + atten = F.avg_pool2d(feat, feat.size()[2:]) + #atten = F.avg_pool2d(feat, kernel_size=self.avg_pool_kernel_size) + #print('------------------newline199:',' out:',atten.size(),'in:',feat.size()) + + + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):##################4 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class BiSeNet_STDC(nn.Module): + def __init__(self, backbone, n_classes, pretrain_model='', use_boundary_2=False, use_boundary_4=False, + use_boundary_8=False, use_boundary_16=False, use_conv_last=False,**kwargs): + super(BiSeNet_STDC, self).__init__() + if 'modelSize' in kwargs: + + modelSize = kwargs['modelSize'] + else: + modelSize=None + + self.use_boundary_2 = use_boundary_2 + self.use_boundary_4 = use_boundary_4 + self.use_boundary_8 = use_boundary_8 + self.use_boundary_16 = use_boundary_16 + # self.heat_map = heat_map + if modelSize: + self.cp = ContextPath_static(backbone, pretrain_model, use_conv_last=use_conv_last,modelSize=modelSize) + else: + self.cp = ContextPath(backbone, pretrain_model, use_conv_last=use_conv_last) + + if backbone == 'STDCNet1446': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + 
            sp16_inplanes = 512
            inplane = sp8_inplanes + conv_out_inplanes

        elif backbone == 'STDCNet813':
            conv_out_inplanes = 128
            sp2_inplanes = 32
            sp4_inplanes = 64
            sp8_inplanes = 256
            sp16_inplanes = 512
            inplane = sp8_inplanes + conv_out_inplanes

        else:
            print("backbone is not in backbone lists")
            exit(0)
        # Static fusion module only when a fixed modelSize was supplied.
        if modelSize:
            self.ffm = FeatureFusionModule_static(inplane, 256,modelSize)
        else:
            self.ffm = FeatureFusionModule(inplane, 256)
        self.conv_out = BiSeNetOutput(256, 256, n_classes)
        self.conv_out16 = BiSeNetOutput(conv_out_inplanes, 64, n_classes)
        self.conv_out32 = BiSeNetOutput(conv_out_inplanes, 64, n_classes)

        # Auxiliary boundary heads (only used when training with boundaries).
        self.conv_out_sp16 = BiSeNetOutput(sp16_inplanes, 64, 1)

        self.conv_out_sp8 = BiSeNetOutput(sp8_inplanes, 64, 1)
        self.conv_out_sp4 = BiSeNetOutput(sp4_inplanes, 64, 1)
        self.conv_out_sp2 = BiSeNetOutput(sp2_inplanes, 64, 1)
        self.init_weight()

    def forward(self, x):
        # Inference-only forward: auxiliary/boundary heads are commented out
        # and a single full-resolution segmentation map is returned.
        H, W = x.size()[2:]
        # time_0 = time.time()
        # feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x)
        feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x)
        # print('----backbone', (time.time() - time_0) * 1000)
        # feat_out_sp2 = self.conv_out_sp2(feat_res2)
        #
        # feat_out_sp4 = self.conv_out_sp4(feat_res4)
        #
        # feat_out_sp8 = self.conv_out_sp8(feat_res8)
        #
        # feat_out_sp16 = self.conv_out_sp16(feat_res16)
        # time_1 = time.time()
        feat_fuse = self.ffm(feat_res8, feat_cp8)
        # print('----ffm', (time.time() - time_1) * 1000)
        # time_2 = time.time()
        feat_out = self.conv_out(feat_fuse)
        # feat_out16 = self.conv_out16(feat_cp8)
        # feat_out32 = self.conv_out32(feat_cp16)

        feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True)
        # print('----conv_out', (time.time() - time_2) * 1000)
        # feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True)
        # feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True)


        # if self.use_boundary_2 and self.use_boundary_4 and self.use_boundary_8:
        #     return feat_out, feat_out16, feat_out32, feat_out_sp2, feat_out_sp4, feat_out_sp8
        #
        # if (not self.use_boundary_2) and self.use_boundary_4 and self.use_boundary_8:
        #     return feat_out, feat_out16, feat_out32, feat_out_sp4, feat_out_sp8
        #
        # if (not self.use_boundary_2) and (not self.use_boundary_4) and self.use_boundary_8:
        return feat_out

        # if (not self.use_boundary_2) and (not self.use_boundary_4) and (not self.use_boundary_8):
        #     return feat_out, feat_out16, feat_out32

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        # Fusion/output modules get their own (higher-lr) parameter groups.
        wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []
        for name, child in self.named_children():
            child_wd_params, child_nowd_params = child.get_params()
            if isinstance(child, (FeatureFusionModule, BiSeNetOutput)):
                lr_mul_wd_params += child_wd_params
                lr_mul_nowd_params += child_nowd_params
            else:
                wd_params += child_wd_params
                nowd_params += child_nowd_params
        return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params


if __name__ == "__main__":



    # Smoke-test model construction only (no forward pass).
    model = BiSeNet_STDC(backbone='STDCNet813', n_classes=2,
                 use_boundary_2=False, use_boundary_4=False,
                 use_boundary_8=True, use_boundary_16=False,
                 use_conv_last=False,
                 # modelSize=[360,640]
                 )
    #modelSize=[360,640]
    print()
    # torch.save(net.state_dict(), 'STDCNet813.pth')###


diff --git a/segutils/model_stages_0331.py b/segutils/model_stages_0331.py
new file mode 100644
index 0000000..df26f7c
--- /dev/null
+++ b/segutils/model_stages_0331.py
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import time
from stdcnet import STDCNet1446, STDCNet813
#from models_725.bn import InPlaceABNSync
as BatchNorm2d
# BatchNorm2d = nn.BatchNorm2d

# Older (0331) revision: the model input size is a module-level constant
# instead of a constructor argument as in segutils/model_stages.py.
modelSize=(360,640) ##(W,H)
print('######Attention model input(H,W):',modelSize)
class ConvBNReLU(nn.Module):
    # Conv2d -> BatchNorm2d -> ReLU building block.
    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_chan,
                out_chan,
                kernel_size = ks,
                stride = stride,
                padding = padding,
                bias = False)
        # self.bn = BatchNorm2d(out_chan)
        # self.bn = BatchNorm2d(out_chan, activation='none')
        self.bn = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU()
        self.init_weight()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)


class BiSeNetOutput(nn.Module):
    # Segmentation head: 3x3 ConvBNReLU followed by a 1x1 classifier conv.
    def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
        self.init_weight()

    def forward(self, x):
        x = self.conv(x)
        x = self.conv_out(x)
        return x

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        # Weight-decay vs no-decay parameter groups for the optimizer.
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):######################1
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


class AttentionRefinementModule(nn.Module):
    # In this revision the ARM always takes a fixed avg-pool kernel size.
    def __init__(self, in_chan, out_chan,avg_pool2d_kernel_size, *args, **kwargs):
        super(AttentionRefinementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False)
        # self.bn_atten = nn.BatchNorm2d(out_chan)
        # self.bn_atten = BatchNorm2d(out_chan, activation='none')
        self.bn_atten = nn.BatchNorm2d(out_chan)########################2

        self.sigmoid_atten = nn.Sigmoid()
        self.avg_pool2d_kernel_size = avg_pool2d_kernel_size
        self.init_weight()

    def forward(self, x):
        feat = self.conv(x)
        #atten = F.avg_pool2d(feat, feat.size()[2:])
        # Fixed-kernel global context, gated back onto the features.
        atten = F.avg_pool2d(feat, self.avg_pool2d_kernel_size)
        #print('------------------newline86:','out:',atten.size(),'in:',feat.size())
        atten = self.conv_atten(atten)
        atten = self.bn_atten(atten)
        atten = self.sigmoid_atten(atten)
        out = torch.mul(feat, atten)
        return out

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)


class ContextPath(nn.Module):
    # Pool kernels derived from the module-level modelSize constant.
    def __init__(self, backbone='CatNetSmall', pretrain_model='', use_conv_last=False, *args, **kwargs):
        super(ContextPath, self).__init__()

        self.backbone_name = backbone
        self.avg_pool_kernel_size_32=[ int(modelSize[0]/32+0.999), int( modelSize[1]/32+0.999 ) ]
        self.avg_pool_kernel_size_16=[ int(modelSize[0]/16+0.999), int( modelSize[1]/16+0.999 ) ]
        if backbone == 'STDCNet1446':
            self.backbone = STDCNet1446(pretrain_model=pretrain_model, use_conv_last=use_conv_last)
            self.arm16 = AttentionRefinementModule(512, 128)
            inplanes = 1024
            if use_conv_last:
                inplanes = 1024
            self.arm32 = AttentionRefinementModule(inplanes, 128)
            self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
            self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
            self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0)

        elif backbone == 'STDCNet813':
            self.backbone = STDCNet813(pretrain_model=pretrain_model,
use_conv_last=use_conv_last) + self.arm16 = AttentionRefinementModule(512, 128,self.avg_pool_kernel_size_16) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128,self.avg_pool_kernel_size_32) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + else: + print("backbone is not in backbone lists") + exit(0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + + feat2, feat4, feat8, feat16, feat32 = self.backbone(x) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + + #avg = F.avg_pool2d(feat32, feat32.size()[2:]) + #print('line147:self.avg_pool_kernel_size_32:',self.avg_pool_kernel_size_32) + avg = F.avg_pool2d(feat32, self.avg_pool_kernel_size_32) + #print('------------------newline140:','out:','out;',avg.size(),' in:',feat32.size()) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + #print('------------line143,arm32:',feat32.size()) + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + #print('------------line148,arm16:',feat16.size()) + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat2, feat4, feat8, feat16, feat16_up, feat32_up # x8, x16 + + # return feat8, feat16_up # x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, 
(nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):#################3 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class FeatureFusionModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) + self.avg_pool_kernel_size=[ int(modelSize[0]/8+0.9999), int( modelSize[1]/8+0.9999 ) ] + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + + #atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = F.avg_pool2d(feat, kernel_size=self.avg_pool_kernel_size) + #print('------------------newline199:',' out:',atten.size(),'in:',feat.size()) + + + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):##################4 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class BiSeNet_STDC(nn.Module): + 
def __init__(self, backbone, n_classes, pretrain_model='', use_boundary_2=False, use_boundary_4=False, + use_boundary_8=False, use_boundary_16=False, use_conv_last=False): + super(BiSeNet_STDC, self).__init__() + + self.use_boundary_2 = use_boundary_2 + self.use_boundary_4 = use_boundary_4 + self.use_boundary_8 = use_boundary_8 + self.use_boundary_16 = use_boundary_16 + # self.heat_map = heat_map + self.cp = ContextPath(backbone, pretrain_model, use_conv_last=use_conv_last) + + if backbone == 'STDCNet1446': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + sp16_inplanes = 512 + inplane = sp8_inplanes + conv_out_inplanes + + elif backbone == 'STDCNet813': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + sp16_inplanes = 512 + inplane = sp8_inplanes + conv_out_inplanes + + else: + print("backbone is not in backbone lists") + exit(0) + + self.ffm = FeatureFusionModule(inplane, 256) + self.conv_out = BiSeNetOutput(256, 256, n_classes) + self.conv_out16 = BiSeNetOutput(conv_out_inplanes, 64, n_classes) + self.conv_out32 = BiSeNetOutput(conv_out_inplanes, 64, n_classes) + + self.conv_out_sp16 = BiSeNetOutput(sp16_inplanes, 64, 1) + + self.conv_out_sp8 = BiSeNetOutput(sp8_inplanes, 64, 1) + self.conv_out_sp4 = BiSeNetOutput(sp4_inplanes, 64, 1) + self.conv_out_sp2 = BiSeNetOutput(sp2_inplanes, 64, 1) + self.init_weight() + + def forward(self, x): + H, W = x.size()[2:] + # time_0 = time.time() + # feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x) + feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x) + # print('----backbone', (time.time() - time_0) * 1000) + # feat_out_sp2 = self.conv_out_sp2(feat_res2) + # + # feat_out_sp4 = self.conv_out_sp4(feat_res4) + # + # feat_out_sp8 = self.conv_out_sp8(feat_res8) + # + # feat_out_sp16 = self.conv_out_sp16(feat_res16) + # time_1 = time.time() + feat_fuse = self.ffm(feat_res8, feat_cp8) + # 
print('----ffm', (time.time() - time_1) * 1000) + # time_2 = time.time() + feat_out = self.conv_out(feat_fuse) + # feat_out16 = self.conv_out16(feat_cp8) + # feat_out32 = self.conv_out32(feat_cp16) + + feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True) + # print('----conv_out', (time.time() - time_2) * 1000) + # feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True) + # feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True) + + + # if self.use_boundary_2 and self.use_boundary_4 and self.use_boundary_8: + # return feat_out, feat_out16, feat_out32, feat_out_sp2, feat_out_sp4, feat_out_sp8 + # + # if (not self.use_boundary_2) and self.use_boundary_4 and self.use_boundary_8: + # return feat_out, feat_out16, feat_out32, feat_out_sp4, feat_out_sp8 + # + # if (not self.use_boundary_2) and (not self.use_boundary_4) and self.use_boundary_8: + return feat_out + + # if (not self.use_boundary_2) and (not self.use_boundary_4) and (not self.use_boundary_8): + # return feat_out, feat_out16, feat_out32 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] + for name, child in self.named_children(): + child_wd_params, child_nowd_params = child.get_params() + if isinstance(child, (FeatureFusionModule, BiSeNetOutput)): + lr_mul_wd_params += child_wd_params + lr_mul_nowd_params += child_nowd_params + else: + wd_params += child_wd_params + nowd_params += child_nowd_params + return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params + + +if __name__ == "__main__": + + net = BiSeNet('STDCNet813', 19) + net.cuda() + net.eval() + in_ten = torch.randn(1, 3, 768, 1536).cuda() + out, out16, out32 = net(in_ten) + print(out.shape) + # torch.save(net.state_dict(), 
'STDCNet813.pth')### + + diff --git a/segutils/model_stages_dyna.py b/segutils/model_stages_dyna.py new file mode 100644 index 0000000..81dd956 --- /dev/null +++ b/segutils/model_stages_dyna.py @@ -0,0 +1,369 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision +import time +from stdcnet import STDCNet1446, STDCNet813 +#from models_725.bn import InPlaceABNSync as BatchNorm2d +# BatchNorm2d = nn.BatchNorm2d + +modelSize=(360,640) ##(W,H) +print('######Attention model input(H,W):',modelSize) +class ConvBNReLU(nn.Module): + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, + out_chan, + kernel_size = ks, + stride = stride, + padding = padding, + bias = False) + # self.bn = BatchNorm2d(out_chan) + # self.bn = BatchNorm2d(out_chan, activation='none') + self.bn = nn.BatchNorm2d(out_chan) + self.relu = nn.ReLU() + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class BiSeNetOutput(nn.Module): + def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs): + super(BiSeNetOutput, self).__init__() + self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) + self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False) + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.conv_out(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if 
isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):######################1 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class AttentionRefinementModule(nn.Module): + def __init__(self, in_chan, out_chan,avg_pool2d_kernel_size, *args, **kwargs): + super(AttentionRefinementModule, self).__init__() + self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) + self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False) + # self.bn_atten = nn.BatchNorm2d(out_chan) + # self.bn_atten = BatchNorm2d(out_chan, activation='none') + self.bn_atten = nn.BatchNorm2d(out_chan)########################2 + + self.sigmoid_atten = nn.Sigmoid() + self.avg_pool2d_kernel_size = avg_pool2d_kernel_size + self.init_weight() + + def forward(self, x): + feat = self.conv(x) + #atten = F.avg_pool2d(feat, feat.size()[2:]) + if len(self.avg_pool2d_kernel_size)==0: + atten = F.avg_pool2d(feat, feat.size()[2:]) + else: + atten = F.avg_pool2d(feat, self.avg_pool2d_kernel_size) + #print('------------------newline86:','out:',atten.size(),'in:',feat.size()) + atten = self.conv_atten(atten) + atten = self.bn_atten(atten) + atten = self.sigmoid_atten(atten) + out = torch.mul(feat, atten) + return out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class ContextPath(nn.Module): + def __init__(self, backbone='CatNetSmall', pretrain_model='', use_conv_last=False,dynamic_flag=True, *args, **kwargs): + super(ContextPath, self).__init__() + + self.backbone_name = backbone + if dynamic_flag: + self.avg_pool_kernel_size_32=[] + self.avg_pool_kernel_size_16=[] + else: + self.avg_pool_kernel_size_32=[ int(modelSize[0]/32+0.999), int( modelSize[1]/32+0.999 ) ] + 
self.avg_pool_kernel_size_16=[ int(modelSize[0]/16+0.999), int( modelSize[1]/16+0.999 ) ] + + if backbone == 'STDCNet1446': + self.backbone = STDCNet1446(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + self.arm16 = AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + + elif backbone == 'STDCNet813': + self.backbone = STDCNet813(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + + self.arm16 = AttentionRefinementModule(512, 128,self.avg_pool_kernel_size_16) + + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128,self.avg_pool_kernel_size_32) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + else: + print("backbone is not in backbone lists") + exit(0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + + feat2, feat4, feat8, feat16, feat32 = self.backbone(x) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + + #avg = F.avg_pool2d(feat32, feat32.size()[2:]) + #print('line147:self.avg_pool_kernel_size_32:',self.avg_pool_kernel_size_32) + if len(self.avg_pool_kernel_size_32)==0: + avg = F.avg_pool2d(feat32, feat32.size()[2:]) + else: + avg = F.avg_pool2d(feat32, self.avg_pool_kernel_size_32) + #print('------------------newline140:','out:','out;',avg.size(),' in:',feat32.size()) + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + #print('------------line143,arm32:',feat32.size()) + feat32_arm = self.arm32(feat32) + feat32_sum = 
feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + #print('------------line148,arm16:',feat16.size()) + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat2, feat4, feat8, feat16, feat16_up, feat32_up # x8, x16 + + # return feat8, feat16_up # x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):#################3 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class FeatureFusionModule(nn.Module): + def __init__(self, in_chan, out_chan,dynamic_flag, *args, **kwargs): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0) + self.avg_pool_kernel_size=[ int(modelSize[0]/8+0.9999), int( modelSize[1]/8+0.9999 ) ] + self.dynamic_flag=dynamic_flag + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + if self.dynamic_flag: + atten = F.avg_pool2d(feat, feat.size()[2:]) + else: + atten = F.avg_pool2d(feat, kernel_size=self.avg_pool_kernel_size) + 
#print('------------------newline199:',' out:',atten.size(),'in:',feat.size()) + + + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):##################4 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class BiSeNet_STDC(nn.Module): + def __init__(self, backbone, n_classes, pretrain_model='', use_boundary_2=False, use_boundary_4=False, + use_boundary_8=False, use_boundary_16=False, use_conv_last=False,dynamic_flag=True): + super(BiSeNet_STDC, self).__init__() + + self.use_boundary_2 = use_boundary_2 + self.use_boundary_4 = use_boundary_4 + self.use_boundary_8 = use_boundary_8 + self.use_boundary_16 = use_boundary_16 + self.dynamic_flag=dynamic_flag + # self.heat_map = heat_map + self.cp = ContextPath(backbone, pretrain_model, use_conv_last=use_conv_last,dynamic_flag=dynamic_flag) + + if backbone == 'STDCNet1446': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + sp16_inplanes = 512 + inplane = sp8_inplanes + conv_out_inplanes + + elif backbone == 'STDCNet813': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + sp16_inplanes = 512 + inplane = sp8_inplanes + conv_out_inplanes + + else: + print("backbone is not in backbone lists") + exit(0) + + self.ffm = FeatureFusionModule(inplane, 256,dynamic_flag) + self.conv_out = BiSeNetOutput(256, 256, 
n_classes) + self.conv_out16 = BiSeNetOutput(conv_out_inplanes, 64, n_classes) + self.conv_out32 = BiSeNetOutput(conv_out_inplanes, 64, n_classes) + + self.conv_out_sp16 = BiSeNetOutput(sp16_inplanes, 64, 1) + + self.conv_out_sp8 = BiSeNetOutput(sp8_inplanes, 64, 1) + self.conv_out_sp4 = BiSeNetOutput(sp4_inplanes, 64, 1) + self.conv_out_sp2 = BiSeNetOutput(sp2_inplanes, 64, 1) + self.init_weight() + + def forward(self, x): + H, W = x.size()[2:] + # time_0 = time.time() + # feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x) + feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x) + # print('----backbone', (time.time() - time_0) * 1000) + # feat_out_sp2 = self.conv_out_sp2(feat_res2) + # + # feat_out_sp4 = self.conv_out_sp4(feat_res4) + # + # feat_out_sp8 = self.conv_out_sp8(feat_res8) + # + # feat_out_sp16 = self.conv_out_sp16(feat_res16) + # time_1 = time.time() + feat_fuse = self.ffm(feat_res8, feat_cp8) + # print('----ffm', (time.time() - time_1) * 1000) + # time_2 = time.time() + feat_out = self.conv_out(feat_fuse) + # feat_out16 = self.conv_out16(feat_cp8) + # feat_out32 = self.conv_out32(feat_cp16) + + feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True) + # print('----conv_out', (time.time() - time_2) * 1000) + # feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True) + # feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True) + + + # if self.use_boundary_2 and self.use_boundary_4 and self.use_boundary_8: + # return feat_out, feat_out16, feat_out32, feat_out_sp2, feat_out_sp4, feat_out_sp8 + # + # if (not self.use_boundary_2) and self.use_boundary_4 and self.use_boundary_8: + # return feat_out, feat_out16, feat_out32, feat_out_sp4, feat_out_sp8 + # + # if (not self.use_boundary_2) and (not self.use_boundary_4) and self.use_boundary_8: + return feat_out + + # if (not self.use_boundary_2) and (not self.use_boundary_4) and (not 
self.use_boundary_8): + # return feat_out, feat_out16, feat_out32 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] + for name, child in self.named_children(): + child_wd_params, child_nowd_params = child.get_params() + if isinstance(child, (FeatureFusionModule, BiSeNetOutput)): + lr_mul_wd_params += child_wd_params + lr_mul_nowd_params += child_nowd_params + else: + wd_params += child_wd_params + nowd_params += child_nowd_params + return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params + + +if __name__ == "__main__": + + net = BiSeNet('STDCNet813', 19) + net.cuda() + net.eval() + in_ten = torch.randn(1, 3, 768, 1536).cuda() + out, out16, out32 = net(in_ten) + print(out.shape) + # torch.save(net.state_dict(), 'STDCNet813.pth')### + + diff --git a/segutils/run.sh b/segutils/run.sh new file mode 100644 index 0000000..4df2477 --- /dev/null +++ b/segutils/run.sh @@ -0,0 +1,8 @@ +gpu=2080Ti +name=cityMangement3 +nclass=2 +mWidth=640 +mHeight=360 +python segmodel_trt.py --weights /mnt/thsw2/DSP2/${name}/weights/stdc_360X640.pth --nclass ${nclass} --mWidth ${mWidth} --mHeight ${mHeight} + +mv /mnt/thsw2/DSP2/weights/${name}/stdc_${mWidth}X${mHeight}.engine /mnt/thsw2/DSP2/weights/${name}/stdc_${mHeight}X${mWidth}_${gpu}_fp16.engine diff --git a/segutils/run_dy.sh b/segutils/run_dy.sh new file mode 100644 index 0000000..bb17cd0 --- /dev/null +++ b/segutils/run_dy.sh @@ -0,0 +1,9 @@ +#动态尺寸的stdc转trt没有成功,因为里有用到了全局池化,池化的过程中kener_size不固定 +gpu=2080Ti +name=crackMeasurement +nclass=2 +mWidth=0 +mHeight=0 +python toTrt.py --weights /mnt/thsw2/DSP2/weights/${name}/stdc_360X640.pth --nclass ${nclass} --mWidth ${mWidth} --mHeight ${mHeight} + +mv /mnt/thsw2/DSP2/weights/${name}/stdc_${mWidth}X${mHeight}.engine 
/mnt/thsw2/DSP2/weights/${name}/stdc_${mHeight}X${mWidth}_${gpu}_fp16.engine diff --git a/segutils/segMultiOutModel.py b/segutils/segMultiOutModel.py new file mode 100644 index 0000000..e38838d --- /dev/null +++ b/segutils/segMultiOutModel.py @@ -0,0 +1,377 @@ +import torch +from core.models.bisenet import BiSeNet,BiSeNet_MultiOutput +from torchvision import transforms +import cv2,os,glob +import numpy as np +from core.models.dinknet import DinkNet34 +import matplotlib.pyplot as plt + +import matplotlib.pyplot as plt +import time +class SegModel(object): + def __init__(self, nclass=2,model = None,weights=None,modelsize=512,device='cuda:3',multiOutput=False): + #self.args = args + self.model = model + #self.model = DinkNet34(nclass) + checkpoint = torch.load(weights) + self.modelsize = modelsize + self.model.load_state_dict(checkpoint['model']) + self.device = device + self.multiOutput = multiOutput + self.model= self.model.to(self.device) + '''self.composed_transforms = transforms.Compose([ + + transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)), + transforms.ToTensor()]) ''' + self.mean = (0.335, 0.358, 0.332) + self.std = (0.141, 0.138, 0.143) + #mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143) + def eval(self,image,outsize=None,smooth_kernel=0): + imageH,imageW,imageC = image.shape + time0 = time.time() + image = self.preprocess_image(image) + time1 = time.time() + self.model.eval() + image = image.to(self.device) + with torch.no_grad(): + output = self.model(image,test_flag=True,smooth_kernel = 0) + + time2 = time.time() + + if self.multiOutput: + pred = [outputx.data.cpu().numpy()[0] for outputx in output] + else: + pred = output.data.cpu().numpy() + pred = pred[0] + + time3 = time.time() + + if self.multiOutput: + pred = [ cv2.blur(predx,(smooth_kernel,smooth_kernel) ) for predx in pred] + pred = [cv2.resize(predx.astype(np.uint8),(imageW,imageH)) for predx in pred[0:2]] + else: + pred = cv2.blur(pred,(smooth_kernel,smooth_kernel) 
) + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH),interpolation = cv2.INTER_NEAREST) + time4 = time.time() + print('##line52:pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) )) + + return pred + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + + time0 = time.time() + image = cv2.resize(image,(self.modelsize,self.modelsize)) + + time1 = time.time() + image = image.astype(np.float32) + image /= 255.0 + + time2 = time.time() + #image = image * 3.2 - 1.6 + image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + image[:,:,2] -=self.mean[2] + + time3 = time.time() + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + + + time4 = time.time() + image = np.transpose(image, ( 2, 0, 1)) + time5 = time.time() + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + print('###line84: in preprocess: resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) ) + + return image + + + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + +def test(): + #os.environ["CUDA_VISIBLE_DEVICES"] = str('4') + ''' + image_url = '../../data/landcover/corp512/test/images/N-33-139-C-d-2-4_169.jpg' + nclass = 5 + weights = 'runs/landcover/DinkNet34_save/experiment_wj_loss-10-10-1/checkpoint.pth' + ''' + + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + weights = 'runs/THriver/BiSeNet/train/experiment_0/checkpoint.pth' + #weights = 'runs/segmentation/BiSeNet_test/experiment_10/checkpoint.pth' + + model = BiSeNet(nclass) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:4') + for i in range(10): + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = 
image_array0.shape + #print('###line84:',image_array0.shape) + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + #image_in = segmodel.preprocess_image(image_array) + pred = segmodel.eval(image_array,outsize=None) + time0=time.time() + binary = pred.copy() + time1=time.time() + contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time2=time.time() + print(pred.shape,' time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +label_dic={'landcover':[[0, 0, 0], [255, 0, 0], [0,255,0], [0,0,255], [255,255,0]], + 'deepRoad':[[0,0,0],[255,0,0]], + 'water':[[0,0,0],[255,255,255]], + 'water_building':[[0,0,0],[0,0,255],[255,0,0]], + 'floater':[[0,0,0], [0,255,0],[255,255,0],[255,0,255],[0,128, 255], [255,0,0], [0,255,255] ] + + + + + } + +def index2color(label_mask,label_colours): + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + label_cnt = len(label_colours) + for ll in range(0, label_cnt): + r[label_mask == ll] = label_colours[ll][0] + g[label_mask == ll] = label_colours[ll][1] + b[label_mask == ll] = label_colours[ll][2] + rgb = np.stack((b, g,r), axis=-1) + return rgb.astype(np.uint8) +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id +def result_merge_sep(image,mask_colors): + #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }] + for mask_color in mask_colors: + mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color'] + ishow = 2 + #plt.figure(1);plt.imshow(mask_map); + for index,color in zip(indexes,colors): + mask_binaray = (mask_map == index).astype(np.uint8) + contours, hierarchy = cv2.findContours(mask_binaray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; + contours = np.array(contours,dtype=object)[d] + cv2.drawContours(image,contours,-1,color[::-1],3) + 
#plt.figure(ishow);plt.imshow(mask_binaray);ishow+=1 + #plt.show() + return image +def result_merge(image,mask_colors): + #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }] + for mask_color in mask_colors: + mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color'] + mask_binary = (mask_map>0).astype(np.uint8) + contours, hierarchy = cv2.findContours(mask_binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; contours = np.array(contours)[d] + cv2.drawContours(image,contours,-1,colors[0][::-1],3) + + coors = np.array([(np.mean(contours_x ,axis=0)+0.5).astype(np.int32)[0] for contours_x in contours]) + #print(mask_map.shape,coors.shape) + typess = mask_map[ coors[:,1],coors[:,0]] + #for jj,iclass in enumerate(typess): + #print(iclass,colors) + # cv2.drawContours(image,contours,-1, colors[iclass][::-1],3) + + + + return image + +def test_floater(): + from core.models.dinknet import DinkNet34_MultiOutput + #create_model('DinkNet34_MultiOutput',[2,5]) + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = [2,7] + outresult=True + weights = 'runs/thFloater/BiSeNet_MultiOutput/train/experiment_4/checkpoint.pth' + model = BiSeNet_MultiOutput(nclass) + outdir='temp' + image_dir = '/host/workspace/WJ/data/thFloater/val/images/' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:9',multiOutput=True) + + + for i,image_url in enumerate(image_url_list[0:10]) : + image_array0 = cv2.imread(image_url) + image_array0 = cv2.cvtColor(image_array0, cv2.COLOR_BGR2RGB) # cv2默认为bgr顺序 + imageH,imageW,_ = image_array0.shape + #image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + pred = segmodel.eval(image_array,outsize=None) + + + time0=time.time() + if isinstance(pred,list): + binary = [predx.copy() for predx in pred] + time1=time.time() + + mask_colors=[ { 'mask':pred[0] ,'index':range(1,2),'color':label_dic['water'][0:] }, 
+ { 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] } ] + result_draw = result_merge(image_array0,mask_colors) + + + time2=time.time() + + + if outresult: + basename=os.path.splitext( os.path.basename(image_url))[0] + outname=os.path.join(outdir,basename+'_draw.png') + cv2.imwrite(outname,result_draw[:,:,:]) + + + + + print('##line151: time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) +def test_water_buildings(): + from core.models.bisenet import BiSeNet + #image_url = 'temp_pics/DJI_0645.JPG' + nclass = 3 + outresult=True + weights = 'runs/thWaterBuilding/BiSeNet/train/experiment_2/checkpoint.pth' + model = BiSeNet(nclass) + outdir='temp' + image_dir = '/home/thsw/WJ/data/river_buildings/' + #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:0',multiOutput=False) + + + for i,image_url in enumerate(image_url_list[0:]) : + #image_url = '/home/thsw/WJ/data/THWaterBuilding/val/images/0anWqgmO9rGe1n8P.png' + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + pred = segmodel.eval(image_array,outsize=None) + + time0=time.time() + if isinstance(pred,list): + binary = [predx.copy() for predx in pred] + #print(binary[0].shape) + time1=time.time() + + + mask_colors=[ { 'mask':pred ,'index':range(1,3),'color':label_dic['water_building'][1:] }, + #{ 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] } + ] + result_draw = result_merge_sep(image_array0,mask_colors) + + + time2=time.time() + if outresult: + basename=os.path.splitext( os.path.basename(image_url))[0] + outname=os.path.join(outdir,basename+'_draw.png') + cv2.imwrite(outname,result_draw[:,:,:]) + + print('##line294: time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +def 
get_illegal_index(contours,hierarchy,water_dilate,overlap_threshold): + out_index=[] + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; + contours = np.array(contours,dtype=object)[d] + imageH,imageW = water_dilate.shape + for ii,cont in enumerate(contours): + build_area=np.zeros((imageH,imageW )) + cv2.fillPoly(build_area,[cont[:,0,:]],1) + area1=np.sum(build_area);area2=np.sum(build_area*water_dilate) + if (area2/area1) >overlap_threshold: + out_index.append(ii) + + + return out_index + + +def test_water_building_seperately(): + from core.models.dinknet import DinkNet34_MultiOutput + #create_model('DinkNet34_MultiOutput',[2,5]) + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = [2,2] + outresult=True + weights = 'runs/thWaterBuilding_seperate/BiSeNet_MultiOutput/train/experiment_0/checkpoint.pth' + model = BiSeNet_MultiOutput(nclass) + outdir='temp' + image_dir = '/home/thsw/WJ/data/river_buildings/' + #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:1',multiOutput=True) + + print('###line307 image cnt:',len(image_url_list)) + for i,image_url in enumerate(image_url_list[0:1]) : + image_url = '/home/thsw/WJ/data/river_buildings/DJI_20210904092044_0001_S_output896.jpg' + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + pred = segmodel.eval(image_array,outsize=None,smooth_kernel=20) + + ##画出水体区域 + contours, hierarchy = cv2.findContours(pred[0],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = get_largest_contours(contours); + water = pred[0].copy(); water[:,:] = 0 + cv2.fillPoly(water, [contours[max_id][:,0,:]], 1) + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + + + + + ##画出水体膨胀后的蓝线区域。 + kernel = np.ones((100,100),np.uint8) + water_dilate = cv2.dilate(water,kernel,iterations = 1) + contours, hierarchy = 
cv2.findContours(water_dilate,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + #print('####line310:',contours) + cv2.drawContours(image_array0,contours,-1,(255,0,0),3) + + + ###逐个建筑判断是否与蓝线内区域有交叉。如果交叉面积占本身面积超过0.1,则认为是违法建筑。 + contours, hierarchy = cv2.findContours(pred[1],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + outIndex=get_illegal_index(contours,hierarchy,water_dilate,0.1) + + for ii in outIndex: + cv2.drawContours(image_array0,contours,ii,(0,0,255),3) + + + plt.imshow(image_array0);plt.show() + ## + + time0=time.time() + + time1=time.time() + + + mask_colors=[ { 'mask':pred[0],'index':[1],'color':label_dic['water_building'][1:2]}, + { 'mask':pred[1],'index':[1],'color':label_dic['water_building'][2:3]} + ] + result_draw = result_merge_sep(image_array0,mask_colors) + time2=time.time() + + if outresult: + basename=os.path.splitext( os.path.basename(image_url))[0] + outname=os.path.join(outdir,basename+'_draw.png') + cv2.imwrite(outname,result_draw[:,:,:]) + + print('##line151: time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +if __name__=='__main__': + #test() + #test_floater() + #test_water_buildings() + test_water_building_seperately() + + + + + + diff --git a/segutils/segWaterBuilding.py b/segutils/segWaterBuilding.py new file mode 100644 index 0000000..725b26c --- /dev/null +++ b/segutils/segWaterBuilding.py @@ -0,0 +1,391 @@ +import torch +from core.models.bisenet import BiSeNet,BiSeNet_MultiOutput +from torchvision import transforms +import cv2,os,glob +import numpy as np +from core.models.dinknet import DinkNet34 +import matplotlib.pyplot as plt + +import matplotlib.pyplot as plt +import time +class SegModel(object): + def __init__(self, nclass=2,model = None,weights=None,modelsize=512,device='cuda:3',multiOutput=False): + #self.args = args + self.model = model + #self.model = DinkNet34(nclass) + checkpoint = torch.load(weights) + self.modelsize = modelsize + self.model.load_state_dict(checkpoint['model']) + self.device = device + 
self.multiOutput = multiOutput + self.model= self.model.to(self.device) + '''self.composed_transforms = transforms.Compose([ + + transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)), + transforms.ToTensor()]) ''' + self.mean = (0.335, 0.358, 0.332) + self.std = (0.141, 0.138, 0.143) + #mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143) + def eval(self,image,outsize=None,smooth_kernel=0): + imageH,imageW,imageC = image.shape + time0 = time.time() + image = self.preprocess_image(image) + time1 = time.time() + self.model.eval() + image = image.to(self.device) + with torch.no_grad(): + output = self.model(image,test_flag=True,smooth_kernel = 0) + + time2 = time.time() + + if self.multiOutput: + pred = [outputx.data.cpu().numpy()[0] for outputx in output] + else: + pred = output.data.cpu().numpy() + pred = pred[0] + + time3 = time.time() + + if self.multiOutput: + pred = [ cv2.blur(predx,(smooth_kernel,smooth_kernel) ) for predx in pred] + pred = [cv2.resize(predx.astype(np.uint8),(imageW,imageH)) for predx in pred[0:2]] + else: + pred = cv2.blur(pred,(smooth_kernel,smooth_kernel) ) + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH),interpolation = cv2.INTER_NEAREST) + time4 = time.time() + outStr= '##line52:pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ) + #print('##line52:pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) )) + + return pred + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + + time0 = time.time() + image = cv2.resize(image,(self.modelsize,self.modelsize)) + + time1 = time.time() + image = image.astype(np.float32) + image /= 255.0 + + time2 = time.time() + #image = image * 3.2 - 1.6 + image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + 
image[:,:,2] -=self.mean[2] + + time3 = time.time() + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + + + time4 = time.time() + image = np.transpose(image, ( 2, 0, 1)) + time5 = time.time() + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + outStr='###line84: in preprocess: resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) + #print('###line84: in preprocess: resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) ) + + return image + + + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + +def test(): + #os.environ["CUDA_VISIBLE_DEVICES"] = str('4') + ''' + image_url = '../../data/landcover/corp512/test/images/N-33-139-C-d-2-4_169.jpg' + nclass = 5 + weights = 'runs/landcover/DinkNet34_save/experiment_wj_loss-10-10-1/checkpoint.pth' + ''' + + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + weights = 'runs/THriver/BiSeNet/train/experiment_0/checkpoint.pth' + #weights = 'runs/segmentation/BiSeNet_test/experiment_10/checkpoint.pth' + + model = BiSeNet(nclass) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:4') + for i in range(10): + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + #print('###line84:',image_array0.shape) + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + #image_in = segmodel.preprocess_image(image_array) + pred = segmodel.eval(image_array,outsize=None) + time0=time.time() + binary = pred.copy() + time1=time.time() + contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time2=time.time() + print(pred.shape,' time copy:%.1f finccontour:%.1f 
'%(get_ms(time1,time0),get_ms(time2,time1) )) + +label_dic={'landcover':[[0, 0, 0], [255, 0, 0], [0,255,0], [0,0,255], [255,255,0]], + 'deepRoad':[[0,0,0],[255,0,0]], + 'water':[[0,0,0],[255,255,255]], + 'water_building':[[0,0,0],[0,0,255],[255,0,0]], + 'floater':[[0,0,0], [0,255,0],[255,255,0],[255,0,255],[0,128, 255], [255,0,0], [0,255,255] ] + + + + + } + +def index2color(label_mask,label_colours): + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + label_cnt = len(label_colours) + for ll in range(0, label_cnt): + r[label_mask == ll] = label_colours[ll][0] + g[label_mask == ll] = label_colours[ll][1] + b[label_mask == ll] = label_colours[ll][2] + rgb = np.stack((b, g,r), axis=-1) + return rgb.astype(np.uint8) +def get_largest_contours(contours,cnt): + areas = [cv2.contourArea(x) for x in contours] + areas_bak = areas.copy() + areas_bak.sort() + cnt_max = len(areas_bak) + cnt_ret = min(cnt,cnt_max) + assert cnt_ret>0 , 'contours:%d , segCnt:%d'%(cnt_max,cnt) + max_id=[ areas.index( areas_bak[-x] ) for x in range(1,cnt_ret+1) ] + + return max_id +def result_merge_sep(image,mask_colors): + #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }] + for mask_color in mask_colors: + mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color'] + ishow = 2 + #plt.figure(1);plt.imshow(mask_map); + for index,color in zip(indexes,colors): + mask_binaray = (mask_map == index).astype(np.uint8) + contours, hierarchy = cv2.findContours(mask_binaray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; + contours = np.array(contours,dtype=object)[d] + cv2.drawContours(image,contours,-1,color[::-1],3) + #plt.figure(ishow);plt.imshow(mask_binaray);ishow+=1 + #plt.show() + return image +def result_merge(image,mask_colors): + #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }] + for mask_color in mask_colors: + mask_map,indexes,colors = mask_color['mask'], 
mask_color['index'], mask_color['color'] + mask_binary = (mask_map>0).astype(np.uint8) + contours, hierarchy = cv2.findContours(mask_binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; contours = np.array(contours)[d] + cv2.drawContours(image,contours,-1,colors[0][::-1],3) + + coors = np.array([(np.mean(contours_x ,axis=0)+0.5).astype(np.int32)[0] for contours_x in contours]) + #print(mask_map.shape,coors.shape) + typess = mask_map[ coors[:,1],coors[:,0]] + #for jj,iclass in enumerate(typess): + #print(iclass,colors) + # cv2.drawContours(image,contours,-1, colors[iclass][::-1],3) + + + + return image + +def test_floater(): + from core.models.dinknet import DinkNet34_MultiOutput + #create_model('DinkNet34_MultiOutput',[2,5]) + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = [2,7] + outresult=True + weights = 'runs/thFloater/BiSeNet_MultiOutput/train/experiment_4/checkpoint.pth' + model = BiSeNet_MultiOutput(nclass) + outdir='temp' + image_dir = '/host/workspace/WJ/data/thFloater/val/images/' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:9',multiOutput=True) + + + for i,image_url in enumerate(image_url_list[0:10]) : + image_array0 = cv2.imread(image_url) + image_array0 = cv2.cvtColor(image_array0, cv2.COLOR_BGR2RGB) # cv2默认为bgr顺序 + imageH,imageW,_ = image_array0.shape + #image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + pred = segmodel.eval(image_array,outsize=None) + + + time0=time.time() + if isinstance(pred,list): + binary = [predx.copy() for predx in pred] + time1=time.time() + + mask_colors=[ { 'mask':pred[0] ,'index':range(1,2),'color':label_dic['water'][0:] }, + { 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] } ] + result_draw = result_merge(image_array0,mask_colors) + + + time2=time.time() + + + if outresult: + basename=os.path.splitext( os.path.basename(image_url))[0] + 
outname=os.path.join(outdir,basename+'_draw.png') + cv2.imwrite(outname,result_draw[:,:,:]) + + + + + print('##line151: time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) +def test_water_buildings(): + from core.models.bisenet import BiSeNet + #image_url = 'temp_pics/DJI_0645.JPG' + nclass = 3 + outresult=True + weights = 'runs/thWaterBuilding/BiSeNet/train/experiment_2/checkpoint.pth' + model = BiSeNet(nclass) + outdir='temp' + image_dir = '/home/thsw/WJ/data/river_buildings/' + #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:0',multiOutput=False) + + + for i,image_url in enumerate(image_url_list[0:]) : + #image_url = '/home/thsw/WJ/data/THWaterBuilding/val/images/0anWqgmO9rGe1n8P.png' + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + pred = segmodel.eval(image_array,outsize=None) + + time0=time.time() + if isinstance(pred,list): + binary = [predx.copy() for predx in pred] + #print(binary[0].shape) + time1=time.time() + + + mask_colors=[ { 'mask':pred ,'index':range(1,3),'color':label_dic['water_building'][1:] }, + #{ 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] } + ] + result_draw = result_merge_sep(image_array0,mask_colors) + + + time2=time.time() + if outresult: + basename=os.path.splitext( os.path.basename(image_url))[0] + outname=os.path.join(outdir,basename+'_draw.png') + cv2.imwrite(outname,result_draw[:,:,:]) + + print('##line294: time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +def get_illegal_index(contours,hierarchy,water_dilate,overlap_threshold): + out_index=[] + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; + contours = np.array(contours,dtype=object)[d] + imageH,imageW = water_dilate.shape + for ii,cont in enumerate(contours): + cont = 
cont.astype(np.int32) + build_area=np.zeros((imageH,imageW )) + try: + cv2.fillPoly(build_area,[cont[:,0,:]],1) + area1=np.sum(build_area);area2=np.sum(build_area*water_dilate) + if (area2/area1) >overlap_threshold: + out_index.append(ii) + except Exception as e: + print('###read error:%s '%(e)) + print(cont.shape,type(cont),cont.dtype) + + + return out_index + +def illBuildings(pred,image_array0): + ##画出水体区域 + contours, hierarchy = cv2.findContours(pred[0],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + water = pred[0].copy(); water[:,:] = 0 + if len(contours)==0: + return image_array0,water + max_id = get_largest_contours(contours); + + cv2.fillPoly(water, [contours[max_id][:,0,:]], 1) + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + + ##画出水体膨胀后的蓝线区域。 + kernel = np.ones((100,100),np.uint8) + water_dilate = cv2.dilate(water,kernel,iterations = 1) + contours, hierarchy = cv2.findContours(water_dilate,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + #print('####line310:',contours) + cv2.drawContours(image_array0,contours,-1,(255,0,0),3) + + ##确定违法建筑并绘图 + ###逐个建筑判断是否与蓝线内区域有交叉。如果交叉面积占本身面积超过0.1,则认为是违法建筑。 + contours, hierarchy = cv2.findContours(pred[1],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + outIndex=get_illegal_index(contours,hierarchy,water_dilate,0.1) + + for ii in outIndex: + cv2.drawContours(image_array0,contours,ii,(0,0,255),3) + return image_array0,water + +def test_water_building_seperately(): + from core.models.dinknet import DinkNet34_MultiOutput + #create_model('DinkNet34_MultiOutput',[2,5]) + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = [2,2] + outresult=True + weights = 'runs/thWaterBuilding_seperate/BiSeNet_MultiOutput/train/experiment_0/checkpoint.pth' + model = BiSeNet_MultiOutput(nclass) + outdir='temp' + image_dir = '/home/thsw/WJ/data/river_buildings/' + #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = 
SegModel(model=model,nclass=nclass,weights=weights,device='cuda:1',multiOutput=True) + + print('###line307 image cnt:',len(image_url_list)) + for i,image_url in enumerate(image_url_list[0:1]) : + image_url = '/home/thsw/WJ/data/river_buildings/DJI_20210904092044_0001_S_output896.jpg' + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + pred = segmodel.eval(image_array,outsize=None,smooth_kernel=20) + + image_array0,water = illBuildings(pred,image_array0) + + + plt.imshow(image_array0);plt.show() + ## + + time0=time.time() + + time1=time.time() + + + mask_colors=[ { 'mask':pred[0],'index':[1],'color':label_dic['water_building'][1:2]}, + { 'mask':pred[1],'index':[1],'color':label_dic['water_building'][2:3]} + ] + result_draw = result_merge_sep(image_array0,mask_colors) + time2=time.time() + + if outresult: + basename=os.path.splitext( os.path.basename(image_url))[0] + outname=os.path.join(outdir,basename+'_draw.png') + cv2.imwrite(outname,result_draw[:,:,:]) + + print('##line151: time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +if __name__=='__main__': + #test() + #test_floater() + #test_water_buildings() + test_water_building_seperately() + + + + + + diff --git a/segutils/seg_detect.py b/segutils/seg_detect.py new file mode 100644 index 0000000..84611b7 --- /dev/null +++ b/segutils/seg_detect.py @@ -0,0 +1,132 @@ +import torch +from core.models.bisenet import BiSeNet +from torchvision import transforms +import cv2,os +import numpy as np +from core.models.dinknet import DinkNet34 +import matplotlib.pyplot as plt + +import matplotlib.pyplot as plt +import time +class SegModel(object): + def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:3'): + #self.args = args + self.model = BiSeNet(nclass) + #self.model = DinkNet34(nclass) + checkpoint = torch.load(weights) + self.modelsize = modelsize + 
self.model.load_state_dict(checkpoint['model']) + self.device = device + self.model= self.model.to(self.device) + '''self.composed_transforms = transforms.Compose([ + + transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)), + transforms.ToTensor()]) ''' + self.mean = (0.335, 0.358, 0.332) + self.std = (0.141, 0.138, 0.143) + def eval(self,image,outsize=None): + imageW,imageH,imageC = image.shape + time0 = time.time() + image = self.preprocess_image(image) + time1 = time.time() + self.model.eval() + image = image.to(self.device) + with torch.no_grad(): + output = self.model(image,outsize=outsize) + + time2 = time.time() + pred = output.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + time3 = time.time() + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH)) + time4 = time.time() + print('pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) )) + return pred + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + + time0 = time.time() + image = cv2.resize(image,(self.modelsize,self.modelsize)) + + time1 = time.time() + image = image.astype(np.float32) + image /= 255.0 + + time2 = time.time() + #image -= self.mean + image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + image[:,:,2] -=self.mean[2] + + time3 = time.time() + #image /= self.std + + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + + + time4 = time.time() + image = np.transpose(image, ( 2, 0, 1)) + time5 = time.time() + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + print('resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) ) + + return image + + + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + +if __name__=='__main__': + + + + 
#os.environ["CUDA_VISIBLE_DEVICES"] = str('4') + ''' + image_url = '../../data/landcover/corp512/test/images/N-33-139-C-d-2-4_169.jpg' + nclass = 5 + weights = 'runs/landcover/DinkNet34_save/experiment_wj_loss-10-10-1/checkpoint.pth' + ''' + + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + weights = 'runs/THriver/BiSeNet/train/experiment_0/checkpoint.pth' + #weights = 'runs/segmentation/BiSeNet_test/experiment_10/checkpoint.pth' + + + + segmodel = SegModel(nclass=nclass,weights=weights,device='cuda:4') + for i in range(10): + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + #print('###line84:',image_array0.shape) + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + #image_in = segmodel.preprocess_image(image_array) + pred = segmodel.eval(image_array,outsize=None) + time0=time.time() + binary = pred.copy() + time1=time.time() + contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time2=time.time() + print(pred.shape,' time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + + + ##计算findconturs时间与大小的关系 + binary0 = binary.copy() + for ii,ss in enumerate([22,256,512,1024,2048]): + time0=time.time() + image = cv2.resize(binary0,(ss,ss)) + time1=time.time() + if ii ==0: + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + else: + contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time2=time.time() + print('size:%d resize:%.1f ,findtime:%.1f '%(ss, get_ms(time1,time0),get_ms(time2,time1))) + \ No newline at end of file diff --git a/segutils/segmodel.py b/segutils/segmodel.py new file mode 100644 index 0000000..7d03646 --- /dev/null +++ b/segutils/segmodel.py @@ -0,0 +1,165 @@ +import torch +import sys,os +sys.path.extend(['../AIlib2/segutils']) +from model_stages import BiSeNet_STDC +from torchvision import transforms +import cv2,glob 
+import numpy as np +from core.models.dinknet import DinkNet34 +import matplotlib.pyplot as plt +import time +from PIL import Image +import torch.nn.functional as F +import torchvision.transforms as transforms +class SegModel(object): + def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:0'): + #self.args = args + self.model = BiSeNet_STDC(backbone='STDCNet813', n_classes=nclass, + use_boundary_2=False, use_boundary_4=False, + use_boundary_8=True, use_boundary_16=False, + use_conv_last=False) + self.device = device + + self.model.load_state_dict(torch.load(weights, map_location=torch.device(self.device) )) + self.model= self.model.to(self.device) + self.mean = (0.485, 0.456, 0.406) + self.std = (0.229, 0.224, 0.225) + + def eval(self,image): + time0 = time.time() + imageH, imageW, _ = image.shape + image = self.RB_convert(image) + print('line32: image:',image[100,100,:],image.shape ) + img = self.preprocess_image(image) + if self.device != 'cpu': + imgs = img.to(self.device) + else:imgs=img + time1 = time.time() + self.model.eval() + with torch.no_grad(): + print(' segmodel.py line35:',len(imgs),imgs[0].shape , imgs[0,:,100,100]) + output = self.model(imgs) + + time2 = time.time() + pred = output.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + time3 = time.time() + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH)) + time4 = time.time() + outstr= 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) ) + + return pred,outstr + def eval_zyy(self,image):###此函数采用的预处理方法,和zyy跑出来的结果一致 + self.to_tensor = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(self.mean, self.std), + ]) + time0 = time.time() + imageH, imageW, _ = image.shape + imgs= Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + imgs = self.to_tensor(imgs) + if self.device != 'cpu': + imgs = 
imgs.to(self.device) + imgs = torch.unsqueeze(imgs, dim=0) + imgs = F.interpolate(imgs, [ 360,640 ], mode='bilinear', align_corners=True) + time1 = time.time() + self.model.eval() + with torch.no_grad(): + print('###line 64 img:',imgs[0].shape, imgs[0][0,10:12,10:12]) + output = self.model(imgs) + print('###line69:',output.size()) + time2 = time.time() + pred = output.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + time3 = time.time() + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH)) + time4 = time.time() + outstr= 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) ) + print('####line78:',pred.shape,np.max(pred),np.min(pred)) + return pred,outstr + + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + image = cv2.resize(image, (640,360), interpolation=cv2.INTER_LINEAR) + image = image.astype(np.float32) + image /= 255.0 + + image[:, :, 0] -= self.mean[0] + image[:, :, 1] -= self.mean[1] + image[:, :, 2] -= self.mean[2] + + image[:, :, 0] /= self.std[0] + image[:, :, 1] /= self.std[1] + image[:, :, 2] /= self.std[2] + + image = np.transpose(image, (2, 0, 1)) + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + + return image + def RB_convert(self,image): + image_c = image.copy() + image_c[:,:,0] = image[:,:,2] + image_c[:,:,2] = image[:,:,0] + return image_c + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + + +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id + +if __name__=='__main__': + impth = '../../river_demo/images/slope/' + outpth= 'results' + folders = os.listdir(impth) + weights = '../weights/STDC/model_maxmIOU75_1720_0.946_360640.pth' + segmodel = SegModel(nclass=2,weights=weights) + + for i in range(len(folders)): 
+ + imgpath = os.path.join(impth, folders[i]) + time0 = time.time() + + #img = Image.open(imgpath).convert('RGB') + img = cv2.imread(imgpath) + img = np.array(img) + + + time1 = time.time() + pred, outstr = segmodel.eval(image=img)##### + time2 = time.time() + + binary0 = pred.copy() + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time3 = time.time() + + max_id = -1 + if len(contours)>0: + max_id = get_largest_contours(contours) + binary0[:,:] = 0 + cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + + cv2.drawContours(img,contours,max_id,(0,255,255),3) + time4 = time.time() + + + #img_n = cv2.cvtColor(img,cv2.COLOR_RGB2BGR) + cv2.imwrite( os.path.join( outpth,folders[i] ) ,img ) + time5 = time.time() + print('image:%d ,infer:%.1f ms,findcontours:%.1f ms, draw:%.1f, total:%.1f'%(i,get_ms(time2,time1),get_ms(time3,time2),get_ms(time4,time3),get_ms(time4,time1))) + + + + + + + + + diff --git a/segutils/segmodel_BiseNet.py b/segutils/segmodel_BiseNet.py new file mode 100644 index 0000000..3ca4135 --- /dev/null +++ b/segutils/segmodel_BiseNet.py @@ -0,0 +1,144 @@ +import torch +import sys,os +sys.path.extend(['segutils']) +from core.models.bisenet import BiSeNet +from torchvision import transforms +import cv2,glob +import numpy as np +from core.models.dinknet import DinkNet34 +import matplotlib.pyplot as plt +import time +class SegModel(object): + def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:0'): + #self.args = args + self.model = BiSeNet(nclass) + #self.model = DinkNet34(nclass) + checkpoint = torch.load(weights) + self.modelsize = modelsize + self.model.load_state_dict(checkpoint['model']) + self.device = device + self.model= self.model.to(self.device) + '''self.composed_transforms = transforms.Compose([ + + transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)), + transforms.ToTensor()]) ''' + self.mean = (0.335, 0.358, 0.332) + self.std = (0.141, 0.138, 0.143) + def 
eval(self,image): + time0 = time.time() + imageH,imageW,imageC = image.shape + image = self.preprocess_image(image) + time1 = time.time() + self.model.eval() + image = image.to(self.device) + with torch.no_grad(): + output = self.model(image) + + time2 = time.time() + pred = output.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + time3 = time.time() + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH)) + time4 = time.time() + outstr= 'pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) ) + + #print('pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f, total:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) )) + return pred,outstr + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + + time0 = time.time() + image = cv2.resize(image,(self.modelsize,self.modelsize)) + time0 = time.time() + image = image.astype(np.float32) + image /= 255.0 + + image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + image[:,:,2] -=self.mean[2] + + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + image = cv2.cvtColor( image,cv2.COLOR_RGB2BGR) + #image -= self.mean + #image /= self.std + image = np.transpose(image, ( 2, 0, 1)) + + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + + + return image + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + + +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id + +if __name__=='__main__': + image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + weights = '../weights/BiSeNet/checkpoint.pth' + + 
segmodel = SegModel(nclass=nclass,weights=weights)

image_urls=glob.glob('../../river_demo/images/slope/*')
out_dir ='../../river_demo/images/results/';
os.makedirs(out_dir,exist_ok=True)
for im,image_url in enumerate(image_urls[0:]):
    #image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG'
    image_array0 = cv2.imread(image_url)
    if image_array0 is None:
        # FIX: cv2.imread returns None for unreadable / non-image files; calling
        # .shape on None would crash the whole batch. Skip and continue.
        print('skip unreadable image:%s'%(image_url)); continue
    H,W,C = image_array0.shape
    time_1=time.time()
    pred,outstr = segmodel.eval(image_array0 )

    #plt.figure(1);plt.imshow(pred);
    #plt.show()
    binary0 = pred.copy()

    time0 = time.time()
    contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    max_id = -1
    if len(contours)>0:
        # keep only the largest connected region of the predicted mask
        max_id = get_largest_contours(contours)
        binary0[:,:] = 0
        cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1)

    time1 = time.time()
    time2 = time.time()

    cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
    time3 = time.time()
    out_url='%s/%s'%(out_dir,os.path.basename(image_url))
    ret = cv2.imwrite(out_url,image_array0)
    time4 = time.time()

    print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) )
    #print(outstr)

# ---- file: segutils/segmodel_STDC.py ----
import torch
import sys,os
sys.path.extend(['../AIlib/segutils'])
from model_stages import BiSeNet_STDC
from torchvision import transforms
import cv2,glob
import numpy as np
from core.models.dinknet import DinkNet34
import matplotlib.pyplot as plt
import time

class SegModel(object):
    """Binary segmentation wrapper around a BiSeNet_STDC (STDCNet813 backbone).

    eval() takes one BGR image (H,W,3 uint8) and returns the per-pixel class
    mask resized back to the input resolution, plus a timing string.
    """
    def __init__(self, nclass=2,weights=None,modelsize=(640,360),device='cuda:0'):
        self.model = BiSeNet_STDC(backbone='STDCNet813', n_classes=nclass,
                                  use_boundary_2=False, use_boundary_4=False,
                                  use_boundary_8=True, use_boundary_16=False,
                                  use_conv_last=False)
        self.device = device
        self.model.load_state_dict(torch.load(weights, map_location=torch.device(self.device) ))
        self.model = self.model.to(self.device)
        self.mean = (0.485, 0.456, 0.406)   # ImageNet mean (RGB order)
        self.std = (0.229, 0.224, 0.225)    # ImageNet std (RGB order)
        self.modelsize = modelsize          # (width, height) as expected by cv2.resize

    def eval(self,image):
        """Run inference on one BGR image; returns (mask, timing string)."""
        time0 = time.time()
        imageH, imageW, _ = image.shape
        image = self.RB_convert(image)      # BGR -> RGB to match training stats
        img = self.preprocess_image(image)
        if self.device != 'cpu':
            imgs = img.to(self.device)
        else:
            imgs = img
        time1 = time.time()
        self.model.eval()
        with torch.no_grad():
            output = self.model(imgs)
        time2 = time.time()
        pred = output.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)[0]   # argmax over class channel -> per-pixel labels
        time3 = time.time()
        pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH))
        time4 = time.time()
        outstr= 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) )
        return pred,outstr

    def get_ms(self,t1,t0):
        """Elapsed milliseconds between two time.time() stamps."""
        return (t1-t0)*1000.0

    def preprocess_image(self,image):
        """Resize, scale to [0,1], normalize per channel, to NCHW float tensor."""
        image = cv2.resize(image, self.modelsize, interpolation=cv2.INTER_LINEAR)
        image = image.astype(np.float32)
        image /= 255.0

        image[:, :, 0] -= self.mean[0]
        image[:, :, 1] -= self.mean[1]
        image[:, :, 2] -= self.mean[2]

        image[:, :, 0] /= self.std[0]
        image[:, :, 1] /= self.std[1]
        image[:, :, 2] /= self.std[2]

        image = np.transpose(image, (2, 0, 1))
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)
        return image

    def RB_convert(self,image):
        """Swap channels 0 and 2 (BGR <-> RGB) without mutating the input."""
        image_c = image.copy()
        image_c[:,:,0] = image[:,:,2]
        image_c[:,:,2] = image[:,:,0]
        return image_c

def get_ms(t1,t0):
    """Elapsed milliseconds between two time.time() stamps."""
    return (t1-t0)*1000.0

def get_largest_contours(contours):
    """Index of the contour with the largest area (contours must be non-empty)."""
    areas = [cv2.contourArea(x) for x in contours]
    max_area = max(areas)
    max_id = areas.index(max_area)
    return max_id

if __name__=='__main__':
    impth = '../../../../data/无人机起飞测试图像/'
    outpth= 'results'
    folders = os.listdir(impth)
    weights = '../weights/STDC/model_maxmIOU75_1720_0.946_360640.pth'
    segmodel = SegModel(nclass=2,weights=weights)

    for i in range(len(folders)):
        imgpath = os.path.join(impth, folders[i])
        time0 = time.time()

        #img = Image.open(imgpath).convert('RGB')
        img = cv2.imread(imgpath)
        if img is None:
            # FIX: os.listdir may yield non-image entries; cv2.imread returns
            # None for those and np.array(None).shape would crash below.
            print('skip unreadable file:%s'%(imgpath)); continue
        img = np.array(img)

        time1 = time.time()
        pred, outstr = segmodel.eval(image=img)
        time2 = time.time()

        binary0 = pred.copy()
        contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        time3 = time.time()

        max_id = -1
        if len(contours)>0:
            max_id = get_largest_contours(contours)
            binary0[:,:] = 0
            cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1)

        cv2.drawContours(img,contours,max_id,(0,255,255),3)
        time4 = time.time()

        #img_n = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
        cv2.imwrite( os.path.join( outpth,folders[i] ) ,img )
        time5 = time.time()
        print('image:%d ,infer:%.1f ms,findcontours:%.1f ms, draw:%.1f, total:%.1f'%(i,get_ms(time2,time1),get_ms(time3,time2),get_ms(time4,time3),get_ms(time4,time1)))

# ---- file: segutils/segmodel_trt.py ----
import torch
import argparse
import sys,os
sys.path.extend(['segutils'])
from core.models.bisenet import BiSeNet
from model_stages import BiSeNet_STDC
from torchvision import transforms
import cv2,glob
import numpy as np
import matplotlib.pyplot as plt
import time
from pathlib import Path
from trtUtils import TRTModule,segTrtForward,segtrtEval,segPreProcess_image,get_ms
from concurrent.futures import ThreadPoolExecutor
import tensorrt as trt
from copy import deepcopy
import onnx
import numpy as np
import onnxruntime as ort
import cv2
#import pycuda.driver as cuda

class SegModel_BiSeNet(object):
    """Segmentation wrapper around the BiSeNet model (PyTorch checkpoint)."""
    def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:0'):
        self.model = BiSeNet(nclass)
        # FIX: add map_location so the checkpoint can be loaded on a CPU-only
        # host or a different GPU index — consistent with SegModel_STDC below.
        checkpoint = torch.load(weights, map_location=torch.device(device))
        if isinstance(modelsize,list) or isinstance(modelsize,tuple):
            self.modelsize = modelsize
        else:
            self.modelsize = (modelsize,modelsize)
        self.model.load_state_dict(checkpoint['model'])
        self.device = device
        self.model = self.model.to(self.device)
        '''self.composed_transforms = transforms.Compose([

            transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)),
            transforms.ToTensor()]) '''
        self.mean = (0.335, 0.358, 0.332)   # dataset-specific normalization stats
        self.std = (0.141, 0.138, 0.143)

    def eval(self,image):
        """Run inference on one image; returns (per-pixel mask, timing string)."""
        time0 = time.time()
        imageH,imageW,imageC = image.shape
        image = self.preprocess_image(image)
        time1 = time.time()
        self.model.eval()
        image = image.to(self.device)
        with torch.no_grad():
            output = self.model(image)
        time2 = time.time()
        pred = output.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)[0]   # argmax over class channel -> labels
        time3 = time.time()
        pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH))
        time4 = time.time()
        outstr= 'pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) )
        return pred,outstr

    def get_ms(self,t1,t0):
        return (t1-t0)*1000.0

    def preprocess_image(self,image):
        time0 = time.time()
        image = cv2.resize(image,self.modelsize)
        time1 = time.time()
        image = image.astype(np.float32)
        image /= 255.0
        time2 = time.time()
        image[:,:,0] -= self.mean[0]
        image[:,:,1] -= self.mean[1]
        image[:,:,2] -= self.mean[2]
        time3 = time.time()
        image[:,:,0] /= self.std[0]
        image[:,:,1] /= self.std[1]
        image[:,:,2] /= self.std[2]
        time4 = time.time()
        # NOTE(review): channels are swapped AFTER normalization here, unlike
        # SegModel_STDC which converts BGR->RGB before normalizing. Presumably
        # this matches how this particular checkpoint was trained — confirm.
        image = cv2.cvtColor( image,cv2.COLOR_RGB2BGR)
        #image -= self.mean
        #image /= self.std
        image = np.transpose(image, ( 2, 0, 1))
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)
        time5 = time.time()
        print('resize:%1f ,normalize:%.1f ,Demean:%.1f ,DeVar:%.1f ,other:%.1f'%( self.get_ms(time1,time0 ), self.get_ms(time2,time1 ), self.get_ms(time3,time2 ), self.get_ms(time4,time3 ), self.get_ms(time5,time4 ) ))
        return image

class SegModel_STDC(object):
    """Segmentation wrapper around BiSeNet_STDC (STDCNet813 backbone)."""
    def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:0',modelSize=(360,640)):
        self.model = BiSeNet_STDC(backbone='STDCNet813', n_classes=nclass,
                                  use_boundary_2=False, use_boundary_4=False,
                                  use_boundary_8=True, use_boundary_16=False,
                                  use_conv_last=False,modelSize=modelSize)
        self.device = device
        self.model.load_state_dict(torch.load(weights, map_location=torch.device(self.device) ))
        self.model = self.model.to(self.device)
        self.mean = (0.485, 0.456, 0.406)   # ImageNet stats (RGB)
        self.std = (0.229, 0.224, 0.225)
        self.modelSize = modelSize          # (height, width)

    def eval(self,image):
        """Run inference on one BGR image; returns (mask, timing string)."""
        time0 = time.time()
        imageH, imageW, _ = image.shape
        image = self.RB_convert(image)      # BGR -> RGB before normalization
        img = self.preprocess_image(image)
        if self.device != 'cpu':
            imgs = img.to(self.device)
        else:
            imgs = img
        time1 = time.time()
        self.model.eval()
        with torch.no_grad():
            output = self.model(imgs)
        time2 = time.time()
        pred = output.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)[0]   # argmax over class channel -> labels
        time3 = time.time()
        pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH))
        time4 = time.time()
        outstr= 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) )
        return pred,outstr

    def get_ms(self,t1,t0):
        return (t1-t0)*1000.0

    def preprocess_image(self,image):
        # modelSize is (H,W); cv2.resize wants (W,H)
        image = cv2.resize(image, (self.modelSize[1],self.modelSize[0] ), interpolation=cv2.INTER_LINEAR)
        image = image.astype(np.float32)
        image /= 255.0

        image[:, :, 0] -= self.mean[0]
        image[:, :, 1] -= self.mean[1]
        image[:, :, 2] -= self.mean[2]

        image[:, :, 0] /= self.std[0]
        image[:, :, 1] /= self.std[1]
        image[:, :, 2] /= self.std[2]

        image = np.transpose(image, (2, 0, 1))
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)
        return image

    def RB_convert(self,image):
        """Swap channels 0 and 2 (BGR <-> RGB) without mutating the input."""
        image_c = image.copy()
        image_c[:,:,0] = image[:,:,2]
        image_c[:,:,2] = image[:,:,0]
        return image_c


def get_largest_contours(contours):
    """Index of the contour with the largest area (contours must be non-empty)."""
    areas = [cv2.contourArea(x) for x in contours]
    max_area = max(areas)
    max_id = areas.index(max_area)
    return max_id

def infer_usage(par):
    """Demo: run par['segmodel'] over par['image_dir'] and save annotated images."""
    #par={'modelSize':(inputShape[3],inputShape[2]),'mean':(0.485, 0.456, 0.406),'std':(0.229, 0.224, 0.225),'RGB_convert_first':True,
    #     'weights':trtFile,'device':device,'max_threads':1,
    #     'image_dir':'../../AIdemo2/images/trafficAccident/','out_dir' :'results'}
    segmodel = par['segmodel']

    image_urls=glob.glob('%s/*'%(par['image_dir']))
    out_dir =par['out_dir']
    os.makedirs(out_dir,exist_ok=True)
    for im,image_url in enumerate(image_urls[0:1]):
        #image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG'
        image_array0 = cv2.imread(image_url)
        H,W,C = image_array0.shape
        time_1=time.time()
        pred,outstr = segmodel.eval(image_array0 )

        binary0 = pred.copy()

        time0 = time.time()
        contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        max_id = -1
        time1 = time.time()
        time2 = time.time()

        # max_id stays -1 here, so drawContours renders ALL contours
        cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
        time3 = time.time()
        out_url='%s/%s'%(out_dir,os.path.basename(image_url))
        ret = cv2.imwrite(out_url,image_array0)
cv2.imwrite(out_url.replace('.','_mask.'),(pred*50).astype(np.uint8)) + time4 = time.time() + + print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) ) + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] +def file_size(path): + # Return file/dir size (MB) + path = Path(path) + if path.is_file(): + return path.stat().st_size / 1E6 + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + else: + return 0.0 + + +def toONNX(seg_model,onnxFile,inputShape=(1,3,360,640),device=torch.device('cuda:0'),dynamic=False ): + + import onnx + + im = torch.rand(inputShape).to(device) + seg_model.eval() + out=seg_model(im) + print('###test model infer example over ####') + train=False + dynamic = False + opset=11 + print('####begin to export to onnx') + + torch.onnx.export(seg_model, im,onnxFile, opset_version=opset, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + 
#dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + # 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + # } if dynamic else None + dynamic_axes={ + 'images': {0: 'batch_size', 2: 'in_width', 3: 'int_height'}, + 'output': {0: 'batch_size', 2: 'out_width', 3: 'out_height'}} if dynamic else None + + ) + ''' + input_name='images' + output_name='output' + torch.onnx.export(seg_model, + im, + onnxFile, + opset_version=11, + input_names=[input_name], + output_names=[output_name], + dynamic_axes={ + input_name: {0: 'batch_size', 2: 'in_width', 3: 'int_height'}, + output_name: {0: 'batch_size', 2: 'out_width', 3: 'out_height'}} + ) + + ''' + + + print('output onnx file:',onnxFile) +def ONNXtoTrt(onnxFile,trtFile): + import tensorrt as trt + #onnx = Path('../weights/BiSeNet/checkpoint.onnx') + #onnxFile = Path('../weights/STDC/model_maxmIOU75_1720_0.946_360640.onnx') + time0=time.time() + half=True;verbose=True;workspace=4;prefix=colorstr('TensorRT:') + #f = onnx.with_suffix('.engine') # TensorRT engine file + f=trtFile + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnxFile)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + print(f'{prefix} Network Description:') + for inp in inputs: + print(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + print(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + half &= 
builder.platform_has_fast_fp16 + print(f'{prefix} building FP{16 if half else 32} engine in {f}') + if half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + time1=time.time() + print('output trtfile from ONNX, time:%.4f s ,'%(time1-time0),trtFile) +def ONNX_eval(par): + + + model_path = par['weights']; + modelSize=par['modelSize'] + mean = par['mean'] + std = par['std'] + image_urls=glob.glob('%s/*'%(par['image_dir'] )) + out_dir = par['out_dir'] + + # 验证模型合法性 + onnx_model = onnx.load(model_path) + onnx.checker.check_model(onnx_model) + + # 设置模型session以及输入信息 + sess = ort.InferenceSession(str(model_path),providers= ort.get_available_providers()) + print('len():',len( sess.get_inputs() )) + input_name1 = sess.get_inputs()[0].name + + + half = False;device = 'cuda:0' + os.makedirs(out_dir,exist_ok=True) + + for im,image_url in enumerate(image_urls[0:1]): + image_array0 = cv2.imread(image_url) + #img=segPreProcess_image(image_array0).to(device) + img=segPreProcess_image(image_array0,modelSize=modelSize,mean=mean,std=std,numpy=True,RGB_convert_first=par['RGB_convert_first']) + #img = cv2.resize(img,(512,512)).transpose(2,0,1) + img = np.array(img)[np.newaxis, :, :, :].astype(np.float32) + H,W,C = image_array0.shape + time_1=time.time() + #pred,outstr = segmodel.eval(image_array0 ) + print('###line343:',img.shape, os.path.basename(image_url)) + print('###line343:img[0,0,10:12,10:12] ',img[0,0,10:12,10:12]) + output = sess.run(None, {input_name1: img}) + pred =output[0] + + #pred = pred.data.cpu().numpy() + pred = np.argmax(pred, axis=1)[0]#得到每行 + pred = cv2.resize(pred.astype(np.uint8),(W,H)) + print('###line362:',np.max(pred)) + outstr='###---###' + + binary0 = pred.copy() + + + time0 = time.time() + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 
+ + time1 = time.time() + + + time2 = time.time() + + #cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + cv2.drawContours(image_array0,contours,-1,(0,255,255),3) + time3 = time.time() + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + ret = cv2.imwrite(out_url,image_array0) + ret = cv2.imwrite(out_url.replace('.jpg','_mask.jpg').replace('.png','_mask.png' ),(pred*50).astype(np.uint8)) + time4 = time.time() + + print('image:%d,%s ,%d*%d,eval:%.1f ms, %s,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,get_ms(time0,time_1),outstr,get_ms(time1,time0), get_ms(time3,time2),get_ms(time3,time_1)) ) + print('outimage:',out_url) + + + #print(output) + +class SegModel_STDC_trt(object): + def __init__(self,weights=None,modelsize=512,std=(0.229, 0.224, 0.225),mean=(0.485, 0.456, 0.406),device='cuda:0'): + + logger = trt.Logger(trt.Logger.INFO) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + engine=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + self.model = TRTModule(engine, ["images"], ["output"]) + self.mean = mean + self.std = std + self.device = device + self.modelsize = modelsize + + + def eval(self,image): + time0=time.time() + H,W,C=image.shape + img_input=self.segPreProcess_image(image) + time1=time.time() + pred=self.model(img_input) + time2=time.time() + pred=torch.argmax(pred,dim=1).cpu().numpy()[0] + #pred = np.argmax(pred.cpu().numpy(), axis=1)[0]#得到每行 + time3 = time.time() + pred = cv2.resize(pred.astype(np.uint8),(W,H)) + time4 = time.time() + outstr= 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) ) + + return pred,outstr + def segPreProcess_image(self,image): + + image = cv2.resize(image,self.modelsize) + image = cv2.cvtColor( image,cv2.COLOR_RGB2BGR) + + image = image.astype(np.float32) + image /= 255.0 + 
image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + image[:,:,2] -=self.mean[2] + + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + image = np.transpose(image, ( 2, 0, 1)) + + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + return image.to(self.device) + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + + + +def EngineInfer_onePic_thread(pars_thread): + + engine,image_array0,out_dir,image_url,im ,par= pars_thread[0:6] + out_url='%s/%s'%(out_dir,os.path.basename(image_url)) + + H,W,C = image_array0.shape + time0=time.time() + + time1=time.time() + # 运行模型 + + + #pred,segInfoStr=segtrtEval(engine,image_array0,par={'modelSize':(640,360),'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True}) + pred,segInfoStr=segtrtEval(engine,image_array0,par=par) + cv2.imwrite(out_url.replace('.','_mask.'),(pred*50).astype(np.uint8)) + + pred = 1 - pred + time2=time.time() + + outstr='###---###' + binary0 = pred.copy() + time3 = time.time() + + contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + max_id = -1 + #if len(contours)>0: + # max_id = get_largest_contours(contours) + # binary0[:,:] = 0 + # cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1) + time4 = time.time() + + cv2.drawContours(image_array0,contours,max_id,(0,255,255),3) + time5 = time.time() + + ret = cv2.imwrite(out_url,image_array0) + time6 = time.time() + + print('image:%d,%s ,%d*%d, %s,,findcontours:%.1f ms,draw:%.1f total:%.1f'%(im,os.path.basename(image_url),H,W,segInfoStr, get_ms(time4,time3),get_ms(time5,time4),get_ms(time5,time0) )) + + + return 'success' + + +def EngineInfer(par): + + modelSize=par['modelSize'];mean = par['mean'] ;std = par['std'] ;RGB_convert_first=par['RGB_convert_first'];device=par['device'] + weights=par['weights']; image_dir=par['image_dir'] + max_threads=par['max_threads'];par['numpy']=False + image_urls=glob.glob('%s/*'%(image_dir)) + 
out_dir =par['out_dir'] + + os.makedirs(out_dir,exist_ok=True) + + #trt_model = SegModel_STDC_trt(weights=weights,modelsize=modelSize,std=std,mean=mean,device=device) + logger = trt.Logger(trt.Logger.ERROR) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + engine=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + print('#####load TRT file:',weights,'success #####') + + pars_thread=[] + pars_threads=[] + for im,image_url in enumerate(image_urls[0:]): + image_array0 = cv2.imread(image_url) + pars_thread=[engine,image_array0,out_dir,image_url,im,par] + pars_threads.append(pars_thread) + #EngineInfer_onePic_thread(pars_thread) + t1=time.time() + if max_threads==1: + for i in range(len(pars_threads[0:])): + EngineInfer_onePic_thread(pars_threads[i]) + + ''' + pred,segInfoStr=segtrtEval(pars_threads[i][0],pars_threads[i][1],par) + bname=os.path.basename( pars_threads[i][3] ) + outurl= os.path.join( out_dir , bname.replace( '.png','_mask.png').replace('.jpg','._mask.jpg') ) + ret=cv2.imwrite( outurl,(pred*50).astype(np.uint8)) + print(ret,outurl)''' + + else: + with ThreadPoolExecutor(max_workers=max_threads) as t: + for result in t.map(EngineInfer_onePic_thread, pars_threads): + tt=result + + t2=time.time() + print('All %d images time:%.1f ms, each:%.1f ms , with %d threads'%(len(image_urls),(t2-t1)*1000, (t2-t1)*1000.0/len(image_urls), max_threads) ) + + + +if __name__=='__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='stdc_360X640.pth', help='model path(s)') + parser.add_argument('--nclass', type=int, default=2, help='segmodel nclass') + parser.add_argument('--mWidth', type=int, default=640, help='segmodel mWdith') + parser.add_argument('--mHeight', type=int, default=360, help='segmodel mHeight') + opt = parser.parse_args() + print( opt.weights ) + #pthFile = Path('../../../yolov5TRT/weights/river/stdc_360X640.pth') + pthFile = Path(opt.weights) + onnxFile = 
str(pthFile.with_suffix('.onnx')).replace('360X640', '%dX%d'%( opt.mWidth,opt.mHeight )) + trtFile = onnxFile.replace('.onnx','.engine' ) + + nclass = opt.nclass; device=torch.device('cuda:0'); + + '''###BiSeNet + weights = '../weights/BiSeNet/checkpoint.pth';;inputShape =(1, 3, 512,512) + segmodel = SegModel_BiSeNet(nclass=nclass,weights=weights) + seg_model=segmodel.model + ''' + + ##STDC net + weights = pthFile + inputShape =(1, 3, opt.mHeight,opt.mWidth)#(bs,channels,height,width) + #inputShape =(1, 3, 360,640)#(bs,channels,height,width) + segmodel = SegModel_STDC(nclass=nclass,weights=weights,modelSize=(inputShape[2],inputShape[3])); + + + seg_model=segmodel.model + + par={'modelSize':(inputShape[3],inputShape[2]),'mean':(0.485, 0.456, 0.406),'std':(0.229, 0.224, 0.225),'RGB_convert_first':True, + 'weights':trtFile,'device':device,'max_threads':1,'predResize':True, + 'image_dir':'../../AIdemo2/images/trafficAccident/','out_dir' :'results'} + + par_onnx =deepcopy( par) + par_onnx['weights']=onnxFile + par_pth =deepcopy( par);par_pth['segmodel']=segmodel; + #infer_usage(par_pth) + + toONNX(seg_model,onnxFile,inputShape=inputShape,device=device,dynamic=True) + print('####trt to onnx over###') + ONNXtoTrt(onnxFile,trtFile) + + #EngineInfer(par) + + + #ONNX_eval(par_onnx) + + + + + + + + + + diff --git a/segutils/stdcnet.py b/segutils/stdcnet.py new file mode 100644 index 0000000..0d6c53c --- /dev/null +++ b/segutils/stdcnet.py @@ -0,0 +1,302 @@ +import torch +import torch.nn as nn +from torch.nn import init +import math + +class ConvX(nn.Module): + def __init__(self, in_planes, out_planes, kernel=3, stride=1): + super(ConvX, self).__init__() + self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel, stride=stride, padding=kernel//2, bias=False) + self.bn = nn.BatchNorm2d(out_planes) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + out = self.relu(self.bn(self.conv(x))) + return out + + +class AddBottleneck(nn.Module): + def __init__(self, 
in_planes, out_planes, block_num=3, stride=1): + super(AddBottleneck, self).__init__() + assert block_num > 1, print("block number should be larger than 1.") + self.conv_list = nn.ModuleList() + self.stride = stride + if stride == 2: + self.avd_layer = nn.Sequential( + nn.Conv2d(out_planes//2, out_planes//2, kernel_size=3, stride=2, padding=1, groups=out_planes//2, bias=False), + nn.BatchNorm2d(out_planes//2), + ) + self.skip = nn.Sequential( + nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=2, padding=1, groups=in_planes, bias=False), + nn.BatchNorm2d(in_planes), + nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False), + nn.BatchNorm2d(out_planes), + ) + stride = 1 + + for idx in range(block_num): + if idx == 0: + self.conv_list.append(ConvX(in_planes, out_planes//2, kernel=1)) + elif idx == 1 and block_num == 2: + self.conv_list.append(ConvX(out_planes//2, out_planes//2, stride=stride)) + elif idx == 1 and block_num > 2: + self.conv_list.append(ConvX(out_planes//2, out_planes//4, stride=stride)) + elif idx < block_num - 1: + self.conv_list.append(ConvX(out_planes//int(math.pow(2, idx)), out_planes//int(math.pow(2, idx+1)))) + else: + self.conv_list.append(ConvX(out_planes//int(math.pow(2, idx)), out_planes//int(math.pow(2, idx)))) + + def forward(self, x): + out_list = [] + out = x + + for idx, conv in enumerate(self.conv_list): + if idx == 0 and self.stride == 2: + out = self.avd_layer(conv(out)) + else: + out = conv(out) + out_list.append(out) + + if self.stride == 2: + x = self.skip(x) + + return torch.cat(out_list, dim=1) + x + + + +class CatBottleneck(nn.Module): + def __init__(self, in_planes, out_planes, block_num=3, stride=1): + super(CatBottleneck, self).__init__() + assert block_num > 1, print("block number should be larger than 1.") + self.conv_list = nn.ModuleList() + self.stride = stride + if stride == 2: + self.avd_layer = nn.Sequential( + nn.Conv2d(out_planes//2, out_planes//2, kernel_size=3, stride=2, padding=1, groups=out_planes//2, 
bias=False), + nn.BatchNorm2d(out_planes//2), + ) + self.skip = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + stride = 1 + + for idx in range(block_num): + if idx == 0: + self.conv_list.append(ConvX(in_planes, out_planes//2, kernel=1)) + elif idx == 1 and block_num == 2: + self.conv_list.append(ConvX(out_planes//2, out_planes//2, stride=stride)) + elif idx == 1 and block_num > 2: + self.conv_list.append(ConvX(out_planes//2, out_planes//4, stride=stride)) + elif idx < block_num - 1: + self.conv_list.append(ConvX(out_planes//int(math.pow(2, idx)), out_planes//int(math.pow(2, idx+1)))) + else: + self.conv_list.append(ConvX(out_planes//int(math.pow(2, idx)), out_planes//int(math.pow(2, idx)))) + + def forward(self, x): + out_list = [] + out1 = self.conv_list[0](x) + + for idx, conv in enumerate(self.conv_list[1:]): + if idx == 0: + if self.stride == 2: + out = conv(self.avd_layer(out1)) + else: + out = conv(out1) + else: + out = conv(out) + out_list.append(out) + + if self.stride == 2: + out1 = self.skip(out1) + out_list.insert(0, out1) + + out = torch.cat(out_list, dim=1) + return out + +#STDC2Net +class STDCNet1446(nn.Module): + def __init__(self, base=64, layers=[4,5,3], block_num=4, type="cat", num_classes=1000, dropout=0.20, pretrain_model='', use_conv_last=False): + super(STDCNet1446, self).__init__() + if type == "cat": + block = CatBottleneck + elif type == "add": + block = AddBottleneck + self.use_conv_last = use_conv_last + self.features = self._make_layers(base, layers, block_num, block) + self.conv_last = ConvX(base*16, max(1024, base*16), 1, 1) + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(max(1024, base*16), max(1024, base*16), bias=False) + self.bn = nn.BatchNorm1d(max(1024, base*16)) + self.relu = nn.ReLU(inplace=True) + self.dropout = nn.Dropout(p=dropout) + self.linear = nn.Linear(max(1024, base*16), num_classes, bias=False) + + self.x2 = nn.Sequential(self.features[:1]) + self.x4 = nn.Sequential(self.features[1:2]) + self.x8 = 
nn.Sequential(self.features[2:6]) + self.x16 = nn.Sequential(self.features[6:11]) + self.x32 = nn.Sequential(self.features[11:]) + + if pretrain_model: + print('use pretrain model {}'.format(pretrain_model)) + self.init_weight(pretrain_model) + else: + self.init_params() + + def init_weight(self, pretrain_model): + + state_dict = torch.load(pretrain_model)["state_dict"] + self_state_dict = self.state_dict() + for k, v in state_dict.items(): + self_state_dict.update({k: v}) + self.load_state_dict(self_state_dict) + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.001) + if m.bias is not None: + init.constant_(m.bias, 0) + + def _make_layers(self, base, layers, block_num, block): + features = [] + features += [ConvX(3, base//2, 3, 2)] + features += [ConvX(base//2, base, 3, 2)] + + for i, layer in enumerate(layers): + for j in range(layer): + if i == 0 and j == 0: + features.append(block(base, base*4, block_num, 2)) + elif j == 0: + features.append(block(base*int(math.pow(2,i+1)), base*int(math.pow(2,i+2)), block_num, 2)) + else: + features.append(block(base*int(math.pow(2,i+2)), base*int(math.pow(2,i+2)), block_num, 1)) + + return nn.Sequential(*features) + + def forward(self, x): + feat2 = self.x2(x) + feat4 = self.x4(feat2) + feat8 = self.x8(feat4) + feat16 = self.x16(feat8) + feat32 = self.x32(feat16) + if self.use_conv_last: + feat32 = self.conv_last(feat32) + + return feat2, feat4, feat8, feat16, feat32 + + def forward_impl(self, x): + out = self.features(x) + out = self.conv_last(out).pow(2) + out = self.gap(out).flatten(1) + out = self.fc(out) + # out = self.bn(out) + out = self.relu(out) + # out = self.relu(self.bn(self.fc(out))) + out = self.dropout(out) + out = 
self.linear(out) + return out + +# STDC1Net +class STDCNet813(nn.Module): + def __init__(self, base=64, layers=[2,2,2], block_num=4, type="cat", num_classes=1000, dropout=0.20, pretrain_model='', use_conv_last=False): + super(STDCNet813, self).__init__() + if type == "cat": + block = CatBottleneck + elif type == "add": + block = AddBottleneck + self.use_conv_last = use_conv_last + self.features = self._make_layers(base, layers, block_num, block) + self.conv_last = ConvX(base*16, max(1024, base*16), 1, 1) + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(max(1024, base*16), max(1024, base*16), bias=False) + self.bn = nn.BatchNorm1d(max(1024, base*16)) + self.relu = nn.ReLU(inplace=True) + self.dropout = nn.Dropout(p=dropout) + self.linear = nn.Linear(max(1024, base*16), num_classes, bias=False) + + self.x2 = nn.Sequential(self.features[:1]) + self.x4 = nn.Sequential(self.features[1:2]) + self.x8 = nn.Sequential(self.features[2:4]) + self.x16 = nn.Sequential(self.features[4:6]) + self.x32 = nn.Sequential(self.features[6:]) + + if pretrain_model: + print('use pretrain model {}'.format(pretrain_model)) + self.init_weight(pretrain_model) + else: + self.init_params() + + def init_weight(self, pretrain_model): + + state_dict = torch.load(pretrain_model)["state_dict"] + self_state_dict = self.state_dict() + for k, v in state_dict.items(): + self_state_dict.update({k: v}) + self.load_state_dict(self_state_dict) + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.001) + if m.bias is not None: + init.constant_(m.bias, 0) + + def _make_layers(self, base, layers, block_num, block): + features = [] + features += [ConvX(3, base//2, 3, 2)] + features += [ConvX(base//2, base, 3, 2)] 
+ + for i, layer in enumerate(layers): + for j in range(layer): + if i == 0 and j == 0: + features.append(block(base, base*4, block_num, 2)) + elif j == 0: + features.append(block(base*int(math.pow(2,i+1)), base*int(math.pow(2,i+2)), block_num, 2)) + else: + features.append(block(base*int(math.pow(2,i+2)), base*int(math.pow(2,i+2)), block_num, 1)) + + return nn.Sequential(*features) + + def forward(self, x): + feat2 = self.x2(x) + feat4 = self.x4(feat2) + feat8 = self.x8(feat4) + feat16 = self.x16(feat8) + feat32 = self.x32(feat16) + if self.use_conv_last: + feat32 = self.conv_last(feat32) + + return feat2, feat4, feat8, feat16, feat32 + + def forward_impl(self, x): + out = self.features(x) + out = self.conv_last(out).pow(2) + out = self.gap(out).flatten(1) + out = self.fc(out) + # out = self.bn(out) + out = self.relu(out) + # out = self.relu(self.bn(self.fc(out))) + out = self.dropout(out) + out = self.linear(out) + return out + +if __name__ == "__main__": + model = STDCNet813(num_classes=1000, dropout=0.00, block_num=4) + model.eval() + x = torch.randn(1,3,224,224) + y = model(x) + torch.save(model.state_dict(), 'cat.pth') + print(y.size()) diff --git a/segutils/toTrt.py b/segutils/toTrt.py new file mode 100644 index 0000000..df70842 --- /dev/null +++ b/segutils/toTrt.py @@ -0,0 +1,64 @@ + + +from pathlib import Path +import torch +import os,sys +import argparse + +sys.path.extend(['segutils']) +from model_stages import BiSeNet_STDC +from trtUtils2 import pth2onnx,onnx2engine,onnx_inference + +def main(opt): + + if opt.mWidth ==0 or opt.mHeight==0: + modelSize=None + else: + modelSize = ( int(opt.mHeight), int(opt.mWidth) ) + model = BiSeNet_STDC(backbone='STDCNet813', n_classes=int(opt.nclass), + use_boundary_2=False, use_boundary_4=False, + use_boundary_8=True, use_boundary_16=False, + use_conv_last=False, + modelSize=modelSize + ) + + model.load_state_dict(torch.load(opt.weights.strip(), map_location='cuda:0' )) + #model= model.to(device) + + + + + 
#pth_model='../weights/best_mae.pth' + pth_model=opt.weights.strip() + onnx_name = pth_model.replace('.pth','_dynamic.onnx') + trt_name = onnx_name.replace('.onnx','.engine') + dynamic_hw ={'input':{0:'batch',2:'H',3:'W'}, + 'output0':{1:'C',2:'H',3:'W'}, + 'output1':{1:'C',2:'H',3:'W'}, + + } + + + inputShape =(1, 3, 128*4,128*4)#(bs,channels,height,width) + + input_profile_shapes = [(1,3,256,256),(1,3,1024,1024),(1,3,2048,2048)] + + pth2onnx(model,onnx_name,input_shape=(1,3,512,512),input_names=['input'],output_names=[ 'output0' ,'output1'],dynamix_axis=dynamic_hw) + + onnx2engine(onnx_name,trt_name,input_shape=[1,3,-1,-1],half=True,max_batch_size=1,input_profile_shapes=input_profile_shapes) + + + + + + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='stdc_360X640.pth', help='model path(s)') + parser.add_argument('--nclass', type=int, default=2, help='segmodel nclass') + parser.add_argument('--mWidth', type=int, default=640, help='segmodel mWdith') + parser.add_argument('--mHeight', type=int, default=360, help='segmodel mHeight') + opt = parser.parse_args() + + main(opt) \ No newline at end of file diff --git a/segutils/trafficUtils.py b/segutils/trafficUtils.py new file mode 100644 index 0000000..4846650 --- /dev/null +++ b/segutils/trafficUtils.py @@ -0,0 +1,641 @@ +# 设定开关,将最小外接矩形中心点间的距离作为vehicle之间的距离 +import numpy as np +import math, cv2, time +from copy import deepcopy + +def xyxy_coordinate(boundbxs,contour): + ''' + 输入:两个对角坐标xyxy + 输出:四个点位置 + ''' + x1 = boundbxs[0] + y1 = boundbxs[1] + x2 = boundbxs[2] + y2 = boundbxs[3] + + for x in (x1,x2): + for y in (y1,y2): + flag = cv2.pointPolygonTest(contour, (int(x), int(y)), + False) # 若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag == 1: + return 1 + + return flag + +def get_ms(time2, time1): + return (time2 - time1) * 1000.0 + + +def two_points_distance(x1, y1, x2, y2): + distance = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) + return 
distance + + +# 保存正常vehicle和非正常vehicle的信息(当contours顶点数小于6时,无法拟合最小外接矩形,定义为非正常vehicle) +def saveVehicle1(traffic_dict, contours, normVehicleBD, normVehicle, count, i, unnormVehicle, normVehicleCOOR): + if len(contours) >= 6: + normVehicleBD.append(contours) + normVehicle.append(traffic_dict['det'][count]) + rect = cv2.minAreaRect(contours) + normVehicleCOOR.append(rect[0]) + else: + traffic_dict['det'][int(i / 2)] = traffic_dict['det'][int(i / 2)] + [0, 0.3, 999, -1, 3] + unnormVehicle.append(traffic_dict['det'][int(i / 2)]) + return normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR + + +# saveVehicle2和saveVehicle1有区别 +def saveVehicle2(traffic_dict, contours, normVehicleBD, normVehicle, count, i, unnormVehicle, normVehicleCOOR, centerCOOR): + if len(contours) >= 6: + normVehicleBD.append(contours) + normVehicle.append(traffic_dict['det'][count]) + normVehicleCOOR.append(centerCOOR) + else: + traffic_dict['det'][int(i / 2)] = traffic_dict['det'][int(i / 2)] + [0, 0.3, 999, -1, 3] + unnormVehicle.append(traffic_dict['det'][int(i / 2)]) + return normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR + + +# 对于不在道路上的vehicle,将输出信息补全 +def supplementInformation(traffic_dict, i, roundness, y_min, y_max, imgVehicle, rect): + score = -1 + traffic_dict['det'][i] = traffic_dict['det'][i] + [0, roundness, 999, [-1, -1, -1], 666] + if y_min > 0 and y_max < imgVehicle.shape[0] and roundness > traffic_dict['roundness']: # 过滤掉上下方被speedRoad的边界截断的vehicle + score = (min(rect[1]) - max(rect[1]) * traffic_dict['roundness']) / (max(rect[1]) * (1 - traffic_dict['roundness'])) + return score + + +# 判断交通事故类型 +def judgeAccidentType(traffic_dict, b): + if max(traffic_dict['det'][b][9]) == traffic_dict['det'][b][9][0] and traffic_dict['det'][b][9][0] != -1: + return 0 + elif max(traffic_dict['det'][b][9]) == traffic_dict['det'][b][9][1] and traffic_dict['det'][b][9][1] != -1: + return 1 + elif max(traffic_dict['det'][b][9]) == traffic_dict['det'][b][9][2] and 
traffic_dict['det'][b][9][2] != -1: + return 2 + else: + return 3 + + +# 计算距离得分 +def distanceScore(vehicleWH, index1, index2, smallestDistance, traffic_dict): + d1 = (min(vehicleWH[index1]) + min(vehicleWH[index2])) / 2 + d2 = min(min(vehicleWH[index1]), min(vehicleWH[index2])) + max(min(vehicleWH[index1]), min(vehicleWH[index2])) / 2 + if smallestDistance == d1: + score1 = 1 + traffic_dict['det'][index2][9][2] = score1 + traffic_dict['det'][index2][10] = judgeAccidentType(traffic_dict, index2) + elif smallestDistance < d2: + score1 = 1 - (smallestDistance - d1) / (d2 - d1) + if 0 < score1 < 1: + traffic_dict['det'][index2][9][2] = score1 + traffic_dict['det'][index2][10] = judgeAccidentType(traffic_dict, index2) + else: + traffic_dict['det'][index2][10] = judgeAccidentType(traffic_dict, index2) + else: + traffic_dict['det'][index2][10] = judgeAccidentType(traffic_dict, index2) + return traffic_dict['det'] + + +# 计算两个contours之间的最短距离 +def array_distance(arr1, arr2): + ''' + 计算两个数组中,每任意两个点之间L2距离 + arr1和arr2都必须是numpy数组 + 且维度分别是mx2,nx2 + 输出数组维度为mxn + ''' + m, _ = arr1.shape + n, _ = arr2.shape + arr1_power = np.power(arr1, 2) + arr1_power_sum = arr1_power[:, 0] + arr1_power[:, 1] # 第1区域,x与y的平方和 + arr1_power_sum = np.tile(arr1_power_sum, (n, 1)) # 将arr1_power_sum沿着y轴复制n倍,沿着x轴复制1倍,这里用于与arr2进行计算。 n x m 维度 + arr1_power_sum = arr1_power_sum.T # 将arr1_power_sum进行转置 + arr2_power = np.power(arr2, 2) + arr2_power_sum = arr2_power[:, 0] + arr2_power[:, 1] # 第2区域,x与y的平方和 + arr2_power_sum = np.tile(arr2_power_sum, (m, 1)) # 将arr1_power_sum沿着y轴复制m倍,沿着x轴复制1倍,这里用于与arr1进行计算。 m x n 维度 + dis = arr1_power_sum + arr2_power_sum - (2 * np.dot(arr1, arr2.T)) # np.dot(arr1, arr2.T)矩阵相乘,得到xy的值。 + dis = np.sqrt(dis) + return dis + + +# 存储所有道路的信息 +def storageRoad(contours, allRoadContent, traffic_dict): + speedRoadAngle = 0 + for cnt in contours: # 道路 + rect = cv2.minAreaRect(cnt) + if rect[1][0] * rect[1][1] > traffic_dict['RoadArea']: # 过滤掉面积小于阈值的speedRoad + if rect[1][0] <= rect[1][1]: + if 
rect[2] >= 0 and rect[2] < 90: + speedRoadAngle = rect[2] + 90 + elif rect[2] == 90: + speedRoadAngle = 0 + else: + if rect[2] >= 0 and rect[2] <= 90: + speedRoadAngle = rect[2] + allRoadContent.append([cnt, speedRoadAngle, rect[1]]) + return allRoadContent + + +# 存储所有vehicle的信息,方法1 +def storageVehicle1(traffic_dict, normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR, imgVehicle): + #输入: + # + #输出:traffic_dict['det'], normVehicleBD, unnormVehicle, normVehicleCOOR + # traffic_dict['det']:resize缩小之后的坐标,类别,得分.[cls,x0,y0,x1,y1,score] + + # normVehicleBD : 正常车辆的contours。(正常车辆指的是countrous定点数>=6) + # unnormVehicle : resize缩小之后的异常车辆坐标,类别,得分.[cls,x0,y0,x1,y1,score] + count = 0 + for i in range(0, len(traffic_dict['vehicleCOOR']), 2): + mask = np.zeros(imgVehicle.shape[:2], dtype="uint8") + x0 = int(traffic_dict['vehicleCOOR'][i][0] * traffic_dict['ZoomFactor']['y']) + y0 = int(traffic_dict['vehicleCOOR'][i][1] * traffic_dict['ZoomFactor']['x']) + x1 = int(traffic_dict['vehicleCOOR'][i + 1][0] * traffic_dict['ZoomFactor']['y']) + y1 = int(traffic_dict['vehicleCOOR'][i + 1][1] * traffic_dict['ZoomFactor']['x']) + cv2.rectangle(mask, (x0, y0), (x1, y1), 255, -1, lineType=cv2.LINE_AA) + imgVehicle_masked = cv2.bitwise_and(imgVehicle, imgVehicle, mask=mask) + img2 = cv2.cvtColor(imgVehicle_masked, cv2.COLOR_BGR2GRAY) + contours2, hierarchy2 = cv2.findContours(img2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + if len(contours2) != 0: + if len(contours2) > 1: # 这里我通过比较同一检测框内各个contours对应的最小外接矩形的面积,来剔除那些存在干扰的contours,最终只保留一个contours + vehicleArea = [] # 存储vehicle的最小外接矩形的面积 + for j in range(len(contours2)): + rect = cv2.minAreaRect(contours2[j]) + vehicleArea.append(rect[1][0] * rect[1][1]) + maxAreaIndex = vehicleArea.index(max(vehicleArea)) + maxAreaContours = contours2[maxAreaIndex] + normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR = saveVehicle1(traffic_dict,maxAreaContours,normVehicleBD,normVehicle,count,i,unnormVehicle, normVehicleCOOR) + elif len(contours2) 
== 1: + normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR = saveVehicle1(traffic_dict,contours2[0],normVehicleBD,normVehicle,count,i,unnormVehicle, normVehicleCOOR) + else: + traffic_dict['det'][int(i / 2)] = traffic_dict['det'][int(i / 2)] + [0, 0.3, 999, -1, 3] + unnormVehicle.append(traffic_dict['det'][int(i / 2)]) + count += 1 + traffic_dict['det'] = normVehicle + return traffic_dict['det'], normVehicleBD, unnormVehicle, normVehicleCOOR + + +# 存储所有vehicle的信息,方法2 +def storageVehicle2(traffic_dict, normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR, imgVehicle): + img = cv2.cvtColor(imgVehicle, cv2.COLOR_BGR2GRAY) + count = 0 + for i in range(0, len(traffic_dict['vehicleCOOR']), 2): + row1 = int(traffic_dict['vehicleCOOR'][i][1] * traffic_dict['ZoomFactor']['x']) + row2 = int(traffic_dict['vehicleCOOR'][i + 1][1] * traffic_dict['ZoomFactor']['x']) + col1 = int(traffic_dict['vehicleCOOR'][i][0] * traffic_dict['ZoomFactor']['y']) + col2 = int(traffic_dict['vehicleCOOR'][i + 1][0] * traffic_dict['ZoomFactor']['y']) + + if row1 >= 2: + row1 = row1 - 2 + if row2 <= (traffic_dict['modelSize'][1] - 2): + row2 = row2 + 2 + if col1 >= 2: + col1 = col1 - 2 + if col2 <= (traffic_dict['modelSize'][0] - 2): + col2 = col2 + 2 + centerCOOR = (int((col1 + col2) / 2), int((row1 + row2) / 2)) + img1 = img[row1:row2, col1:col2] + up = np.zeros((20, (col2 - col1)), dtype='uint8') + left = np.zeros(((40 + row2 - row1), 20), dtype='uint8') + + img1 = np.concatenate((up, img1), axis=0) + img1 = np.concatenate((img1, up), axis=0) + + img1 = np.concatenate((left, img1), axis=1) + img2 = np.concatenate((img1, left), axis=1) + contours2, hierarchy = cv2.findContours(img2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + if len(contours2) != 0: + if len(contours2) > 1: + vehicleArea = [] # 存储vehicle的最小外接矩形的面积 + for j in range(len(contours2)): + rect = cv2.minAreaRect(contours2[j]) + vehicleArea.append(rect[1][0] * rect[1][1]) + maxAreaIndex = vehicleArea.index(max(vehicleArea)) 
+ maxAreaContours = contours2[maxAreaIndex] + normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR = saveVehicle2(traffic_dict,maxAreaContours,normVehicleBD,normVehicle,count,i,unnormVehicle,normVehicleCOOR,centerCOOR) + elif len(contours2) == 1: + normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR = saveVehicle2(traffic_dict,contours2[0],normVehicleBD,normVehicle,count,i,unnormVehicle,normVehicleCOOR,centerCOOR) + else: + traffic_dict['det'][int(i / 2)] = traffic_dict['det'][int(i / 2)] + [0, 0.3, 999, -1, 3] + unnormVehicle.append(traffic_dict['det'][int(i / 2)]) + count += 1 + traffic_dict['det'] = normVehicle + return traffic_dict['det'], normVehicleBD, unnormVehicle, normVehicleCOOR + + +# 计算角度和长宽比得分 +def angleRoundness(normVehicleBD, vehicleBox, vehicleWH, allRoadContent, traffic_dict, normVehicleCOOR, imgVehicle): + ##输出:vehicleBox, vehicleWH, traffic_dict['det'] + # vehicleBox--正常车辆通过contours得出的box,[ (x0,y0),(x1,y1),(x2,y2),(x3,y3)] + # vehicleWH--正常车辆通过contours得出的box,[ (w,h)] + # traffic_dict['det']--[[cls, x0, y0, x1, y1, score, 角度, 长宽比, 最小距离, max([角度得分, 长宽比得分, 最小距离得分]), 交通事故类别], ...] 
+ for i in range(len(normVehicleBD)): + ellipse = cv2.fitEllipse(normVehicleBD[i]) + vehicleAngle = 0 + if ellipse[2] >= 0 and ellipse[2] < 90: + vehicleAngle = 90 + ellipse[2] + elif ellipse[2] >= 90 and ellipse[2] < 180: + vehicleAngle = ellipse[2] - 90 + elif ellipse[2] == 180: + vehicleAngle = 90 + rect = cv2.minAreaRect(normVehicleBD[i]) + box = cv2.boxPoints(rect).astype(np.int32) + center = normVehicleCOOR[i] + vehicleBox.append(box) + vehicleWH.append(rect[1]) + roundness = min(rect[1]) / max(rect[1]) + y_min = np.min(box[:, 1]) + y_max = np.max(box[:, 1]) + if len(allRoadContent) != 0: + for j in range(len(allRoadContent)): + flag = cv2.pointPolygonTest(allRoadContent[j][0], center, False) + if flag >= 0: + roadVehicleAngle = abs(vehicleAngle - allRoadContent[j][1]) + traffic_dict['det'][i] = traffic_dict['det'][i] + [roadVehicleAngle, roundness, 999, [-1, -1, -1], 666] + if y_min > 0 and y_max < imgVehicle.shape[0]: # 过滤掉上下方被speedRoad的边界截断的vehicle + if roadVehicleAngle >= traffic_dict['roadVehicleAngle']: # 当道路同水平方向的夹角与车辆同水平方向的夹角的差值在15°和75°之间时,需要将车辆框出来 + if roadVehicleAngle > 90: + score1 = float((180 - roadVehicleAngle) / 90) + else: + score1 = float(roadVehicleAngle / 90) + traffic_dict['det'][i][9][0] = score1 + if roundness > traffic_dict['roundness']: + score2 = (min(rect[1]) - max(rect[1]) * traffic_dict['roundness']) / (max(rect[1]) * (1 - traffic_dict['roundness'])) + traffic_dict['det'][i][9][1] = score2 + break + else: + j += 1 + if len(traffic_dict['det'][i]) == 6: + traffic_dict['det'][i][9][1] = supplementInformation(traffic_dict, i, roundness, y_min, y_max, imgVehicle, rect) + else: + traffic_dict['det'][i][9][1] = supplementInformation(traffic_dict, i, roundness, y_min, y_max, imgVehicle, rect) + i += 1 + return vehicleBox, vehicleWH, traffic_dict['det'] + + +# 对于某一vehicle,以该vehicle的最小外接矩形的中心点为圆心O1,划定半径范围,求O1与半径范围内的其他vehicle的中心点之间的距离 +def vehicleDistance1(normVehicleCOOR, normVehicleBD, traffic_dict, vehicleWH): + if len(normVehicleCOOR) > 
1: + for b in range(len(normVehicleCOOR)): + contoursMinDistance = [] # 存储contours之间的最短距离 + tmp = normVehicleCOOR[b] + normVehicleCOOR[b] = normVehicleCOOR[0] + normVehicleCOOR[0] = tmp + targetContours = [] # 存储目标vehicle和中心点同目标车辆中心点之间的距离小于traffic_dict['radius']的vehicle的box + for c in range(1, len(normVehicleCOOR)): + if two_points_distance(normVehicleCOOR[0][0], normVehicleCOOR[0][1], normVehicleCOOR[c][0], normVehicleCOOR[c][1]) <= traffic_dict['radius']: + if normVehicleBD[b] not in targetContours: + targetContours.append(normVehicleBD[b]) + if c == b: + targetContours.append(normVehicleBD[0]) + else: + targetContours.append(normVehicleBD[c]) + if len(targetContours) != 0: + goalVehicleContour = np.squeeze(targetContours[0], 1) + for d in range(1, len(targetContours)): + elseVehicleContour = np.squeeze(targetContours[d], 1) + dist_arr = array_distance(goalVehicleContour, elseVehicleContour) + min_dist = dist_arr[dist_arr > 0].min() + contoursMinDistance.append(min_dist) + traffic_dict['det'][b][8] = min(contoursMinDistance) + if traffic_dict['det'][b][8] < min(vehicleWH[b]) * traffic_dict['vehicleFactor']: + score1 = 1 - traffic_dict['det'][b][8] / (min(vehicleWH[b]) * traffic_dict['vehicleFactor']) + traffic_dict['det'][b][9][2] = score1 + traffic_dict['det'][b][10] = judgeAccidentType(traffic_dict, b) + else: + traffic_dict['det'][b][8] = 999 + traffic_dict['det'][b][10] = judgeAccidentType(traffic_dict, b) + tmp = normVehicleCOOR[b] + normVehicleCOOR[b] = normVehicleCOOR[0] + normVehicleCOOR[0] = tmp + else: # 路上只有一辆车 + if max(traffic_dict['det'][0][9]) == traffic_dict['det'][0][9][0] and traffic_dict['det'][0][9][0] != -1: + traffic_dict['det'][0][10] = 0 + elif max(traffic_dict['det'][0][9]) == traffic_dict['det'][0][9][1] and traffic_dict['det'][0][9][1] != -1: + traffic_dict['det'][0][10] = 1 + else: + traffic_dict['det'][0][10] = 3 + return traffic_dict['det'] + + +# 计算vehicle的最小外接矩形中心点之间的距离和距离得分 +def vehicleDistance2(normVehicleCOOR, traffic_dict, 
vehicleWH): + if len(normVehicleCOOR) > 1: # 有多辆车 + for b in range(len(normVehicleCOOR)): + centerDistance = [] # 存储contours之间的最短距离 + tmp = normVehicleCOOR[b] + normVehicleCOOR[b] = normVehicleCOOR[0] + normVehicleCOOR[0] = tmp + for c in range(1, len(normVehicleCOOR)): + centerDistance.append(two_points_distance(normVehicleCOOR[0][0], normVehicleCOOR[0][1], normVehicleCOOR[c][0], normVehicleCOOR[c][1])) + smallestDistance = min(centerDistance) + index = centerDistance.index(smallestDistance) + traffic_dict['det'][b][8] = smallestDistance + if index == b - 1: # 序号0和b对应的vehicle + traffic_dict['det'] = distanceScore(vehicleWH, 0, b, smallestDistance, traffic_dict) + else: + traffic_dict['det'] = distanceScore(vehicleWH, index+1, b, smallestDistance, traffic_dict) + tmp = normVehicleCOOR[b] + normVehicleCOOR[b] = normVehicleCOOR[0] + normVehicleCOOR[0] = tmp + else: # 路上只有一辆车 + if max(traffic_dict['det'][0][9]) == traffic_dict['det'][0][9][0] and traffic_dict['det'][0][9][0] != -1: + traffic_dict['det'][0][10] = 0 + elif max(traffic_dict['det'][0][9]) == traffic_dict['det'][0][9][1] and traffic_dict['det'][0][9][1] != -1: + traffic_dict['det'][0][10] = 1 + else: + traffic_dict['det'][0][10] = 3 + return traffic_dict['det'] + + +def PostProcessing( traffic_dict): + """ + 对于字典traffic_dict中的各个键,说明如下: + RoadArea:speedRoad的最小外接矩形的面积 + roadVehicleAngle:判定发生交通事故的speedRoad与vehicle间的最小夹角 + vehicleCOOR:是一个列表,用于存储被检测出的vehicle的坐标(vehicle检测模型) + roundness:长宽比 ,vehicle的长与宽的比率,设置为0.7,若宽与长的比值大于0.7,则判定该vehicle发生交通事故 + ZoomFactor:存储的是图像在H和W方向上的缩放因子,其值小于1 + 'cls':类别号 + 'vehicleFactor':两辆车之间的安全距离被定义为:min(车辆1的宽,车辆2的宽) * vehicleFactor + 'radius':半径,以某一vehicle的最小外接矩形的中点为圆心,以radius为半径,划定范围,过滤车辆 + 'distanceFlag':开关。计算vehicle之间的距离时,可选择不同的函数 + 'vehicleFlag':开关。存储vehicle的信息时,可选择不同的函数 + 未发生交通事故时,得分为-1,”事故类型“为3 + 最终输出格式:[[cls, x0, y0, x1, y1, score, 角度, 长宽比, 最小距离, max([角度得分, 长宽比得分, 最小距离得分]), 交通事故类别], ...] 
+ 交通事故类别:0表示角度,1表示长宽比,2表示最短距离,3表示未发生交通事故 + """ + + det_cors = [] + #print('###line338:', traffic_dict['det']) + for bb in traffic_dict['det']: + det_cors.append((int(bb[1]), int(bb[2]))) + det_cors.append((int(bb[3]), int(bb[4]))) + traffic_dict['vehicleCOOR'] = det_cors + + + #testImageArray = testImageArray[:, :, 0] + #H, W = testImageArray.shape[0:2] # sourceImage的分辨率为1080x1920 + + traffic_dict['modelSize']=[640,360] + #traffic_dict['mask'] = cv2.resize(traffic_dict['mask'],(640,360)) + + mask = traffic_dict['mask'] + H, W = mask.shape[0:2] + #(640, 360) 720 1280 (720, 1280) + ####line361: (1920, 1080) 720 1280 (720, 1280) + + ###line361: [640, 360] 360 640 (360, 640) + + + + #print('###line361:',traffic_dict['modelSize'], H,W ,mask.shape) + + scaleH = traffic_dict['modelSize'][1] / H # 自适应调整缩放比例 + scaleW = traffic_dict['modelSize'][0] / W + traffic_dict['ZoomFactor'] = {'x': scaleH, 'y': scaleW} + new_hw = [int(H * scaleH), int(W * scaleW)] + + mask = cv2.resize(mask, (new_hw[1], new_hw[0])) + if len(mask.shape) == 3: + mask = mask[:, :, 0] + + t1 = time.time() + normVehicleBD = [] # 存储一副图像中合格vehicle的contours,合格vehicle,即:contours中的顶点数大于等于6 + imgRoad = mask.copy() + imgVehicle = mask.copy() + imgRoad[imgRoad == 2] = 0 # 将vehicle过滤掉,只包含背景和speedRoad + imgVehicle[imgVehicle == 1] = 0 # 将speedRoad过滤掉,只包含背景和vehicle + imgRoad = cv2.cvtColor(np.uint8(imgRoad), cv2.COLOR_RGB2BGR) # 道路 + imgVehicle = cv2.cvtColor(np.uint8(imgVehicle), cv2.COLOR_RGB2BGR) # 车辆 + + t2 = time.time() + img1 = cv2.cvtColor(imgRoad, cv2.COLOR_BGR2GRAY) + contours, hierarchy = cv2.findContours(img1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + t3 = time.time() + + allRoadContent = [] # 存放所有的speedRoad信息,单个speedRoad的信息为:[cnt, speedRoadAngle, rect[1]] + vehicleBox = [] # 存储合格vehicle的box参数,合格vehicle,即:contours顶点个数大于等于6 + vehicleWH = [] # 存储合格vehicle的宽高 + normVehicle = [] # 存储合格vehicle的信息 + unnormVehicle = [] # 存储不合格vehicle的信息,不合格vehicle,即:contours顶点个数小于6 + normVehicleCOOR = [] # 存储合格vehicle的中心点坐标 + 
allRoadContent = storageRoad(contours, allRoadContent, traffic_dict) + t4 = time.time() + + # 开关。存储vehicle的信息时,可选择不同的函数 + if traffic_dict['vehicleFlag'] == True: + traffic_dict['det'], normVehicleBD, unnormVehicle, normVehicleCOOR = storageVehicle1(traffic_dict, normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR, imgVehicle) + #所有车辆的[cls,x0,y0,x1,y1,score] + else: + traffic_dict['det'], normVehicleBD, unnormVehicle, normVehicleCOOR = storageVehicle2(traffic_dict, normVehicleBD, normVehicle, unnormVehicle, normVehicleCOOR, imgVehicle) + t5 = time.time() + if len(normVehicleBD) != 0: + t6 = time.time() + vehicleBox, vehicleWH, traffic_dict['det'] = angleRoundness(normVehicleBD, vehicleBox, vehicleWH, allRoadContent, traffic_dict, normVehicleCOOR, imgVehicle) + t7 = time.time() + # 开关。计算vehicle之间的距离时,可选择不同的函数 + if traffic_dict['distanceFlag'] == True: + traffic_dict['det'] = vehicleDistance1(normVehicleCOOR, normVehicleBD, traffic_dict, vehicleWH) + else: + traffic_dict['det'] = vehicleDistance2(normVehicleCOOR, traffic_dict, vehicleWH) + t8 = time.time() + targetList = traffic_dict['det'] + # print("line393", targetList) + for i in range(len(targetList)): + targetList[i][9] = max(targetList[i][9]) + if len(unnormVehicle) != 0: + targetList = targetList + unnormVehicle + t9 = time.time() + # print("line462", targetList) # 目标对象list, [[cls, x0, y0, x1, y1, score, 角度, 长宽比, 最小距离, max([角度得分, 长宽比得分, 最小距离得分]), 类别], ...] 
+ ruleJudge='angle-rundness-distance:%.1f'%( get_ms(t9,t6) ) + else: + targetList = unnormVehicle + ruleJudge = 'No angle-rundness-distance judging' + t10 = time.time() + time_infos = '---test---nothing---' + #time_infos = 'postTime:%.2f (分割时间:%.2f, findContours:%.2f, ruleJudge:%.2f, storageRoad:%.2f, storageVehicle:%.2f, angleRoundScore:%.2f, vehicleDistance:%.2f, mergeList:%.2f)' % ( + # get_ms(t10, t1), get_ms(t2, t1), get_ms(t3, t2), get_ms(t10, t3), get_ms(t4, t3), get_ms(t5, t4), get_ms(t7, t6), get_ms(t8, t7), get_ms(t9, t8)) + time_infos = 'postTime:%.2f , ( findContours:%.1f , carContourFilter:%.1f, %s )' %( get_ms(t10,t1), get_ms(t4,t1), get_ms(t5,t4),ruleJudge) + return targetList, time_infos + + +def TrafficPostProcessing(traffic_dict): + """ + 对于字典traffic_dict中的各个键,说明如下: + RoadArea:speedRoad的最小外接矩形的面积 + spillsCOOR:是一个列表,用于存储被检测出的spill的坐标(spill检测模型) + ZoomFactor:存储的是图像在H和W方向上的缩放因子,其值小于1 + 'cls':类别号 + """ + traffic_dict['modelSize'] = [640, 360] + mask = traffic_dict['mask'] + H, W = mask.shape[0:2] + scaleH = traffic_dict['modelSize'][1] / H # 自适应调整缩放比例 + scaleW = traffic_dict['modelSize'][0] / W + traffic_dict['ZoomFactor'] = {'x': scaleH, 'y': scaleW} + new_hw = [int(H * scaleH), int(W * scaleW)] + t0 = time.time() + mask = cv2.resize(mask, (new_hw[1], new_hw[0])) + if len(mask.shape) == 3: + mask = mask[:, :, 0] + imgRoad = mask.copy() + imgRoad[imgRoad == 2] = 0 # 将vehicle过滤掉,只包含背景和speedRoad + imgRoad = cv2.cvtColor(np.uint8(imgRoad), cv2.COLOR_RGB2BGR) # 道路 + imgRoad = cv2.cvtColor(imgRoad, cv2.COLOR_BGR2GRAY) # + contours, thresh = cv2.threshold(imgRoad, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t1 = time.time() + + '''新增模块::如果路面为空,则返回原图、无抛洒物等。''' + if contour_info == []: 
+ # final_img=_img_cv + timeInfos = 'road is empty findContours:%.1f'%get_ms(t0,t1) + + return [], timeInfos + else: + # print(contour_info[0]) + max_contour = contour_info[0][0] + max_contour[:,:,0] = (max_contour[:,:,0] / scaleW).astype(np.int32) # contours恢复原图尺寸 + max_contour[:,:,1] = (max_contour[:,:,1] / scaleH).astype(np.int32) # contours恢复原图尺寸 + + '''3、preds中spillage,通过1中路面过滤''' + init_spillage_filterroad = traffic_dict['det'] + final_spillage_filterroad = [] + for i in range(len(init_spillage_filterroad)): + flag = xyxy_coordinate(init_spillage_filterroad[i],max_contour) + if flag == 1: + final_spillage_filterroad.append(init_spillage_filterroad[i]) + + t2 = time.time() + timeInfos = 'findContours:%.1f , carContourFilter:%.1f' % (get_ms(t0, t1), get_ms(t2, t1)) + + return final_spillage_filterroad, timeInfos # 返回最终绘制的结果图、最高速搞萨物(坐标、类别、置信度) + +def tracfficAccidentMixFunction(preds,seg_pred_mulcls,pars): + tjime0=time.time() + roadIou = pars['roadIou'] if 'roadIou' in pars.keys() else 0.5 + preds = np.array(preds) + #area_factors= np.array([np.sum(seg_pred_mulcls[int(x[2]):int(x[4]), int(x[1]):int(x[3])] )*1.0/(1.0*(x[3]-x[1])*(x[4]-x[2])+0.00001) for x in preds] ) + area_factors= np.array([np.sum(seg_pred_mulcls[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in preds] )#2023.08.03修改数据格式 + water_flag = np.array(area_factors>roadIou) + #print('##line936:',preds ) + dets = preds[water_flag]##如果是水上目标,则需要与水的iou超过0.1;如果是岸坡目标,则直接保留。 + dets = dets.tolist() + + + + #label_info = get_label_info(pars['label_csv']) + imH,imW = seg_pred_mulcls.shape[0:2] + seg_pred = cv2.resize(seg_pred_mulcls,( pars['modelSize'][0] , pars['modelSize'] [1]) ) + mmH,mmW = seg_pred.shape[0:2] + + fx=mmW/imW;fy=mmH/imH + det_coords=[] + + det_coords_original=[] + for box in dets: + #b_0 = box[1:5];b_0.insert(0,box[0]);b_0.append(box[5] ) + b_0 = box[0:4];b_0.insert(0,box[5]);b_0.append(box[4]) + det_coords_original.append( box ) + if int(box[5]) 
!= pars['CarId'] and int(box[5]) != pars['CthcId']: continue + det_coords.append(b_0) + #print('##line957:',det_coords_original ) + + pars['ZoomFactor']={'x':mmW/imW ,'y':mmH/imH} + + #pars['mask']=seg_pred; + pars['mask']=seg_pred_mulcls; + + + pars['det']=deepcopy(det_coords) + #pars['label_info']=label_info + tlist = list(pars.keys()); tlist.sort() + + if len(det_coords)> 0: + #print('###line459:',pars['mask'].shape, pars['det']) + list8,time_infos = PostProcessing(pars) + #print('###line461:',list8 ) + Accident_results = np.array(list8,dtype=object) + acc_det=[] + #[1.0, 1692.0, 169.0, 1803.0, 221.0, 0.494875431060791, 30, 0.5, 3.0, 0.3, 0] + #[0 , 1 , 2 , 3 , 4 , 5 , 6, 7 , 8 , 9 , 10] + for bpoints in list8: + if bpoints[9]>pars['confThres']: + xyxy=bpoints[1:5];xyxy=[int(x) for x in xyxy] + + cls=pars['cls'];conf=bpoints[9]; + box_acc = [*xyxy,conf,cls] + acc_det.append(box_acc) + #if cls in allowedList: + # p_result[1] = draw_painting_joint(xyxy,p_result[1],label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font,socre_location="leftBottom") + #print('###line475:',acc_det ) + #去掉被定为事故的车辆 + carCorslist = [ [ int(x[0]),int(x[1]), int(x[2]), int(x[3]) ] for x in det_coords_original ] + #print('##line81:',det_coords_original ) + accidentCarIndexs = [ carCorslist.index( [ int(x[0]),int(x[1]), int(x[2]), int(x[3]) ] ) for x in acc_det ] + accidentCarIndexsKeep = set(list(range(len(det_coords_original)))) - set(accidentCarIndexs) + det_coords_original_tmp = [ det_coords_original[x] for x in accidentCarIndexsKeep ] + det_coords_original = det_coords_original_tmp + #print('##line85:',det_coords_original ) + det_coords_original.extend(acc_det) + #4.0, 961.0, 275.0, 1047.0, 288.0, 0.26662659645080566, 0.0, 0.0 + #0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 + #det_coords_original =[ [ *x[1:6], x[0],*x[6:8] ] for x in det_coords_original] + else: + time_infos=" no tracfficAccidentMix process" + + #p_result[2]= deepcopy(det_coords_original) + return 
deepcopy(det_coords_original),time_infos +def tracfficAccidentMixFunction_N(predList,pars): + preds,seg_pred_mulcls = predList[0:2] + return tracfficAccidentMixFunction(preds,seg_pred_mulcls,pars) + +def mixTraffic_postprocess(preds, seg_pred_mulcls,pars=None): + '''输入:路面上的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断抛洒物是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终路面上物体(坐标、类别、置信度), + ''' + '''1、最大分隔路面作为判断依据''' + roadIou = pars['roadIou'] if 'roadIou' in pars.keys() else 0.5 + preds = np.array(preds) + area_factors = np.array([np.sum(seg_pred_mulcls[int(x[1]):int(x[3]), int(x[0]):int(x[2])]) * 1.0 / ( + 1.0 * (x[2] - x[0]) * (x[3] - x[1]) + 0.00001) for x in preds]) # 2023.08.03修改数据格式 + water_flag = np.array(area_factors > roadIou) + dets = preds[water_flag] ##如果是水上目标,则需要与水的iou超过0.1;如果是岸坡目标,则直接保留。 + dets = dets.tolist() + + imH, imW = seg_pred_mulcls.shape[0:2] + seg_pred = cv2.resize(seg_pred_mulcls, (pars['modelSize'][0], pars['modelSize'][1])) + mmH, mmW = seg_pred.shape[0:2] + + fx = mmW / imW; + fy = mmH / imH + det_coords = [] + + for box in dets: + if int(box[5]) != pars['cls']: continue + det_coords.append(box) + + pars['ZoomFactor'] = {'x': mmW / imW, 'y': mmH / imH} + pars['mask'] = seg_pred_mulcls; + + pars['det'] = deepcopy(det_coords) + + if len(det_coords) > 0: + # print('###line459:',pars['mask'].shape, pars['det']) + return TrafficPostProcessing(pars) + + else: + return [], 'no spills find in road' \ No newline at end of file diff --git a/segutils/trtUtils.py b/segutils/trtUtils.py new file mode 100644 index 0000000..ce0fa55 --- /dev/null +++ b/segutils/trtUtils.py @@ -0,0 +1,370 @@ +import argparse +import os +import sys +from pathlib import Path + +import cv2 +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +from collections import OrderedDict, namedtuple +import numpy as np +import time +import tensorrt as trt +#import pycuda.driver as cuda +def trt_version(): + return trt.__version__ + +def torch_device_from_trt(device): + if 
def torch_device_from_trt(device):
    """Map a TensorRT tensor location to the matching torch.device.

    NOTE(review): the `def` line was cut off in the mangled paste and has been
    reconstructed from the call sites below — confirm signature.
    """
    if device == trt.TensorLocation.DEVICE:
        return torch.device("cuda")
    elif device == trt.TensorLocation.HOST:
        return torch.device("cpu")
    else:
        # BUG FIX: the original `return TypeError(...)` handed the exception
        # object back to the caller instead of raising it.
        raise TypeError("%s is not supported by torch" % device)


def torch_dtype_from_trt(dtype):
    """Map a TensorRT dtype to the matching torch dtype."""
    if dtype == trt.int8:
        return torch.int8
    elif trt_version() >= '7.0' and dtype == trt.bool:
        return torch.bool
    elif dtype == trt.int32:
        return torch.int32
    elif dtype == trt.float16:
        return torch.float16
    elif dtype == trt.float32:
        return torch.float32
    else:
        raise TypeError("%s is not supported by torch" % dtype)


class TRTModule(torch.nn.Module):
    """torch.nn.Module wrapper around a deserialized TensorRT engine.

    A fresh execution context is created on every forward() call; output
    tensors are allocated with torch and bound to the engine by raw pointer.
    """

    def __init__(self, engine=None, input_names=None, output_names=None):
        super(TRTModule, self).__init__()
        self.engine = engine
        # The execution context is created lazily inside forward() rather than
        # once here (the original kept the eager version commented out).
        self.input_names = input_names
        self.output_names = output_names

    def forward(self, *inputs):
        with self.engine.create_execution_context() as context:
            batch_size = inputs[0].shape[0]
            bindings = [None] * (len(self.input_names) + len(self.output_names))

            # Allocate one torch tensor per output binding and register its
            # device pointer with the engine.
            outputs = [None] * len(self.output_names)
            for i, output_name in enumerate(self.output_names):
                idx = self.engine.get_binding_index(output_name)  # binding name -> index
                dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
                shape = (batch_size,) + tuple(self.engine.get_binding_shape(idx))
                device = torch_device_from_trt(self.engine.get_location(idx))
                output = torch.empty(size=shape, dtype=dtype, device=device)
                outputs[i] = output
                print('###line65:', output_name, i, idx, dtype, shape)
                bindings[idx] = output.data_ptr()  # bind output data pointer

            for i, input_name in enumerate(self.input_names):
                idx = self.engine.get_binding_index(input_name)
                # Should be inputs[i] for multiple inputs; a single image is
                # reused for every input binding here (original behavior).
                bindings[idx] = inputs[0].contiguous().data_ptr()

            context.execute_v2(bindings)  # synchronous inference

        if len(outputs) == 1:
            outputs = outputs[0]
        # NOTE(review): with a single output binding this returns the first
        # batch element of that tensor — preserved as-is; verify callers.
        return outputs[0]


def get_ms(t1, t0):
    """Return the elapsed time t1 - t0 in milliseconds."""
    return (t1 - t0) * 1000.0


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code,
    # i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']


def file_size(path):
    """Return file (or recursive directory) size in MB; 0.0 if missing."""
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / 1E6
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
    else:
        return 0.0


def toONNX(seg_model, onnxFile, inputShape=(1, 3, 360, 640), device=torch.device('cuda:0'), dynamic=False):
    """Export a segmentation model to ONNX after a smoke-test forward pass."""
    import onnx

    im = torch.rand(inputShape).to(device)
    seg_model.eval()
    out = seg_model(im)
    print('###test model infer example over ####')
    train = False
    # NOTE(review): this line forces dynamic=False, silently overriding the
    # `dynamic` parameter — preserved as-is, confirm whether intentional.
    dynamic = False
    opset = 11
    print('####begin to export to onnx')

    torch.onnx.export(seg_model, im, onnxFile, opset_version=opset,
                      training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
                      do_constant_folding=not train,
                      input_names=['images'],
                      output_names=['output'],
                      dynamic_axes={
                          'images': {0: 'batch_size', 2: 'in_width', 3: 'int_height'},
                          'output': {0: 'batch_size', 2: 'out_width', 3: 'out_height'}} if dynamic else None
                      )

    print('output onnx file:', onnxFile)


def ONNXtoTrt(onnxFile, trtFile):
    """Build a (FP16 when supported) TensorRT engine from an ONNX file."""
    import tensorrt as trt
    time0 = time.time()
    half = True; verbose = True; workspace = 4; prefix = colorstr('TensorRT:')
    f = trtFile
    logger = trt.Logger(trt.Logger.INFO)
    if verbose:
        logger.min_severity = trt.Logger.Severity.VERBOSE

    builder = trt.Builder(logger)
    config = builder.create_builder_config()
    config.max_workspace_size = workspace * 1 << 30

    flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(flag)
    parser = trt.OnnxParser(network, logger)
    if not parser.parse_from_file(str(onnxFile)):
        # BUG FIX: original interpolated the undefined name `onnx` here,
        # raising NameError instead of the intended RuntimeError.
        raise RuntimeError(f'failed to load ONNX file: {onnxFile}')

    inputs = [network.get_input(i) for i in range(network.num_inputs)]
    outputs = [network.get_output(i) for i in range(network.num_outputs)]
    print(f'{prefix} Network Description:')
    for inp in inputs:
        print(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
    for out in outputs:
        print(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')

    half &= builder.platform_has_fast_fp16
    print(f'{prefix} building FP{16 if half else 32} engine in {f}')
    if half:
        config.set_flag(trt.BuilderFlag.FP16)
    with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
        t.write(engine.serialize())
    print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)')
    time1 = time.time()
    print('output trtfile from ONNX, time:%.4f s ,' % (time1 - time0), trtFile)


def segPreProcess_image(image, modelSize=(640, 360), mean=(0.335, 0.358, 0.332),
                        std=(0.141, 0.138, 0.143), numpy=False, RGB_convert_first=False):
    """Resize + normalize an HWC uint8 image; return numpy CHW or a 1CHW torch tensor."""
    time0 = time.time()
    if RGB_convert_first:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    time1 = time.time()
    image = cv2.resize(image, modelSize)
    time2 = time.time()
    image = image.astype(np.float32)
    image /= 255.0
    time3 = time.time()
    image[:, :, 0] -= mean[0]
    image[:, :, 1] -= mean[1]
    image[:, :, 2] -= mean[2]
    time4 = time.time()
    image[:, :, 0] /= std[0]
    image[:, :, 1] /= std[1]
    image[:, :, 2] /= std[2]
    if not RGB_convert_first:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = np.transpose(image, (2, 0, 1))
    time5 = time.time()
    print('RG convert:%.1f resize:%1f ,normalize:%.1f ,Demean:%.1f ,DeVar:%.1f ' % (
        get_ms(time1, time0), get_ms(time2, time1), get_ms(time3, time2),
        get_ms(time4, time3), get_ms(time5, time4)), numpy, RGB_convert_first)
    if numpy:
        return image
    else:
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)
        return image


def segPreProcess_image_torch(image, modelSize=(640, 360), mean=(0.335, 0.358, 0.332),
                              std=(0.141, 0.138, 0.143), numpy=False,
                              RGB_convert_first=False, device='cuda:0'):
    """Numpy HWC in, normalized 1CHW torch tensor (on `device`) out.

    Normalization is algebraically refactored so that division/subtraction
    happen on the GPU tensor: x/ (std*255) - mean/std == (x/255 - mean)/std.
    """
    t1 = torch.from_numpy(np.array(std)) * 255.0
    t2 = torch.from_numpy(np.array(mean) / np.array(std))

    time0 = time.time()
    if RGB_convert_first:
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    time1 = time.time()
    image = cv2.resize(image, modelSize)
    image = np.transpose(image, (2, 0, 1))
    time2 = time.time()
    image = torch.from_numpy(image).float().to(device)
    time3 = time.time()
    image = image.unsqueeze(0)

    image[:, 0, :, :] /= t1[0]; image[:, 1, :, :] /= t1[1]; image[:, 2, :, :] /= t1[2]
    time4 = time.time()
    image[:, 0, :, :] -= t2[0]; image[:, 1, :, :] -= t2[1]; image[:, 2, :, :] -= t2[2]
    time5 = time.time()
    return image


def yolov5Trtforward(model, im):
    """Run a yolov5 TensorRT engine on tensor `im`; returns one output binding.

    Assumes binding 0 is the input and the remaining bindings are outputs.
    """
    namess = [model.get_binding_name(index) for index in range(model.num_bindings)]
    input_names = [namess[0]]; output_names = namess[1:]

    with model.create_execution_context() as context:
        batch_size = im.shape[0]
        bindings = [None] * (len(input_names) + len(output_names))

        # Allocate output tensors and bind their pointers.
        outputs = [None] * len(output_names)
        for i, output_name in enumerate(output_names):
            idx = model.get_binding_index(output_name)
            dtype = torch_dtype_from_trt(model.get_binding_dtype(idx))
            shape = tuple(model.get_binding_shape(idx))
            device = torch_device_from_trt(model.get_location(idx))
            output = torch.empty(size=shape, dtype=dtype, device=device)
            outputs[i] = output
            bindings[idx] = output.data_ptr()

        for i, input_name in enumerate(input_names):
            idx = model.get_binding_index(input_name)
            bindings[idx] = im.contiguous().data_ptr()
        context.execute_v2(bindings)

    # NOTE(review): hard-coded 4th output — presumably the merged detection
    # tensor of the yolov5 engine; confirm against the engine layout.
    return outputs[3]


def segTrtForward(engine, inputs, contextFlag=False):
    """Run a segmentation TensorRT engine; optionally reuse a caller context."""
    if not contextFlag:
        context = engine.create_execution_context()
    else:
        context = contextFlag

    input_names = ['images']; output_names = ['output']
    batch_size = inputs[0].shape[0]
    bindings = [None] * (len(input_names) + len(output_names))

    # Allocate output tensors and bind their pointers.
    outputs = [None] * len(output_names)
    for i, output_name in enumerate(output_names):
        idx = engine.get_binding_index(output_name)
        dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))
        shape = (batch_size,) + tuple(engine.get_binding_shape(idx))
        device = torch_device_from_trt(engine.get_location(idx))
        output = torch.empty(size=shape, dtype=dtype, device=device)
        outputs[i] = output
        bindings[idx] = output.data_ptr()

    for i, input_name in enumerate(input_names):
        idx = engine.get_binding_index(input_name)
        # Should be inputs[i] for multiple inputs; a single image is reused
        # for every input binding here (original behavior).
        bindings[idx] = inputs[0].contiguous().data_ptr()
    context.execute_v2(bindings)  # synchronous inference

    if len(outputs) == 1:
        outputs = outputs[0]
    return outputs[0]


def OcrTrtForward(engine, inputs, contextFlag=False):
    """Run an OCR TensorRT engine; returns (output, timing string)."""
    t0 = time.time()
    if not contextFlag:
        context = engine.create_execution_context()
    else:
        context = contextFlag

    input_names = ['images']; output_names = ['output']
    batch_size = inputs[0].shape[0]
    bindings = [None] * (len(input_names) + len(output_names))
    t1 = time.time()
    # Allocate output tensors and bind their pointers.
    outputs = [None] * len(output_names)
    for i, output_name in enumerate(output_names):
        idx = engine.get_binding_index(output_name)
        dtype = torch_dtype_from_trt(engine.get_binding_dtype(idx))
        shape = (batch_size,) + tuple(engine.get_binding_shape(idx))
        device = torch_device_from_trt(engine.get_location(idx))
        output = torch.empty(size=shape, dtype=dtype, device=device)
        outputs[i] = output
        bindings[idx] = output.data_ptr()
    t2 = time.time()

    for i, input_name in enumerate(input_names):
        idx = engine.get_binding_index(input_name)
        bindings[idx] = inputs[0].contiguous().data_ptr()
    t3 = time.time()
    context.execute_v2(bindings)  # synchronous inference
    t4 = time.time()

    if len(outputs) == 1:
        outputs = outputs[0]
    outstr = 'create Context:%.2f alloc memory:%.2f prepare input:%.2f conext infer:%.2f, total:%.2f' % (
        (t1 - t0) * 1000, (t2 - t1) * 1000, (t3 - t2) * 1000, (t4 - t3) * 1000, (t4 - t0) * 1000)
    return outputs[0], outstr


def segtrtEval(engine, image_array0, par={'modelSize': (640, 360), 'nclass': 2, 'predResize': True,
                                          'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
                                          'numpy': False, 'RGB_convert_first': True}):
    """Full segmentation pipeline: preprocess, trt forward, argmax, resize back."""
    time0_0 = time.time()
    H, W, C = image_array0.shape
    img_input = segPreProcess_image_torch(image_array0, modelSize=par['modelSize'], mean=par['mean'],
                                          std=par['std'], numpy=par['numpy'],
                                          RGB_convert_first=par['RGB_convert_first'])
    img_input = img_input.to('cuda:0')
    time1_0 = time.time()
    pred = segTrtForward(engine, [img_input])
    time2_0 = time.time()
    pred = torch.argmax(pred, dim=1).cpu().numpy()[0]
    time3_0 = time.time()
    if 'predResize' in par.keys():
        if par['predResize']:
            pred = cv2.resize(pred.astype(np.uint8), (W, H))
    else:
        pred = cv2.resize(pred.astype(np.uint8), (W, H))
    time4_0 = time.time()
    segInfoStr = 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f ' % (
        get_ms(time1_0, time0_0), get_ms(time2_0, time1_0), get_ms(time3_0, time2_0),
        get_ms(time4_0, time3_0), get_ms(time4_0, time0_0))
    return pred, segInfoStr
#
# Copyright 1993-2020 NVIDIA Corporation.  All rights reserved.
#
# NOTICE TO LICENSEE:
#
# This source code and/or documentation ("Licensed Deliverables") are
# subject to NVIDIA intellectual property rights under U.S. and
# international Copyright laws.
#
# These Licensed Deliverables contained herein is PROPRIETARY and
# CONFIDENTIAL to NVIDIA and is being provided under the terms and
# conditions of a form of NVIDIA software license agreement by and
# between NVIDIA and Licensee ("License Agreement") or electronically
# accepted by Licensee.  Notwithstanding any terms or conditions to
# the contrary in the License Agreement, reproduction or disclosure
# of the Licensed Deliverables to any third party without the express
# written consent of NVIDIA is prohibited.
#
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS
# PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THESE LICENSED DELIVERABLES.
#
# U.S. Government End Users.  These Licensed Deliverables are a
# "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
# 1995), consisting of "commercial computer software" and "commercial
# computer software documentation" as such terms are used in 48
# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
# only as a commercial end item.  Consistent with 48 C.F.R.12.212 and
# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
# U.S. Government End Users acquire the Licensed Deliverables with
# only those rights set forth herein.
#
# Any use of the Licensed Deliverables in individual and commercial
# software must include, in the user documentation and internal
# comments to the code, the above Disclaimer and U.S. Government End
# Users Notice.
#


import argparse
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import torch
import tensorrt as trt


import time
import onnx
import onnxruntime
import os, sys, cv2
#from model.u2net import U2NET

# Example u2net export targets and the dynamic-axes configurations they use.
model_names = ['u2net.onnx', 'u2net_dynamic_batch.onnx', 'u2net_dynamic_hw.onnx', 'u2net_dynamic_batch-hw.onnx']
dynamic_batch = {'input': {0: 'batch'},
                 'output0': {0: 'batch'},
                 'output1': {0: 'batch'},
                 'output2': {0: 'batch'},
                 'output3': {0: 'batch'},
                 'output4': {0: 'batch'},
                 'output5': {0: 'batch'},
                 'output6': {0: 'batch'}}
dynamic_hw = {'input': {2: 'H', 3: 'W'},
              'output0': {2: 'H', 3: 'W'},
              'output1': {2: 'H', 3: 'W'},
              'output2': {2: 'H', 3: 'W'},
              'output3': {2: 'H', 3: 'W'},
              'output4': {2: 'H', 3: 'W'},
              'output5': {2: 'H', 3: 'W'},
              'output6': {2: 'H', 3: 'W'}}
dynamic_batch_hw = {'input': {0: 'batch', 2: 'H', 3: 'W'},
                    'output0': {0: 'batch', 2: 'H', 3: 'W'},
                    'output1': {0: 'batch', 2: 'H', 3: 'W'},
                    'output2': {0: 'batch', 2: 'H', 3: 'W'},
                    'output3': {0: 'batch', 2: 'H', 3: 'W'},
                    'output4': {0: 'batch', 2: 'H', 3: 'W'},
                    'output5': {0: 'batch', 2: 'H', 3: 'W'},
                    'output6': {0: 'batch', 2: 'H', 3: 'W'}}
dynamic_ = [None, dynamic_batch, dynamic_hw, dynamic_batch_hw]

TRT_LOGGER = trt.Logger()


def pth2onnx(pth_model, onnx_name, input_shape=(1, 3, 512, 512), input_names=['input'],
             output_names=['output'], dynamix_axis=None):
    """Export a loaded PyTorch model to ONNX and sanity-check the result.

    Args:
        pth_model: model instance with its weights already loaded.
        onnx_name: destination path of the ONNX file.
        input_shape: suggested (static) model input size.
        input_names: list of input tensor names; multiple inputs allowed.
        output_names: list of output tensor names; multiple outputs allowed.
            (DOC FIX: the original comment mislabelled this as input names.)
        dynamix_axis: None for a fully static model, or a dict such as
            {'input': {0: 'batch', 2: 'H', 3: 'W'}, 'output': {0: 'batch', 2: 'H', 3: 'W'}}
            marking which B/H/W dimensions of each tensor are dynamic.
    """
    print('[I] beg to converting pth to onnx ...... ', dynamix_axis)
    input_tensor = torch.ones(input_shape)
    if next(pth_model.parameters()).is_cuda:
        input_tensor = input_tensor.to('cuda:0')
    with torch.no_grad():
        torch.onnx.export(pth_model,
                          input_tensor,
                          onnx_name,
                          opset_version=11,
                          input_names=input_names,
                          do_constant_folding=True,
                          output_names=output_names,
                          dynamic_axes=dynamix_axis)
    onnx_model = onnx.load(onnx_name)
    try:
        onnx.checker.check_model(onnx_model)
    except Exception as e:
        print('[Error] model incorrect:', e)
    else:
        print('[I] conver to onnx over in ', onnx_name)
    print('')


def onnx_inference(onnx_input, model_name,
                   outputName=['output0', 'output1', 'output2', 'output3', 'output4', 'output5', 'output6']):
    """Run an ONNX model with onnxruntime; returns the outputs or None on failure."""
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    print('8' * 10, ' line125:', model_name)
    onnx_session = onnxruntime.InferenceSession(model_name, providers=providers)
    try:
        onnx_output = onnx_session.run(outputName, onnx_input)
    except Exception as e:
        onnx_output = None
        print(e)
    return onnx_output


def onnx2engine(onnx_file_path, engine_file_path, input_shape=[1, 3, 512, 512], half=True,
                max_batch_size=1, input_profile_shapes=[None, None, None]):
    """Build a TensorRT engine file from an ONNX model.

    Args:
        onnx_file_path: source ONNX path.
        engine_file_path: destination TensorRT engine path.
        input_shape: default model input size, e.g. [1,3,512,512];
            use -1 for dynamic dimensions, e.g. [1,3,-1,-1].
        half: build with FP16 when the platform supports it (default True).
        max_batch_size: maximum batch size at inference (default 1).
        input_profile_shapes: for dynamic input, the [min, opt, max] shapes,
            e.g. (1,3,512,512), (1,3,1024,1024), (1,3,2048,2048);
            input_shape must then contain -1.
    """
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    config = builder.create_builder_config()
    parser = trt.OnnxParser(network, TRT_LOGGER)
    runtime = trt.Runtime(TRT_LOGGER)

    # Builder scratch memory cap (TensorRT-specific, purely an optimization
    # budget); raise it if the build runs out of workspace.
    config.max_workspace_size = 1 << 30
    if builder.platform_has_fast_fp16 and half:
        config.set_flag(trt.BuilderFlag.FP16)
    builder.max_batch_size = max_batch_size  # inference batch must stay <= max_batch_size

    # Parse the model file.
    if not os.path.exists(onnx_file_path):
        print(f'onnx file {onnx_file_path} not found,please run torch_2_onnx.py first to generate it')
        exit(0)
    print(f'Loading ONNX file from path {onnx_file_path}...')
    with open(onnx_file_path, 'rb') as model:
        print('Beginning ONNX file parsing')
        if not parser.parse(model.read()):
            print('ERROR:Failed to parse the ONNX file')
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            return None

    # Static input setting.
    network.get_input(0).shape = input_shape
    # Dynamic input setting: dynamic shapes are declared via a builder
    # optimization profile, one profile per dynamic input.
    if -1 in input_shape:
        profile = builder.create_optimization_profile()
        # min / opt / max shapes — inference inputs must fall inside this range.
        profile.set_shape(network.get_input(0).name, input_profile_shapes[0],
                          input_profile_shapes[1], input_profile_shapes[2])
        config.add_optimization_profile(profile)

    print('Completed parsing the ONNX file')
    print(f'Building an engine from file {onnx_file_path}; this may take a while...')

    t0 = time.time()
    engine = builder.build_engine(network, config)
    # BUG FIX: build_engine returns None on failure; the original then crashed
    # with AttributeError on engine.serialize(). Fail with a clear error.
    if engine is None:
        raise RuntimeError(f'TensorRT engine build failed for {onnx_file_path}')

    with open(engine_file_path, 'wb') as f:
        f.write(engine.serialize())
    t1 = time.time()
    print('Completed creating Engine:%s, %.1f' % (engine_file_path, t1 - t0))


try:
    # Sometimes python2 does not understand FileNotFoundError
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError


def GiB(val):
    """Convert a GiB count to bytes."""
    return val * 1 << 30


def add_help(description):
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    args, _ = parser.parse_known_args()


def find_sample_data(description="Runs a TensorRT Python sample", subfolder="", find_files=[]):
    '''
    Parses sample arguments.

    Args:
        description (str): Description of the sample.
        subfolder (str): The subfolder containing data relevant to this sample
        find_files (str): A list of filenames to find. Each filename will be replaced with an absolute path.

    Returns:
        str: Path of data directory.
    '''

    # Standard command-line arguments for all samples.
    kDEFAULT_DATA_ROOT = os.path.join(os.sep, "usr", "src", "tensorrt", "data")
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-d", "--datadir",
                        help="Location of the TensorRT sample data directory, and any additional data directories.",
                        action="append", default=[kDEFAULT_DATA_ROOT])
    args, _ = parser.parse_known_args()

    def get_data_path(data_dir):
        # If the subfolder exists, append it to the path, otherwise use the provided path as-is.
        data_path = os.path.join(data_dir, subfolder)
        if not os.path.exists(data_path):
            print("WARNING: " + data_path + " does not exist. Trying " + data_dir + " instead.")
            data_path = data_dir
        # Make sure data directory exists.
        if not (os.path.exists(data_path)):
            print("WARNING: {:} does not exist. Please provide the correct data path with the -d option.".format(data_path))
        return data_path

    data_paths = [get_data_path(data_dir) for data_dir in args.datadir]
    return data_paths, locate_files(data_paths, find_files)


def locate_files(data_paths, filenames):
    """
    Locates the specified files in the specified data directories.
    If a file exists in multiple data directories, the first directory is used.

    Args:
        data_paths (List[str]): The data directories.
        filename (List[str]): The names of the files to find.

    Returns:
        List[str]: The absolute paths of the files.

    Raises:
        FileNotFoundError if a file could not be located.
    """
    found_files = [None] * len(filenames)
    for data_path in data_paths:
        # Find all requested files.
        for index, (found, filename) in enumerate(zip(found_files, filenames)):
            if not found:
                file_path = os.path.abspath(os.path.join(data_path, filename))
                if os.path.exists(file_path):
                    found_files[index] = file_path

    # Check that all files were found
    for f, filename in zip(found_files, filenames):
        if not f or not os.path.exists(f):
            raise FileNotFoundError("Could not find {:}. Searched in data paths: {:}".format(filename, data_paths))
    return found_files


# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
    def __init__(self, host_mem, device_mem):
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)

    def __repr__(self):
        return self.__str__()


# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine, input_shape, streamFlag=True):
    inputs = []
    outputs = []
    bindings = []
    if streamFlag:
        stream = cuda.Stream()
    else:
        stream = None

    for ib, binding in enumerate(engine):
        dims = engine.get_binding_shape(binding)
        # Dynamic bindings report -1 dims; substitute the caller-provided shape
        # (either one shape for all bindings or a per-binding list).
        if -1 in dims:
            if isinstance(input_shape, list):
                dims = input_shape[ib]
            else:
                dims = input_shape

        size = trt.volume(dims)
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host (page-locked) and device buffers.
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream


# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]


# This function is generalized for multiple inputs/outputs for full dimension networks.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference_v2(context, bindings, inputs, outputs, stream):
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference (asynchronously on the given stream).
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]


def trt_inference(img, img_h, img_w, context, inputs, outputs, bindings, stream, input_name='input'):
    """Run one dynamic-shape TensorRT inference.

    Args:
        img: numpy NCHW input.
        img_h, img_w: H and W the image enters the model with (required for
            dynamic-shape engines).
        context: caller-created TensorRT execution context.
        inputs, outputs, bindings, stream: buffers allocated on the first
            image (see allocate_buffers) with their device bindings.
        input_name: name of the model's input tensor.

    Returns:
        list of numpy arrays, one per model output.
    """
    origin_inputshape = context.get_tensor_shape(input_name)
    context.set_optimization_profile_async(0, stream.handle)
    origin_inputshape[-2], origin_inputshape[-1] = (img_h, img_w)
    context.set_input_shape(input_name, (origin_inputshape))

    inputs[0].host = np.ascontiguousarray(img)
    trt_outputs = do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
    return trt_outputs
+# return [out.host for out in outputs] +if __name__=='__main__': + model_path='weights/u2net_portrait.pth' + onnx_name = model_path.replace('.pth','.onnx') + trt_name = model_path.replace('.pth','.engine') + pth_model = U2NET(3,1) + pth_model.load_state_dict(torch.load(model_path)) + + + input_names=['input'] + output_names=['output%d'%(i) for i in range(7)] + dynamix_axis = dynamic_hw + input_shape =(1,3,512,512) + #测试pth转为onnx模型 + #pth2onnx(pth_model,onnx_name,input_shape=input_shape ,input_names=input_names ,output_names=output_names ,dynamix_axis=dynamix_axis ) + + #测试onnx模型转为trt模型 + + input_profile_shapes = [(1,3,512,512),(1,3,1024,1024),(1,3,2048,2048)] + input_shape = [1,3,-1,-1] + half=True + max_batch_size = 1 + onnx2engine(onnx_name,trt_name,input_shape=input_shape,half=half,max_batch_size=max_batch_size,input_profile_shapes=input_profile_shapes) + ''' + with torch.no_grad(): + for i,model_name in enumerate(model_names): + print(f'process model:{model_name}...') + torch.onnx.export(model, + input_tensor, + model_name, + opset_version=11, + input_names=['input'], + output_names=['output0','output1','output2','output3','output4','output5','output6'], + dynamic_axes=dynamic_[i]) + + print(f'onnx model:{model_name} saved successfully...') + + #print('sleep 10s...') + time.sleep(10) + print(f'begin check onnx model:{model_name}...') + + onnx_model = onnx.load(model_name) + try: + onnx.checker.check_model(onnx_model) + except Exception as e: + print('model incorrect') + print(e) + else: + print('model correct') + + print('*'*50) + print('Begin to test...') + case_1 = np.random.rand(1,3,512,512).astype(np.float32) + case_2 = np.random.rand(2,3,512,512).astype(np.float32) + case_3 = np.random.rand(1,3,224,224).astype(np.float32) + cases = [case_1,case_2,case_3] + + providers=['CUDAExecutionProvider', 'CPUExecutionProvider'] + for model_name in model_names: + print('-'*50,model_name) + onnx_session = onnxruntime.InferenceSession(model_name,providers=providers) + 
for i,case in enumerate(cases): + onnx_input = {'input':case} + try: + onnx_output = onnx_session.run(['output0','output1','output2','output3','output4','output5','output6'],onnx_input)[0] + except Exception as e: + print(f'Input:{i} on model:{model_name} failed') + print(e) + else: + print(f'Input:{i} on model:{model_name} succeed') + ''' diff --git a/stdc.py b/stdc.py new file mode 100644 index 0000000..5eeaf4a --- /dev/null +++ b/stdc.py @@ -0,0 +1,121 @@ +from models.experimental import attempt_load +import tensorrt as trt +import torch +import sys +from segutils.trtUtils import segPreProcess_image,segTrtForward,segPreProcess_image_torch +from segutils.model_stages import BiSeNet_STDC +import time,cv2 +import numpy as np + +class stdcModel(object): + def __init__(self, weights=None, + par={'modelSize':(640,360),'dynamic':False,'nclass':2,'predResize':True,'mean':(0.485, 0.456, 0.406),'std' :(0.229, 0.224, 0.225),'numpy':False, 'RGB_convert_first':True} + ): + + self.par = par + self.device = 'cuda:0' + self.half =True + if 'dynamic' not in par.keys(): + self.dynamic=False + else: self.dynamic=par['dynamic'] + + if weights.endswith('.engine'): + self. infer_type ='trt' + elif weights.endswith('.pth') or weights.endswith('.pt') : + self. 
infer_type ='pth' + else: + print('#########ERROR:',weights,': no registered inference type, exit') + sys.exit(0) + + if self.infer_type=='trt': + if self.dynamic : + print('####################ERROR##########,STDC动态模型不能采用trt格式########') + + logger = trt.Logger(trt.Logger.ERROR) + with open(weights, "rb") as f, trt.Runtime(logger) as runtime: + self.model=runtime.deserialize_cuda_engine(f.read())# 输入trt本地文件,返回ICudaEngine对象 + elif self.infer_type=='pth': + if self.dynamic: modelSize=None + else: modelSize=( self.par['modelSize'][1], self.par['modelSize'][0] ) + self.model = BiSeNet_STDC(backbone='STDCNet813', n_classes=par['seg_nclass'], + use_boundary_2=False, use_boundary_4=False, + use_boundary_8=True, use_boundary_16=False, + use_conv_last=False, + modelSize = modelSize + ) + + self.model.load_state_dict(torch.load(weights, map_location=torch.device(self.device) )) + self.model= self.model.to(self.device) + print('#########加载模型:',weights,' 类型:',self.infer_type) + def preprocess_image(self,image): + image = self.RB_convert(image) + + if self.dynamic: + H,W=image.shape[0:2]; + yscale = self.par['modelSize'][1]/H + xscale = self.par['modelSize'][0]/W + dscale = min(yscale,xscale) + re_size = ( int((dscale*W)//4*4), int( (dscale*H)//4*4 ) ) + else: re_size = self.par['modelSize'] + #print('####line 58:,', re_size,image.shape) + image = cv2.resize(image,re_size, interpolation=cv2.INTER_LINEAR) + + image = image.astype(np.float32) + image /= 255.0 + + image[:, :, 0] -= self.par['mean'][0] + image[:, :, 1] -= self.par['mean'][1] + image[:, :, 2] -= self.par['mean'][2] + + image[:, :, 0] /= self.par['std'][0] + image[:, :, 1] /= self.par['std'][1] + image[:, :, 2] /= self.par['std'][2] + + image = np.transpose(image, (2, 0, 1)) + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + if self.device != 'cpu': + image = image.to(self.device) + + return image + + def RB_convert(self,image): + image_c = image.copy() + image_c[:,:,0] = image[:,:,2] + 
image_c[:,:,2] = image[:,:,0] + return image_c + + + + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + + def eval(self,image): + time0 = time.time() + imageH, imageW, _ = image.shape + + img = self.preprocess_image(image) + time1 = time.time() + + if self.infer_type=='trt': + pred=segTrtForward(self.model,[img]) + + elif self.infer_type=='pth': + self.model.eval() + with torch.no_grad(): + pred = self.model(img) + + time2 = time.time() + pred=torch.argmax(pred,dim=1).cpu().numpy()[0] + time3 = time.time() + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH)) + time4 = time.time() + outstr= 'pre-precess:%.1f ,infer:%.1f ,post-cpu-argmax:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) ) + + return pred,outstr + + + + + + \ No newline at end of file diff --git a/trackUtils/sort.py b/trackUtils/sort.py new file mode 100644 index 0000000..1d7dcf3 --- /dev/null +++ b/trackUtils/sort.py @@ -0,0 +1,528 @@ +from __future__ import print_function + +import os +import numpy as np +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.patches as patches +from skimage import io + +import glob +import time,cv2 +import argparse +from filterpy.kalman import KalmanFilter +from PIL import Image,ImageDraw,ImageFont + +np.random.seed(0) +''' +def plot_one_box_ForTrack(x, im, color=None, label=None, line_thickness=3): + # Plots one bounding box on image 'im' using OpenCV + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
def plot_one_box_ForTrack(box, im, color=None, label=None, line_thickness=None):
    """Draw one bounding box (and optional label) on image `im` using PIL;
    returns the drawn image as a numpy array."""
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    line_thickness = line_thickness or max(int(min(im.size) / 200), 2)

    draw.rectangle([(box[0], box[1]), (box[2], box[3])], width=line_thickness, outline=tuple(color))  # plot

    if label:
        tmax = min(round(max(im.size) / 40), 20)
        fontsize = max(tmax, 12)
        font = ImageFont.truetype("../AIlib2/conf/platech.ttf", fontsize, encoding='utf-8')
        txt_width, txt_height = font.getsize(label)
        draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
        draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)

    im_array = np.asarray(im)
    return im_array


def drawBoxTraceSimplied(track_det_result, iiframe, img_draw, rainbows=None,
                         boxFlag=True, traceFlag=True, names=[]):
    """Draw detection boxes for frame `iiframe` and/or the track history up to
    it. Row layout assumed: [x0,y0,x1,y1,conf,cls,frame,track_id]."""
    boxes_oneFrame = track_det_result[track_det_result[:, 6] == iiframe]

    if boxFlag:
        # Draw the detection boxes of this frame.
        for box in boxes_oneFrame:
            x0, y0, x1, y1, conf, cls = box[0:6]
            if len(names) == 0:
                txtstring = '%d:%.2f' % (cls, conf)
            else:
                txtstring = '%s:%.2f' % (names[int(cls)], conf)
            img_draw = plot_one_box_ForTrack(box[0:4], img_draw, color=rainbows[int(cls)],
                                             label=txtstring, line_thickness=3)

    if traceFlag:
        # Draw each track's trajectory up to this frame (box-center polyline).
        track_ids = boxes_oneFrame[:, 7].tolist()
        boxes_before_oneFrame = track_det_result[track_det_result[:, 6] <= iiframe]
        for trackId in track_ids:
            boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:, 7] == trackId]
            xcs = (boxes_before_oneFrame_oneId[:, 0] + boxes_before_oneFrame_oneId[:, 2]) // 2
            ycs = (boxes_before_oneFrame_oneId[:, 1] + boxes_before_oneFrame_oneId[:, 3]) // 2

            [cv2.line(img_draw, (int(xcs[i]), int(ycs[i])),
                      (int(xcs[i + 1]), int(ycs[i + 1])), (255, 0, 0), thickness=2)
             for i, _ in enumerate(xcs) if i < len(xcs) - 1]

    return img_draw


def moving_average_wang(interval, windowsize):
    """Centered moving average with shrinking windows near both ends;
    windowsize must be odd (windowsize == 1 returns a copy)."""
    outNum = interval.copy()
    if windowsize == 1:
        return outNum
    assert windowsize % 2 != 0
    window = np.ones(int(windowsize)) / float(windowsize)
    re = np.convolve(interval, window, 'valid')
    cnt = int((windowsize - 1) / 2 + 0.5)
    total = len(interval)
    outNum = np.zeros((total,), dtype=np.float32)
    outNum[0] = interval[0]
    outNum[-1] = interval[-1]
    # Edge samples are averaged over progressively smaller symmetric windows.
    for i in range(1, cnt):
        outNum[i] = np.mean(interval[0:2 * i - 1])
        outNum[-i - 1] = np.mean(interval[-2 * i - 1:])
    outNum[cnt:-cnt] = re[:]
    return outNum


def track_draw_trace(tracks, im0):
    """Draw each track's centroid polyline on im0."""
    for track in tracks:
        [cv2.line(im0, (int(track.centroidarr[i][0]),
                        int(track.centroidarr[i][1])),
                  (int(track.centroidarr[i + 1][0]),
                   int(track.centroidarr[i + 1][1])),
                  (255, 0, 0), thickness=2)
         for i, _ in enumerate(track.centroidarr)
         if i < len(track.centroidarr) - 1]
    return im0


"""Function to Draw Bounding boxes"""
def track_draw_boxes(img, bbox, identities=None, categories=None, names=None):
    for i, box in enumerate(bbox):
        x1, y1, x2, y2 = [int(x) for x in box]

        cat = int(categories[i]) if categories is not None else 0
        # Renamed from `id` — avoid shadowing the builtin.
        ident = int(identities[i]) if identities is not None else 0
        label = str(ident) + ":" + names[cat]
        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 20), 2)
        cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (255, 144, 30), -1)
        cv2.putText(img, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, [255, 255, 255], 1)
        # cv2.circle(img, centroid, 6, color, -1)  # centroid of box (disabled)
    return img


def track_draw_all_boxes(tracked_dets, im0, names):
    """Draw every tracked detection (cols: 0-3 box, 4 class, 8 identity)."""
    if len(tracked_dets) > 0:
        bbox_xyxy = tracked_dets[:, :4]
        identities = tracked_dets[:, 8]
        categories = tracked_dets[:, 4]
        track_draw_boxes(im0, bbox_xyxy, identities, categories, names)
    return im0


####Traces come from the track chain; boxes come from the track.update results.
def track_draw_boxAndTrace(tracked_dets, tracks, im0, names):
    track_draw_all_boxes(tracked_dets, im0, names)
    track_draw_trace(tracks, im0)
    return im0


####Both traces and boxes come from the track chain.
def track_draw_trace_boxes(tracks, im0, names):
    for track in tracks:
        [cv2.line(im0, (int(track.centroidarr[i][0]),
                        int(track.centroidarr[i][1])),
                  (int(track.centroidarr[i + 1][0]),
                   int(track.centroidarr[i + 1][1])),
                  (255, 0, 0), thickness=2)
         for i, _ in enumerate(track.centroidarr)
         if i < len(track.centroidarr) - 1]
        bbox_xyxy = track.bbox_history[-1][0:4]
        identities, categories = track.id, track.detclass
        track_draw_boxes(im0, [bbox_xyxy], [identities], [categories], names)
    return im0


def linear_assignment(cost_matrix):
    """Solve the assignment problem; prefers `lap`, falls back to scipy."""
    try:
        import lap  # linear assignment problem solver
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        x, y = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(x, y)))


"""From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]"""
def iou_batch(bb_test, bb_gt):
    # NOTE(review): this function was truncated in the paste after the two
    # expand_dims lines; the remainder is reconstructed as the canonical SORT
    # implementation — confirm against the original repository.
    bb_gt = np.expand_dims(bb_gt, 0)
    bb_test = np.expand_dims(bb_test, 1)

    xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0])
    yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
    xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
    yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
              + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
    return o
np.expand_dims(bb_test, 1) + + xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0]) + yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1]) + xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2]) + yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1]) + + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh) + return(o) + + +"""Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the center of the box and s is the scale/area and r is the aspect ratio""" +def convert_bbox_to_z(bbox): + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + x = bbox[0] + w/2. + y = bbox[1] + h/2. + s = w * h + #scale is just area + r = w / float(h) + return np.array([x, y, s, r]).reshape((4, 1)) + + +"""Takes a bounding box in the centre form [x,y,s,r] and returns it in the form + [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right""" +def convert_x_to_bbox(x, score=None): + w = np.sqrt(x[2] * x[3]) + h = x[2] / w + if(score==None): + return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4)) + else: + return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5)) + +"""This class represents the internal state of individual tracked objects observed as bbox.""" +class KalmanBoxTracker(object): + + count = 0 + def __init__(self, bbox): + """ + Initialize a tracker using initial bounding box + + Parameter 'bbox' must have 'detected class' int number at the -1 position. + """ + self.kf = KalmanFilter(dim_x=7, dim_z=4) + self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]]) + self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]]) + + self.kf.R[2:,2:] *= 10. 
# R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes') + self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities + self.kf.P *= 10. + self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things) + self.kf.Q[4:,4:] *= 0.5 + + self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR + self.time_since_update = 0 + self.id = KalmanBoxTracker.count + KalmanBoxTracker.count += 1 + self.history = [] + self.hits = 0 + self.hit_streak = 0 + self.age = 0 + self.frames = [] + self.centroidarr = [] + CX = (bbox[0]+bbox[2])//2 + CY = (bbox[1]+bbox[3])//2 + self.centroidarr.append((CX,CY)) + #keep yolov5 detected class information + self.detclass = bbox[5] + self.frames.append( bbox[6] ) ###new added for interpolation + # If we want to store bbox + + self.bbox_history = [bbox] + + def update(self, bbox): + """ + Updates the state vector with observed bbox + """ + self.time_since_update = 0 + self.history = [] + self.hits += 1 + self.hit_streak += 1 + self.kf.update(convert_bbox_to_z(bbox)) + self.detclass = bbox[5] + CX = (bbox[0]+bbox[2])//2 + CY = (bbox[1]+bbox[3])//2 + self.centroidarr.append((CX,CY)) + self.frames.append( bbox[6] ) ###new added for interpolation + self.bbox_history.append(bbox) + + def predict(self): + """ + Advances the state vector and returns the predicted bounding box estimate + """ + if((self.kf.x[6]+self.kf.x[2])<=0): + self.kf.x[6] *= 0.0 + self.kf.predict() + self.age += 1 + if(self.time_since_update>0): + self.hit_streak = 0 + self.time_since_update += 1 + self.history.append(convert_x_to_bbox(self.kf.x)) + # bbox=self.history[-1] + # CX = (bbox[0]+bbox[2])/2 + # CY = (bbox[1]+bbox[3])/2 + # self.centroidarr.append((CX,CY)) + + return self.history[-1] + + + def get_state(self): + """ + Returns the current bounding box estimate + # test + arr1 = np.array([[1,2,3,4]]) + arr2 = np.array([0]) + arr3 = np.expand_dims(arr2, 0) + 
np.concatenate((arr1,arr3), axis=1) + """ + arr_detclass = np.expand_dims(np.array([self.detclass]), 0) + + arr_u_dot = np.expand_dims(self.kf.x[4],0) + arr_v_dot = np.expand_dims(self.kf.x[5],0) + arr_s_dot = np.expand_dims(self.kf.x[6],0) + + return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1) + +def associate_detections_to_trackers(detections, trackers, iou_threshold = 0.3): + """ + Assigns detections to tracked object (both represented as bounding boxes) + Returns 3 lists of + 1. matches, + 2. unmatched_detections + 3. unmatched_trackers + """ + if(len(trackers)==0): + return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int) + + iou_matrix = iou_batch(detections, trackers) + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() ==1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(-iou_matrix) + else: + matched_indices = np.empty(shape=(0,2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if(d not in matched_indices[:,0]): + unmatched_detections.append(d) + + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if(t not in matched_indices[:,1]): + unmatched_trackers.append(t) + + #filter out matched with low IOU + matches = [] + for m in matched_indices: + if(iou_matrix[m[0], m[1]]= self.min_hits or self.frame_count <= self.min_hits): + ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1)) #+1'd because MOT benchmark requires positive value + i -= 1 + #remove dead tracklet + # 跟踪失败或离开画面的目标从卡尔曼跟踪器中删除 + if(trk.time_since_update >self.max_age): + self.trackers.pop(i) #pop按键或索引位置删除对应元素 + # 返回当前画面中所有目标的box与id,以二维矩阵形式返回 + if(len(ret) > 0): + #print('####sort.py line282:',len(ret),ret[0].shape, (np.concatenate(ret)).shape) + return np.concatenate(ret) + return np.empty((0,6)) + +def parse_args(): + """Parse input 
arguments.""" + parser = argparse.ArgumentParser(description='SORT demo') + parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true') + parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data') + parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train') + parser.add_argument("--max_age", + help="Maximum number of frames to keep alive a track without associated detections.", + type=int, default=1) + parser.add_argument("--min_hits", + help="Minimum number of associated detections before track is initialised.", + type=int, default=3) + parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3) + args = parser.parse_args() + return args + +if __name__ == '__main__': + # all train + args = parse_args() + display = args.display + phase = args.phase + total_time = 0.0 + total_frames = 0 + colours = np.random.rand(32, 3) #used only for display + if(display): + if not os.path.exists('mot_benchmark'): + print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). 
E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n') + exit() + plt.ion() + fig = plt.figure() + ax1 = fig.add_subplot(111, aspect='equal') + + if not os.path.exists('output'): + os.makedirs('output') + pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt') + for seq_dets_fn in glob.glob(pattern): + mot_tracker = Sort(max_age=args.max_age, + min_hits=args.min_hits, + iou_threshold=args.iou_threshold) #create instance of the SORT tracker + seq_dets = np.loadtxt(seq_dets_fn, delimiter=',') + seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0] + + with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file: + print("Processing %s."%(seq)) + for frame in range(int(seq_dets[:,0].max())): + frame += 1 #detection and frame numbers begin at 1 + dets = seq_dets[seq_dets[:, 0]==frame, 2:7] + dets[:, 2:4] += dets[:, 0:2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2] + total_frames += 1 + + if(display): + fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame)) + im =io.imread(fn) + ax1.imshow(im) + plt.title(seq + ' Tracked Targets') + + start_time = time.time() + trackers = mot_tracker.update(dets) + cycle_time = time.time() - start_time + total_time += cycle_time + + for d in trackers: + print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file) + if(display): + d = d.astype(np.int32) + ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:])) + + if(display): + fig.canvas.flush_events() + plt.draw() + ax1.cla() + + print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time)) + + if(display): + print("Note: to get real runtime results run without the option: --display") diff --git a/trackUtils/sort_obb.py b/trackUtils/sort_obb.py new file mode 100644 index 0000000..b6488fc --- /dev/null +++ b/trackUtils/sort_obb.py @@ -0,0 +1,539 @@ +from __future__ import 
print_function + +import os +import numpy as np +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.patches as patches +from skimage import io + +import glob +import time,cv2 +import argparse +from filterpy.kalman import KalmanFilter +from PIL import Image,ImageDraw,ImageFont +np.random.seed(0) + +def obbTohbb(obb): + obbarray=np.array(obb) + x0=np.min(obbarray[:,0]) + x1=np.max(obbarray[:,0]) + y0=np.min(obbarray[:,1]) + y1=np.max(obbarray[:,1]) + return [x0,y0,x1,y1] +''' +def plot_one_box_ForTrack(x, im, color=None, label=None, line_thickness=3): + # Plots one bounding box on image 'im' using OpenCV + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' + tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) +''' +def plot_one_box_ForTrack(box, im, color=None, label=None, line_thickness=None): + # Plots one bounding box on image 'im' using PIL + im = Image.fromarray(im) + draw = ImageDraw.Draw(im) + line_thickness = line_thickness or max(int(min(im.size) / 200), 2) + + draw.rectangle([(box[0],box[1]),(box[2],box[3])], width=line_thickness, outline=tuple(color)) # plot + + if label: + tmax = min(round(max(im.size) / 40),20) + fontsize = max(tmax, 12) + font = ImageFont.truetype("../AIlib2/conf/platech.ttf", fontsize,encoding='utf-8') + txt_width, txt_height = font.getsize(label) + 
draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + + im_array = np.asarray(im) + + return im_array + +def drawBoxTraceSimplied(track_det_result,iiframe, img_draw,rainbows=None,boxFlag=True,traceFlag=True,names=[]): + + boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ] + + if boxFlag: + ###在某一帧上,画上检测框 + for box in boxes_oneFrame: + x0,y0,x1,y1,conf,cls = box[0:6] + #cv2.rectangle(img_draw, ( int(x0), int(y0) ), ( int(x1), int(y1) ), (255,0,20), 2) + if len(names)==0: + txtstring='%d:%.2f'%(cls,conf) + else: txtstring='%s:%.2f'%(names[int(cls)],conf) + img_draw=plot_one_box_ForTrack( box[0:4], img_draw, color=rainbows[ int(cls)], label=txtstring, line_thickness=3) + + if traceFlag: + ###在某一帧上,画上轨迹 + track_ids = boxes_oneFrame[:,7].tolist() + boxes_before_oneFrame = track_det_result[ track_det_result[:,6]<=iiframe ] + for trackId in track_ids: + boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:,7]==trackId] + xcs = (boxes_before_oneFrame_oneId[:,0]+boxes_before_oneFrame_oneId[:,2])//2 + ycs = (boxes_before_oneFrame_oneId[:,1]+boxes_before_oneFrame_oneId[:,3])//2 + + [cv2.line(img_draw, ( int(xcs[i]) , int(ycs[i]) ), + ( int(xcs[i+1]),int(ycs[i+1]) ),(255,0,0), thickness=2) + for i,_ in enumerate(xcs) if i < len(xcs)-1 ] + + return img_draw + + +def moving_average_wang(interval, windowsize): + outNum = interval.copy() + if windowsize==1: + return outNum + assert windowsize%2!=0 + window = np.ones(int(windowsize)) / float(windowsize) + re = np.convolve(interval, window, 'valid') + cnt = int((windowsize - 1)/2+0.5) + total = len(interval) + outNum = np.zeros( (total,),dtype=np.float32 ) + outNum[0]=interval[0] + outNum[-1]=interval[-1] + for i in range(1,cnt): + outNum[i] = np.mean( interval[0:2*i-1] ) + outNum[-i-1] = np.mean( interval[-2*i-1:] ) + 
#print('###line113:',outNum.shape,re.shape,cnt,windowsize) + outNum[cnt:-cnt]=re[:] + return outNum + +def track_draw_trace(tracks,im0): + for track in tracks: + [cv2.line(im0, (int(track.centroidarr[i][0]), + int(track.centroidarr[i][1])), + (int(track.centroidarr[i+1][0]), + int(track.centroidarr[i+1][1])), + (255,0,0), thickness=2) + for i,_ in enumerate(track.centroidarr) + if i < len(track.centroidarr)-1 ] + + + return im0 + +"""Function to Draw Bounding boxes""" +def track_draw_boxes(img, bbox, identities=None, categories=None, names=None ): + for i, box in enumerate(bbox): + #print('####line33 sort.py:',box) + x1, y1, x2, y2 = [int(x) for x in box] + + cat = int(categories[i]) if categories is not None else 0 + id = int(identities[i]) if identities is not None else 0 + data = (int((box[0]+box[2])/2),(int((box[1]+box[3])/2))) + label = str(id) + ":"+ names[cat] + (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1) + cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,20), 2) + cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (255,144,30), -1) + cv2.putText(img, label, (x1, y1 - 5),cv2.FONT_HERSHEY_SIMPLEX, + 0.6, [255, 255, 255], 1) + # cv2.circle(img, data, 6, color,-1) #centroid of box + + + return img + +def track_draw_all_boxes(tracked_dets,im0,names): + if len(tracked_dets)>0: + bbox_xyxy = tracked_dets[:,:4] + identities = tracked_dets[:, 8] + categories = tracked_dets[:, 4] + track_draw_boxes(im0, bbox_xyxy, identities, categories, names) + return im0 +####轨迹采用跟踪链中的结果。box采用track.update后的结果。 +def track_draw_boxAndTrace(tracked_dets,tracks,im0,names): + track_draw_all_boxes(tracked_dets,im0,names) + track_draw_trace(tracks,im0) + return im0 + +####轨迹和box都采用跟踪链中的结果 +def track_draw_trace_boxes(tracks,im0,names): + for track in tracks: + [cv2.line(im0, (int(track.centroidarr[i][0]), + int(track.centroidarr[i][1])), + (int(track.centroidarr[i+1][0]), + int(track.centroidarr[i+1][1])), + (255,0,0), thickness=2) + for i,_ in 
enumerate(track.centroidarr) + if i < len(track.centroidarr)-1 ] + bbox_xyxy = track.bbox_history[-1][0:4] + identities,categories = track.id , track.detclass + #print('####sort.py line74:',bbox_xyxy) + track_draw_boxes(im0, [bbox_xyxy], [identities], [categories], names) + + + return im0 + +def linear_assignment(cost_matrix): + try: + import lap #linear assignment problem solver + _, x, y = lap.lapjv(cost_matrix, extend_cost = True) + return np.array([[y[i],i] for i in x if i>=0]) + except ImportError: + from scipy.optimize import linear_sum_assignment + x,y = linear_sum_assignment(cost_matrix) + return np.array(list(zip(x,y))) + + +"""From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]""" +def iou_batch(bb_test, bb_gt): + + bb_gt = np.expand_dims(bb_gt, 0) + bb_test = np.expand_dims(bb_test, 1) + + xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0]) + yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1]) + xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2]) + yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3]) + w = np.maximum(0., xx2 - xx1) + h = np.maximum(0., yy2 - yy1) + wh = w * h + o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1]) + + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh) + return(o) + + +"""Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the center of the box and s is the scale/area and r is the aspect ratio""" +def convert_bbox_to_z(bbox): + w = bbox[2] - bbox[0] + h = bbox[3] - bbox[1] + x = bbox[0] + w/2. + y = bbox[1] + h/2. 
+ s = w * h + #scale is just area + r = w / float(h) + return np.array([x, y, s, r]).reshape((4, 1)) + + +"""Takes a bounding box in the centre form [x,y,s,r] and returns it in the form + [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right""" +def convert_x_to_bbox(x, score=None): + w = np.sqrt(x[2] * x[3]) + h = x[2] / w + if(score==None): + return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4)) + else: + return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5)) + +"""This class represents the internal state of individual tracked objects observed as bbox.""" +class KalmanBoxTracker(object): + + count = 0 + def __init__(self, bbox,obb=None): + """ + Initialize a tracker using initial bounding box + + Parameter 'bbox' must have 'detected class' int number at the -1 position. + """ + self.kf = KalmanFilter(dim_x=7, dim_z=4) + self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]]) + self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]]) + + self.kf.R[2:,2:] *= 10. # R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes') + self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities + self.kf.P *= 10. 
+ self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things) + self.kf.Q[4:,4:] *= 0.5 + + self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR + self.time_since_update = 0 + self.id = KalmanBoxTracker.count + KalmanBoxTracker.count += 1 + self.history = [] + self.hits = 0 + self.hit_streak = 0 + self.age = 0 + self.frames = [] + self.centroidarr = [] + CX = (bbox[0]+bbox[2])//2 + CY = (bbox[1]+bbox[3])//2 + self.centroidarr.append((CX,CY)) + #keep yolov5 detected class information + self.detclass = bbox[5] + self.frames.append( bbox[6] ) ###new added for interpolation + # If we want to store bbox + #print(type() + if obb is not None: self.obb_history = [obb] + self.bbox_history = [bbox] + + def update(self, bbox,obb=None): + """ + Updates the state vector with observed bbox + """ + self.time_since_update = 0 + self.history = [] + self.hits += 1 + self.hit_streak += 1 + self.kf.update(convert_bbox_to_z(bbox)) + self.detclass = bbox[5] + CX = (bbox[0]+bbox[2])//2 + CY = (bbox[1]+bbox[3])//2 + self.centroidarr.append((CX,CY)) + self.frames.append( bbox[6] ) ###new added for interpolation + self.bbox_history.append(bbox) + if obb is not None: self.obb_history.append( obb) + def predict(self): + """ + Advances the state vector and returns the predicted bounding box estimate + """ + if((self.kf.x[6]+self.kf.x[2])<=0): + self.kf.x[6] *= 0.0 + self.kf.predict() + self.age += 1 + if(self.time_since_update>0): + self.hit_streak = 0 + self.time_since_update += 1 + self.history.append(convert_x_to_bbox(self.kf.x)) + # bbox=self.history[-1] + # CX = (bbox[0]+bbox[2])/2 + # CY = (bbox[1]+bbox[3])/2 + # self.centroidarr.append((CX,CY)) + + return self.history[-1] + + + def get_state(self): + """ + Returns the current bounding box estimate + # test + arr1 = np.array([[1,2,3,4]]) + arr2 = np.array([0]) + arr3 = np.expand_dims(arr2, 0) + np.concatenate((arr1,arr3), axis=1) + """ + arr_detclass = 
np.expand_dims(np.array([self.detclass]), 0) + + arr_u_dot = np.expand_dims(self.kf.x[4],0) + arr_v_dot = np.expand_dims(self.kf.x[5],0) + arr_s_dot = np.expand_dims(self.kf.x[6],0) + + return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1) + +def associate_detections_to_trackers(detections, trackers, iou_threshold = 0.3): + """ + Assigns detections to tracked object (both represented as bounding boxes) + Returns 3 lists of + 1. matches, + 2. unmatched_detections + 3. unmatched_trackers + """ + if(len(trackers)==0): + return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int) + + iou_matrix = iou_batch(detections, trackers) + + if min(iou_matrix.shape) > 0: + a = (iou_matrix > iou_threshold).astype(np.int32) + if a.sum(1).max() == 1 and a.sum(0).max() ==1: + matched_indices = np.stack(np.where(a), axis=1) + else: + matched_indices = linear_assignment(-iou_matrix) + else: + matched_indices = np.empty(shape=(0,2)) + + unmatched_detections = [] + for d, det in enumerate(detections): + if(d not in matched_indices[:,0]): + unmatched_detections.append(d) + + unmatched_trackers = [] + for t, trk in enumerate(trackers): + if(t not in matched_indices[:,1]): + unmatched_trackers.append(t) + + #filter out matched with low IOU + matches = [] + for m in matched_indices: + if(iou_matrix[m[0], m[1]]= self.min_hits or self.frame_count <= self.min_hits): + ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1)) #+1'd because MOT benchmark requires positive value + i -= 1 + #remove dead tracklet + # 跟踪失败或离开画面的目标从卡尔曼跟踪器中删除 + if(trk.time_since_update >self.max_age): + self.trackers.pop(i) #pop按键或索引位置删除对应元素 + # 返回当前画面中所有目标的box与id,以二维矩阵形式返回 + if(len(ret) > 0): + #print('####sort.py line282:',len(ret),ret[0].shape, (np.concatenate(ret)).shape) + return np.concatenate(ret) + return np.empty((0,6)) + +def parse_args(): + """Parse input arguments.""" + parser = argparse.ArgumentParser(description='SORT 
demo') + parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true') + parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data') + parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train') + parser.add_argument("--max_age", + help="Maximum number of frames to keep alive a track without associated detections.", + type=int, default=1) + parser.add_argument("--min_hits", + help="Minimum number of associated detections before track is initialised.", + type=int, default=3) + parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3) + args = parser.parse_args() + return args + +if __name__ == '__main__': + # all train + args = parse_args() + display = args.display + phase = args.phase + total_time = 0.0 + total_frames = 0 + colours = np.random.rand(32, 3) #used only for display + if(display): + if not os.path.exists('mot_benchmark'): + print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). 
E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n') + exit() + plt.ion() + fig = plt.figure() + ax1 = fig.add_subplot(111, aspect='equal') + + if not os.path.exists('output'): + os.makedirs('output') + pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt') + for seq_dets_fn in glob.glob(pattern): + mot_tracker = Sort(max_age=args.max_age, + min_hits=args.min_hits, + iou_threshold=args.iou_threshold) #create instance of the SORT tracker + seq_dets = np.loadtxt(seq_dets_fn, delimiter=',') + seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0] + + with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file: + print("Processing %s."%(seq)) + for frame in range(int(seq_dets[:,0].max())): + frame += 1 #detection and frame numbers begin at 1 + dets = seq_dets[seq_dets[:, 0]==frame, 2:7] + dets[:, 2:4] += dets[:, 0:2] #convert to [x1,y1,w,h] to [x1,y1,x2,y2] + total_frames += 1 + + if(display): + fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame)) + im =io.imread(fn) + ax1.imshow(im) + plt.title(seq + ' Tracked Targets') + + start_time = time.time() + trackers = mot_tracker.update(dets) + cycle_time = time.time() - start_time + total_time += cycle_time + + for d in trackers: + print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file) + if(display): + d = d.astype(np.int32) + ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:])) + + if(display): + fig.canvas.flush_events() + plt.draw() + ax1.cla() + + print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time)) + + if(display): + print("Note: to get real runtime results run without the option: --display") diff --git a/utils/SendLog/platformQueryOfftask.json b/utils/SendLog/platformQueryOfftask.json new file mode 100644 index 0000000..70aadd1 --- /dev/null +++ 
b/utils/SendLog/platformQueryOfftask.json @@ -0,0 +1 @@ +{"code": 0, "msg": "操作成功", "data": []} \ No newline at end of file diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/activations.py b/utils/activations.py new file mode 100644 index 0000000..aa3ddf0 --- /dev/null +++ b/utils/activations.py @@ -0,0 +1,72 @@ +# Activation functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for torchscript and CoreML + return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX + + +class MemoryEfficientSwish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x * torch.sigmoid(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + return grad_output * (sx * (1 + x * (1 - sx))) + + def forward(self, x): + return self.F.apply(x) + + +# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- +class Mish(nn.Module): + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): 
+ return self.F.apply(x) + + +# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- +class FReLU(nn.Module): + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) diff --git a/utils/autoanchor.py b/utils/autoanchor.py new file mode 100644 index 0000000..5777746 --- /dev/null +++ b/utils/autoanchor.py @@ -0,0 +1,160 @@ +# Auto-anchor utils + +import numpy as np +import torch +import yaml +from scipy.cluster.vq import kmeans +from tqdm import tqdm + +from utils.general import colorstr + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchor_grid.prod(-1).view(-1) # anchor area + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da.sign() != ds.sign(): # same order + print('Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + m.anchor_grid[:] = m.anchor_grid.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + prefix = colorstr('autoanchor: ') + print(f'\n{prefix}Analyzing anchors... ', end='') + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1. 
/ thr).float().mean() # best possible recall + return bpr, aat + + anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors + bpr, aat = metric(anchors) + print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') + if bpr < 0.98: # threshold to recompute + print('. Attempting to improve anchors, please wait...') + na = m.anchor_grid.numel() // 2 # number of anchors + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + print(f'{prefix}ERROR: {e}') + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + check_anchor_order(m) + print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') + print('') # newline + + +def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + path: path to dataset *.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + thr = 1. / thr + prefix = colorstr('autoanchor: ') + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1. 
/ r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') + print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' + f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + for i, x in enumerate(k): + print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + return k + + if isinstance(path, str): # *.yaml file + with open(path) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + else: + dataset = path # dataset + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + print(f'{prefix}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') + k *= s + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + npr = np.random + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k) + + return print_results(k) diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 
--- /dev/null +++ b/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/utils/aws/resume.py b/utils/aws/resume.py new file mode 100644 index 0000000..faad8d2 --- /dev/null +++ b/utils/aws/resume.py @@ -0,0 +1,37 @@ +# Resume all interrupted trainings in yolov5/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 
&' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh new file mode 100644 index 0000000..890606b --- /dev/null +++ b/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/utils/datasets.py b/utils/datasets.py new file mode 100644 index 0000000..ed44569 --- /dev/null +++ b/utils/datasets.py @@ -0,0 +1,1074 @@ +# Dataset utils and dataloaders + +import glob +import logging +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image, ExifTags +from torch.utils.data import Dataset +from tqdm import tqdm + +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ + resample_segments, clean_str +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes +vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +logger = logging.getLogger(__name__) + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(files): + # Returns a single hash value of a list of files + return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except: + pass + + return s + 
+ +def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, + rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + # Make sure only the first process in DDP process the dataset first, and the following others can use the cache + with torch_distributed_zero_first(rank): + dataset = LoadImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augment images + hyp=hyp, # augmentation hyperparameters + rect=rect, # rectangular training + cache_images=cache, + single_cls=opt.single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + 
+ def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, img_size=640, stride=32): + p = str(Path(path).absolute()) # os-agnostic absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. ' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR 
to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640, stride=32): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources, 'r') 
as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs = [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): # index, source + assert i==0 + # Start thread to read frames from video stream + print(f'{i + 1}/{n}: {s}... ', end='') + if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video + check_requirements(('pafy', 'youtube_dl')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + self.cap = cap + _, self.imgs[i] = cap.read() # guarantee first frame + + print(f' success ({w}x{h} at {self.fps:.2f} FPS).') + + print('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') + + def update(self, index, cap): + frames=2 + # Read next stream frame in a daemon thread + n = 0 + iframe=0 + while cap.isOpened(): + n += 1 + _, self.imgs[index] = cap.read() + iframe +=1 + '''cap.grab() + if n == frames: # read every 4th frame + success, im = cap.retrieve() + self.imgs[index] = im if success else self.imgs[index] * 0 + n = 0''' + #print('###sleep:%.1f ms ,index:%d ,n:%d, iframe:%d'%(1/self.fps*1000,index,n,iframe) ) + #time.sleep(1 / self.fps) # wait time + return self.imgs + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + #img0 = self.imgs.copy() + + img0 = self.update(0,self.cap).copy() + if not isinstance(img0[0],np.ndarray): + #print('####video stream :%s error or video ends#####',self.sources) + return False, None, None, None + #if cv2.waitKey(1) == ord('q'): # q to quit + # cv2.destroyAllWindows() + # raise StopIteration + + # Letterbox + img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None + + def __len__(self): + return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings + return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] + + +class LoadImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + 
self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('**/*.*')) # pathlib + elif p.is_file(): # file + with open(p, 'r') as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise Exception(f'{prefix}{p} does not exist') + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache, exists = torch.load(cache_path), True # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + cache, exists = self.cache_labels(cache_path, prefix), False # re-cache + else: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' images and labels... 
{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, 
hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) + l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + x[im_file] = [l, shape, segments] + except Exception as e: + nc += 1 + print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... 
" \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + pbar.close() + + if nf == 0: + print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = load_mosaic(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + 
perspective=hyp['perspective']) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] 
+ wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, 
tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB


def load_mosaic(self, index):
    # loads images in a 4-mosaic
    """Build one 2s x 2s mosaic from 4 dataset images and merge their labels/segments.

    NOTE(review): relies on dataset attributes (img_size, mosaic_border, indices,
    labels, segments, hyp) and on load_image/xywhn2xyxy/xyn2xy/random_perspective
    defined elsewhere in this module — confirm availability at call site.
    """
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        # pixel offset of this tile, used to shift labels into mosaic coordinates
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4


def load_mosaic9(self, index):
    # loads images in a 9-mosaic
    """Build one 9-image mosaic on a 3s x 3s canvas (later cropped to 2s x 2s)."""
    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9; hp/wp (previous tile h/w) are set at loop bottom
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset: crop the 3s x 3s canvas down to a random 2s x 2s window
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9


def replicate(img, labels):
    # Replicate labels
    """Copy the smallest half of the labelled boxes to random free positions (augmentation)."""
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints.
    # Returns (padded image, (w_ratio, h_ratio), (dw, dh) half-paddings).
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)


def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi /
180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = 
box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + def bbox_ioa(box1, box2): + # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and 
s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') + # Convert detection dataset into classification dataset, with one directory per class + + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), 
im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit('../coco128')
    Arguments
        path:           Path to images directory
        weights:        Train, val, test weights (list)
        annotated_only: Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], [])  # image files only
    n = len(files)  # number of files
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path / txt[i], 'a') as f:
                f.write(str(img) + '\n')  # add image to txt file
diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md
new file mode 100644
index 0000000..0cdc51b
--- /dev/null
+++ b/utils/flask_rest_api/README.md
# Flask REST API
[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the `yolov5s` model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).

## Requirements

[Flask](https://palletsprojects.com/p/flask/) is required. Install with:
```shell
$ pip install Flask
```

## Run

After Flask installation run:

```shell
$ python3 restapi.py --port 5000
```

Then use [curl](https://curl.se/) to perform a request:

```shell
$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'`
```

The model inference results are returned:

```shell
[{'class': 0,
  'confidence': 0.8197850585,
  'name': 'person',
  'xmax': 1159.1403808594,
  'xmin': 750.912902832,
  'ymax': 711.2583007812,
  'ymin': 44.0350036621},
 {'class': 0,
  'confidence': 0.5667674541,
  'name': 'person',
  'xmax': 1065.5523681641,
  'xmin': 116.0448303223,
  'ymax': 713.8904418945,
  'ymin': 198.4603881836},
 {'class': 27,
  'confidence': 0.5661227107,
  'name': 'tie',
  'xmax': 516.7975463867,
  'xmin': 416.6880187988,
  'ymax': 717.0524902344,
  'ymin': 429.2020568848}]
```

An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py`
diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py
new file mode 100644
index 0000000..ff21f30
--- /dev/null
+++ b/utils/flask_rest_api/example_request.py
"""Perform test request"""
import pprint

import requests

DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
TEST_IMAGE = "zidane.jpg"

image_data = open(TEST_IMAGE, "rb").read()

response = requests.post(DETECTION_URL, files={"image": image_data}).json()

pprint.pprint(response)
diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py
new file mode 100644
index 0000000..9d88f61
--- /dev/null
+++ b/utils/flask_rest_api/restapi.py
"""
Run a rest API exposing the yolov5s object detection model
"""
import argparse
import io

import torch
from PIL import Image
from flask import Flask, request

app = Flask(__name__)

DETECTION_URL = "/v1/object-detection/yolov5s"

@app.route(DETECTION_URL, methods=["POST"])
def predict():
    # Route only accepts POST (see decorator); guard is defensive.
    if not request.method == "POST":
        return

    if request.files.get("image"):
        image_file = request.files["image"]
        image_bytes = image_file.read()

        img = Image.open(io.BytesIO(image_bytes))

        results = model(img, size=640)
        data = results.pandas().xyxy[0].to_json(orient="records")
        return data


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model")
    parser.add_argument("--port", default=5000, type=int, help="port number")
    args = parser.parse_args()

    model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True).autoshape()  # force_reload to recache
    app.run(host="0.0.0.0", port=args.port)  # debug=True causes Restarting with stat
diff --git a/utils/general.py b/utils/general.py
new file mode 100644
index 0000000..fd1d11a
--- /dev/null
+++ b/utils/general.py
# YOLOv5 general utils

import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import pandas as pd
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads


def set_logging(rank=-1):
    # Only ranks -1/0 (single process or DDP leader) log at INFO; workers at WARN
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def isdocker():
    # Is environment a Docker container
    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    # NOTE(review): parameter name shadows builtin `str`; kept as-is for compatibility
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


def check_online():
    # Check internet connectivity (TCP connect to 1.1.1.1:443 with 5s timeout)
    import socket
    try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accesability
        return True
    except OSError:
        return False


def check_git_status():
    # Recommend 'git pull' if code is out of date; asserts double as skip conditions
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not isdocker(), 'skipping check (Docker image)'
        assert check_online(), 'skipping check (offline)'

        cmd = 'git fetch && git config --get remote.origin.url'
        url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git')  # github repo url
        branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
        n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
        if n > 0:
            s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
                f"Use 'git pull' to update or 'git clone {url}' to download latest."
        else:
            s = f'up to date with {url} ✅'
        print(emojis(s))  # emoji-safe
    except Exception as e:
        print(e)


def check_requirements(requirements='requirements.txt', exclude=()):
    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
    import pkg_resources as pkg
    prefix = colorstr('red', 'bold', 'requirements:')
    if isinstance(requirements, (str, Path)):  # requirements.txt file
        file = Path(requirements)
        if not file.exists():
            print(f"{prefix} {file.resolve()} not found, check failed.")
            return
        requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
    else:  # list or tuple of packages
        requirements = [x for x in requirements if x not in exclude]

    n = 0  # number of packages updates
    for r in requirements:
        try:
            pkg.require(r)
        except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
            n += 1
            print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...")
            print(subprocess.check_output(f"pip install {e.req}", shell=True).decode())

    if n:  # if packages updated
        source = file.resolve() if 'file' in locals() else requirements
        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
            f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
        print(emojis(s))  # emoji-safe


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s; round up if not
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size


def check_imshow():
    # Check if environment supports image displays
    try:
        assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_file(file): + # Search for file if not found + if Path(file).is_file() or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' % s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def 
coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = 
h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image 
shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU 
https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. 
def wh_iou(wh1, wh2):
    """Return the nxm IoU matrix for width/height pairs: wh1 is nx2, wh2 is mx2."""
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=()):
    """Runs Non-Maximum Suppression (NMS) on inference results.

    Returns:
        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    # Candidates: objectness above threshold; the `< 1.0000001` upper bound also
    # rejects NaN / out-of-range objectness values.
    xc = (prediction[..., 4] > conf_thres) & (prediction[..., 4] < 1.0000001)

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # keep confident candidates only

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        # NOTE(review): this file also defines a list-based xywh2xyxy(box, iW, iH);
        # the tensor version expected here must be in scope — confirm which one wins.
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS: offset boxes per class so cross-class boxes never suppress each other
        c = x[:, 5:6] * (0 if agnostic else max_wh)
        boxes, scores = x[:, :4] + c, x[:, 4]
        # NOTE(review): `torchvision` is not imported in the visible file header — confirm.
        i = torchvision.ops.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output


# NOTE(review): overlap_box_suppression() starts here in the original collapsed
# line; it is reconstructed in full with the following unit.
torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2] # N中一个和M个比较; + rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2] + wh = (rb - lt).clamp(min=0) #小于0的为0 clamp 钳;夹钳; + inter = wh[:, :, 0] * wh[:, :, 1] + + return torch.squeeze(inter / area1), torch.squeeze(inter / area2) + + output = [torch.zeros((0, 6), device=prediction[0].device)] * len(prediction) + for i, x in enumerate(prediction): + keep = [] # 最终保留的结果, 在boxes中对应的索引; + boxes = x[:, 0:4] + scores = x[:, 4] + cls = x[:, 5] + idxs = scores.argsort() + while idxs.numel() > 0: + keep_idx = idxs[-1] + keep_box = boxes[keep_idx][None, ] # [1, 4] + keep.append(keep_idx) + if idxs.size(0) == 1: + break + idxs = idxs[:-1] # 将得分最大框 从索引中删除; 剩余索引对应的框 和 得分最大框 计算iob; + other_boxes = boxes[idxs] + this_cls = cls[keep_idx] + other_cls = cls[idxs] + iobs1, iobs2 = box_iob(keep_box, other_boxes) # 一个框和其余框比较 1XM + idxs = idxs[((iobs1 <= ovlap_thres) & (iobs2 <= ovlap_thres)) | (other_cls != this_cls)] + keep = idxs.new(keep) # Tensor + output[i] = x[keep] + return output + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) 
% results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # applies a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to 
float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=True, sep=''): + # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. + path = Path(path) # os-agnostic + if (path.exists() and exist_ok) or (not path.exists()): + return str(path) + else: + dirs = glob.glob(f"{path}{sep}*") # similar paths + matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] + i = [int(m.groups()[0]) for m in matches if m] # indices + n = max(i) + 1 if i else 2 # increment number + return f"{path}{sep}{n}" # update path diff --git a/utils/get_offline_url.py b/utils/get_offline_url.py new file mode 100644 index 0000000..934e367 --- /dev/null +++ b/utils/get_offline_url.py @@ -0,0 +1,106 @@ +from PIL import Image +import numpy as np +import cv2 +import base64 +import io,os +import requests +import time,json +import string,random +import glob +##for CeKanYuan +def get_offlineUrls(taskUrl,offlineFile,jsonfile='SendLog/platformQueryOfftask.json'): + with open(offlineFile,'r') as fp: + lines=fp.readlines() + doneCodes=[line.strip().split(' ')[2] for line in lines] + try: + res = requests.get(taskUrl,timeout=10).json() + offlines=res['data'] ##offlines[0]['code'],offlines[0]['videoUrl'] + with open(jsonfile,'w') as fp: + json.dump(res,fp, ensure_ascii=False) + except Exception as ee: + timestr=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + print('###line43 %s read taskUrl:%s error:%s '%(timestr,taskUrl,ee)) + offlines=[] + outOfflines=[] + for off in offlines: + off['port']=1935; + off.update({'name':'off-' +off.pop("code")}) + if off['name'] in doneCodes: + continue + off.update({'url': off.pop("videoUrl")}) + outOfflines.append(off) + #off['url']=off['videoUrl'] + + return outOfflines +def 
platurlToJsonfile(taskUrl,jsonfile='SendLog/platformQuery.json'): + try: + res = requests.get(taskUrl,timeout=10).json() + offlines=res['data'] ##offlines[0]['code'],offlines[0]['videoUrl'] + with open(jsonfile,'w') as fp: + json.dump(res,fp, ensure_ascii=False) + except Exception as ee: + timestr=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + print('###line43 %s read taskUrl:%s error:%s '%(timestr,taskUrl,ee)) + +def get_websource_fromTxt(txtfile): + with open(txtfile,'r') as fp: + lines = fp.readlines() + sources=[ ] + for line in lines: + sous={} + try: + sps = line.strip().split(' ') + sous['url']=sps[0];sous['port']=sps[1] + + #webs.append(sps[0]) + if 'rtmp' in sps[0]: + name = sps[0].split('/')[4] + + else: + name = sps[0][-3:] + sous['name']='live-'+name.replace('_','') + sous['port']=sps[1] + sources.append(sous) + except Exception as ee: + + print('####format error : %s, line:%s , in file:%s#####'%(ee,line,txtfile)) + assert len(sources)>0 + return sources + +def update_websource_offAndLive(platform_query_url,sourceFile,offlineFile,jsonfile='SendLog/platformQuery.json'): + + #platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo' + txtSource=get_websource_fromTxt(sourceFile) + try: + res = requests.get(platform_query_url,timeout=10).json() + questionUrl = res['data']['questionUrl'] ###直播流时,问题图片的推送地址 + offlineUrl = res['data']['offlineUrl'] ###http离线视频时,问题图片的推送地址 + taskUrl = res['data']['taskUrl'] ###http离线视频时,离线视频存放的地址 + with open(jsonfile,'w') as fp: + json.dump(res,fp, ensure_ascii=False) + except Exception as ee: + timestr=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + print('######line83: %s: file:geturlPlatform: error %s ,url:%s #####'%(timestr,ee,platform_query_url)) + taskUrl='http://121.40.249.52:9050/api/analyse/getAiInspectionList' + if taskUrl: + offlines=get_offlineUrls(taskUrl,offlineFile) + txtSource.extend(offlines) + #[{'url': 'rtmp://demoplay.yunhengzhizao.cn/live/THSA_HD5M', 'port': '1935', 'name': 
'live-THSAHD5M'}] + outlist=[] + for sourss in txtSource : + source_url = sourss['url'] + vid = cv2.VideoCapture(source_url) + if vid.isOpened(): + outlist.append( sourss ) + return outlist + #print('##line65:',txtSource) + + + +if __name__=='__main__': + platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo' + sourceFile='../config/source.txt' + offlineFile='../mintors/offlines/doneCodes.txt' + jsonfile='../SendLog/platformQuery.json' + txtSource=update_websource_offAndLive(platform_query_url,sourceFile,offlineFile,jsonfile=jsonfile) + print(txtSource) diff --git a/utils/google_app_engine/Dockerfile b/utils/google_app_engine/Dockerfile new file mode 100644 index 0000000..0155618 --- /dev/null +++ b/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. gunicorn must be declared as +# a dependency in requirements.txt. 
+CMD gunicorn -b :$PORT main:app diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 0000000..5fcc305 --- /dev/null +++ b/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==18.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..ac29d10 --- /dev/null +++ b/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 \ No newline at end of file diff --git a/utils/google_utils.py b/utils/google_utils.py new file mode 100644 index 0000000..0a7ca3b --- /dev/null +++ b/utils/google_utils.py @@ -0,0 +1,122 @@ +# Google utils: https://cloud.google.com/storage/docs/reference/libraries + +import os +import platform +import subprocess +import time +from pathlib import Path + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def attempt_download(file, repo='ultralytics/yolov5'): + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '').lower()) + + if not file.exists(): + try: + response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] + tag = response['tag_name'] # i.e. 
def attempt_download(file, repo='ultralytics/yolov5'):
    # Attempt file download if it does not exist locally.
    file = Path(str(file).strip().replace("'", '').lower())

    if not file.exists():
        try:
            response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json()  # github api
            assets = [x['name'] for x in response['assets']]  # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...]
            tag = response['tag_name']  # i.e. 'v1.0'
        except Exception:  # fallback plan (was a bare `except:`; keep KeyboardInterrupt/SystemExit uncaught)
            assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']
            tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]

        name = file.name
        if name in assets:
            msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'
            redundant = False  # second download option
            try:  # GitHub
                url = f'https://github.com/{repo}/releases/download/{tag}/{name}'
                print(f'Downloading {url} to {file}...')
                torch.hub.download_url_to_file(url, file)
                assert file.exists() and file.stat().st_size > 1E6  # check
            except Exception as e:  # GCP mirror
                print(f'Download error: {e}')
                assert redundant, 'No secondary mirror'
                url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'
                print(f'Downloading {url} to {file}...')
                os.system(f'curl -L {url} -o {file}')  # torch.hub.download_url_to_file(url, weights)
            finally:
                if not file.exists() or file.stat().st_size < 1E6:  # check
                    file.unlink(missing_ok=True)  # remove partial downloads
                    print(f'ERROR: Download failure: {msg}')
                print('')
                return


def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download()
    t = time.time()
    file = Path(file)
    cookie = Path('cookie')  # gdrive cookie
    print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='')
    file.unlink(missing_ok=True)  # remove existing file
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Attempt file download
    out = "NUL" if platform.system() == "Windows" else "/dev/null"
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):  # large file needs a confirm token
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:  # small file
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)  # execute, capture return
    cookie.unlink(missing_ok=True)  # remove existing cookie

    # Error check
    if r != 0:
        file.unlink(missing_ok=True)  # remove partial
        print('Download error ')  # raise Exception('Download error')
        return r

    # Unzip if archive
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        os.system(f'unzip -q {file}')  # unzip
        file.unlink()  # remove zip to free space

    print(f'Done ({time.time() - t:.1f}s)')
    return r


def get_token(cookie="./cookie"):
    # Extract the Google Drive download-confirmation token from a curl cookie file.
    with open(cookie) as f:
        for line in f:
            if "download" in line:
                return line.split()[-1]
    return ""

# def upload_blob(bucket_name, source_file_name, destination_blob_name):
#     # Uploads a file to a bucket
#     # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python
#
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(destination_blob_name)
#
#     blob.upload_from_filename(source_file_name)
#
#     print('File {} uploaded to {}.'.format(
#         source_file_name,
#         destination_blob_name))
#
#
# def download_blob(bucket_name, source_blob_name, destination_file_name):
#     # Uploads a blob from a bucket
#     storage_client = storage.Client()
#     bucket = storage_client.get_bucket(bucket_name)
#     blob = bucket.blob(source_blob_name)
#
#     blob.download_to_filename(destination_file_name)
#
#     print('Blob {} downloaded to
{}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000..9e78df1 --- /dev/null +++ b/utils/loss.py @@ -0,0 +1,216 @@ +# Loss functions + +import torch +import torch.nn as nn + +from utils.general import bbox_iou +from utils.torch_utils import is_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super(BCEBlurWithLogitsLoss, self).__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(FocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
class QFocalLoss(nn.Module):
    # Wraps Quality focal loss around an existing loss_fcn(),
    # i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(QFocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        pred_prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = torch.abs(true - pred_prob) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss


class ComputeLoss:
    # Compute YOLOv5 training losses (box regression, objectness, classification).
    def __init__(self, model, autobalance=False):
        super(ComputeLoss, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets, model
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..666b8c7 --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,223 @@ +# Model validation metrics + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from . import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. 
+ """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average 
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
    mpre = np.concatenate(([1.], precision, [0.]))

    # Compute the precision envelope (monotonically non-increasing)
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec


class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Return intersection-over-union (Jaccard index) of boxes.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), class, x1, y1, x2, y2
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 0].int()
        detection_classes = detections[:, 5].int()
        iou = general.box_iou(labels[:, 1:], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                # Resolve to one-to-one gt<->detection matches, best IoU first
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[gc, detection_classes[m1[j]]] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def matrix(self):
        # NOTE(review): this accessor is shadowed by the instance attribute
        # `self.matrix` assigned in __init__, so it is unreachable on instances;
        # kept as-is for interface compatibility (upstream later renamed it).
        return self.matrix

    def plot(self, save_dir='', names=()):
        try:
            import seaborn as sn

            array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6)  # normalize
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                       xticklabels=names + ['background FP'] if labels else "auto",
                       yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
        except Exception as e:
            pass  # best-effort plot; seaborn may be absent

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))


# Plots ----------------------------------------------------------------------------------------------------------------

def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
    # Precision-recall curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)


def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
    # Metric-confidence curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = py.mean(0)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
# NOTE(review): the diff header for utils/plots.py originally began here
# ("diff --git a/utils/plots.py b/utils/plots.py new file mode 100644
#  index 0000000..907091b --- /dev/null +++"); quarantined as a comment so the
# reconstructed units above are self-contained.
b/utils/plots.py @@ -0,0 +1,641 @@
# Plotting utils

import glob
import math
import os,sys
import random
from copy import copy
from pathlib import Path

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import yaml
from PIL import Image, ImageDraw, ImageFont
from scipy.signal import butter, filtfilt,savgol_filter

from utils.general import xywh2xyxy, xyxy2xywh
from utils.metrics import fitness

# Settings
matplotlib.rc('font', **{'size': 11})
#matplotlib.use('Agg') # for writing to files only

def smooth_outline(contours,p1,p2):
    # Smooth an OpenCV contour: Savitzky-Golay filter the x and y coordinate
    # sequences of contours[0] with window length p1 and polynomial order p2,
    # then write the smoothed coordinates back into a copy and return it.
    arcontours=np.array(contours)
    coors_x=arcontours[0,:,0,0]
    coors_y=arcontours[0,:,0,1]
    coors_x_smooth= savgol_filter(coors_x,p1,p2)
    coors_y_smooth= savgol_filter(coors_y,p1,p2)
    arcontours[0,:,0,0] = coors_x_smooth
    arcontours[0,:,0,1] = coors_y_smooth
    return arcontours
def smooth_outline_auto(contours):
    # Derive an odd Savitzky-Golay window length from the contour point count.
    cnt = len(contours[0])
    p1 = int(cnt/12)*2+1
    p2 =3
    # NOTE(review): the remainder of this function is corrupted/truncated in this
    # chunk — `if p10` is not valid Python, and `webs,ports,streamNames` appear to
    # belong to a different (missing) function. Recover the original source before use.
    if p10
    return webs,ports,streamNames


def get_label_array( color=None, label=None,outfontsize=None,fontpath="conf/platech.ttf"):
    # Render `label` (e.g. a CJK class name) onto a solid `color` background with
    # PIL and return it as a numpy BGR-order image patch, rescaled so its height
    # equals `outfontsize`.
    # Plots one bounding box on image 'im' using PIL
    fontsize = outfontsize
    font = ImageFont.truetype(fontpath, fontsize,encoding='utf-8')

    # NOTE(review): ImageFont.getsize was removed in Pillow >= 10 — this code
    # requires an older Pillow; confirm the pinned dependency.
    txt_width, txt_height = font.getsize(label)
    im = np.zeros((txt_height,txt_width,3),dtype=np.uint8)
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    draw.rectangle([0, 0 , txt_width, txt_height ], fill=tuple(color))
    draw.text(( 0 , -3 ), label, fill=(255, 255, 255), font=font)  # -3: trim top whitespace of the glyphs
    im_array = np.asarray(im)

    if outfontsize:
        scaley = outfontsize / txt_height
        im_array= cv2.resize(im_array,(0,0),fx = scaley ,fy =scaley)
    return im_array
def get_label_arrays(labelnames,colors,outfontsize=40,fontpath="conf/platech.ttf"):
    # Build one rendered label patch per class name; colors are cycled modulo 20.
    label_arraylist = []
    if len(labelnames) > len(colors):
        print('#####labelnames cnt > colors cnt#####')
    for ii,labelname in enumerate(labelnames):

        color = colors[ii%20]
        label_arraylist.append(get_label_array(color=color,label=labelname,outfontsize=outfontsize,fontpath=fontpath))

    return label_arraylist
def color_list():
    # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    def hex2rgb(h):
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))

    return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()]  # or BASE_ (8), CSS4_ (148), XKCD_ (949)


def hist2d(x, y, n=100):
    # 2d histogram used in labels.png and evolve.png
    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
    return np.log(hist[xidx, yidx])


def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
    # Zero-phase Butterworth low-pass filter of `data`.
    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
    def butter_lowpass(cutoff, fs, order):
        nyq = 0.5 * fs
        normal_cutoff = cutoff / nyq
        return butter(order, normal_cutoff, btype='low', analog=False)

    b, a = butter_lowpass(cutoff, fs, order=order)
    return filtfilt(b, a, data)  # forward-backward filter


# NOTE(review): the two triple-quoted blocks below are dead, commented-out code
# (an annotation-drawing snippet and an older plot_one_box_PIL); kept verbatim.
'''image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
pil_image = Image.fromarray(image)
draw = ImageDraw.Draw(pil_image)
font = ImageFont.truetype('./font/platech.ttf', 40, encoding='utf-8')
for info in infos:
    detect = info['bndbox']
    text = ','.join(list(info['attributes'].values()))
    temp = -50
    if info['name'] == 'vehicle':
        temp = 20
    draw.text((detect[0], detect[1] + temp), text, (0, 255, 255), font=font)
    if 'scores' in info:
        draw.text((detect[0], detect[3]), info['scores'], (0, 255, 0), font=font)
    if 'pscore' in info:
        draw.text((detect[2], detect[3]), str(round(info['pscore'],3)), (0, 255, 0), font=font)
image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
for info in infos:
    detect = info['bndbox']
    cv2.rectangle(image, (detect[0], detect[1]), (detect[2], detect[3]), (0, 255, 0), 1, cv2.LINE_AA)
return image'''

'''def plot_one_box_PIL(x, im, color=None, label=None, line_thickness=3):
    # Plots one bounding box on image 'im' using OpenCV
    assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
    tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))

    cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)

    if label:
        tf = max(tl - 1, 1) # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled

        im = Image.fromarray(im)
        draw = ImageDraw.Draw(im)
        font = ImageFont.truetype('./font/platech.ttf', t_size, encoding='utf-8')
        draw.text((c1[0], c1[1] - 2), label, (0, 255, 0), font=font)

        #cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
    return np.array(im) '''

def plot_one_box(x, im, color=None, label=None, line_thickness=3):
    # Plots one bounding box on image 'im' using OpenCV
    assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
    # (continuation of plot_one_box — its def line is in the preceding span)
    tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)

    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image 'im' using PIL
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
    draw.rectangle(box, width=line_thickness, outline=tuple(color))  # plot

    if label:
        fontsize = max(round(max(im.size) / 40), 12)
        font = ImageFont.truetype("../AIlib2/conf/platech.ttf", fontsize,encoding='utf-8')
        # NOTE(review): font.getsize was removed in Pillow >= 10 — requires an older Pillow.
        txt_width, txt_height = font.getsize(label)
        draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
        draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
    im_array = np.asarray(im)  # NOTE(review): unused local — return below re-converts

    return np.asarray(im)

def draw_painting_joint(box,img,label_array,score=0.5,color=None,font={ 'line_thickness':None,'boxLine_thickness':None, 'fontSize':None},socre_location="leftTop"):
    # Draw a detection on `img`: blit the pre-rendered class-name patch `label_array`
    # next to the box, draw the box outline, and print the score beside the patch.
    # `socre_location` (sic — typo kept, it is part of the public interface) chooses
    # whether the patch anchors at the box's top-left or bottom-left corner.
    # NOTE(review): `font` is a mutable default argument — it is only read here, but
    # it would be shared across calls if ever mutated.
    # If box[0] is a list/tuple/ndarray, box is the 4-point format
    # [(x0,y0),(x1,y1),(x2,y2),(x3,y3)]; otherwise the 2-point format [x0,y0,x1,y1].
    if isinstance(box[0], (list, tuple,np.ndarray ) ):
        ### First blit the class-name label patch into img
        lh, lw, lc = label_array.shape
        imh, imw, imc = img.shape
        if socre_location=='leftTop':
            x0 , y1 = box[0][0],box[0][1]
        elif socre_location=='leftBottom':
            x0,y1=box[3][0],box[3][1]
        else:
            print('plot.py line217 ,label_location:%s not implemented '%( socre_location ))
            sys.exit(0)

        x1 , y0 = x0 + lw , y1 - lh
        # Clamp the patch fully inside the image (order matters: y then x)
        if y0<0:y0=0;y1=y0+lh
        if y1>imh: y1=imh;y0=y1-lh
        if x0<0:x0=0;x1=x0+lw
        if x1>imw:x1=imw;x0=x1-lw
        img[y0:y1,x0:x1,:] = label_array
        pts_cls=[(x0,y0),(x1,y1) ]

        # Draw the quadrilateral outline
        box_tl= font['boxLine_thickness'] or round(0.002 * (imh + imw) / 2) + 1
        cv2.polylines(img, [box], True,color , box_tl)

        #### Draw the numeric score next to the class-name patch
        tl = font['line_thickness'] or round(0.002*(imh+imw)/2)+1#line/font thickness
        label = ' %.2f'%(score)
        tf = max(tl , 1) # font thickness
        fontScale = font['fontSize'] or tl * 0.33
        t_size = cv2.getTextSize(label, 0, fontScale=fontScale , thickness=tf)[0]


        #if socre_location=='leftTop':
        p1,p2= (pts_cls[1][0], pts_cls[0][1]),(pts_cls[1][0]+t_size[0],pts_cls[1][1])
        cv2.rectangle(img, p1 , p2, color, -1, cv2.LINE_AA)
        p3 = pts_cls[1][0],pts_cls[1][1]-(lh-t_size[1])//2

        cv2.putText(img, label,p3, 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
        return img
    else:#### 2-point format [x0,y0,x1,y1]
        # NOTE(review): bare except used to fall back from tensor to plain numbers.
        try:
            box = [int(xx.cpu()) for xx in box]
        except:
            box=[ int(x) for x in box]
        ### First blit the class-name label patch into img
        lh, lw, lc = label_array.shape
        imh, imw, imc = img.shape
        if socre_location=='leftTop':
            x0 , y1 = box[0:2]
        elif socre_location=='leftBottom':
            x0,y1=box[0],box[3]
        else:
            print('plot.py line217 ,socre_location:%s not implemented '%( socre_location ))
            sys.exit(0)
        x1 , y0 = x0 + lw , y1 - lh
        # Clamp the patch fully inside the image
        if y0<0:y0=0;y1=y0+lh
        if y1>imh: y1=imh;y0=y1-lh
        if x0<0:x0=0;x1=x0+lw
        if x1>imw:x1=imw;x0=x1-lw
        img[y0:y1,x0:x1,:] = label_array



        ### Draw the rectangle with the given color and thickness
        tl = font['line_thickness'] or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
        box_tl= font['boxLine_thickness'] or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
        c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
        cv2.rectangle(img, c1, c2, color, thickness=box_tl, lineType=cv2.LINE_AA)

        ### Draw the numeric score next to the class-name patch
        label = ' %.2f'%(score)
        tf = max(tl , 1) # font thickness
        fontScale = font['fontSize'] or tl * 0.33
        t_size = cv2.getTextSize(label, 0, fontScale=fontScale , thickness=tf)[0]

        if socre_location=='leftTop':
            c2 = c1[0]+ lw + t_size[0], c1[1] - lh
            cv2.rectangle(img, (int(box[0])+lw,int(box[1])) , c2, color, -1, cv2.LINE_AA)  # filled
            cv2.putText(img, label, (c1[0]+lw, c1[1] - (lh-t_size[1])//2 ), 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
        elif socre_location=='leftBottom':
            c2 = box[0]+ lw + t_size[0], box[3] - lh
            cv2.rectangle(img, (int(box[0])+lw,int(box[3])) , c2, color, -1, cv2.LINE_AA)  # filled
            cv2.putText(img, label, ( box[0] + lw, box[3] - (lh-t_size[1])//2 ), 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

        #print('#####line224 fontScale:',fontScale,' thickness:',tf,' line_thickness:',font['line_thickness'],' boxLine thickness:',box_tl)
        return img

def plot_wh_methods():  # from utils.plots import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), tight_layout=True)
    plt.plot(x, ya, '.-', label='YOLOv3')
    plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
    plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.grid()
    plt.legend()
    fig.savefig('comparison.png', dpi=200)


def output_to_target(output):
    # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
    targets = []
    for i, o in enumerate(output):
        for *box, conf, cls in o.cpu().numpy():
            targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])
    return np.array(targets)


def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    # Plot image grid with labels
    # (body of plot_images — its def line is in the preceding span)
    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    colors = color_list()  # list of colors
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        # Top-left corner of this image's tile in the mosaic
        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)  # CHW -> HWC
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]  # targets for this image only
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            labels = image_targets.shape[1] == 6  # labels if no conf column
            conf = None if labels else image_targets[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale_factor < 1:  # absolute coords need scale if image scales
                    boxes *= scale_factor
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = colors[cls % len(colors)]
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths:
            label = Path(paths[i]).name[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname:
        r = min(1280. / max(h, w) / ns, 1.0)  # ratio to limit image size
        mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
        # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))  # cv2 save
        Image.fromarray(mosaic).save(fname)  # PIL save
    return mosaic


def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
    # Plot LR simulating training for full epochs
    optimizer, scheduler = copy(optimizer), copy(scheduler)  # do not modify originals
    y = []
    for _ in range(epochs):
        scheduler.step()
        y.append(optimizer.param_groups[0]['lr'])
    plt.plot(y, '.-', label='LR')
    plt.xlabel('epoch')
    plt.ylabel('LR')
    plt.grid()
    plt.xlim(0, epochs)
    plt.ylim(0)
    plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
    plt.close()


def plot_test_txt():  # from utils.plots import *; plot_test()
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]

    fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    plt.savefig('hist2d.png', dpi=300)

    fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    plt.savefig('hist1d.png', dpi=200)


def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    # (continuation of plot_targets_txt — its def line is in the preceding span)
    fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    plt.savefig('targets.jpg', dpi=200)


def plot_study_txt(path='', x=None):  # from utils.plots import *; plot_study_txt()
    # Plot study.txt generated by test.py
    fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
    # ax = ax.ravel()

    fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
    # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:
    for f in sorted(Path(path).glob('study*.txt')):
        y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
        x = np.arange(y.shape[1]) if x is None else np.array(x)
        s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
        # for i in range(7):
        #     ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
        #     ax[i].set_title(s[i])

        j = y[3].argmax() + 1  # plot only up to (and including) the best mAP point
        ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8,
                 label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))

    # EfficientDet reference curve (speed vs COCO AP)
    ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
             'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')

    ax2.grid(alpha=0.2)
    ax2.set_yticks(np.arange(20, 60, 5))
    ax2.set_xlim(0, 57)
    ax2.set_ylim(30, 55)
    ax2.set_xlabel('GPU Speed (ms/img)')
    ax2.set_ylabel('COCO AP val')
    ax2.legend(loc='lower right')
    plt.savefig(str(Path(path).name) + '.png', dpi=300)


def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
    # plot dataset labels
    print('Plotting labels... ')
    c, b = labels[:, 0], labels[:, 1:].transpose()  # classes, boxes
    nc = int(c.max() + 1)  # number of classes
    colors = color_list()
    x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])

    # seaborn correlogram
    sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))
    plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)
    plt.close()

    # matplotlib labels
    matplotlib.use('svg')  # faster
    ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()
    ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
    ax[0].set_ylabel('instances')
    if 0 < len(names) < 30:
        ax[0].set_xticks(range(len(names)))
        ax[0].set_xticklabels(names, rotation=90, fontsize=10)
    else:
        ax[0].set_xlabel('classes')
    sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)
    sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)

    # rectangles
    # NOTE(review): `labels` is mutated in place here — callers must pass a copy
    # if they still need the original values.
    labels[:, 1:3] = 0.5  # center
    labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000
    img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)
    for cls, *box in labels[:1000]:
        ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10])  # plot
    ax[1].imshow(img)
    ax[1].axis('off')

    for a in [0, 1, 2, 3]:
        for s in ['top', 'right', 'left', 'bottom']:
            ax[a].spines[s].set_visible(False)

    plt.savefig(save_dir / 'labels.jpg', dpi=200)
    matplotlib.use('Agg')
    plt.close()

    # loggers
    # NOTE(review): bug — `loggers.items()` raises AttributeError when called with the
    # default loggers=None; the `or {}` only guards an empty dict, not None. Should be
    # guarded with `if loggers:` (or `(loggers or {}).items()`).
    for k, v in loggers.items() or {}:
        if k == 'wandb' and v:
            v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)


def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.plots import *; plot_evolution()
    # Plot hyperparameter evolution results in evolve.txt
    with open(yaml_file) as f:
        hyp = yaml.load(f, Loader=yaml.SafeLoader)
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    # weights = (f - f.min()) ** 2  # for weighted results
    # (continuation of plot_evolution — its def line is in the preceding span)
    plt.figure(figsize=(10, 12), tight_layout=True)
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]  # hyperparameter columns start after the 7 result columns
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(6, 5, i + 1)
        plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
        plt.plot(mu, f.max(), 'k+', markersize=15)
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        if i % 5 != 0:
            plt.yticks([])
        print('%15s: %.3g' % (k, mu))
    plt.savefig('evolve.png', dpi=200)
    print('\nPlot saved as evolve.png')


def profile_idetection(start=0, stop=0, labels=(), save_dir=''):
    # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection()
    ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel()
    s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS']
    files = list(Path(save_dir).glob('frames*.txt'))
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, ndmin=2).T[:, 90:-30]  # clip first and last rows
            n = results.shape[1]  # number of rows
            x = np.arange(start, min(stop, n) if stop else n)
            results = results[:, x]
            # t must be computed before results[0] is overwritten with the frame index
            t = (results[0] - results[0].min())  # set t0=0s
            results[0] = x
            for i, a in enumerate(ax):
                if i < len(results):
                    label = labels[fi] if len(labels) else f.stem.replace('frames_', '')
                    a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5)
                    a.set_title(s[i])
                    a.set_xlabel('time (s)')
                    # if fi == len(files) - 1:
                    #     a.set_ylim(bottom=0)
                    for side in ['top', 'right']:
                        a.spines[side].set_visible(False)
                else:
                    a.remove()
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))

    ax[1].legend()
    plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200)


def plot_results_overlay(start=0, stop=0):  # from utils.plots import *; plot_results_overlay()
    # Plot training 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95']  # legends
    t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:  # overlay the train (j=i) and val (j=i+5) series
                y = results[j, x]
                ax[i].plot(x, y, marker='.', label=s[j])
                # y_smooth = butter_lowpass_filtfilt(y)
                # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])

            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.savefig(f.replace('.txt', '.png'), dpi=200)


def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
    # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')
    fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
    ax = ax.ravel()
    s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']
    if bucket:
        # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
        files = ['results%g.txt' % x for x in id]
        # NOTE(review): shell command built by string formatting and run via os.system —
        # command injection risk if `bucket`/`id` ever come from untrusted input;
        # prefer subprocess.run([...], shell=False).
        c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
        os.system(c)
    else:
        files = list(Path(save_dir).glob('results*.txt'))
    assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
    for fi, f in enumerate(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # don't show zero loss values
                    # y /= y[0]  # normalize
                label = labels[fi] if len(labels) else f.stem
                ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                # if i in [5, 6, 7]:  # share train and val loss y axes
                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except Exception as e:
            print('Warning: Plotting error for %s; %s' % (f, e))

    ax[1].legend()
    fig.savefig(Path(save_dir) / 'results.png', dpi=200)
diff --git a/utils/torch_utils.py b/utils/torch_utils.py
new file mode 100644
index 0000000..9991e5e
--- /dev/null
+++ b/utils/torch_utils.py
@@ -0,0 +1,303 @@
# YOLOv5 PyTorch utils

import datetime
import logging
import math
import os
import platform
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from pathlib import Path

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision

try:
    import thop  # for FLOPS computation
except ImportError:
    thop = None
logger = logging.getLogger(__name__)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.
    """
    # Non-master ranks wait at the first barrier; the master runs the guarded
    # block, then releases everyone at the second barrier.
    if local_rank not in [-1, 0]:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()


def init_torch_seeds(seed=0):
    # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
    torch.manual_seed(seed)
    if seed == 0:  # slower, more reproducible
        cudnn.benchmark, cudnn.deterministic = False, True
    else:  # faster, less reproducible
        cudnn.benchmark, cudnn.deterministic = True, False


def date_modified(path=__file__):
    # return human-readable file modification date, i.e. '2021-3-26'
    t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
    return f'{t.year}-{t.month}-{t.day}'


def git_describe(path=Path(__file__).parent):  # path must be a directory
    # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
    s = f'git -C {path} describe --tags --long --always'
    try:
        return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
    except subprocess.CalledProcessError as e:
        return ''  # not a git repository


def select_device(device='', batch_size=None):
    # Pick the torch device from a string spec and log what was selected.
    # device = 'cpu' or '0' or '0,1,2,3'
    s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
    cpu = device.lower() == 'cpu'
    if cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
    elif device:  # non-cpu device requested
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability

    cuda = not cpu and torch.cuda.is_available()
    if cuda:
        n = torch.cuda.device_count()
        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
        space = ' ' * len(s)
        for i, d in enumerate(device.split(',') if device else range(n)):
            p = torch.cuda.get_device_properties(i)
            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
    else:
        s += 'CPU\n'

    logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
    return torch.device('cuda:0' if cuda else 'cpu')


def time_synchronized():
    # pytorch-accurate time: sync pending CUDA work before reading the clock
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()


def profile(x, ops, n=100, device=None):
    # profile a pytorch module or list of modules. Example usage:
    #     x = torch.randn(16, 3, 640, 640)  # input
    #     m1 = lambda x: x * torch.sigmoid(x)
    #     m2 = nn.SiLU()
    #     profile(x, [m1, m2], n=100)  # profile speed over 100 iterations

    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
    for m in ops if isinstance(ops, list) else [ops]:
        m = m.to(device) if hasattr(m, 'to') else m  # device
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m  # type
        dtf, dtb, t = 0., 0., [0., 0., 0.]
        # (continuation of profile — accumulators for forward/backward ms per op)
        # dt forward, backward
        try:
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPS
        except:
            flops = 0  # thop missing or op unsupported — report 0 rather than fail

        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except:  # no backward method
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n  # ms per op forward
            dtb += (t[2] - t[1]) * 1000 / n  # ms per op backward

        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0  # parameters
        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')


def is_parallel(model):
    # True if model is wrapped in (Distributed)DataParallel
    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)


def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}


def initialize_weights(model):
    # Set BatchNorm eps/momentum and enable inplace activations; conv init left to default
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
            m.inplace = True


def find_modules(model, mclass=nn.Conv2d):
    # Finds layer indices matching module class 'mclass'
    return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]


def sparsity(model):
    # Return global model sparsity (fraction of zero-valued parameters)
    a, b = 0., 0.
    for p in model.parameters():
        a += p.numel()
        b += (p == 0).sum()
    return b / a


def prune(model, amount=0.3):
    # Prune model to requested global sparsity
    import torch.nn.utils.prune as prune
    print('Pruning model... ', end='')
    for name, m in model.named_modules():
        if isinstance(m, nn.Conv2d):
            prune.l1_unstructured(m, name='weight', amount=amount)  # prune
            prune.remove(m, 'weight')  # make permanent
    print(' %.3g global sparsity' % sparsity(model))


def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = nn.Conv2d(conv.in_channels,
                          conv.out_channels,
                          kernel_size=conv.kernel_size,
                          stride=conv.stride,
                          padding=conv.padding,
                          groups=conv.groups,
                          bias=True).requires_grad_(False).to(conv.weight.device)

    # prepare filters: fold BN scale into the conv weights
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))

    # prepare spatial bias: fold BN shift (and running mean) into the conv bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv
def model_info(model, verbose=False, img_size=640):
    """Log a model summary. img_size may be an int or an [h, w] list."""
    n_p = sum(x.numel() for x in model.parameters())  # total parameters
    n_g = sum(x.numel() for x in model.parameters() if x.requires_grad)  # trainable parameters
    if verbose:
        print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
                  (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))

    try:  # FLOPS estimate via thop, best effort only
        from thop import profile
        stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
        img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device)
        flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2  # stride GFLOPS
        img_size = img_size if isinstance(img_size, list) else [img_size, img_size]
        fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride)
    except (ImportError, Exception):
        fs = ''

    logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")


def load_classifier(name='resnet101', n=2):
    """Load a pretrained torchvision classifier reshaped to n output classes."""
    model = torchvision.models.__dict__[name](pretrained=True)
    # Replace the final fully-connected layer's parameters with a zeroed n-class head.
    in_features = model.fc.weight.shape[1]
    model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    model.fc.weight = nn.Parameter(torch.zeros(n, in_features), requires_grad=True)
    model.fc.out_features = n
    return model


def scale_img(img, ratio=1.0, same_shape=False, gs=32):  # img(bs,3,h,w)
    """Rescale a batch of images by ratio; unless same_shape, pad up to a gs multiple."""
    if ratio == 1.0:
        return img
    h, w = img.shape[2:]
    new_size = (int(h * ratio), int(w * ratio))
    img = F.interpolate(img, size=new_size, mode='bilinear', align_corners=False)
    if not same_shape:  # pad/crop to a gs-aligned shape
        h, w = (math.ceil(dim * ratio / gs) * gs for dim in (h, w))
    return F.pad(img, [0, w - new_size[1], 0, h - new_size[0]], value=0.447)  # pad with imagenet mean


def copy_attr(a, b, include=(), exclude=()):
    """Copy public attributes from b to a, honoring include/exclude filters."""
    for key, value in b.__dict__.items():
        if len(include) and key not in include:
            continue
        if key.startswith('_') or key in exclude:
            continue
        setattr(a, key, value)
class ModelEMA:
    """Model Exponential Moving Average, after rwightman/pytorch-image-models.

    Keeps a smoothed copy of the model state_dict (parameters and buffers),
    analogous to tf.train.ExponentialMovingAverage. Sensitive to where it is
    created relative to GPU assignment and distributed wrappers.
    """

    def __init__(self, model, decay=0.9999, updates=0):
        # FP32 EMA copy; unwrap DataParallel so keys match a bare model.
        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()
        self.updates = updates  # number of EMA updates applied so far
        # Decay ramps up exponentially so early epochs track the model closely.
        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))
        for p in self.ema.parameters():
            p.requires_grad_(False)

    def update(self, model):
        """Blend the current model weights into the EMA copy."""
        with torch.no_grad():
            self.updates += 1
            d = self.decay(self.updates)
            msd = model.module.state_dict() if is_parallel(model) else model.state_dict()
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v *= d
                    v += (1. - d) * msd[k].detach()

    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
        """Copy plain attributes (not weights) from model onto the EMA copy."""
        copy_attr(self.ema, model, include, exclude)


def create_dataset_artifact(opt):
    """Upload the dataset described by opt.data as a W&B dataset artifact."""
    with open(opt.data) as fh:
        data_dict = yaml.load(fh, Loader=yaml.SafeLoader)  # data config dict
    logger = WandbLogger(opt, '', None, data_dict, job_type='Dataset Creation')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
    parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
    parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project')
    opt = parser.parse_args()
    opt.resume = False  # Explicitly disallow resume check for dataset upload job
    create_dataset_artifact(opt)
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'  # mirrors the module-level constant


def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
    """Drop len(prefix) leading characters (callers check startswith first)."""
    cut = len(prefix)
    return from_string[cut:]


def check_wandb_config_file(data_config_file):
    """Prefer the '<name>_wandb.<ext>' sibling config when it exists on disk."""
    wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1))
    return wandb_config if Path(wandb_config).is_file() else data_config_file


def get_run_info(run_path):
    """Split a wandb-artifact:// run path into (run_id, project, model artifact name)."""
    path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
    run_id = path.stem
    project = path.parent.stem
    return run_id, project, 'run_' + run_id + '_model'


def check_wandb_resume(opt):
    """Return True when resuming from a W&B artifact, downloading weights on DDP workers."""
    if opt.global_rank not in [-1, 0]:
        process_wandb_config_ddp_mode(opt)
    if isinstance(opt.resume, str):
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            if opt.global_rank not in [-1, 0]:  # non-primary DDP ranks fetch weights themselves
                run_id, project, model_artifact_name = get_run_info(opt.resume)
                api = wandb.Api()
                artifact = api.artifact(project + '/' + model_artifact_name + ':latest')
                modeldir = artifact.download()
                opt.weights = str(Path(modeldir) / "last.pt")
            return True
    return None


def process_wandb_config_ddp_mode(opt):
    """Resolve wandb-artifact:// dataset paths locally and rewrite opt.data for DDP workers."""
    with open(opt.data) as fh:
        data_dict = yaml.load(fh, Loader=yaml.SafeLoader)  # data dict
    train_dir, val_dir = None, None
    if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
        train_dir = train_artifact.download()
        train_path = Path(train_dir) / 'data/images/'
        data_dict['train'] = str(train_path)

    if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
        api = wandb.Api()
        val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
        val_dir = val_artifact.download()
        val_path = Path(val_dir) / 'data/images/'
        data_dict['val'] = str(val_path)
    if train_dir or val_dir:
        ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
        with open(ddp_data_path, 'w') as fh:
            yaml.dump(data_dict, fh)
        opt.data = ddp_data_path
class WandbLogger():
    """Orchestrates Weights & Biases logging for YOLOv5 training / dataset jobs."""

    def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
        """Start (or attach to) a wandb run and, for training jobs, set up artifacts."""
        self.job_type = job_type
        self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict
        # A single wandb.init call would be tidier, but useful config data is
        # overwritten inside this logger's own wandb.init call.
        if isinstance(opt.resume, str):  # checks resume from artifact
            if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
                run_id, project, model_artifact_name = get_run_info(opt.resume)
                model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
                assert wandb, 'install wandb to resume wandb runs'
                # Resume wandb-artifact:// runs here to avoid clobbering wandb.config
                self.wandb_run = wandb.init(id=run_id, project=project, resume='allow')
                opt.resume = model_artifact_name
        elif self.wandb:
            self.wandb_run = wandb.init(config=opt,
                                        resume="allow",
                                        project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
                                        name=name,
                                        job_type=job_type,
                                        id=run_id) if not wandb.run else wandb.run
        if self.wandb_run:
            if self.job_type == 'Training':
                if not opt.resume:
                    wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
                    # Persist info needed when resuming from artifacts.
                    self.wandb_run.config.opt = vars(opt)
                    self.wandb_run.config.data_dict = wandb_data_dict
                self.data_dict = self.setup_training(opt, data_dict)
            if self.job_type == 'Dataset Creation':
                self.data_dict = self.check_and_upload_dataset(opt)
        else:
            prefix = colorstr('wandb: ')
            print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")

    def check_and_upload_dataset(self, opt):
        """Upload the dataset as an artifact and return the rewritten data dict."""
        assert wandb, 'Install wandb to upload dataset'
        check_dataset(self.data_dict)
        config_path = self.log_dataset_artifact(opt.data,
                                                opt.single_cls,
                                                'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
        print("Created dataset config file ", config_path)
        with open(config_path) as fh:
            wandb_data_dict = yaml.load(fh, Loader=yaml.SafeLoader)
        return wandb_data_dict

    def setup_training(self, opt, data_dict):
        """Prepare logging state, restore opt from a resumed run, download dataset artifacts."""
        self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16  # logging constants
        self.bbox_interval = opt.bbox_interval
        if isinstance(opt.resume, str):
            modeldir, _ = self.download_model_artifact(opt)
            if modeldir:
                self.weights = Path(modeldir) / "last.pt"
                config = self.wandb_run.config
                opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
                    self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \
                    config.opt['hyp']
            data_dict = dict(self.wandb_run.config.data_dict)  # no config file needed to resume
        if 'val_artifact' not in self.__dict__:  # with --upload_dataset the artifact already exists
            self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
                                                                                           opt.artifact_alias)
            self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
                                                                                       opt.artifact_alias)
            self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None
            if self.train_artifact_path is not None:
                train_path = Path(self.train_artifact_path) / 'data/images/'
                data_dict['train'] = str(train_path)
            if self.val_artifact_path is not None:
                val_path = Path(self.val_artifact_path) / 'data/images/'
                data_dict['val'] = str(val_path)
                self.val_table = self.val_artifact.get("val")
                self.map_val_table_path()
        if self.val_artifact is not None:
            self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
            self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
        if opt.bbox_interval == -1:
            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
        return data_dict

    def download_dataset_artifact(self, path, alias):
        """Download a wandb-artifact:// dataset; return (dir, artifact) or (None, None)."""
        if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
            dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
            assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
            datadir = dataset_artifact.download()
            return datadir, dataset_artifact
        return None, None

    def download_model_artifact(self, opt):
        """Download the model artifact being resumed; return (dir, artifact) or (None, None)."""
        if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
            model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
            assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
            modeldir = model_artifact.download()
            epochs_trained = model_artifact.metadata.get('epochs_trained')
            total_epochs = model_artifact.metadata.get('total_epochs')
            assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % (
                total_epochs)
            return modeldir, model_artifact
        return None, None

    def log_model(self, path, opt, epoch, fitness_score, best_model=False):
        """Upload last.pt as a model artifact tagged with training metadata."""
        model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
            'original_url': str(path),
            'epochs_trained': epoch + 1,
            'save period': opt.save_period,
            'project': opt.project,
            'total_epochs': opt.epochs,
            'fitness_score': fitness_score
        })
        model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
        wandb.log_artifact(model_artifact,
                           aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
        print("Saving model artifact on epoch ", epoch + 1)

    def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
        """Build train/val dataset artifacts and write a wandb-aware data config; return its path."""
        with open(data_file) as fh:
            data = yaml.load(fh, Loader=yaml.SafeLoader)  # data dict
        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
        names = {k: v for k, v in enumerate(names)}  # class index -> name
        self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
            data['train']), names, name='train') if data.get('train') else None
        self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
            data['val']), names, name='val') if data.get('val') else None
        if data.get('train'):
            data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
        if data.get('val'):
            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
        path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1))  # updated data.yaml path
        data.pop('download', None)  # dataset is now served from wandb, not the download URL
        with open(path, 'w') as fh:
            yaml.dump(data, fh)

        if self.job_type == 'Training':  # builds correct artifact pipeline graph
            self.wandb_run.use_artifact(self.val_artifact)
            self.wandb_run.use_artifact(self.train_artifact)
            self.val_artifact.wait()
            self.val_table = self.val_artifact.get('val')
            self.map_val_table_path()
        else:
            self.wandb_run.log_artifact(self.train_artifact)
            self.wandb_run.log_artifact(self.val_artifact)
        return path

    def map_val_table_path(self):
        """Build a {image name -> row id} map over the validation table."""
        self.val_table_map = {}
        print("Mapping dataset")
        for i, data in enumerate(tqdm(self.val_table.data)):
            self.val_table_map[data[3]] = data[0]

    def create_dataset_table(self, dataset, class_to_id, name='dataset'):
        """Package a LoadImagesAndLabels dataset (files + a ground-truth table) as an artifact."""
        # TODO: explore multiprocessing to split this loop; essential for speeding up the logging
        artifact = wandb.Artifact(name=name, type="dataset")
        img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
        img_files = tqdm(dataset.img_files) if not img_files else img_files
        for img_file in img_files:
            if Path(img_file).is_dir():
                artifact.add_dir(img_file, name='data/images')
                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
                artifact.add_dir(labels_path, name='data/labels')
            else:
                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
                label_file = Path(img2label_paths([img_file])[0])
                artifact.add_file(str(label_file),
                                  name='data/labels/' + label_file.name) if label_file.exists() else None
        table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
        for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
            height, width = shapes[0]
            labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height])
            box_data, img_classes = [], {}
            for cls, *xyxy in labels[:, 1:].tolist():
                cls = int(cls)
                box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                                 "class_id": cls,
                                 "box_caption": "%s" % (class_to_id[cls]),
                                 "scores": {"acc": 1},
                                 "domain": "pixel"})
                img_classes[cls] = class_to_id[cls]
            boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes),
                           Path(paths).name)
        artifact.add(table, name)
        return artifact

    def log_training_progress(self, predn, path, names):
        """Append one validation image's predictions (conf >= 0.25) to the result table."""
        if self.val_table and self.result_table:
            class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
            box_data = []
            total_conf = 0
            for *xyxy, conf, cls in predn.tolist():
                if conf >= 0.25:
                    box_data.append(
                        {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                         "class_id": int(cls),
                         "box_caption": "%s %.3f" % (names[cls], conf),
                         "scores": {"class_score": conf},
                         "domain": "pixel"})
                    total_conf = total_conf + conf
            boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
            id = self.val_table_map[Path(path).name]
            self.result_table.add_data(self.current_epoch,
                                       id,
                                       wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
                                       total_conf / max(1, len(box_data))
                                       )

    def log(self, log_dict):
        """Stage key/value pairs to be flushed at end_epoch."""
        if self.wandb_run:
            for key, value in log_dict.items():
                self.log_dict[key] = value

    def end_epoch(self, best_result=False):
        """Flush staged metrics and, when present, upload the epoch's result artifact."""
        if self.wandb_run:
            wandb.log(self.log_dict)
            self.log_dict = {}
            if self.result_artifact:
                train_results = wandb.JoinedTable(self.val_table, self.result_table, "id")
                self.result_artifact.add(train_results, 'result')
                wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch),
                                                                  ('best' if best_result else '')])
                self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"])
                self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")

    def finish_run(self):
        """Flush any remaining metrics and close the wandb run."""
        if self.wandb_run:
            if self.log_dict:
                wandb.log(self.log_dict)
            wandb.run.finish()
programmically getting the GPU utilization from NVIDA GPUs using nvidia-smi +# +# Author: Anders Krogh Mortensen (anderskm) +# Date: 16 January 2017 +# Web: https://github.com/anderskm/gputil +# +# LICENSE +# +# MIT License +# +# Copyright (c) 2017 anderskm +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
from subprocess import Popen, PIPE
try:
    # distutils was removed in Python 3.12; keep a guarded import for compatibility.
    from distutils import spawn
except ImportError:
    spawn = None
import os
import math
import random
import time
import sys
import platform
import subprocess
import numpy as np


__version__ = '1.4.0'


class GPU:
    """Snapshot of a single NVIDIA GPU as reported by nvidia-smi."""

    def __init__(self, ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu):
        self.id = ID
        self.uuid = uuid
        self.load = load
        self.memoryUtil = float(memoryUsed) / float(memoryTotal)  # used fraction of total memory
        self.memoryTotal = memoryTotal
        self.memoryUsed = memoryUsed
        self.memoryFree = memoryFree
        self.driver = driver
        self.name = gpu_name
        self.serial = serial
        self.display_mode = display_mode
        self.display_active = display_active
        self.temperature = temp_gpu

    def __str__(self):
        return str(self.__dict__)


class GPUProcess:
    """One compute process running on a GPU, plus its owning user."""

    def __init__(self, pid, processName, gpuId, gpuUuid, gpuName, usedMemory,
                 uid, uname):
        self.pid = pid
        self.processName = processName
        self.gpuId = gpuId
        self.gpuUuid = gpuUuid
        self.gpuName = gpuName
        self.usedMemory = usedMemory
        self.uid = uid
        self.uname = uname

    def __str__(self):
        return str(self.__dict__)


def safeFloatCast(strNumber):
    """Parse a float, returning NaN instead of raising on bad input."""
    try:
        number = float(strNumber)
    except ValueError:
        number = float('nan')
    return number


def getNvidiaSmiCmd():
    """Return the platform-appropriate nvidia-smi command/path."""
    if platform.system() == "Windows":
        # On Windows, fall back to the default NVSMI install location when
        # nvidia-smi is not on PATH.
        nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive']
    else:
        nvidia_smi = "nvidia-smi"
    return nvidia_smi


def getGPUs():
    """Query nvidia-smi once and return a list of GPU objects ([] on failure).

    Fixes the original implementation, which invoked nvidia-smi twice (a
    leftover Popen+communicate before subprocess.run) and ran a dead
    per-index parsing loop whose results were immediately overwritten.
    """
    nvidia_smi = getNvidiaSmiCmd()
    try:
        p = subprocess.run([
            nvidia_smi,
            "--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu",
            "--format=csv,noheader,nounits"
        ], stdout=subprocess.PIPE, encoding='utf8')
        stdout = p.stdout
    except Exception:  # nvidia-smi missing or failed to run
        return []
    # Last element after the split is an empty trailing line.
    lines = stdout.split(os.linesep)
    GPUs = []
    for line in lines[:-1]:
        vals = line.split(', ')
        deviceIds = int(vals[0])
        uuid = vals[1]
        gpuUtil = safeFloatCast(vals[2]) / 100
        memTotal = safeFloatCast(vals[3])
        memUsed = safeFloatCast(vals[4])
        memFree = safeFloatCast(vals[5])
        driver = vals[6]
        gpu_name = vals[7]
        serial = vals[8]
        display_active = vals[9]
        display_mode = vals[10]
        temp_gpu = safeFloatCast(vals[11])
        GPUs.append(GPU(deviceIds, uuid, gpuUtil, memTotal, memUsed, memFree, driver, gpu_name,
                        serial, display_mode, display_active, temp_gpu))
    return GPUs  # (deviceIds, gpuUtil, memUtil)


def getGPUProcesses():
    """Get all gpu compute processes ([] when nvidia-smi is unavailable)."""
    global gpuUuidToIdMap
    gpuUuidToIdMap = {}
    try:
        gpus = getGPUs()
        for gpu in gpus:
            gpuUuidToIdMap[gpu.uuid] = gpu.id
        del gpus
    except Exception:
        pass

    nvidia_smi = getNvidiaSmiCmd()
    try:
        p = subprocess.run([
            nvidia_smi,
            "--query-compute-apps=pid,process_name,gpu_uuid,gpu_name,used_memory",
            "--format=csv,noheader,nounits"
        ], stdout=subprocess.PIPE, encoding='utf8')
        stdout = p.stdout
    except Exception:
        return []
    lines = stdout.split(os.linesep)
    processes = []
    for line in lines[:-1]:
        vals = line.split(', ')
        pid = int(vals[0])
        processName = vals[1]
        gpuUuid = vals[2]
        gpuName = vals[3]
        usedMemory = safeFloatCast(vals[4])
        # Fix: original did a raising dict lookup followed by a dead None
        # check; unknown uuids now map to -1 as evidently intended.
        gpuId = gpuUuidToIdMap.get(gpuUuid, -1)

        # Resolve the uid and username owning the pid (best effort).
        try:
            p = subprocess.run(['ps', f'-p{pid}', '-oruid=,ruser='],
                               stdout=subprocess.PIPE, encoding='utf8')
            uid, uname = p.stdout.split()
            uid = int(uid)
        except Exception:
            uid, uname = -1, ''

        processes.append(GPUProcess(pid, processName, gpuId, gpuUuid,
                                    gpuName, usedMemory, uid, uname))
    return processes
def getAvailable(order='first', limit=1, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]):
    """Return ids of available GPUs, sorted per `order`, at most `limit` of them.

    order: first | last | random | load | memory
      first  -> lowest id (default);   last   -> highest id
      random -> random available GPU;  load   -> lowest utilization
      memory -> lowest memory utilization
    """
    all_gpus = getGPUs()
    flags = getAvailability(all_gpus, maxLoad=maxLoad, maxMemory=maxMemory, memoryFree=memoryFree,
                            includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID)
    # Keep only the GPUs flagged as available.
    candidates = [gpu for gpu, flag in zip(all_gpus, flags) if flag == 1]
    if order == 'first':
        candidates.sort(key=lambda g: float('inf') if math.isnan(g.id) else g.id, reverse=False)
    elif order == 'last':
        candidates.sort(key=lambda g: float('-inf') if math.isnan(g.id) else g.id, reverse=True)
    elif order == 'random':
        candidates = [candidates[i] for i in random.sample(range(0, len(candidates)), len(candidates))]
    elif order == 'load':
        candidates.sort(key=lambda g: float('inf') if math.isnan(g.load) else g.load, reverse=False)
    elif order == 'memory':
        candidates.sort(key=lambda g: float('inf') if math.isnan(g.memoryUtil) else g.memoryUtil, reverse=False)
    # Cap at `limit` (fewer are returned when fewer are available).
    candidates = candidates[0:min(limit, len(candidates))]
    return [gpu.id for gpu in candidates]


def getAvailability(GPUs, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]):
    """Return a 1/0 availability flag per GPU, applying load/memory/exclusion filters."""
    flags = []
    for gpu in GPUs:
        free_ok = gpu.memoryFree >= memoryFree
        load_ok = gpu.load < maxLoad or (includeNan and math.isnan(gpu.load))
        mem_ok = gpu.memoryUtil < maxMemory or (includeNan and math.isnan(gpu.memoryUtil))
        not_excluded = (gpu.id not in excludeID) and (gpu.uuid not in excludeUUID)
        flags.append(1 if free_ok and load_ok and mem_ok and not_excluded else 0)
    return flags


def getFirstAvailable(order='first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900, verbose=False, includeNan=False, excludeID=[], excludeUUID=[]):
    """Poll for an available GPU up to `attempts` times, sleeping `interval` s between tries.

    Raises RuntimeError when no GPU becomes available.
    """
    for attempt in range(attempts):
        if verbose:
            print('Attempting (' + str(attempt + 1) + '/' + str(attempts) + ') to locate available GPU.')
        available = getAvailable(order=order, limit=1, maxLoad=maxLoad, maxMemory=maxMemory,
                                 includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID)
        if available:
            if verbose:
                print('GPU ' + str(available) + ' located!')
            break
        if attempt != attempts - 1:  # sleep between attempts, not after the last one
            time.sleep(interval)
    if not available:
        raise RuntimeError('Could not find an available GPU after ' + str(attempts) + ' attempts with ' + str(interval) + ' seconds interval.')
    return available


def showUtilization(all=False, attrList=None, useOldCode=False):
    """Print a utilization table for all GPUs (legacy fixed layout when useOldCode)."""
    GPUs = getGPUs()
    if all:
        if useOldCode:
            print(' ID | Name | Serial | UUID || GPU util. | Memory util. || Memory total | Memory used | Memory free || Display mode | Display active |')
            print('------------------------------------------------------------------------------------------------------------------------------')
            for gpu in GPUs:
                print(' {0:2d} | {1:s} | {2:s} | {3:s} || {4:3.0f}% | {5:3.0f}% || {6:.0f}MB | {7:.0f}MB | {8:.0f}MB || {9:s} | {10:s}'.format(gpu.id, gpu.name, gpu.serial, gpu.uuid, gpu.load * 100, gpu.memoryUtil * 100, gpu.memoryTotal, gpu.memoryUsed, gpu.memoryFree, gpu.display_mode, gpu.display_active))
        else:
            attrList = [[{'attr': 'id', 'name': 'ID'},
                         {'attr': 'name', 'name': 'Name'},
                         {'attr': 'serial', 'name': 'Serial'},
                         {'attr': 'uuid', 'name': 'UUID'}],
                        [{'attr': 'temperature', 'name': 'GPU temp.', 'suffix': 'C', 'transform': lambda x: x, 'precision': 0},
                         {'attr': 'load', 'name': 'GPU util.', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0},
                         {'attr': 'memoryUtil', 'name': 'Memory util.', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0}],
                        [{'attr': 'memoryTotal', 'name': 'Memory total', 'suffix': 'MB', 'precision': 0},
                         {'attr': 'memoryUsed', 'name': 'Memory used', 'suffix': 'MB', 'precision': 0},
                         {'attr': 'memoryFree', 'name': 'Memory free', 'suffix': 'MB', 'precision': 0}],
                        [{'attr': 'display_mode', 'name': 'Display mode'},
                         {'attr': 'display_active', 'name': 'Display active'}]]
    else:
        if useOldCode:
            print(' ID GPU MEM')
            print('--------------')
            for gpu in GPUs:
                print(' {0:2d} {1:3.0f}% {2:3.0f}%'.format(gpu.id, gpu.load * 100, gpu.memoryUtil * 100))
        else:
            attrList = [[{'attr': 'id', 'name': 'ID'},
                         {'attr': 'load', 'name': 'GPU', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0},
                         {'attr': 'memoryUtil', 'name': 'MEM', 'suffix': '%', 'transform': lambda x: x * 100, 'precision': 0}],
                        ]

    if not useOldCode:
        if attrList is not None:
            headerString = ''
            GPUstrings = [''] * len(GPUs)
            for attrGroup in attrList:
                for attrDict in attrGroup:
                    headerString = headerString + '| ' + attrDict['name'] + ' '
                    headerWidth = len(attrDict['name'])
                    minWidth = len(attrDict['name'])

                    attrPrecision = '.' + str(attrDict['precision']) if ('precision' in attrDict.keys()) else ''
                    attrSuffix = str(attrDict['suffix']) if ('suffix' in attrDict.keys()) else ''
                    attrTransform = attrDict['transform'] if ('transform' in attrDict.keys()) else lambda x: x
                    # First pass: find the widest formatted value for this column.
                    for gpu in GPUs:
                        attr = getattr(gpu, attrDict['attr'])
                        attr = attrTransform(attr)
                        if isinstance(attr, float):
                            attrStr = ('{0:' + attrPrecision + 'f}').format(attr)
                        elif isinstance(attr, int):
                            attrStr = ('{0:d}').format(attr)
                        elif isinstance(attr, str):
                            attrStr = attr
                        elif sys.version_info[0] == 2:
                            if isinstance(attr, unicode):
                                attrStr = attr.encode('ascii', 'ignore')
                        else:
                            raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
                        attrStr += attrSuffix
                        minWidth = max(minWidth, len(attrStr))

                    headerString += ' ' * max(0, minWidth - headerWidth)
                    minWidthStr = str(minWidth - len(attrSuffix))

                    # Second pass: render each GPU's value padded to the column width.
                    for gpuIdx, gpu in enumerate(GPUs):
                        attr = getattr(gpu, attrDict['attr'])
                        attr = attrTransform(attr)
                        if isinstance(attr, float):
                            attrStr = ('{0:' + minWidthStr + attrPrecision + 'f}').format(attr)
                        elif isinstance(attr, int):
                            attrStr = ('{0:' + minWidthStr + 'd}').format(attr)
                        elif isinstance(attr, str):
                            attrStr = ('{0:' + minWidthStr + 's}').format(attr)
                        elif sys.version_info[0] == 2:
                            if isinstance(attr, unicode):
                                attrStr = ('{0:' + minWidthStr + 's}').format(attr.encode('ascii', 'ignore'))
                        else:
                            raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
                        attrStr += attrSuffix
                        GPUstrings[gpuIdx] += '| ' + attrStr + ' '

                headerString = headerString + '|'
                for gpuIdx, gpu in enumerate(GPUs):
                    GPUstrings[gpuIdx] += '|'

            headerSpacingString = '-' * len(headerString)
            print(headerString)
            print(headerSpacingString)
            for GPUstring in GPUstrings:
                print(GPUstring)
+ +# Generate gpu uuid to id map +gpuUuidToIdMap = {} +try: + gpus = getGPUs() + for gpu in gpus: + gpuUuidToIdMap[gpu.uuid] = gpu.id + del gpus +except: + pass +def getGPUInfos(): + ###返回gpus:list,一个GPU为一个元素-对象 + ###########:有属性,'id','load','memoryFree', + ###########:'memoryTotal','memoryUsed','memoryUtil','name','serial''temperature','uuid',process + ###其中process:每一个计算进程是一个元素--对象 + ############:有属性,'gpuId','gpuName','gpuUuid', + ############:'gpuid','pid','processName','uid', 'uname','usedMemory' + gpus = getGPUs() + gpuUuidToIdMap={} + for gpu in gpus: + gpuUuidToIdMap[gpu.uuid] = gpu.id + gpu.process=[] + indexx = [x.id for x in gpus ] + + process = getGPUProcesses() + for pre in process: + pre.gpuid = gpuUuidToIdMap[pre.gpuUuid] + gpuId = indexx.index(pre.gpuid ) + gpus[gpuId].process.append(pre ) + return gpus + +def get_available_gpu(gpuStatus): + ##判断是否有空闲的显卡,如果有返回id,没有返回None + cuda=None + for gpus in gpuStatus: + if len(gpus.process) == 0: + cuda = gpus.id + return str(cuda) + return cuda +def get_whether_gpuProcess(): + ##判断是否有空闲的显卡,如果有返回id,没有返回None + gpuStatus=getGPUInfos() + gpuProcess=True + for gpus in gpuStatus: + if len(gpus.process) != 0: + gpuProcess = False + return gpuProcess + +def get_offlineProcess_gpu(gpuStatus,pidInfos): + gpu_onLine = [] + for gpu in gpuStatus: + for gpuProcess in gpu.process: + pid = gpuProcess.pid + if pid in pidInfos.keys(): + pidType = pidInfos[pid]['type'] + if pidType == 'onLine': + gpu_onLine.append(gpu) + gpu_offLine = set(gpuStatus) - set(gpu_onLine) + return list(gpu_offLine) +def arrange_offlineProcess(gpuStatus,pidInfos,modelMemory=1500): + cudaArrange=[] + gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos) + for gpu in gpu_offLine: + leftMemory = gpu.memoryTotal*0.9 - gpu.memoryUsed + modelCnt = int(leftMemory// modelMemory) + + cudaArrange.extend( [gpu.id] * modelCnt ) + return cudaArrange +def get_potential_gpu(gpuStatus,pidInfos): + ###所有GPU上都有计算。需要为“在线任务”空出一块显卡。 + ###step1:查看所有显卡上是否有“在线任务” + + 
gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos) + if len(gpu_offLine) == 0 : + return False + + ###step2,找出每张显卡上离线进程的数目 + offLineCnt = [ len(gpu.process) for gpu in gpu_offLine ] + minCntIndex =offLineCnt.index( min(offLineCnt)) + + pids = [x.pid for x in gpu_offLine[minCntIndex].process] + return {'cuda':gpu_offLine[minCntIndex].id,'pids':pids } +if __name__=='__main__': + #pres = getGPUProcesses() + #print('###line404:',pres) + gpus = getGPUs() + for gpu in gpus: + gpuUuidToIdMap[gpu.uuid] = gpu.id + print(gpu) + print(gpuUuidToIdMap) + pres = getGPUProcesses() + print('###line404:',pres) + for pre in pres: + print('#'*20) + for ken in ['gpuName','gpuUuid','pid','processName','uid','uname','usedMemory' ]: + print(ken,' ',pre.__getattribute__(ken )) + print(' ') + + diff --git a/utilsK/channel2postUtils.py b/utilsK/channel2postUtils.py new file mode 100644 index 0000000..2cd98fc --- /dev/null +++ b/utilsK/channel2postUtils.py @@ -0,0 +1,306 @@ +import sys +from pathlib import Path +import math +import cv2 +import numpy as np +import torch + +FILE = Path(__file__).absolute() +#sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +''' +修改说明: +1、pars中增加了recScale参数。船舶判断是否悬挂国旗时,需将船舶检测框乘以扩大系数imgScale后,与国旗中心点坐标比较。 + pars={'imgSize':(imgwidth,imgheight),'wRation':1/6.0,'hRation':1/6.0,'smallId':0,'bigId':3,'newId':4,'recScale':1.2} +2、增加expand_rect(preds_boat, recScale, imgSize)函数,在图像范围内,将矩形框扩大recScale倍数。 +3、增加或修改以下两行: + preds_boat_flag_expand=expand_rect(preds_boat_flag[i],pars['recScale'],pars['imgSize']) #新增! + if point_in_rectangle(preds_flag,preds_boat_flag_expand)>=1: #新增后修改! 
def channel2_post_process(predsList,pars):
    print('----line24:',predsList)
    '''
    Post-process one image's detections: large ships without a national flag
    are re-labelled pars['unflagId'] ('unflagged_ship'); uncovered-hold ships
    without a flag become pars['unflagAndcoverId'].
    Final classes: ['flag','buoy','shipname','ship','unflagged_ship','uncover']=[0,1,2,3,4,5]

    Inputs:
        predsList -- [preds]; preds holds x_y_x_y_conf_class rows (nested list / tensor)
        pars      -- dict with 'imgSize' (w, h), 'wRation', 'hRation', 'recScale'
                     and the class ids 'boatId', 'flagId', 'uncoverId',
                     'unflagId', 'unflagAndcoverId', plus the
                     'target_cls'/'filter_cls' keys read by filter_detection_results
    Output: list of detections with the re-labelled classes.
    '''

    preds = torch.tensor(predsList[0])
    preds=preds.tolist()

    preds = filter_detection_results(preds,pars)


    preds=[[*sublist[:-1], int(sublist[-1])] for sublist in preds]  # class float -> int

    output_detection=[]  # detections passed straight through to the caller
    preds_boat=[]
    preds_flag=[]

    # uncovered-hold detections are tracked separately
    preds_uncover = []

    # NOTE(review): filter_detection_results was already applied above; this
    # second call re-filters the same list.  Looks redundant -- confirm the
    # intent before removing either call.
    preds = filter_detection_results(preds,pars)

    for i in range(len(preds)):
        if preds[i][5]==pars['boatId']:        # ship
            preds_boat.append(preds[i])

        elif preds[i][5]==pars['flagId']:      # national flag
            preds_flag.append(preds[i])
        elif preds[i][5]==pars['uncoverId']:   # uncovered hold
            preds_uncover.append(preds[i])

        else:
            output_detection.append(preds[i])

    # Only boats whose box side exceeds (imgW + imgH) * ration trigger the
    # flag check; smaller boats keep their class unchanged.
    boat_uncover = preds_boat+preds_uncover
    for i in range(len(boat_uncover)):
        length_bbx,width_bbx=get_rectangle_dimensions(boat_uncover[i])
        length_bbx, width_bbx=int(length_bbx),int(width_bbx)
        if length_bbx>(pars['imgSize'][0]+pars['imgSize'][1])* pars['hRation'] or width_bbx>(pars['imgSize'][0]+pars['imgSize'][1])*pars['wRation']:
            boat_uncover[i] = unflag(boat_uncover[i], preds_flag, pars)

    return output_detection + boat_uncover



def unflag(boat_uncover,preds_flag,pars):
    '''Re-label one large boat that carries no flag (mutates the row in place).

    The boat box is expanded by pars['recScale'] (clipped to the image) before
    testing whether any flag centre falls inside it.
    '''
    preds_boat_flag_expand = expand_rect(boat_uncover, pars['recScale'], pars['imgSize'])
    if not point_in_rectangle(preds_flag, preds_boat_flag_expand) >= 1:
        if boat_uncover[5] == pars['uncoverId']:

            boat_uncover[5] = pars['unflagAndcoverId']  # no flag AND uncovered hold
        else:
            boat_uncover[5] = pars['unflagId']  # no flag -> 'unflagged_ship'

    return boat_uncover




def center_coordinate(boundbxs):
    '''
    Return the centre (x, y) of an xyxy box.
    '''
    boundbxs_x1 = boundbxs[0]
    boundbxs_y1 = boundbxs[1]
    boundbxs_x2 = boundbxs[2]
    boundbxs_y2 = boundbxs[3]
    center_x = 0.5 * (boundbxs_x1 + boundbxs_x2)
    center_y = 0.5 * (boundbxs_y1 + boundbxs_y2)
    return center_x, center_y


def get_rectangle_dimensions(boundbxs):
    '''
    Return (width, height) of an xyxy box.
    '''
    # horizontal distance between the two corners
    width = math.fabs(boundbxs[2] - boundbxs[0])
    # vertical distance between the two corners
    height = math.fabs(boundbxs[3]- boundbxs[1])
    return width, height


def fourcorner_coordinate(boundbxs):
    '''
    Return the four corner points of an xyxy box, in contour order.
    '''
    boundbxs_x1 = boundbxs[0]
    boundbxs_y1 = boundbxs[1]
    boundbxs_x2 = boundbxs[2]
    boundbxs_y2 = boundbxs[3]
    wid = boundbxs_x2 - boundbxs_x1
    hei = boundbxs_y2 - boundbxs_y1
    boundbxs_x3 = boundbxs_x1 + wid
    boundbxs_y3 = boundbxs_y1
    boundbxs_x4 = boundbxs_x1
    boundbxs_y4 = boundbxs_y1 + hei
    contours_rec = [[boundbxs_x1, boundbxs_y1], [boundbxs_x3, boundbxs_y3], [boundbxs_x2, boundbxs_y2],
                    [boundbxs_x4, boundbxs_y4]]
    return contours_rec

def point_in_rectangle(preds_flag,preds_boat_flag):
    '''
    Count how many flag centres fall strictly inside the given boat box.
    Inputs:
        preds_flag      -- list of flag detections
        preds_boat_flag -- one boat detection (xyxy...)
    Output: number of flag centres inside the box.  Centres exactly on the
    border (pointPolygonTest == 0) are NOT counted.
    '''
    iii=0
    boat_contour=fourcorner_coordinate(preds_boat_flag)
    boat_contour=np.array(boat_contour,dtype=np.float32)
    for i in range(len(preds_flag)):
        center_x, center_y = center_coordinate(preds_flag[i])
        if cv2.pointPolygonTest(boat_contour, (center_x, center_y), False)==1:
            iii+=1
        else:
            pass
    return iii


def expand_rect(preds_boat, recScale, imgSize):
    '''
    Scale an xyxy box by recScale about its centre, clipped to the image.
    Inputs:
        preds_boat -- one detection row (xyxy_conf_class)
        imgSize    -- (width, height) tuple from pars
    Output: a copy of preds_boat with the adjusted corner coordinates.
    '''
    preds_boat_1=[x for x in preds_boat]  # shallow copy; the input row is not mutated
    x1, y1 = preds_boat[0],preds_boat[1]
    x2, y2 = preds_boat[2],preds_boat[3]

    width = x2 - x1
    height = y2 - y1

    # scaled dimensions
    new_width = width * recScale
    new_height = height * recScale

    # new corners, clipped to the image bounds
    new_x1 = max(x1 - (new_width - width) / 2, 0)           # left edge
    new_y1 = max(y1 - (new_height - height) / 2, 0)         # top edge
    new_x2 = min(x2 + (new_width - width) / 2, imgSize[0])  # imgSize[0] = image width
    new_y2 = min(y2 + (new_height - height) / 2, imgSize[1])  # imgSize[1] = image height

    preds_boat_1[0]=new_x1
    preds_boat_1[1]=new_y1
    preds_boat_1[2]=new_x2
    preds_boat_1[3]=new_y2

    return preds_boat_1




# hold-cover post-processing helpers

def filter_detection_results(results, par):
    '''Re-label ships that fully contain an "uncovered" box.

    A ship box (class par['target_cls']) that geometrically contains any box of
    class par['filter_cls'] gets class par['uncoverId'].  The filter-class
    boxes themselves are DROPPED from the returned list, and ordering changes:
    pass-through classes come first, then the ship boxes.
    '''
    target_cls = par['target_cls']  # ships
    filter_cls = par['filter_cls']  # uncovered-hold boxes

    # split boxes into pass-through vs to-process
    non_process = [box for box in results if box[5] not in {target_cls, filter_cls}]
    to_process = [box for box in results if box[5] in {target_cls, filter_cls}]

    class_target = [box for box in to_process if box[5] == target_cls]
    class_filter = [box for box in to_process if box[5] == filter_cls]

    # re-label each ship that contains a filter box (mutates the row in place)
    for i in range(len(class_target)):
        t_box = class_target[i]
        if any(  # is any filter box fully inside this ship box?
            (t_box[0] <= f_box[0] and
             t_box[1] <= f_box[1] and
             t_box[2] >= f_box[2] and
             t_box[3] >= f_box[3])
            for f_box in class_filter
        ):
            class_target[i][5] = par['uncoverId']

    # merge, keeping the untouched classes
    return non_process + class_target


def filter_detection_results_uncover(results, par):
    '''Keep only filter-class boxes that lie inside a target box, re-labelled 5.

    Filter-class boxes NOT contained in any target box are dropped; target
    boxes and all other classes are kept.  Each kept filter box is a copy with
    its class set to 5.
    '''
    target_cls = par['target_cls']
    filter_cls = par['filter_cls']

    # split boxes into pass-through vs to-process
    non_process = [box for box in results if box[5] not in {target_cls, filter_cls}]
    to_process = [box for box in results if box[5] in {target_cls, filter_cls}]

    class_target = [box for box in to_process if box[5] == target_cls]
    class_filter = [box for box in to_process if box[5] == filter_cls]

    processed = []
    for f_box in class_filter:
        if any(  # is this filter box fully inside any target box?
            (f_box[0] >= t_box[0] and
             f_box[1] >= t_box[1] and
             f_box[2] <= t_box[2] and
             f_box[3] <= t_box[3])
            for t_box in class_target
        ):
            new_box = f_box.copy()
            new_box[5] = 5  # re-map class 4 -> 5
            processed.append(new_box)

    # all target boxes are kept
    processed += class_target

    return non_process + processed


if __name__ == "__main__":

    # detections corresponding to DJI_20230306152702_0001_Z_562.jpg
    preds=[[7.62000e+02, 7.14000e+02, 1.82800e+03, 9.51000e+02, 9.00902e-01, 3.00000e+00],
        [2.00000e+01, 3.45000e+02, 1.51300e+03, 6.71000e+02, 8.81440e-01, 3.00000e+00],
        [8.35000e+02, 8.16000e+02, 8.53000e+02, 8.30000e+02, 7.07651e-01, 0.00000e+00],
        [1.35600e+03, 4.56000e+02, 1.42800e+03, 4.94000e+02, 6.70549e-01, 2.00000e+00]]
    print('before :\n ',preds)
    imgwidth=1920
    imgheight=1680
    # smallId=0 (flag), bigId=3 (ship); wRation/hRation are size thresholds;
    # newId is the class given to new targets.
    # NOTE(review): this demo dict lacks the 'target_cls'/'filter_cls'/'boatId'/
    # 'flagId'/'uncoverId'/... keys the updated functions read, so running this
    # __main__ raises KeyError -- it looks stale relative to the code above.
    pars={'imgSize':(imgwidth,imgheight),'wRation':1/6.0,'hRation':1/6.0,'smallId':0,'bigId':3,'newId':4,'recScale':1.2}
    yyy=channel2_post_process([preds],pars)  # run the post-processing

    print('after :\n ',yyy)
def channel2_post_process(predsList,pars):
    '''
    (.jcq backup variant of utilsK/channel2postUtils.py kept in this diff.)
    Re-label ships that carry no national flag as pars['newId'] ('unflagged_ship').
    Final classes: ['flag','buoy','shipname','ship','unflagged_ship']=[0,1,2,3,4]

    Inputs:
        predsList -- [preds]; preds holds x_y_x_y_conf_class rows
        pars      -- dict with 'imgSize' (w, h), 'wRation', 'hRation',
                     'smallId' (flag), 'bigId' (ship), 'newId', 'recScale',
                     'uncoverId', and the 'target_cls'/'filter_cls' keys read
                     by filter_detection_results
    Output: list of detections with unflagged ships re-labelled.
    '''

    preds = torch.tensor(predsList[0])
    preds=preds.tolist()
    print('---line36:',preds)
    preds = filter_detection_results(preds,pars)


    preds=[[*sublist[:-1], int(sublist[-1])] for sublist in preds]  # class float -> int

    output_detection=[]  # detections passed straight through to the caller
    preds_boat=[]
    preds_flag=[]

    # uncovered-hold detections are tracked separately
    preds_uncover = []

    for i in range(len(preds)):
        if preds[i][5]==pars['bigId']:        # ship
            preds_boat.append(preds[i])
        elif preds[i][5]==pars['smallId']:    # national flag
            preds_flag.append(preds[i])
            output_detection.append(preds[i])

        elif preds[i][5]==pars['uncoverId']:  # uncovered hold
            preds_uncover.append(preds[i])
            output_detection.append(preds[i])

        else:
            output_detection.append(preds[i])

    # Only boats with a side above (imgW + imgH) * ration are checked for a
    # flag; smaller boats are passed through unchanged.
    preds_boat_flag=[]
    for i in range(len(preds_boat)):
        length_bbx,width_bbx=get_rectangle_dimensions(preds_boat[i])
        length_bbx, width_bbx=int(length_bbx),int(width_bbx)
        if length_bbx>(pars['imgSize'][0]+pars['imgSize'][1])* pars['hRation'] or width_bbx>(pars['imgSize'][0]+pars['imgSize'][1])*pars['wRation']:
            preds_boat_flag.append(preds_boat[i])
        else:
            output_detection.append(preds_boat[i])


    # A boat keeps its class when at least one flag centre falls inside its
    # recScale-expanded box; otherwise its class becomes pars['newId'].
    for i in range(len(preds_boat_flag)):
        preds_boat_flag_expand=expand_rect(preds_boat_flag[i],pars['recScale'],pars['imgSize'])
        if point_in_rectangle(preds_flag,preds_boat_flag_expand)>=1:
            output_detection.append(preds_boat_flag[i])
        else:
            temp_preds_boat_flag=preds_boat_flag[i]
            temp_preds_boat_flag[5]=pars['newId']  # re-label as unflagged ship
            output_detection.append(temp_preds_boat_flag)

    return output_detection


def center_coordinate(boundbxs):
    '''
    Return the centre (x, y) of an xyxy box.
    '''
    boundbxs_x1 = boundbxs[0]
    boundbxs_y1 = boundbxs[1]
    boundbxs_x2 = boundbxs[2]
    boundbxs_y2 = boundbxs[3]
    center_x = 0.5 * (boundbxs_x1 + boundbxs_x2)
    center_y = 0.5 * (boundbxs_y1 + boundbxs_y2)
    return center_x, center_y


def get_rectangle_dimensions(boundbxs):
    '''
    Return (width, height) of an xyxy box.
    '''
    # horizontal distance between the two corners
    width = math.fabs(boundbxs[2] - boundbxs[0])
    # vertical distance between the two corners
    height = math.fabs(boundbxs[3]- boundbxs[1])
    return width, height


def fourcorner_coordinate(boundbxs):
    '''
    Return the four corner points of an xyxy box, in contour order.
    '''
    boundbxs_x1 = boundbxs[0]
    boundbxs_y1 = boundbxs[1]
    boundbxs_x2 = boundbxs[2]
    boundbxs_y2 = boundbxs[3]
    wid = boundbxs_x2 - boundbxs_x1
    hei = boundbxs_y2 - boundbxs_y1
    boundbxs_x3 = boundbxs_x1 + wid
    boundbxs_y3 = boundbxs_y1
    boundbxs_x4 = boundbxs_x1
    boundbxs_y4 = boundbxs_y1 + hei
    contours_rec = [[boundbxs_x1, boundbxs_y1], [boundbxs_x3, boundbxs_y3], [boundbxs_x2, boundbxs_y2],
                    [boundbxs_x4, boundbxs_y4]]
    return contours_rec

def point_in_rectangle(preds_flag,preds_boat_flag):
    '''
    Count how many flag centres fall strictly inside the given boat box.
    Centres exactly on the border (pointPolygonTest == 0) are NOT counted.
    '''
    iii=0
    boat_contour=fourcorner_coordinate(preds_boat_flag)
    boat_contour=np.array(boat_contour,dtype=np.float32)
    for i in range(len(preds_flag)):
        center_x, center_y = center_coordinate(preds_flag[i])
        if cv2.pointPolygonTest(boat_contour, (center_x, center_y), False)==1:
            iii+=1
        else:
            pass
    return iii


def expand_rect(preds_boat, recScale, imgSize):
    '''
    Scale an xyxy box by recScale about its centre, clipped to the image.
    Returns a copy of preds_boat with adjusted corners.
    '''
    preds_boat_1=[x for x in preds_boat]  # shallow copy; input row is not mutated
    x1, y1 = preds_boat[0],preds_boat[1]
    x2, y2 = preds_boat[2],preds_boat[3]

    width = x2 - x1
    height = y2 - y1

    # scaled dimensions
    new_width = width * recScale
    new_height = height * recScale

    # new corners, clipped to the image bounds
    new_x1 = max(x1 - (new_width - width) / 2, 0)           # left edge
    new_y1 = max(y1 - (new_height - height) / 2, 0)         # top edge
    new_x2 = min(x2 + (new_width - width) / 2, imgSize[0])  # imgSize[0] = width
    new_y2 = min(y2 + (new_height - height) / 2, imgSize[1])  # imgSize[1] = height

    preds_boat_1[0]=new_x1
    preds_boat_1[1]=new_y1
    preds_boat_1[2]=new_x2
    preds_boat_1[3]=new_y2

    return preds_boat_1


# hold-cover post-processing helper (.jcq variant)
def filter_detection_results(results, par):
    '''Keep target-class boxes plus filter-class boxes contained in a target.

    NOTE(review): unlike the main-file version, this variant returns ONLY
    target boxes and contained filter boxes -- every other class (flags,
    buoys, ...) is dropped here, before channel2_post_process classifies them.
    Confirm that is intended.
    '''
    target_cls = par['target_cls']
    filter_cls = par['filter_cls']

    class_target_boxes = [result for result in results if result[5] == target_cls]
    class_filter_boxes = [result for result in results if result[5] == filter_cls]
    filtered_results = []

    for box_filter in class_filter_boxes:
        is_inside = False
        for box_target in class_target_boxes:
            # is the filter-class box fully inside the target-class box?
            if (box_filter[0] >= box_target[0] and
                box_filter[1] >= box_target[1] and
                box_filter[2] <= box_target[2] and
                box_filter[3] <= box_target[3]):
                is_inside = True
                break
        if is_inside:
            filtered_results.append(box_filter)

    # keep all target-class boxes plus the contained filter-class boxes
    filtered_results += [result for result in class_target_boxes]

    # re-map class 4 results to class 5
    for i, result in enumerate(filtered_results):
        if result[5] == 4:
            # rebuild the row via a tuple so the original is not mutated
            new_result = list(tuple(result[:-1]) + (5,))
            filtered_results[i] = new_result

    return filtered_results



if __name__ == "__main__":

    # detections corresponding to DJI_20230306152702_0001_Z_562.jpg
    preds=[[7.62000e+02, 7.14000e+02, 1.82800e+03, 9.51000e+02, 9.00902e-01, 3.00000e+00],
        [2.00000e+01, 3.45000e+02, 1.51300e+03, 6.71000e+02, 8.81440e-01, 3.00000e+00],
        [8.35000e+02, 8.16000e+02, 8.53000e+02, 8.30000e+02, 7.07651e-01, 0.00000e+00],
        [1.35600e+03, 4.56000e+02, 1.42800e+03, 4.94000e+02, 6.70549e-01, 2.00000e+00]]
    print('before :\n ',preds)
    imgwidth=1920
    imgheight=1680
    # smallId=0 (flag), bigId=3 (ship); wRation/hRation are size thresholds.
    # NOTE(review): this demo dict lacks 'target_cls'/'filter_cls'/'uncoverId',
    # so running this __main__ raises KeyError -- looks stale.
    pars={'imgSize':(imgwidth,imgheight),'wRation':1/6.0,'hRation':1/6.0,'smallId':0,'bigId':3,'newId':4,'recScale':1.2}
    yyy=channel2_post_process([preds],pars)  # run the post-processing

    print('after :\n ',yyy)


def Crack_measure(_mask_cv_gray,par={'dsx':(123-30)*1000/35*0.004387636 } ):
    '''Measure real-world crack dimensions from a binary crack mask.

    Input: a single crack segmentation image (grayscale array).
    Output: [length, mean_width, max_width, min_width] scaled by par['dsx']
    (pixels-to-real-units factor), or [0, 0, 0, 0] when the skeleton is empty.
    NOTE(review): mutable dict default argument -- not mutated here, but
    fragile; confirm before relying on call-to-call independence.
    '''
    dsx = par['dsx']
    ### READ
    img = np.array(_mask_cv_gray.astype(np.int32))
    image0 = binary = img
    ### SKELETONIZATION: medial axis plus the distance transform
    img_skeletonized, distance = medial_axis(image0, return_distance=True)
    img_skeletonized = np.array(img_skeletonized.astype(np.int32))
    ### COMPUTING WIDTH: distance on the skeleton, doubled to get a diameter
    dist_on_skel = distance * img_skeletonized

    width = dist_on_skel[dist_on_skel != 0] * 2
    for i in range(len(width)):
        if width[i] <= 2.0:
            width[i] = width[i]  # NOTE(review): no-op branch kept verbatim
        else:
            width[i] = width[i] - 2
    ### OUTPUT: length = number of remaining skeleton pixels, scaled
    real_length = np.count_nonzero(img_skeletonized) *dsx
    if len(width)==0:
        return [0,0,0,0]
    real_mean_width = np.mean(width)*dsx
    real_max_width = np.max(width)*dsx
    real_min_width = np.min(width)*dsx

    return [real_length,real_mean_width,real_max_width,real_min_width]
def calculate_distance(point1, point2):
    """Euclidean distance between the centres of two xyxy boxes."""
    point= center_coordinate(point1)
    point=np.array(point)
    other_point = center_coordinate(point2)
    other_point = np.array(other_point)
    return np.linalg.norm(point - other_point)

def find_clusters(preds, min_distance):
    """Greedily split boxes into clusters by centre distance.

    Complete-linkage style: a point joins a cluster only when it is within
    min_distance of EVERY member already in the cluster; each point is used
    at most once.
    """
    points=preds
    points=np.array(points)
    clusters = []
    used_points = set()
    for i, point in enumerate(points):
        if i not in used_points:  # seed a new cluster from an unused point
            cluster = [point]
            used_points.add(i)
            # pull in every remaining point within min_distance of the whole cluster
            for j, other_point in enumerate(points):
                if j not in used_points:
                    # NOTE(review): the comprehension variable `point` shadows
                    # the outer loop variable here (kept verbatim).
                    if all(calculate_distance(point, other_point) <= min_distance
                           for point in cluster):
                        cluster.append(other_point)
                        used_points.add(j)
            clusters.append(cluster)
    return clusters

def center_coordinate(boundbxs):
    '''
    Return the centre [x, y] of an xyxy box.
    '''
    boundbxs_x1 = boundbxs[0]
    boundbxs_y1 = boundbxs[1]
    boundbxs_x2 = boundbxs[2]
    boundbxs_y2 = boundbxs[3]
    center_x = 0.5 * (boundbxs_x1 + boundbxs_x2)
    center_y = 0.5 * (boundbxs_y1 + boundbxs_y2)
    return [center_x, center_y]

def get_bounding_rectangle(rectangles):
    '''
    Return the [min_x, min_y, max_x, max_y] hull of several detection rows
    (each row unpacks as x1, y1, x2, y2, conf, cls).
    '''
    min_x, max_x, min_y, max_y = float('inf'), float('-inf'), float('inf'), float('-inf')
    for rect in rectangles:
        x1, y1, x2, y2,c1,t1 = rect
        min_x = min(min_x, min(x1, x2))
        max_x = max(max_x, max(x1, x2))
        min_y = min(min_y, min(y1, y2))
        max_y = max(max_y, max(y1, y2))
    return [min_x, min_y, max_x, max_y]

def calculate_score(input_value):
    '''
    Map a crowd size to a confidence: 3 -> 0.85, 4 -> 0.9, 5..10 ramp up from
    0.915, anything larger -> 1.0.
    NOTE(review): currently unused -- the call site in gather_post_process is
    commented out and the raw member count is used instead.
    '''

    if input_value == 3:
        output_value=0.85
    elif input_value == 4:
        output_value=0.9
    elif 5<= input_value <=10:
        output_value = 0.9+(input_value-4)*0.015
    else:
        output_value=1
    return output_value


def gather_post_process(predsList, pars):
    '''
    Detect crowd gatherings among pedestrian detections and append one box of
    class pars['gatherId'] ('crowd_people') per gathering.
    Classes before: ['ForestSpot','PestTree','pedestrian','fire','smog','cloud']=[0..5]
    after:          same + 'crowd_people'=6.

    Inputs:
        predsList -- [preds]; preds holds x_y_x_y_conf_class rows
        pars      -- dict with 'pedestrianId', 'crowdThreshold', 'gatherId',
                     'distancePersonScale'
    Output: (detections, timing/info string).
    '''
    t0=time.time()
    predsList = predsList[0]
    predsList = [x for x in predsList if int(x[5]) !=5 ]  # drop class 5 ('cloud')
    # 1. keep only pedestrians for clustering
    preds = [ x for x in predsList if int(x[5]) ==pars['pedestrianId'] ]


    if len(preds)< pars['crowdThreshold']:
        return predsList,'gaher postTime:No gathering'
    preds = np.array(preds)
    # NOTE(review): np.max with no axis reduces the whole array to ONE scalar,
    # so np.mean of it is that same scalar; a per-box max side (axis=1) looks
    # intended -- confirm before changing.
    longs = np.mean(np.max(preds[:,2:4]-preds[:,0:2]))
    distanceThreshold = pars['distancePersonScale']*longs
    # 2. cluster pedestrian boxes by centre distance
    clusters = find_clusters(preds, distanceThreshold)

    clusters_crowd = []
    # 3. clusters with at least crowdThreshold members count as gatherings
    for i, cluster in enumerate(clusters):
        if len(cluster) >= pars['crowdThreshold']:
            clusters_crowd.append(cluster)

    # 4. append one bounding box per gathering to the other detections
    for i in range(len(clusters_crowd)):
        xyxy = get_bounding_rectangle(clusters_crowd[i])  # crowd hull box
        # NOTE(review): the member COUNT is appended as the confidence value;
        # the calculate_score() mapping is bypassed (its call is commented out).
        #score = calculate_score(len(clusters_crowd[i]))
        score = len(clusters_crowd[i])
        xyxy.append(score)              # crowd "confidence"
        xyxy.append(pars['gatherId'])   # crowd class id
        predsList.append(xyxy)

    # 5. final list (7 classes) used for drawing
    output_predslist = predsList
    t1=time.time()

    return output_predslist,'gaher postTime:%.1f ms'%( (t1-t0)*1000 )

if __name__ == "__main__":
    t1 = time.time()
    # detections corresponding to vendor1_20240529_99.jpg
    preds=[[224.19933, 148.30751, 278.19156, 199.87828, 0.87625, 2.00000],
        [362.67139, 161.25760, 417.72357, 211.51706, 0.86919, 2.00000],
        [437.00131, 256.19083, 487.88870, 307.72897, 0.85786, 2.00000],
        [442.64606, 335.78168, 493.75720, 371.41418, 0.85245, 2.00000],
        [324.58362, 256.18488, 357.72626, 294.08929, 0.84512, 2.00000],
        [343.59781, 301.06506, 371.04105, 350.01086, 0.84207, 2.00000],
        [301.35858, 210.64088, 332.64862, 250.78883, 0.84063, 2.00000],
        [406.02994, 216.91214, 439.44455, 249.26077, 0.83698, 2.00000],
        [321.53494, 99.68467, 354.67477, 135.53226, 0.82515, 2.00000],
        [253.97131, 202.65234, 302.06055, 233.30634, 0.81498, 2.00000],
        [365.62521, 66.42108, 442.02292, 127.37558, 0.79556, 1.00000]]
    imgwidth=1920
    imgheight=1680
    pars={'imgSize':(imgwidth,imgheight),'pedestrianId':2,'crowdThreshold':4,'gatherId':6,'distancePersonScale':2.0}
    '''
    pedestrianId         -- class id of pedestrians
    crowdThreshold       -- minimum head-count for a gathering (default 4)
    distanceThreshold    -- distance criterion for gathering membership
    '''
    # NOTE(review): gather_post_process indexes predsList[0], so it expects
    # [preds]; passing `preds` directly makes the first row be iterated as the
    # detection list and raises TypeError -- confirm, likely should be [preds].
    yyy=gather_post_process(preds,pars)  # run the post-processing
    t2 = time.time()
    ttt = t2 - t1
    print('时间', ttt * 1000)


def ms(t1, t0):
    """Elapsed time in milliseconds."""
    return (t1 - t0) * 1000.0


def center_coordinate(boundbxs):
    '''
    Return the centre (x, y) of an xyxy box.
    (cthcUtils.py copy -- a [x, y]-returning variant exists in crowdGather.py.)
    '''
    boundbxs_x1 = boundbxs[0]
    boundbxs_y1 = boundbxs[1]
    boundbxs_x2 = boundbxs[2]
    boundbxs_y2 = boundbxs[3]
    center_x = 0.5 * (boundbxs_x1 + boundbxs_x2)
    center_y = 0.5 * (boundbxs_y1 + boundbxs_y2)
    return center_x, center_y
def mixCthc_postprocess(preds, _mask_cv,pars=None):
    '''Keep only hazardous-chemical detections whose centre lies on the road.

    Inputs:
        preds    -- x_y_x_y_conf_class rows; class 0 is the hazardous-chemical class
        _mask_cv -- road segmentation mask image
        pars     -- unused here (kept for interface symmetry)
    Process: threshold the mask, take the LARGEST contour as the road, and keep
    a detection when its centre falls inside that contour.
    Returns (kept_detections, time_info_string); ([], 0) when no road is found.
    '''
    # 1. largest segmented road region, computed on a 480-wide downscale to
    # keep the contour search cheap (per-pixel numpy scaling was too slow).
    original_height = _mask_cv.shape[0]
    original_width = _mask_cv.shape[1]

    zoom_factor = original_width / 480.0

    zoom_height = int(original_height / zoom_factor)
    zoom_width = int(original_width / zoom_factor)

    _mask_cv = cv2.resize(_mask_cv, (zoom_width, zoom_height))  # (width, height) order
    t4 = time.time()
    print('+' * 10, '_mask_cv shape信息', _mask_cv.shape)
    img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape) == 3 else _mask_cv
    t5 = time.time()
    # NOTE(review): cv2.threshold returns (retval, image); the first name
    # `contours` really holds the Otsu threshold value -- misleading but the
    # variable is overwritten by findContours below.
    contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # find all contours (possibly multiple boundaries) and sort by area, largest first
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2)
    contour_info = []
    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    t6 = time.time()

    # No road region found: return no detections.
    if contour_info == []:
        final_Cthc_filterroad = []
        timeInfos = 0

        return final_Cthc_filterroad, timeInfos
    else:
        max_contour = contour_info[0]
        max_contour = max_contour[0] * zoom_factor  # scale contour back to full size
        max_contour = max_contour.astype(np.int32)
        t7 = time.time()

        # 2.1 keep only class-0 (hazardous chemical) detections
        init_Cthc = []
        for i in range(len(preds)):
            if preds[i][5] == 0:
                init_Cthc.append(preds[i])

        # 3. keep a detection when its centre lies inside the road contour
        init_Cthc_filterroad = init_Cthc
        final_Cthc_filterroad = []
        for i in range(len(init_Cthc_filterroad)):
            center_x, center_y = center_coordinate(init_Cthc_filterroad[i])
            # pointPolygonTest returns +1 inside, -1 outside, 0 on the border
            flag = cv2.pointPolygonTest(max_contour, (int(center_x), int(center_y)),
                                        False)
            if flag == 1:
                final_Cthc_filterroad.append(init_Cthc_filterroad[i])
            else:
                pass
        t9 = time.time()

        timeInfos = ' findMaxroad:%.1f releJudge:%.1f' % (ms(t6, t4), ms(t9, t6))

        return final_Cthc_filterroad, timeInfos


def ms(t1,t0):
    """Elapsed time in milliseconds. (drownUtils.py copy)"""
    return (t1-t0)*1000.0

def center_coordinate(boundbxs):
    '''
    Return the centre (x, y) of an xyxy box. (drownUtils.py copy)
    '''
    boundbxs_x1=boundbxs[0]
    boundbxs_y1=boundbxs[1]
    boundbxs_x2=boundbxs[2]
    boundbxs_y2=boundbxs[3]
    center_x=0.5*(boundbxs_x1+boundbxs_x2)
    center_y=0.5*(boundbxs_y1+boundbxs_y2)
    return center_x,center_y

def fourcorner_coordinate(boundbxs):
    '''
    Return the four corner points of an xyxy box, in contour order.
    '''
    boundbxs_x1=boundbxs[0]
    boundbxs_y1=boundbxs[1]
    boundbxs_x2=boundbxs[2]
    boundbxs_y2=boundbxs[3]
    wid=boundbxs_x2-boundbxs_x1
    hei=boundbxs_y2-boundbxs_y1
    boundbxs_x3=boundbxs_x1+wid
    boundbxs_y3=boundbxs_y1
    boundbxs_x4=boundbxs_x1
    boundbxs_y4 = boundbxs_y1+hei
    contours_rec=[[boundbxs_x1,boundbxs_y1],[boundbxs_x3,boundbxs_y3],[boundbxs_x2,boundbxs_y2],[boundbxs_x4,boundbxs_y4]]
    return contours_rec
def remove_simivalue(list1,list2):
    '''Return a copy of list1 with every element that also occurs in list2 removed.

    Inputs: two nested lists (detection rows).
    Output: nested list.
    Fix: the original issued one list.remove() per matching (i, j) pair, which
    raised ValueError whenever one list1 element equalled MORE THAN ONE list2
    element (the second remove() found nothing left to delete).  A single
    membership filter keeps the intended semantics for all other inputs.
    '''
    return [elem for elem in list1 if elem not in list2]

def remove_sameeleme_inalist(list3):
    '''Deduplicate a nested list, keeping first occurrences in order.

    NOTE: raises IndexError on an empty input list; the caller below guards
    against that case before calling.
    '''
    list3=list3
    list4=[]
    list4.append(list3[0])
    for dict in list3:
        k=0
        for item in list4:
            if dict!=item:
                k=k+1
            else:
                break
        if k==len(list4):  # not seen before -> keep it
            list4.append(dict)
    return list4

def order_points(pts):
    ''' sort rectangle points by clockwise

    Splits the 4 points by x into the left and right pairs, orders the left
    pair by descending y and the right pair by ascending y, then concatenates.
    '''
    sort_x = pts[np.argsort(pts[:, 0]), :]

    Left = sort_x[:2, :]
    Right = sort_x[2:, :]
    # Left sort
    Left = Left[np.argsort(Left[:, 1])[::-1], :]
    # Right sort
    Right = Right[np.argsort(Right[:, 1]), :]
    return np.concatenate((Left, Right), axis=0)

def mixDrowing_water_postprocess(preds,_mask_cv,pars ):
    '''Filter person-in-water detections, excluding people on boats.

    Inputs:
        preds    -- x_y_x_y_conf_class rows; class 0 = head, 1 = person,
                    anything else is treated as a boat
        _mask_cv -- water segmentation mask image
        pars     -- unused here (kept for interface symmetry)
    Pipeline: find the largest water contour; drop head boxes duplicating a
    person box; keep head/person detections inside the water; finally remove
    those whose centre falls inside any boat box.
    Returns (final_detections, time_info_string); ([], 0) when no water found.
    '''
    # 1. largest segmented water region, computed on a 480-wide downscale to
    # keep the contour search cheap.
    original_height = _mask_cv.shape[0]
    original_width=_mask_cv.shape[1]

    zoom_factor = original_width/480.0

    zoom_height=int(original_height/zoom_factor)
    zoom_width=int(original_width/zoom_factor)

    _mask_cv = cv2.resize(_mask_cv, (zoom_width,zoom_height))  # (width, height) order
    t4 = time.time()
    img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape)==3 else _mask_cv
    t5 = time.time()
    # NOTE: cv2.threshold returns (retval, image); `contours` here is the Otsu
    # threshold value and is overwritten by findContours below.
    contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # find all contours and sort by area, largest first
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2)
    contour_info = []
    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    t6 = time.time()

    # No water region found: return no detections.
    if contour_info==[]:
        final_head_person_filterwater=[]
        timeInfos=0
        return final_head_person_filterwater,timeInfos

    max_contour = contour_info[0]
    max_contour=max_contour[0]*zoom_factor  # scale contour back to full size
    max_contour = max_contour.astype(np.int32)
    t7 = time.time()

    # 2.1 split detections: head+person vs boat
    init_head_person=[]
    init_boat = []
    for i in range(len(preds)):
        if preds[i][5]==0 or preds[i][5]==1:
            init_head_person.append(preds[i])
        else:
            init_boat.append(preds[i])
    t8 = time.time()

    # 2.2 drop head boxes that point at the same person as a person box;
    # the person label wins.
    init_head=[]
    init_person=[]
    for i in range(len(init_head_person)):
        if init_head_person[i][5]==0:
            init_head.append(init_head_person[i])
        else:
            init_person.append(init_head_person[i])
    # build a contour per person box
    person_contour=[]
    for i in range(len(init_person)):
        boundbxs_temp=[init_person[i][0],init_person[i][1],init_person[i][2],init_person[i][3]]
        contour_temp_person=fourcorner_coordinate(boundbxs_temp)
        contour_temp_person=np.array(contour_temp_person)
        contour_temp_person=np.float32(contour_temp_person)
        person_contour.append(np.array(contour_temp_person))
    # a head whose centre lies inside a person contour is the same person -> drop.
    # NOTE(review): a head NOT inside person j is appended once per such j, so
    # with several person boxes a head can be appended multiple times; the
    # remove_sameeleme_inalist/remove_simivalue passes below absorb duplicates
    # only on the boat path -- confirm whether this is intended.
    list_head=[]
    for i in range(len(init_head)):
        for j in range(len(person_contour)):
            center_x, center_y=center_coordinate(init_head[i])
            flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False)
            if flag==1:
                pass
            else:
                list_head.append(init_head[i])
    # merge the persons with the surviving heads
    init_head_person_temp=init_person+list_head

    # 3. keep head/person whose centre lies inside the largest water contour
    init_head_person_filterwater=init_head_person_temp
    final_head_person_filterwater=[]
    for i in range(len(init_head_person_filterwater)):
        center_x, center_y=center_coordinate(init_head_person_filterwater[i])
        # +1 inside, -1 outside, 0 on the border
        flag = cv2.pointPolygonTest(max_contour, (int(center_x), int(center_y)), False)
        if flag==1:
            final_head_person_filterwater.append(init_head_person_filterwater[i])
        else:
            pass
    t9 = time.time()

    # 4. of the water survivors, remove those inside any boat box
    init_head_person_filterboat=final_head_person_filterwater
    # build a contour per boat box
    boat_contour=[]
    for i in range(len(init_boat)):
        boundbxs1=[init_boat[i][0],init_boat[i][1],init_boat[i][2],init_boat[i][3]]
        contour_temp=fourcorner_coordinate(boundbxs1)
        contour_temp_=np.array(contour_temp)
        contour_temp_=np.float32(contour_temp_)
        boat_contour.append(np.array(contour_temp_))
    t10 = time.time()
    # collect head/person boxes whose centre falls inside a boat (may repeat)
    list_headperson_inboat=[]
    for i in range(len(init_head_person_filterboat)):
        for j in range(len(boat_contour)):
            center_x, center_y=center_coordinate(init_head_person_filterboat[i])
            flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False)
            if flag==1:
                list_headperson_inboat.append(init_head_person_filterboat[i])
            else:
                pass
    if len(list_headperson_inboat)==0:
        pass
    else:
        # drop duplicate rows before subtracting them
        list_headperson_inboat=remove_sameeleme_inalist(list_headperson_inboat)
    # remove the on-boat people from the water survivors
    final_head_person_filterboat=remove_simivalue(init_head_person_filterboat,list_headperson_inboat)
    final_output_luoshui=final_head_person_filterboat
    t11 = time.time()

    timeInfos=' findMaxWater:%.1f releJudge:%.1f'%( ms(t6,t4) ,ms(t11,t6) )
    return final_output_luoshui,timeInfos
preds,_mask_cv = predList[0:2] + return mixDrowing_water_postprocess(preds,_mask_cv,pars ) + diff --git a/utilsK/illParkingUtils.py b/utilsK/illParkingUtils.py new file mode 100644 index 0000000..d12b12d --- /dev/null +++ b/utilsK/illParkingUtils.py @@ -0,0 +1,275 @@ +''' +这个版本增加了船舶过滤功能 +''' +import time +import numpy as np +import cv2 + + + + +def center_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框重点坐标xy + ''' + boundbxs_x1=boundbxs[0] + boundbxs_y1=boundbxs[1] + boundbxs_x2=boundbxs[2] + boundbxs_y2=boundbxs[3] + center_x=0.5*(boundbxs_x1+boundbxs_x2) + center_y=0.5*(boundbxs_y1+boundbxs_y2) + return center_x,center_y + +def fourcorner_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框四个角点坐标,以contours顺序。 + ''' + boundbxs_x1=boundbxs[0] + boundbxs_y1=boundbxs[1] + boundbxs_x2=boundbxs[2] + boundbxs_y2=boundbxs[3] + wid=boundbxs_x2-boundbxs_x1 + hei=boundbxs_y2-boundbxs_y1 + boundbxs_x3=boundbxs_x1+wid + boundbxs_y3=boundbxs_y1 + boundbxs_x4=boundbxs_x1 + boundbxs_y4 = boundbxs_y1+hei + contours_rec=[[boundbxs_x1,boundbxs_y1],[boundbxs_x3,boundbxs_y3],[boundbxs_x2,boundbxs_y2],[boundbxs_x4,boundbxs_y4]] + return contours_rec + + + +def expand_rectangle(rec,imgSize,ex_width,ex_height): + ''' + 矩形框外扩,且不超过图像范围 + 输入:矩形框xyxy(左上和右下坐标),图像,外扩宽度大小,外扩高度大小 + 输出:扩后的矩形框坐标xyxy + ''' + #img_height=img.shape[0];img_width=img.shape[1] + img_width,img_height = imgSize[0:2] + #print('高、宽',img_height,img_width) + x1=rec[0] + y1=rec[1] + x3=rec[2] + y3=rec[3] + + x1=x1-ex_width if x1-ex_width >= 0 else 0 + y1=y1-ex_height if y1-ex_height >= 0 else 0 + x3=x3+ex_width if x3+ex_width <= img_width else img_width + y3=y3+ex_height if y3+ex_height <=img_height else img_height + xyxy=[x1,y1,x3,y3] + + return xyxy + +def remove_simivalue(list1,list2): + ''' + 将list1中属于list2的元素都删除。 + 输入:两个嵌套列表 + 返回:嵌套列表 + ''' + list33=list1.copy() + for i in range(len(list1)): + for j in range(len(list2)): + if list2[j] == list1[i]: + # list33.pop(list1[i]) + list33.remove(list1[i]) + return list33 + 
+def remove_sameeleme_inalist(list3): + ''' + 将list3中重复嵌套列表元素删除。 + 输入:嵌套列表 + 返回:嵌套列表 + ''' + list3=list3 + list4=[] + list4.append(list3[0]) + for dict in list3: + k=0 + for item in list4: + if dict!=item: + k=k+1 + else: + break + if k==len(list4): + list4.append(dict) + return list4 + +def order_points(pts): + ''' sort rectangle points by clockwise ''' + sort_x = pts[np.argsort(pts[:, 0]), :] + + Left = sort_x[:2, :] + Right = sort_x[2:, :] + # Left sort + Left = Left[np.argsort(Left[:, 1])[::-1], :] + # Right sort + Right = Right[np.argsort(Right[:, 1]), :] + return np.concatenate((Left, Right), axis=0) + + +def ms(t2,t1): + + return '%.1f' %( (t2-t1)*1000.0) +def illParking_postprocess(pred,cvMask,pars): + #pred:直接预测结果,不要原图。预测结果[0,1,2,...],不是[车、T角点,L角点] + #mask_cv:分割结果图,numpy格式(H,W),结果是int,[0,1,2,...] + #pars: 其它参数,dict格式 + '''三个标签:车、T角点,L角点''' + '''输入:落水人员的结果(类别+坐标)、原图 + + 过程:将车辆识别框外扩,并按contours形成区域。 + T角点与L角点的坐标合并为列表。 + 判断每个车辆contours区域内有几个角点,少于2个则判断违停。 + 返回:最终违停车辆标记结果图、违停车辆信息(坐标、类别、置信度)。 + ''' + #输入的是[cls,x0,y0,x1,y1,score]---> [x0,y0,x1,y1,cls,score] + #输出的也是[cls,x0,y0,x1,y1,score] + #pred = [ [ int(x[4]) ,*x[1:5], x[5] ] for x in pred] + + #pred = [[ *x[1:5],x[0], x[5] ] for x in pred] + pred = [[ *x[0:4],x[5], x[4] ] for x in pred] + + ##统一格式 + imgSize=pars['imgSize'] + '''1、pred中车辆识别框形成列表,T角点与L角点形成列表''' + tW1=time.time() + init_vehicle=[] + init_corner = [] + + for i in range(len(pred)): + #if pred[i][4]=='TCorner' or pred[i][4]=='LCorner': #vehicle、TCorner、LCorner + if pred[i][4]==1 or pred[i][4]==2: #vehicle、TCorner、LCorner + init_corner.append(pred[i]) + else: + init_vehicle.append(pred[i]) + + + '''2、init_corner中心点坐标计算,并形成列表。''' + tW2 = time.time() + center_corner=[] + for i in range(len(init_corner)): + center_corner.append(center_coordinate(init_corner[i])) + + + '''3、遍历每个车辆识别框,扩充矩形区域,将矩形区域形成contours,判断扩充区域内的。''' + tW3 = time.time() + final_weiting=[] #违停车辆列表 + '''遍历车辆列表,扩大矩形框形成contours''' + for i in range(len(init_vehicle)): + 
boundbxs1=[init_vehicle[i][0],init_vehicle[i][1],init_vehicle[i][2],init_vehicle[i][3]] + width_boundingbox=init_vehicle[i][2]-init_vehicle[i][0] #框宽度 + height_boundingbox=init_vehicle[i][2] - init_vehicle[i][0] #框长度 + #当框长大于宽,则是水平方向车辆;否则认为是竖向车辆 + if width_boundingbox>=height_boundingbox: + ex_width=0.4*(init_vehicle[i][2]-init_vehicle[i][0]) #矩形扩充宽度,取车宽0.4倍 #膨胀系数小一些。角点设成1个。 + ex_height=0.2*(init_vehicle[i][2]-init_vehicle[i][0]) #矩形扩充宽度,取车长0.2倍 + boundbxs1 = expand_rectangle(boundbxs1, imgSize, ex_width, ex_height) # 扩充后矩形对角坐标 + else: + ex_width=0.2*(init_vehicle[i][2]-init_vehicle[i][0]) #竖向,不需要改变变量名称,将系数对换下就行。(坐标点顺序还是1234不变) + ex_height=0.4*(init_vehicle[i][2]-init_vehicle[i][0]) # + boundbxs1 = expand_rectangle(boundbxs1, imgSize, ex_width, ex_height) # 扩充后矩形对角坐标 + contour_temp=fourcorner_coordinate(boundbxs1) #得到扩充后矩形框的contour + contour_temp_=np.array(contour_temp)#contour转为array + contour_temp_=np.float32(contour_temp_) + + '''遍历角点识别框中心坐标是否在contours内,在则计1''' + zzz=0 + for j in range(len(center_corner)): + flag = cv2.pointPolygonTest(contour_temp_, (center_corner[j][0], center_corner[j][1]), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==+1: + zzz+=1 + '''contours框内小于等于1个角点,认为不在停车位内''' + # if zzz<=1: + if zzz<1: + final_weiting.append(init_vehicle[i]) + #print('t7-t6',t7-t6) + #print('final_weiting',final_weiting) + + '''4、绘制保存检违停车辆图像''' + + tW4=time.time() + ''' + colors = Colors() + if final_weiting is not None: + for i in range(len(final_weiting)): + lbl='illegal park' + xyxy=[final_weiting[i][0],final_weiting[i][1],final_weiting[i][2],final_weiting[i][3]] + c = int(5) + plot_one_box(xyxy, _img_cv, label=lbl, color=colors(c, True), line_thickness=3) + final_img=_img_cv + ''' + tW5=time.time() + # cv2.imwrite('final_result.png', _img_cv) + + + timeStr = ' step1:%s step2:%s step3:%s save:%s'%( ms(tW2,tW1), ms(tW3,tW2),ms(tW4,tW3), ms(tW5,tW4) ) + + #final_weiting-----[x0,y0,x1,y1,cls,score] + #输出的也是outRe----[cls,x0,y0,x1,y1,score] + + #outRes = [ [ 
3 ,*x[0:4], x[5] ] for x in final_weiting]###违停用3表示 + + outRes = [ [ *x[0:4], x[5],3 ] for x in final_weiting]###违停用3表示 + + return outRes,timeStr #返回最终绘制的结果图、违停车辆(坐标、类别、置信度) + +def illParking_postprocess_N(predList,pars): + pred=predList[0] + cvMask=None + return illParking_postprocess(pred,cvMask,pars) +def AI_process(model, args1,path1): + '''对原图进行目标检测''' + '''输入:检测模型、配置参数、路径 + 返回:返回目标检测结果、原图像, + ''' + '''检测图片''' + t3=time.time() + _img_cv = cv2.imread(path1) # 将这里的送入yolov5 + t4 = time.time() + pred = model.detect(_img_cv) # 检测结果 + t5 = time.time() + #print('t5-t4', t5-t4) + #print('t4-t3', t4-t3) + return pred, _img_cv #返回目标检测结果、原图像 + +def main(): + + '''配置参数''' + + args1={'cuda':'0','input_dir':'input_dir','output_dir':'output_dir'} + + dete_weights='weights/weiting20230727.pt' + '''分割模型权重路径''' + + '''初始化目标检测模型''' + model = Detector(dete_weights) + + names=['vehicle', 'TCorner', 'LCorner'] + t1=time.time() + '''图像测试''' + folders = os.listdir(args1['input_dir']) + for i in range(len(folders)): + path1 = args1['input_dir'] + '/' + folders[i] + print('-'*100,path1) + '''对原图进行目标检测''' + pred, _img_cv=AI_process(model, args1,path1) + H,W = _img_cv.shape[0:2] + imgSize = (W,H);pars={'imgSize':imgSize} + #preds = [[ names.index(x[4]),*x[0:4], float(x[5].cpu()) ] for x in pred[1]] + preds = [[ *x[0:4], names.index(x[4]),float(x[5].cpu()) ] for x in pred[1]] + # print('pred', pred) + final_weiting,timeStr = illParking_postprocess(preds,None,pars) + + '''进入后处理,判断是否有违章停车''' + #final_img,final_weiting=AI_postprocess(pred, _img_cv) + #cv2.imwrite('./outdir/final_result'+str(i)+'.png', final_img) + + t2=time.time() + print('耗时',t2-t1) + +if __name__ == "__main__": + main() + diff --git a/utilsK/jkmUtils.py b/utilsK/jkmUtils.py new file mode 100644 index 0000000..276d309 --- /dev/null +++ b/utilsK/jkmUtils.py @@ -0,0 +1,705 @@ +# YOLOv5 general utils + +import glob +import logging +import math +import os +import platform +import random +import re +import subprocess +import 
time +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import torch +import torchvision +import yaml + + + +# Settings +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads + + +def set_logging(rank=-1): + logging.basicConfig( + format="%(message)s", + level=logging.INFO if rank in [-1, 0] else logging.WARN) + + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + return True + except OSError: + return False + + +def check_git_status(): + # Recommend 'git pull' if code is out of date + print(colorstr('github: '), end='') + try: + assert Path('.git').exists(), 'skipping check (not a git repository)' + assert not isdocker(), 'skipping check (Docker image)' + assert check_online(), 'skipping check (offline)' + + cmd = 'git fetch && git config --get remote.origin.url' + url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url + branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', 
shell=True).decode().strip() # checked out + n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." + else: + s = f'up to date with {url} ✅' + print(emojis(s)) # emoji-safe + except Exception as e: + print(e) + + +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) + import pkg_resources as pkg + prefix = colorstr('red', 'bold', 'requirements:') + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + n += 1 + print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") + print(subprocess.check_output(f"pip install {e.req}", shell=True).decode()) + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % 
(img_size, s, new_size)) + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_file(file): + # Search for file if not found + if Path(file).is_file() or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' 
% s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int) # labels = 
[class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x 
center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. 
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 
4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + 
        # Bail out of NMS when the per-call time budget is exhausted.
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output


def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'.
    # Replaces the model with its EMA copy (if present), nulls training-only keys,
    # converts weights to FP16 and freezes parameters before re-saving.
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")


def apply_classifier(x, model, img, im0):
    # Applies a second-stage classifier to YOLO outputs: crops each detection,
    # runs `model` on the 224x224 crop and keeps only detections whose detector
    # class agrees with the classifier class.
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts: square boxes, scaled 1.3x plus 30px margin
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes predicted by the detector
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
def numpy_detect(x,nc=None):
    # Pure-numpy YOLOv5 Detect head: decodes the three raw feature maps into
    # (1, N, nc+5) predictions. anchor_grid holds the anchor priors
    # (original comment: 先验框); grids are hard-coded for a 640x640 input
    # (strides 8/16/32 -> 80/40/20 cells).
    anchor_grid = [10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0]
    anchor_grid = np.array(anchor_grid).reshape(3,1,-1,1,1,2)
    stride = np.array([8, 16, 32])
    grid = [make_grid(80,80), make_grid(40,40), make_grid(20,20)]

    z = []

    for i in range(3):
        y = numpy_sigmoid(x[i])

        y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + grid[i]) * stride[i]  # xy
        y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * anchor_grid[i]  # wh

        z.append(y.reshape(1, -1, nc + 5))

    res = np.concatenate(z, 1)

    return res

def numpy_sigmoid(x):
    # Elementwise logistic function.
    return 1/(1+np.exp(-x))

def make_grid(nx=20,ny=20):
    # Cell-coordinate grid of shape (1,1,nx,ny,2) used by numpy_detect.
    xv,yv = np.meshgrid(np.arange(nx), np.arange(ny))
    res = np.stack((xv,yv), 2).reshape(1,1,nx,ny,2).astype(np.float32)
    return res

def img_pad(img, size, pad_value=[114,114,114]):
    # Resize keeping aspect ratio, then center-pad to `size` (letterbox).
    # Returns the padded image and (top, left, r) needed by scale_back.
    # NOTE(review): an img_pad is also imported from utilsK.queRiver at the top
    # of this file — this local definition shadows it; confirm which is intended.
    H,W,_ = img.shape
    r = max(H/size[0], W/size[1])
    img_r = cv2.resize(img, (int(W/r), int(H/r)))
    tb = size[0] - img_r.shape[0]
    lr = size[1] - img_r.shape[1]
    top = int(tb/2)
    bottom = tb - top
    left = int(lr/2)
    right = lr - left
    pad_image = cv2.copyMakeBorder(img_r, top, bottom, left, right, cv2.BORDER_CONSTANT,value=pad_value)
    return pad_image,(top, left,r)
def scale_back(boxes,padInfos):
    # Inverse of img_pad for xyxy boxes: undo the padding offset, then the resize.
    top, left,r = padInfos[0:3]

    boxes[:,0] = (boxes[:,0] - left) * r

    boxes[:,2] = (boxes[:,2] - left) * r
    boxes[:,1] = (boxes[:,1] - top) * r
    boxes[:,3] = (boxes[:,3] - top) * r
    return boxes


def get_ms(t1,t0):
return (t1-t0)*1000.0 +def pre_process(im0,device): + img, padInfos = img_pad(im0, size=(640,640,3)) + img = img[:, :, ::-1].transpose(2, 0, 1) + img = np.ascontiguousarray(img, dtype=np.float32) + img = torch.from_numpy(img).to(device) + img /= 255.0 + t3=time.time() + if img.ndimension() == 3: + img = img.unsqueeze(0) + return img ,padInfos + + +def post_process(pred,padInfos,device,conf_thres=0.4, iou_thres=0.45,nc=9): + pred = [x.data.cpu().numpy() for x in pred] + pred = numpy_detect(pred, nc) + pred = torch.tensor(pred).to(device) + pred = non_max_suppression(pred, conf_thres, iou_thres) + + for i, det in enumerate(pred): # only one image + if len(det): + det[:, :4] = scale_back( det[:, :4],padInfos) + print() + else: + det = [] + if len(det)!=0: + det=det.cpu().numpy() + else: det=[] + return det +def get_return_data(img,boxes,modelType='code',plate_dilate=(0.1,0.1)): + #name ['greeCode', 'yellowCode', 'redCode', 'hsImage', 'NameImage', 'word'] + #"greeCode","yellowCode","redCode","hsImage","hs48Image","NameImage","phoneNumberImage","word" , "phone" + # 0 , 1 , 2 , 3 , 4 , 5 , 6, 7 , 8 + ##type: 0--车牌,1--健康码或者行程卡,2--其它 + results=[] + h,w,c = img.shape + fx = float(900.0/h) + if modelType !='plate': + etc={'type':0,'color':'green','nameImage':'','phoneNumberImage':'','cityImage':'','hsImage':'','plateImage':''} + cols=[];cols_scores=[] + score_hsImage= 0.0 + for box in boxes: + pts=box[0:4]; pts=[int(x) for x in pts]; x0,y0,x1,y1=pts[0:4] + x0=max(0,x0);y0=max(0,y0);x1=min(x1,w);y1=min(y1,h) + typee=int(box[5]);score=str(box[4]) + if typee==7 or typee == 8: + etc['type']=1 #检测到城市,或者行程卡上的手机号 ,为行程卡 ,##苏康码也定义为1 + if typee==9: + etc['type']=2 #检测到苏康码字图像,则为苏康码 + if typee in [3,4,5,6,7,8]: + image_corp = cv2.resize(img[y0:y1,x0:x1],None,fx=fx,fy=fx) + if typee==3 or typee==4: + if score_hsImage < float(score): + etc['hsImage']= [image_corp,score] ; score_hsImage = float(score) + if typee==5: etc['nameImage']= [image_corp,score] + if typee==6: 
etc['phoneNumberImage']= [image_corp,score] + if typee==7: etc['cityImage']= [image_corp,score] + if typee==8: etc['phoneNumberImage']= [image_corp,score] + + if typee in [0,1,2]: + if typee==0: cols.append('green');cols_scores.append(float(score)) + if typee==1: cols.append('yellow');cols_scores.append(float(score)) + if typee==2: cols.append('red');cols_scores.append(float(score)) + + if len(cols)>0: + maxid=cols_scores.index(max(cols_scores)) + etc['color']=cols[maxid] + else: + etc={'type':0,'plateImage':'','color':'green'} + score_list=[ float(b[4]) for b in boxes ]; + if len(score_list)>0: + maxid =score_list.index(max(score_list)) + box= boxes[maxid] + pts=box[0:4]; pts=[int(x) for x in pts]; x0,y0,x1,y1=pts[0:4] + per=plate_dilate + x_delta = int((x1-x0)*per[0]/2); y_delta = int((y1-y0)*per[1]/2); + #print( 'x0,y0,x1,y1,x_delta,y_delta:' , x0,y0,x1,y1,x_delta,y_delta,' h, w',h,w) + x0 = max(x0- x_delta,0); x1 = min(x1+x_delta,w) + y0 = max(y0-y_delta,0); y1 = min(y1+y_delta,h) + #print( 'x0,y0,x1,y1,x_delta,y_delta:' , x0,y0,x1,y1,x_delta,y_delta,' h, w',h,w) + image_corp = cv2.resize(img[y0:y1,x0:x1],None,fx=fx,fy=fx) + etc['plateImage']= [image_corp,str(max(score_list))] + etc['type'] = 3 + results.append(etc) + return etc diff --git a/utilsK/masterUtils.py b/utilsK/masterUtils.py new file mode 100644 index 0000000..236c64d --- /dev/null +++ b/utilsK/masterUtils.py @@ -0,0 +1,303 @@ +from kafka import KafkaProducer, KafkaConsumer,TopicPartition +from kafka.errors import kafka_errors +import os,cv2,sys,json,time +import numpy as np +import requests +def query_channel_status(channelIndex): + channel_query_api='https://streaming.t-aaron.com/livechannel/getLiveStatus/%s'%(channelIndex) + #https://streaming.t-aaron.com/livechannel/getLiveStatus/LC001 + try: + res = requests.get(channel_query_api,timeout=10).json() + if res['data']['status']==2:#1空闲中 2使用中 3停用 4待关闭 + taskEnd=False + else: + taskEnd=True + infos='channel_query_api connected' + except Exception as 
e: + taskEnd=True + infos='channel_query_api not connected:%s'%(e) + return infos, taskEnd + +def query_request_status(request_url): + #channel_query_api='https://streaming.t-aaron.com/livechannel/getLiveStatus/%s'%(channelIndex) + channel_request_api=request_url + + try: + res = requests.get(channel_request_api,timeout=10).json() + if res['data']['status']==5:#5:执行中 10:待停止分析 15:执行结束 + taskEnd=False + else: + taskEnd=True + infos='channel_request_api connected' + except Exception as e: + taskEnd=True + infos='channel_request_api not connected:%s'%(e) + return infos, taskEnd + +def get_needed_objectsIndex(object_config): + needed_objectsIndex=[] + + for model in object_config: + try: + needed_objectsIndex.append(int(model['id'])) + except Exception as e: + a=1 + allowedList_str=[str(x) for x in needed_objectsIndex] + allowedList_string=','.join(allowedList_str) + + return needed_objectsIndex , allowedList_string + + +def get_infos(taskId, msgId,msg_h,key_str='waiting stream or video, send heartbeat'): + outStrList={} + outStrList['success']= '%s, taskId:%s msgId:%s send:%s'%(key_str,taskId, msgId,msg_h); + outStrList['failure']='kafka ERROR, %s'%(key_str) + outStrList['Refailure']='kafka Re-send ERROR ,%s'%(key_str) + return outStrList +def writeTxtEndFlag(outImaDir,streamName,imageTxtFile,endFlag='结束'): +#time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + EndUrl='%s/%s_frame-9999-9999_type-%s_9999999999999999_s-%s_AI.jpg'%(outImaDir,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),endFlag,streamName) + EndUrl = EndUrl.replace(' ','-').replace(':','-') + img_end=np.zeros((100,100),dtype=np.uint8);cv2.imwrite(EndUrl,img_end) + if imageTxtFile: + EndUrl_txt = EndUrl.replace('.jpg','.txt') + fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close() + + EndUrl='%s/%s_frame-9999-9999_type-%s_9999999999999999_s-%s_OR.jpg'%(outImaDir,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),endFlag,streamName) + EndUrl = EndUrl.replace(' ','-').replace(':','-') + 
ret = cv2.imwrite(EndUrl,img_end) + if imageTxtFile: + EndUrl_txt = EndUrl.replace('.jpg','.txt') + fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close() +def get_current_time(): + """[summary] 获取当前时间 + + [description] 用time.localtime()+time.strftime()实现 + :returns: [description] 返回str类型 + """ + ct = time.time() + local_time = time.localtime(ct) + data_head = time.strftime("%Y-%m-%d %H:%M:%S", local_time) + data_secs = (ct - int(ct)) * 1000 + time_stamp = "%s.%03d" % (data_head, data_secs) + return time_stamp + + + +def send_kafka(producer,par,msg,outStrList,fp_log,logger,line='000',thread='detector',printFlag=False ): + future = producer.send(par['topic'], msg) + try: + record_metadata = future.get() + outstr=outStrList['success'] + + #outstr=wrtiteLog(fp_log,outstr);print( outstr); + writeELK_log(outstr,fp_log,level='INFO',thread=thread,line=line,logger=logger,printFlag=printFlag) + + except Exception as e: + outstr='%s , warning: %s'%( outStrList['failure'],str(e)) + writeELK_log(outstr,fp_log,level='WARNING',thread=thread,line=line,logger=logger,printFlag=printFlag) + try: + producer.close() + producer = KafkaProducer(bootstrap_servers=par['server'], value_serializer=lambda v: v.encode('utf-8')).get() + future = producer.send(par['topic'], msg).get() + except Exception as e: + outstr='%s, error: %s'%( outStrList['Refailure'],str(e)) + #outstr=wrtiteLog(fp_log,outstr);print( outstr); + writeELK_log(outstr,fp_log,level='ERROR',thread=thread,line=line,logger=logger,printFlag=printFlag) + +def check_time_interval(time0_beg,time_interval): + time_2 = time.time() + if time_2 - time0_beg>time_interval: + return time_2,True + else: + return time0_beg,False +def addTime(strs): + timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + + outstr='\n %s %s '%(timestr,strs) + return + + +def get_file(): + print("文件名 :",__file__,sys._getframe().f_lineno) + print("函数名: ", sys._getframe().f_code.co_name) + print("模块名: ", sys._getframe().f_back.f_code.co_name) + 
+def writeELK_log(msg,fp,level='INFO',thread='detector',logger='kafka_yolov5',line=9999,newLine=False,printFlag=True): + #timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + timestr=get_current_time() + outstr='%s [%s][%s][%d][%s]- %s'%(timestr,level,thread,line,logger,msg) + + if newLine: + outstr = '\n'+outstr + + fp.write(outstr+'\n') + fp.flush() + if printFlag: + print(outstr) + return outstr + + +def wrtiteLog(fp,strs,newLine=False): + timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + if newLine: + outstr='\n %s %s '%(timestr,strs) + else: + outstr='%s %s '%(timestr,strs) + fp.write(outstr+'\n') + fp.flush() + return outstr + +def create_logFile(logdir='logdir',name=None): + if name: + logname =logdir+'/'+ name + else: + logname =logdir+'/'+ time.strftime("%Y-%m-%d.txt", time.localtime()) + if os.path.exists(logname): + fp_log = open(logname,'a+') + else: + fp_log = open(logname,'w') + return fp_log +def get_boradcast_address(outResource): + #rtmp://live.push.t-aaron.com/live/THSB,阿里云,1945 + #rtmp://demopush.yunhengzhizao.cn/live/THSB,腾讯云,1935 + if '1945' in outResource: + return 'rtmp://live.play.t-aaron.com/live/THSB' + else: + return 'rtmp://demoplay.yunhengzhizao.cn/live/THSB_HD5M' +def save_message(kafka_dir,msg): + outtxt=os.path.join(kafka_dir,msg['request_id']+'.json') + assert os.path.exists(kafka_dir) + with open(outtxt,'w') as fp: + json.dump(msg,fp,ensure_ascii=False) + + + +def get_push_address(outResource): + #rtmp://live.push.t-aaron.com/live/THSB,阿里云,1945 + #rtmp://demopush.yunhengzhizao.cn/live/THSB,腾讯云,1935 + #终端推流地址:rtmp://live.push.t-aaron.com/live/THSAa + #终端拉流地址:rtmp://live.play.t-aaron.com/live/THSAa_hd + #AI推流地址:rtmp://live.push.t-aaron.com/live/THSBa + #AI拉流地址:rtmp://live.play.t-aaron.com/live/THSBa_hd + + if 't-aaron' in outResource: + if 'THSBa' in outResource: port=1975 + elif 'THSBb' in outResource: port=1991 + elif 'THSBc' in outResource: port=1992 + elif 'THSBd' in outResource: port=1993 + elif 'THSBe' 
in outResource: port=1994 + elif 'THSBf' in outResource: port=1995 + elif 'THSBg' in outResource: port=1996 + elif 'THSBh' in outResource: port=1997 + else: port=1945 + else: port=1935 + return 'rtmp://127.0.0.1:%d/live/test'%(port) + return outResource +def getAllRecord_poll(consumer): + msgs = consumer.poll(5000) + keys=msgs.keys() + out = [ msgs[x] for x in keys] + out = [y for x in out for y in x] + + + for key in keys: + out.extend(msgs[key]) + return out +def getAllRecords(consumer,topics): + leftCnt = 0 + for topic in topics[0:2]: + leftCnt+=get_left_cnt(consumer,topic) + out = [] + if leftCnt == 0: + return [] + for ii,msg in enumerate(consumer): + consumer.commit() + out.append(msg) + if ii== (leftCnt-1): + break###断流或者到终点 + return out + +def get_left_cnt(consumer,topic): + partitions = [TopicPartition(topic, p) for p in consumer.partitions_for_topic(topic)] + + # total + toff = consumer.end_offsets(partitions) + toff = [(key.partition, toff[key]) for key in toff.keys()] + toff.sort() + + # current + coff = [(x.partition, consumer.committed(x)) for x in partitions] + coff.sort() + + # cal sum and left + toff_sum = sum([x[1] for x in toff]) + cur_sum = sum([x[1] for x in coff if x[1] is not None]) + left_sum = toff_sum - cur_sum + + return left_sum +def view_bar(num, total,time1,prefix='prefix'): + rate = num / total + time_n=time.time() + rate_num = int(rate * 30) + rate_nums = np.round(rate * 100) + r = '\r %s %d / %d [%s%s] %.2f s'%(prefix,num,total, ">" * rate_num, " " * (30 - rate_num), time_n-time1 ) + sys.stdout.write(r) + sys.stdout.flush() +def get_total_cnt(inSource): + cap=cv2.VideoCapture(inSource) + assert cap.isOpened() + cnt=cap.get(7) + fps = cap.get(cv2.CAP_PROP_FPS) + cap.release() + return cnt,fps +def check_stream(inSource,producer,par,msg,outStrList ,fp_log,logger,line='000',thread='detector',timeMs=120,): + cnt =(timeMs-1)//10 + 1 + Stream_ok=False + + for icap in range(cnt): + cap=cv2.VideoCapture(inSource) + + if cap.isOpened() and 
get_fps_rtmp(inSource,video=False)[0] : + Stream_ok=True ;cap.release();break; + #Stream_ok,_= get_fps_rtmp(inSource,video=False) + #if Stream_ok:cap.release();break; + else: + Stream_ok=False + timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + outstr='Waiting stream %d s'%(10*icap) + writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=line,logger=logger) + time.sleep(10) + if icap%3==0: + send_kafka(producer,par,msg,outStrList,fp_log,logger=logger,line=line,thread=thread ) + + + return Stream_ok + + + + +def get_fps_rtmp(inSource,video=False): + cap=cv2.VideoCapture(inSource) + if not cap.isOpened(): + print('#####error url:',inSource) + return False,[0,0,0,0] + + fps = cap.get(cv2.CAP_PROP_FPS) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH ) + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + cnt = 0 + if video: cnt=cap.get(7) + + if width*height==0 or fps>30: + return False,[0,0,0,0] + cap.release() + try: + outx = [fps,width,height,cnt] + outx = [int(x+0.5) for x in outx] + + return True,outx + except: + return False, [0,0,0,0] + + diff --git a/utilsK/modelEval.py b/utilsK/modelEval.py new file mode 100644 index 0000000..7fb7c1f --- /dev/null +++ b/utilsK/modelEval.py @@ -0,0 +1,110 @@ +import sys +sys.path.extend(['/home/thsw2/WJ/src/yolov5']) +import utils,json,time,torch +import numpy as np +from segutils.segmodel import SegModel,get_largest_contours +from models.experimental import attempt_load +from utils.torch_utils import select_device, load_classifier, time_synchronized +import subprocess as sp +import cv2 +from utils.datasets import LoadStreams, LoadImages +from queRiver import get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str +def get_total_cnt(inSource): + cap=cv2.VideoCapture(inSource) + cnt=cap.get(7) + cap.release() + return cnt +def onlineModelProcess(parIn ): + streamName = parIn['streamName'] + childCallback=parIn['callback'] + try: + + inSource,outSource=parIn['inSource'],parIn['outSource'] + 
weights='../yolov5/weights/1230_last.pt' + device = select_device('0') + half = device.type != 'cpu' # half precision only supported on CUDA + model = attempt_load(weights, map_location=device) # load FP32 model + if half: model.half() + seg_nclass = 2 + weights = '../yolov5/weights/segmentation/BiSeNet/checkpoint.pth' + segmodel = SegModel(nclass=seg_nclass,weights=weights,device=device) + jsonfile='../yolov5/config/queRiver.json' + with open(jsonfile,'r') as fp: + parAll = json.load(fp) + + + resource=parAll['prep_process']['source'] + if outSource: + command=['ffmpeg','-y','-f', 'rawvideo','-vcodec','rawvideo','-pix_fmt', 'bgr24', + '-s', "{}x{}".format(parAll["push_process"]['OutVideoW'],parAll["push_process"]['OutVideoH']),# 图片分辨率 + '-r', str(30),# 视频帧率 + '-i', '-','-c:v', 'libx264','-pix_fmt', 'yuv420p', + '-f', 'flv',outSource + ] + txtname='mintors/%s.txt'%( time.strftime("%Y-%m-%d", time.localtime()) ) + fp_out = open( txtname,'a+' ) + outstr='%s stream:%s starts \n'%( time_str(),parAll['push_process']['rtmpUrl']) + fp_out.write(outstr);fp_out.flush() + + + + + # 管道配置,其中用到管道 + if outSource: + ppipe = sp.Popen(command, stdin=sp.PIPE) + + ##后处理参数 + par=parAll['post_process'] + conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes'] + labelnames=par['labelnames'] + rainbows=par['rainbows'] + fpsample = par['fpsample'] + names=get_labelnames(labelnames) + label_arraylist = get_label_arrays(names,rainbows,outfontsize=40) + + dataset = LoadStreams(inSource, img_size=640, stride=32) + if (inSource.endswith('.MP4')) or (inSource.endswith('.mp4')): + totalcnt=get_total_cnt(inSource) + childCallback.send('####model load success####') + iframe = 0;post_results=[];time_beg=time.time() + print('###line71 modelEval.py',totalcnt,len(dataset), inSource) + for path, img, im0s, vid_cap in dataset: + print(path) + if not path:childCallback.send('####strem ends####'); break###断流或者到终点 + if not outSource:###如果不推流,则显示进度条 + view_bar(iframe,totalcnt,time_beg 
)
            time0=time.time()
            iframe +=1
            time1=time.time()
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            time2 = time.time()
            pred = model(img,augment=False)[0]
            time3 = time.time()
            seg_pred,segstr = segmodel.eval(im0s[0] )

            time4 = time.time()
            datas = [path, img, im0s, vid_cap,pred,seg_pred,iframe]

            p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
            ## every fpsample frames: if problem frames were queued, persist them
            if (iframe % fpsample == 0) and (len(post_results)>0) :
                parImage=save_problem_images(post_results,iframe,names,streamName=streamName)




                post_results=[]

            if len(p_result[2] )>0:  # detections present -> queue as a problem frame
                post_results.append(p_result)

            image_array = p_result[1]
            if outSource:
                # feed annotated frame into the ffmpeg push pipe
                ppipe.stdin.write(image_array.tostring())
    except Exception as e:
        childCallback.send(e)  # forward the exception through the pipe
diff --git a/utilsK/noParkingUtils.py b/utilsK/noParkingUtils.py
new file mode 100644
index 0000000..e2415e3
--- /dev/null
+++ b/utilsK/noParkingUtils.py
@@ -0,0 +1,464 @@
# Fastest variant of this module (original note: 耗时最短代码)
import numpy as np
import cv2,time,math
import matplotlib.pyplot as plt


def get_ms(time2, time1):
    # Elapsed milliseconds between two time.time() stamps.
    return (time2-time1)*1000.0


# Distance from a point to the quadratic curve x = a*(y**2) + b*y + c
def point2QF(a, b, c, y, x):  # point is (y, x)
    distance = abs(x - a*(y**2) - b*y - c) / math.sqrt(1 + ((2*a*y + b)**2))
    return distance


# Collect the contours of every speedRoad region
def storageRoad(contours, pars):
    allRoadCnt = []  # contours of all speedRoad regions
    for cnt in contours:  # road contours
        if len(cnt) >= 6:
            rect = cv2.minAreaRect(cnt)
            if rect[1][0] * rect[1][1] > pars['RoadArea']:  # drop roads below the area threshold
                allRoadCnt.append(cnt)
    return allRoadCnt


# Return the count and contours of qualifying lanes
def storageLane(contours, pars):
    """
    contours: raw contours from lane segmentation
    newLaneContours: contours of qualifying lanes
    laneNumber: number of qualifying lanes
    A qualifying lane satisfies all of:
      (1) the contour has at least 6 points
      (2) the min-area rect of the lane exceeds pars['laneArea']
      (3) short-side/long-side ratio of that rect <= pars['roundness']
    """
    laneNumber = 0
    newLaneContours = ()
    for cnt in contours:
        if len(cnt) >= 6:
            rect = cv2.minAreaRect(cnt)
            if rect[1][0] * rect[1][1] > pars['laneArea'] and min(rect[1]) / max(rect[1]) <= pars['roundness']:
                laneNumber += 1
                newLaneContours = newLaneContours + (cnt, )
    return laneNumber, newLaneContours


# Split vehicles into "qualified" (contour >= 6 points) and "unqualified" (< 6),
# appending each to the matching accumulator lists.
# NOTE(review): dets[count] and dets[int(i/2)] index the same entry here since
# count advances once per coordinate pair — confirm against storageVehicle.
def vehicleDivide(contours, vehicleBD, normVehicle, dets, count, i, unnormVehicle, normVehicleCOOR, centerCOOR):
    if len(contours) >= 6:
        vehicleBD.append(contours)
        normVehicle.append(dets[count])
        normVehicleCOOR.append(centerCOOR)
    else:
        # unqualified: pad score/class slots with 0 and record separately
        dets[int(i / 2)].append(0)
        dets[int(i / 2)].append(0)
        unnormVehicle.append(dets[int(i / 2)])
    return vehicleBD, normVehicle, unnormVehicle, normVehicleCOOR


# Store the information of every vehicle
def storageVehicle(pars, imgVehicle, dets):
    """
    输入
        pars:字典名
        imgVehicle:分割图,只包含vehicle和背景
        dets:是一个list,其中存储检测得到的各vehicle的信息,即[[x0, y0, x1, y1, 车辆得分, cls], ...]
    输出
        dets:存储合格vehicle的信息,即[x0, y0, x1, y1, 车辆得分, cls]
        vehicleBD:存储合格vehicle的contours
        unnormVehicle:存储不合格vehicle的信息,即[x0, y0, x1, y1, 车辆得分, cls]
        normVehicleCOOR:存储合格vehicle的中心点坐标
    说明
        合格vehicle:contours中的顶点数大于等于6
        不合格vehicle:contours中的顶点数小于6
    """
    vehicleBD = []        # contours of qualified vehicles in this frame
    normVehicle = []      # info rows of qualified vehicles
    unnormVehicle = []    # info rows of unqualified vehicles
    normVehicleCOOR = []  # center points of qualified vehicles
    img = cv2.cvtColor(imgVehicle, cv2.COLOR_BGR2GRAY)
    count = 0
    # vehicleCOOR holds two corner points per vehicle, hence the stride of 2
    for i in range(0, len(pars['vehicleCOOR']), 2):
        y1 = int(pars['vehicleCOOR'][i][1] * pars['ZoomFactor']['y'])
        y2 = int(pars['vehicleCOOR'][i + 1][1] * pars['ZoomFactor']['y'])
        x1 = int(pars['vehicleCOOR'][i][0] * pars['ZoomFactor']['x'])
        x2 = int(pars['vehicleCOOR'][i + 1][0] * pars['ZoomFactor']['x'])
        # grow the crop 2px on each side when it stays inside the model canvas
        if y1 >= 2:
            y1 = y1 - 2
        if y2 <= (pars['modelSize'][1] - 2):
            y2 = y2 + 2
        if x1 >= 2:
            x1 = x1 - 2
        if x2 <= (pars['modelSize'][0] - 2):
            x2 = x2 + 2
        centerCOOR = (int((x1 + x2) / 2), int((y1 + y2) / 2))
        img1 = img[y1:y2, x1:x2]
        # pad the crop with a zero border so contours never touch the edge
        up = np.zeros((20, (x2 - x1)), dtype='uint8')
        left = np.zeros(((40 + y2 - y1), 20), dtype='uint8')
        img1 = np.concatenate((up, img1), axis=0)
        img1 = np.concatenate((img1, up), axis=0)
        img1 = np.concatenate((left, img1), axis=1)
        img2 = np.concatenate((img1, left), axis=1)
        contours2, hierarchy = cv2.findContours(img2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if len(contours2) != 0:
            if len(contours2) > 1:
                # several blobs in the crop: keep the one with the largest min-area rect
                vehicleArea = []
                for j in range(len(contours2)):
                    rect = cv2.minAreaRect(contours2[j])
                    vehicleArea.append(rect[1][0] * rect[1][1])
                maxAreaIndex = vehicleArea.index(max(vehicleArea))
                maxAreaContours = contours2[maxAreaIndex]
                vehicleBD, normVehicle, unnormVehicle, normVehicleCOOR = vehicleDivide(maxAreaContours, vehicleBD, normVehicle, dets, count, i, unnormVehicle, normVehicleCOOR, centerCOOR)
            elif len(contours2) == 1:
                vehicleBD, normVehicle, unnormVehicle, normVehicleCOOR = vehicleDivide(contours2[0], vehicleBD, normVehicle, dets, count, i, unnormVehicle, normVehicleCOOR, centerCOOR)
        else:
            # no contour found at all: treat as unqualified
            dets[int(i / 2)].append(0)
            dets[int(i / 2)].append(0)
            unnormVehicle.append(dets[int(i / 2)])
        count += 1
    dets = normVehicle
    return dets, vehicleBD, unnormVehicle, normVehicleCOOR


# Compute the illegal-parking score
def IllegalParkScore1(vehicleBD, allRoadCnt, dets, unnormVehicle, normVehicleCOOR, a_l, b_l, c_l, a_r, b_r, c_r):
    """
    Decide whether each vehicle sits on a speedRoad and score the violation.
    a_*/b_*/c_* are the quadratic coefficients of the fitted leftmost (l) and
    rightmost (r) lane curves x = a*y^2 + b*y + c.
    Output targetList rows: [cls, x0, y0, x1, y1, score, violation_score, violation_class].
    """
    if len(vehicleBD) != 0:
        for i in range(len(vehicleBD)):
            rect = cv2.minAreaRect(vehicleBD[i])
            center = normVehicleCOOR[i]  # vehicle center point
            if len(allRoadCnt) != 0:  # scoring requires at least one road region
                for j in range(len(allRoadCnt)):
                    # is the vehicle center inside this road polygon?
                    flag = cv2.pointPolygonTest(allRoadCnt[j], center, False)
                    if flag >= 0:
                        dets[i].append(0)  # placeholder: violation score
                        dets[i].append(0)  # placeholder: violation class
                        if center[0] < predict(a_l, b_l, c_l, center[1]):
                            # left of the leftmost lane curve
                            distance = point2QF(a_l, b_l, c_l, center[1], center[0])
                            if distance >= min(rect[1]) / 2:
                                dets[i][6], dets[i][7] = 1, 1
                            else:
                                dets[i][6], dets[i][7] = distance / (min(rect[1]) / 2), 1
                        elif center[0] > predict(a_r, b_r, c_r, center[1]):
                            # right of the rightmost lane curve
                            distance = point2QF(a_r, b_r, c_r, center[1], center[0])
                            if distance >= min(rect[1]) / 2:
                                dets[i][6], dets[i][7] = 1, 1
                            else:
                                dets[i][6], dets[i][7] = distance / (min(rect[1]) / 2), 1
                        else:
                            dets[i][6], dets[i][7] = 0, 0
                        break
            # no speedRoad found: mark the vehicle as non-violating
            if len(dets[i]) < 8:
                dets[i].append(0)  # violation score 0
                dets[i].append(0)  # 0 = no violation
        targetList = dets
        if len(unnormVehicle) != 0:
            for i in range(len(unnormVehicle)):
                targetList.append(unnormVehicle[i])  # merge all vehicles together
    else:
        targetList = unnormVehicle
    return targetList


# Compute the illegal-parking score (fallback: fewer than 2 lanes found)
def IllegalParkScore2(vehicleBD, dets, unnormVehicle):
    """
    Score the violation when no lane fit is available: every vehicle is
    marked non-violating.
    Output targetList rows: [cls, x0, y0, x1, y1, score, violation_score, violation_class].
    """
    if len(vehicleBD) != 0:
        for i in range(len(vehicleBD)):
            if len(dets[i]) < 8:
                dets[i].append(0)  # violation score 0
                dets[i].append(0)  # 0 = no violation
        targetList = dets
        if len(unnormVehicle) != 0:
            for i in range(len(unnormVehicle)):
                targetList.append(unnormVehicle[i])  # merge all vehicles together
    else:
        targetList = unnormVehicle
    return targetList


# When searching for the leftmost lane, indices of right-side lanes to drop go
# into delRightLane; when searching for the rightmost lane, left-side indices
# go into delLeftLane.
def devideLane(laneInfo, i, m, delRightLane, delLeftLane, y):
    index1 = np.where(laneInfo[i][3] == y)
    index1 = index1[0].tolist()
    index1.sort()
    x_1 = laneInfo[i][5][index1[0]][0]
    index2 = np.where(laneInfo[m][3] == y)
    index2 = index2[0].tolist()
    index2.sort()
    x_2 = laneInfo[m][5][index2[0]][0]
    # compare x at the shared y: the smaller x is the left lane
    if x_1 < x_2:
        if i not in delLeftLane:
            delLeftLane.append(i)  # keep the right-side lane
        if m not in delRightLane:
            delRightLane.append(m)
    else:
        if m not in delLeftLane:
            delLeftLane.append(m)
        if i not in delRightLane:
            delRightLane.append(i)

    return delRightLane, delLeftLane


# Determine the leftmost and rightmost lane clusters
def detLine(contours):
    """
    Input
        contours: contours of each lane
    Output
        laneInfo: per-lane info [contours, y-range, lane index, arr_y, len(y-range), cnt]
        delRightLane: lane indices to drop when determining the leftmost lane
        delLeftLane: lane indices to drop when determining the rightmost lane
    """
    mergList = []
    for i in range(len(contours)):
        cnt = np.squeeze(contours[i], 1)
        arr_y = cnt[:, 1]
        arrList = list(set(arr_y))
        cnt_y = np.sort(np.array(arrList))
        mergList.append([contours[i], cnt_y, i, arr_y, len(cnt_y), cnt])
    laneInfo = sorted(mergList, key=(lambda x: x[4]))  # sort by y-range length
    delRightLane = []  # for the leftmost lane search
    delLeftLane = []   # for the rightmost lane search
    laneInfoNew = []
    for i in range(len(laneInfo)):
        laneInfoNew.append([laneInfo[i][1][0], laneInfo[i][1][-1], i])  # [[y_min, y_max, i],...]
    laneInfoNew = np.array(laneInfoNew)
    # Build all pairwise combinations of lane y-ranges:
    # new3[i, m] = [y_min_i, y_max_i, i, y_min_m, y_max_m, m]
    new1 = laneInfoNew[:, np.newaxis, :].repeat(laneInfoNew.shape[0], 1)
    new2 = laneInfoNew[np.newaxis, ...].repeat(laneInfoNew.shape[0], 0)
    new3 = np.concatenate((new1, new2), axis=2)
    y_i_min, y_i_max, y_m_min, y_m_max = new3[..., 0], new3[..., 1], new3[..., 3], new3[..., 4]
    # Four overlap cases between the y-ranges of lane i and lane m:
    mask1 = (y_i_min >= y_m_min) & (y_i_min <= y_m_max) & (y_i_max > y_m_max)   # i starts inside m, extends below
    mask2 = (y_i_max >= y_m_min) & (y_i_max <= y_m_max) & (y_i_min < y_m_min)   # i ends inside m, starts above
    mask3 = (y_i_min >= y_m_min) & (y_i_max <= y_m_max)                          # i contained in m
    mask4 = (y_i_min < y_m_min) & (y_i_max > y_m_max)                            # i contains m
    if len(np.nonzero(mask1)[0]) != 0:
        mask1 = np.triu(mask1, k=1)  # upper triangle: each unordered pair once
        serial_i = new3[mask1][..., 2]
        serial_m = new3[mask1][..., 5]
        for k in range(len(serial_i)):
            if (serial_m[k] not in delLeftLane) or (serial_m[k] not in delRightLane) or (serial_i[k] not in delLeftLane) or (serial_i[k] not in delRightLane):
                # compare the pair at y = top of lane i's range
                delRightLane, delLeftLane = devideLane(laneInfo, serial_i[k], serial_m[k], delRightLane, delLeftLane, laneInfo[serial_i[k]][1][0])

    if len(np.nonzero(mask2)[0]) != 0:
        mask2 = np.triu(mask2, k=1)
        serial_i = new3[mask2][..., 2]
        serial_m = new3[mask2][..., 5]
        for k in range(len(serial_i)):
            if (serial_m[k] not in delLeftLane) or (serial_m[k] not in delRightLane) or (serial_i[k] not in delLeftLane) or (serial_i[k] not in delRightLane):
                # compare at y = bottom of lane i's range
                delRightLane, delLeftLane = devideLane(laneInfo, serial_i[k], serial_m[k], delRightLane, delLeftLane, laneInfo[serial_i[k]][1][-1])

    if len(np.nonzero(mask3)[0]) != 0:
        mask3 = np.triu(mask3, k=1)
        serial_i = new3[mask3][..., 2]
        serial_m = new3[mask3][..., 5]
        for k in range(len(serial_i)):
            if (serial_m[k] not in delLeftLane) or (serial_m[k] not in delRightLane) or (serial_i[k] not in delLeftLane) or (serial_i[k] not in delRightLane):
                delRightLane, delLeftLane = devideLane(laneInfo, serial_i[k], serial_m[k], delRightLane, delLeftLane, laneInfo[serial_i[k]][1][0])

    if len(np.nonzero(mask4)[0]) != 0:
        mask4 = np.triu(mask4, k=1)
        serial_i = new3[mask4][..., 2]
        serial_m = new3[mask4][..., 5]
        for k in range(len(serial_i)):
            if (serial_m[k] not in delLeftLane) or (serial_m[k] not in delRightLane) or (serial_i[k] not in delLeftLane) or (serial_i[k] not in delRightLane):
                # i contains m: compare at y = top of lane m's range
                delRightLane, delLeftLane = devideLane(laneInfo, serial_i[k], serial_m[k], delRightLane, delLeftLane, laneInfo[serial_m[k]][1][0])
    return laneInfo, delRightLane, delLeftLane


# Downsample the y coordinates of a lane (coarser stride for longer lanes).
# NOTE(review): the stride table is not monotonic (stride 40 for 400-500 but 45
# for 300-400) — possibly a typo (50?); behavior kept as written. TODO confirm.
def downSample(cnt_y):
    if len(cnt_y) >= 1000:
        cnt_y = cnt_y[1::80]
    elif len(cnt_y) >= 900 and len(cnt_y) < 1000:
        cnt_y = cnt_y[1::75]
    elif len(cnt_y) >= 800 and len(cnt_y) < 900:
        cnt_y = cnt_y[1::70]
    elif len(cnt_y) >= 700 and len(cnt_y) < 800:
        cnt_y = cnt_y[1::65]
    elif len(cnt_y) >= 600 and len(cnt_y) < 700:
        cnt_y = cnt_y[1::60]
    elif len(cnt_y) >= 500 and len(cnt_y) < 600:
        cnt_y = cnt_y[1::55]
    elif len(cnt_y) >= 400 and len(cnt_y) < 500:
        cnt_y = cnt_y[1::40]
    elif len(cnt_y) >= 300 and len(cnt_y) < 400:
        cnt_y = cnt_y[1::45]
    elif len(cnt_y) >= 200 and len(cnt_y) < 300:
        cnt_y = cnt_y[1::40]
    elif len(cnt_y) >= 100 and len(cnt_y) < 200:
        cnt_y = cnt_y[1::35]
    elif len(cnt_y) >= 50 and len(cnt_y) < 100:
        cnt_y = cnt_y[1::20]
    elif len(cnt_y) >= 20 and len(cnt_y) < 50:
        cnt_y = cnt_y[1::6]
    else:
        cnt_y = cnt_y[1::5]
    return cnt_y


# Collect the point coordinates of the leftmost or rightmost lane cluster
def targetCOOR(laneInfo, delLane):
    """
    Input
        laneInfo: per-lane info [contours, y-range, lane index, arr_y, len(y-range), cnt]
        delLane: lane indices excluded from this (leftmost or rightmost) cluster
    Output
        laneCOOR: midline point coordinates of the kept lane cluster
    """
    laneCOOR = []    # point coordinates along the lanes
    centerSort = []  # lanes ordered by the y of their rect center
    for j in range(len(laneInfo)):
        if j not in delLane:
            cnt = laneInfo[j][0]
            rect = cv2.minAreaRect(cnt)
            cnt = np.squeeze(cnt, 1)
            cnt_y = laneInfo[j][1]
            cnt_y = downSample(cnt_y)
            centerSort.append([rect[0][1], cnt_y, laneInfo[j][3], cnt, j])
    centerSort = sorted(centerSort, key=(lambda x: x[0]))
    for i in range(len(centerSort)):
        centerCoordinate = []
        for j in range(len(centerSort[i][1])):
            # midpoint of the leftmost/rightmost contour points sharing this y
            index = np.where(centerSort[i][2] == centerSort[i][1][j])
            indexList = index[0].tolist()
            indexList.sort()
            x = (centerSort[i][3][indexList[0]][0] + centerSort[i][3][indexList[-1]][0]) / 2
            y = (centerSort[i][3][indexList[0]][1] + centerSort[i][3][indexList[-1]][1]) / 2
            centerCoordinate.append([x, y])
        laneCOOR = laneCOOR + centerCoordinate
    return laneCOOR


# Quadratic curve x = a*(y**2) + b*y + c: given y, return x.
def predict(a, b, c, y):
    x = a * (y**2) + b * y + c
    return x


def mixNoParking_road_postprocess(dets, mask, pars):
    """
    对于字典traffic_dict中的各个键,说明如下:
        RoadArea:speedRoad的最小外接矩形的面积
        vehicleCOOR:是一个列表,用于存储被检测出的vehicle的坐标(vehicle检测模型)
        roundness:圆度 ,lane的长与宽的比率,作为判定是否为车道线的标准之一
        laneArea:车道线的最小外接矩形的面积
        ZoomFactor:图像在H和W方向上的缩放因子,其值小于1
        fitOrder:多点拟合曲线的阶数
    最终输出格式:[[x0, y0, x1, y1, 车辆得分, cls, 违章停车得分, 违章类别], ...]
+ 违章类别:0表示正常车辆,1表示违章车辆 + """ + det_cors = [] + for bb in dets: + det_cors.append((int(bb[0]), int(bb[1]))) + det_cors.append((int(bb[2]), int(bb[3]))) + print('###line341:', det_cors) + pars['vehicleCOOR'] = det_cors + H, W = mask.shape[0:2] # mask的分辨率为360x640 + scaleH = pars['modelSize'][1] / H # 自适应调整缩放比例 + scaleW = pars['modelSize'][0] / W + pars['ZoomFactor'] = {'x': scaleW, 'y': scaleH} + new_hw = [int(H * scaleH), int(W * scaleW)] + mask = cv2.resize(mask, (new_hw[1], new_hw[0])) + if len(mask.shape) == 3: + mask = mask[:, :, 0] + + t1 = time.time() + imgRoad = mask.copy() + imgVehicle = mask.copy() + lane_line = mask.copy() + # 将vehicle和lane过滤掉,只包含背景和speedRoad + imgRoad[imgRoad == 2] = 1 + imgRoad[imgRoad == 3] = 1 + # 将speedRoad和lane过滤掉,只保留vehicle和背景 + imgVehicle[imgVehicle != 2] = 0 + # 将speedRoad和vehicle过滤掉,只保留lane和背景 + lane_line[lane_line < 3] = 0 + imgRoad = cv2.cvtColor(np.uint8(imgRoad), cv2.COLOR_RGB2BGR) # 道路 + imgVehicle = cv2.cvtColor(np.uint8(imgVehicle), cv2.COLOR_RGB2BGR) # 车辆 + lane_line = cv2.cvtColor(np.uint8(lane_line), cv2.COLOR_RGB2BGR) + + # 对车道线进行膨胀操作 + # kernel = np.ones((3, 3), np.uint8) # 膨胀范围 + # lane_line = cv2.dilate(lane_line, kernel, iterations=2) # 迭代次数为2 + + t2 = time.time() + img1 = cv2.cvtColor(imgRoad, cv2.COLOR_BGR2GRAY) + roadContours, hierarchy = cv2.findContours(img1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + t3 = time.time() + # 存储所有speedRoad的信息 + allRoadCnt = storageRoad(roadContours, pars) + t4 = time.time() + img3 = cv2.cvtColor(lane_line, cv2.COLOR_BGR2GRAY) + laneContours, hierarchy = cv2.findContours(img3, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + # 存储所有lane的信息 + laneNumber, newLaneContours = storageLane(laneContours, pars) + t5 = time.time() + + if laneNumber >= 2: + laneInfo, delRightLane, delLeftLane = detLine(newLaneContours) + t6 = time.time() + # 存储所有vehicle的信息 + dets, vehicleBD, unnormVehicle, normVehicleCOOR = storageVehicle(pars, imgVehicle, dets) + t7 = time.time() + leftLaneCOOR = 
targetCOOR(laneInfo, delRightLane) + rightLaneCOOR = targetCOOR(laneInfo, delLeftLane) + rightLaneCOOR = np.array(rightLaneCOOR) + rightX = rightLaneCOOR[:, 0] + rightY = rightLaneCOOR[:, 1] + leftLaneCOOR = np.array(leftLaneCOOR) + leftX = leftLaneCOOR[:, 0] + leftY = leftLaneCOOR[:, 1] + # a_r,b_r,c_r分别是:最右侧车道线簇拟合的二次函数的二次项系数一次项系数,和常数项 + a_r, b_r, c_r = np.polyfit(rightY, rightX, pars['fitOrder'])[0], np.polyfit(rightY, rightX, pars['fitOrder'])[1], np.polyfit(rightY, rightX, pars['fitOrder'])[2] + # a_l,b_l,c_l分别是:最左侧车道线簇拟合的二次函数的二次项系数,一次项系数,和常数项 + a_l, b_l, c_l = np.polyfit(leftY, leftX, pars['fitOrder'])[0], np.polyfit(leftY, leftX, pars['fitOrder'])[1], np.polyfit(leftY, leftX, pars['fitOrder'])[2] + + # """以下四行代码用于在后处理函数外画图""" + # finalLane = [] + # abc = [a_l, b_l, c_l, a_r, b_r, c_r] # abc中存储的是最左侧和最右侧二次函数的各项系数 + # finalLane.append(rightLaneCOOR) + # finalLane.append(leftLaneCOOR) + + # 计算违停得分 + t8 = time.time() + targetList = IllegalParkScore1(vehicleBD, allRoadCnt, dets, unnormVehicle, normVehicleCOOR, a_l, b_l, c_l, a_r, b_r, c_r) + t9 = time.time() + time_infos = 'postTime:%.2f(分割时间:%.2f, findContours:%.2f, ruleJudge:%.2f, storageRoad:%.2f, detLane:%.2f, storageLane:%.2f, storageVehicle:%.2f, fitLine:%.2f, IllegalParkScore1:%.2f)' % ( + get_ms(t9, t1), get_ms(t2, t1), get_ms(t3, t2), get_ms(t9, t3), get_ms(t4, t3), get_ms(t6, t5), + get_ms(t5, t4), get_ms(t7, t6), get_ms(t8, t7), get_ms(t9, t8)) + # print('####line445:', targetList) + # return targetList, time_infos, finalLane, lane_line, abc + targetList = [ [ *b[0:4],b[6] if b[6]>0 else b[4], b[7] ] for b in targetList ] + return targetList, time_infos + + else: + dets, vehicleBD, unnormVehicle, normVehicleCOOR = storageVehicle(pars, imgVehicle, dets) + t6 = time.time() + targetList = IllegalParkScore2(vehicleBD, dets, unnormVehicle) + t7 = time.time() + time_infos = 'postTime:%.2f(分割时间:%.2f, findContours:%.2f, ruleJudge:%.2f, storageRoad:%.2f, storageLane:%.2f, storageVehicle:%.2f, 
IllegalParkScore2:%.2f)' % ( + get_ms(t7, t1), get_ms(t2, t1), get_ms(t3, t2), get_ms(t7, t3), get_ms(t4, t3), get_ms(t5, t4), get_ms(t6, t5), get_ms(t7, t6)) + # print('####line456:', targetList) + targetList = [ [ *b[0:4],b[6] if b[6]>0 else b[4], b[7] ] for b in targetList ] + return targetList, time_infos +def mixNoParking_road_postprocess_N(predList, pars): + dets, mask =predList[0:2] + return mixNoParking_road_postprocess(dets, mask, pars) + + \ No newline at end of file diff --git a/utilsK/pannelpostUtils.py b/utilsK/pannelpostUtils.py new file mode 100644 index 0000000..1a08eb5 --- /dev/null +++ b/utilsK/pannelpostUtils.py @@ -0,0 +1,107 @@ +import cv2 +import numpy as np +import torch +# from loguru import logger + +def pannel_post_process(preds, pars): + # pars={'solar':0} + ''' + 将光伏板上覆盖物、裂缝识别出来 + ''' + # print(preds[0]) + # logger.info('\n分类结果返回:%s'%preds) + preds = torch.tensor(preds[0]) + preds = preds.tolist() + preds = [[*sublist[:-1], int(sublist[-1])] for sublist in preds] # 类别从浮点型转为整型 + # print(preds) + # 设置空的列表 + # 1、判断类别中哪些有太阳能板?取出太阳能板检测结果,并取出覆盖物、裂缝检测结果。 + preds_solar = [] + preds_others = [] + for i in range(len(preds)): + if preds[i][5] in pars['objs']: # 识别为光伏板 + preds_solar.append(preds[i]) + else: # 识别为裂缝、覆盖物 + preds_others.append(preds[i]) + + return point_in_rectangle(preds_others, preds_solar) + + +def center_coordinate(boundbxs): + ''' + 根据检测矩形框,得到其矩形长度和宽度 + 输入:两个对角坐标xyxy + 输出:矩形框重点坐标xy + ''' + boundbxs_x1 = boundbxs[0] + boundbxs_y1 = boundbxs[1] + boundbxs_x2 = boundbxs[2] + boundbxs_y2 = boundbxs[3] + center_x = 0.5 * (boundbxs_x1 + boundbxs_x2) + center_y = 0.5 * (boundbxs_y1 + boundbxs_y2) + return center_x, center_y + + +def fourcorner_coordinate(boundbxs): + ''' + 通过矩形框对角xyxy坐标,得到矩形框轮廓 + 输入:两个对角坐标xyxy + 输出:矩形框四个角点坐标,以contours顺序。 + ''' + boundbxs_x1 = boundbxs[0] + boundbxs_y1 = boundbxs[1] + boundbxs_x2 = boundbxs[2] + boundbxs_y2 = boundbxs[3] + wid = boundbxs_x2 - boundbxs_x1 + hei = boundbxs_y2 - boundbxs_y1 + boundbxs_x3 = 
boundbxs_x1 + wid + boundbxs_y3 = boundbxs_y1 + boundbxs_x4 = boundbxs_x1 + boundbxs_y4 = boundbxs_y1 + hei + contours_rec = [[boundbxs_x1, boundbxs_y1], [boundbxs_x3, boundbxs_y3], [boundbxs_x2, boundbxs_y2], + [boundbxs_x4, boundbxs_y4]] + return contours_rec + + +def point_in_rectangle(preds_others, preds_solar): + ''' + 遍历所有光伏板异常目标,并输出 + ''' + if not preds_solar: + return [[],''] + + preds = [] + for i in range(len(preds_others)): + for solar in preds_solar: + solar_contour = fourcorner_coordinate(solar) + solar_contour = np.array(solar_contour, dtype=np.float32) + center_x, center_y = center_coordinate(preds_others[i]) + # print(cv2.pointPolygonTest(solar_contour, (center_x, center_y), False)) + if cv2.pointPolygonTest(solar_contour, (center_x, center_y), False) == 1: + preds.append(preds_others[i]) + + # logger.info('\n分类结果返回:%s' % preds) + return [preds,''] + + +if __name__ == "__main__": + # 对应DJI_20230306140129_0001_Z_165.jpg检测结果 + # preds=[[6.49000e+02, 2.91000e+02, 1.07900e+03, 7.33000e+02, 9.08165e-01, 3.00000e+00], + # [8.11000e+02, 2.99000e+02, 1.31200e+03, 7.65000e+02, 8.61268e-01, 3.00000e+00], + # [7.05000e+02, 1.96000e+02, 7.19000e+02, 2.62000e+02, 5.66877e-01, 0.00000e+00]] + + # 对应DJI_20230306152702_0001_Z_562.jpg检测结果 + preds = [[7.62000e+02, 7.14000e+02, 1.82800e+03, 9.51000e+02, 9.00902e-01, 3.00000e+00], + [2.00000e+01, 3.45000e+02, 1.51300e+03, 6.71000e+02, 8.81440e-01, 3.00000e+00], + [8.35000e+02, 8.16000e+02, 8.53000e+02, 8.30000e+02, 7.07651e-01, 0.00000e+00], + [1.35600e+03, 4.56000e+02, 1.42800e+03, 4.94000e+02, 6.70549e-01, 2.00000e+00]] + print('before :\n ', preds) + # preds=torch.tensor(preds) #返回的预测结果 + imgwidth = 1920 + imgheight = 1680 + pars = {'imgSize': (imgwidth, imgheight), 'wRation': 1 / 6.0, 'hRation': 1 / 6.0, 'smallId': 0, 'bigId': 3, + 'newId': 4, 'recScale': 1.2} + # 'smallId':0(国旗),'bigId':3(船只),wRation和hRation表示判断的阈值条件,newId--新目标的id + # yyy = channel2_post_process([preds], pars) # 送入后处理函数 + # + # print('after :\n ', 
yyy) diff --git a/utilsK/queRiver.py b/utilsK/queRiver.py new file mode 100644 index 0000000..8691093 --- /dev/null +++ b/utilsK/queRiver.py @@ -0,0 +1,513 @@ +from kafka import KafkaProducer, KafkaConsumer +from kafka.errors import kafka_errors +import traceback +import json, base64,os +import numpy as np +from multiprocessing import Process,Queue +import time,cv2,string,random +import subprocess as sp + +import matplotlib.pyplot as plt +from utils.datasets import LoadStreams, LoadImages +from models.experimental import attempt_load +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression,overlap_box_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path + +import torch,sys + +#from segutils.segmodel import SegModel,get_largest_contours +#sys.path.extend(['../yolov5/segutils']) + +from segutils.segWaterBuilding import SegModel,get_largest_contours,illBuildings + +#from segutils.core.models.bisenet import BiSeNet +from segutils.core.models.bisenet import BiSeNet_MultiOutput + +from utils.plots import plot_one_box,plot_one_box_PIL,draw_painting_joint,get_label_arrays,get_websource +from collections import Counter +#import matplotlib +import matplotlib.pyplot as plt +# get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str +#FP_DEBUG=open('debut.txt','w') +def bsJpgCode(image_ori): + jpgCode = cv2.imencode('.jpg',image_ori)[-1]###np.array,(4502009,1) + bsCode = str(base64.b64encode(jpgCode))[2:-1] ###str,长6002680 + return bsCode +def bsJpgDecode(bsCode): + bsDecode = base64.b64decode(bsCode)###types,长4502009 + npString = np.frombuffer(bsDecode,np.uint8)###np.array,(长4502009,) + jpgDecode = cv2.imdecode(npString,cv2.IMREAD_COLOR)###np.array,(3000,4000,3) + return jpgDecode +def get_ms(time0,time1): + str_time ='%.2f ms'%((time1-time0)*1000) + return str_time +rainbows=[ + (0,0,255),(0,255,0),(255,0,0),(255,0,255),(255,255,0),(255,127,0),(255,0,127), + 
(127,255,0),(0,255,127),(0,127,255),(127,0,255),(255,127,255),(255,255,127), + (127,255,255),(0,255,255),(255,127,255),(127,255,255), + (0,127,0),(0,0,127),(0,255,255) + ] + + +def get_labelnames(labelnames): + with open(labelnames,'r') as fp: + namesjson=json.load(fp) + names_fromfile=namesjson['labelnames'] + names = names_fromfile + return names + +def check_stream(stream): + cap = cv2.VideoCapture(stream) + if cap.isOpened(): + return True + else: + return False +##### +def drawWater(pred,image_array0,river={'color':(0,255,255),'line_width':3,'segRegionCnt':2,'segLineShow':True}):####pred是模型的输出,只有水分割的任务 + ##画出水体区域 + contours, hierarchy = cv2.findContours(pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + water = pred.copy(); water[:,:] = 0 + + if len(contours)==0: + return image_array0,water + max_ids = get_largest_contours(contours,river['segRegionCnt']); + for max_id in max_ids: + cv2.fillPoly(water, [contours[max_id][:,0,:]], 1) + if river['segLineShow']: + cv2.drawContours(image_array0,contours,max_id,river['color'],river['line_width'] ) + return image_array0,water + + +def scale_back(boxes,padInfos): + top, left,r = padInfos[0:3] + + boxes[:,0] = (boxes[:,0] - left) * r + + boxes[:,2] = (boxes[:,2] - left) * r + boxes[:,1] = (boxes[:,1] - top) * r + boxes[:,3] = (boxes[:,3] - top) * r + return boxes +def img_pad(img, size, pad_value=[114,114,114]): + ###填充成固定尺寸 + H,W,_ = img.shape + r = max(H/size[0], W/size[1]) + img_r = cv2.resize(img, (int(W/r), int(H/r))) + tb = size[0] - img_r.shape[0] + lr = size[1] - img_r.shape[1] + top = int(tb/2) + bottom = tb - top + left = int(lr/2) + right = lr - left + pad_image = cv2.copyMakeBorder(img_r, top, bottom, left, right, cv2.BORDER_CONSTANT,value=pad_value) + return pad_image,(top, left,r) + +def post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,ObjectPar={ 'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7] ,'segmodel':True,'segRegionCnt':1 },font={ 'line_thickness':None, 
'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},padInfos=None ,ovlap_thres=None): + object_config,slopeIndex,segmodel,segRegionCnt=ObjectPar['object_config'],ObjectPar['slopeIndex'],ObjectPar['segmodel'],ObjectPar['segRegionCnt'] + ##输入dataset genereate 生成的数据,model预测的结果pred,nms参数 + ##主要操作NMS ---> 坐标转换 ---> 画图 + ##输出原图、AI处理后的图、检测结果 + time0=time.time() + path, img, im0s, vid_cap ,pred,seg_pred= datas[0:6]; + #segmodel=True + pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False) + if ovlap_thres: + pred = overlap_box_suppression(pred, ovlap_thres) + time1=time.time() + i=0;det=pred[0]###一次检测一张图片 + time1_1 = time.time() + #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy() + p, s, im0 = path[i], '%g: ' % i, im0s[i] + time1_2 = time.time() + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + time1_3 = time.time() + det_xywh=[]; + #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR); + if segmodel: + if len(seg_pred)==2: + im0,water = illBuildings(seg_pred,im0) + else: + river={ 'color':font['waterLineColor'],'line_width':font['waterLineWidth'],'segRegionCnt':segRegionCnt,'segLineShow':font['segLineShow'] } + im0,water = drawWater(seg_pred,im0,river) + time2=time.time() + #plt.imshow(im0);plt.show() + if len(det)>0: + # Rescale boxes from img_size to im0 size + if not padInfos: + det[:, :4] = scale_coords(img.shape[2:], det[:, :4],im0.shape).round() + else: + #print('####line131:',det[:, :]) + det[:, :4] = scale_back( det[:, :4],padInfos).round() + #print('####line133:',det[:, :]) + #用seg模型,确定有效检测匡及河道轮廓线 + if segmodel: + cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32) + ##判断哪些目标属于岸坡的 + slope_flag = np.array([x in slopeIndex for x in cls_indexs ] ) + + det_c = det.clone(); det_c=det_c.cpu().numpy() + try: + area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c] ) + except: + 
print('*****************************line143: error:',det_c) + water_flag = np.array(area_factors>0.1) + det = det[water_flag|slope_flag]##如果是水上目标,则需要与水的iou超过0.1;如果是岸坡目标,则直接保留。 + #对检测匡绘图 + + for *xyxy, conf, cls in reversed(det): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + cls_c = cls.cpu().numpy() + + + conf_c = conf.cpu().numpy() + tt=[ int(x.cpu()) for x in xyxy] + #line = [float(cls_c), *tt, float(conf_c)] # label format + line = [*tt, float(conf_c), float(cls_c)] # label format + det_xywh.append(line) + label = f'{names[int(cls)]} {conf:.2f}' + #print('- '*20, ' line165:',xyxy,cls,conf ) + if int(cls_c) not in object_config: ###如果不是所需要的目标,则不显示 + continue + #print('- '*20, ' line168:',xyxy,cls,conf ) + im0 = draw_painting_joint(xyxy,im0,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font) + time3=time.time() + strout='nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2),get_ms(time1_1,time1_2),get_ms(time1_2,time1_3), get_ms(time2,time3) ) + return [im0s[0],im0,det_xywh,iframe],strout + + +def getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,ObjectPar={ 'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7] ,'segmodel':True,'segRegionCnt':1 },font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},padInfos=None ,ovlap_thres=None): + object_config,slopeIndex,segmodel,segRegionCnt=ObjectPar['object_config'],ObjectPar['slopeIndex'],ObjectPar['segmodel'],ObjectPar['segRegionCnt'] + ##输入dataset genereate 生成的数据,model预测的结果pred,nms参数 + ##主要操作NMS ---> 坐标转换 ---> 画图 + ##输出原图、AI处理后的图、检测结果 + time0=time.time() + path, img, im0s, vid_cap ,pred,seg_pred= datas[0:6]; + #segmodel=True + pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False) + if ovlap_thres: + pred = overlap_box_suppression(pred, ovlap_thres) + time1=time.time() + 
i=0;det=pred[0]###一次检测一张图片 + time1_1 = time.time() + #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy() + p, s, im0 = path[i], '%g: ' % i, im0s[i] + time1_2 = time.time() + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + time1_3 = time.time() + det_xywh=[]; + #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR); + if segmodel: + if len(seg_pred)==2: + im0,water = illBuildings(seg_pred,im0) + else: + river={ 'color':font['waterLineColor'],'line_width':font['waterLineWidth'],'segRegionCnt':segRegionCnt,'segLineShow':font['segLineShow'] } + im0,water = drawWater(seg_pred,im0,river) + time2=time.time() + #plt.imshow(im0);plt.show() + if len(det)>0: + # Rescale boxes from img_size to im0 size + if not padInfos: + det[:, :4] = scale_coords(img.shape[2:], det[:, :4],im0.shape).round() + else: + #print('####line131:',det[:, :]) + det[:, :4] = scale_back( det[:, :4],padInfos).round() + #print('####line133:',det[:, :]) + #用seg模型,确定有效检测匡及河道轮廓线 + if segmodel: + cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32) + ##判断哪些目标属于岸坡的 + slope_flag = np.array([x in slopeIndex for x in cls_indexs ] ) + det_c = det.clone(); det_c=det_c.cpu().numpy() + try: + area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c] ) + except: + print('*****************************line143: error:',det_c) + water_flag = np.array(area_factors>0.1) + det = det[water_flag|slope_flag]##如果是水上目标,则需要与水的iou超过0.1;如果是岸坡目标,则直接保留。 + #对检测匡绘图 + for *xyxy, conf, cls in reversed(det): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + cls_c = cls.cpu().numpy() + + + conf_c = conf.cpu().numpy() + tt=[ int(x.cpu()) for x in xyxy] + line = [float(cls_c), *tt, float(conf_c)] # label format + det_xywh.append(line) + label = f'{names[int(cls)]} {conf:.2f}' + if int(cls_c) not in object_config: ###如果不是所需要的目标,则不显示 + continue + + time3=time.time() + strout='nms:%s 
drawWater:%s,copy:%s,toTensor:%s,detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2),get_ms(time1_1,time1_2),get_ms(time1_2,time1_3), get_ms(time2,time3) ) + return [im0s[0],im0,det_xywh,iframe],strout + + +def riverDetSegMixProcess(preds,water,pars={'slopeIndex':list(range(20)),'riverIou':0.1}): + ''' + 输入参数: + preds:二维的list,之前的检测结果,格式,[cls,x0,y0,x1,y1,score] + water:二维数据,值是0,1。1--表示水域,0--表示背景。 + im0: 原始没有 + pars:出去preds,water之外的参数,dict形式 + slopeIndex:岸坡上目标类别索引 + threshold:水里的目标,与水域重合的比例阈值 + 输出参数: + det:检测结果 + ''' + assert 'slopeIndex' in pars.keys(), 'input para keys error,No: slopeIndex' + assert 'riverIou' in pars.keys(), 'input para keys error, No: riverIou' + time0 = time.time() + slopeIndex,riverIou = pars['slopeIndex'],pars['riverIou'] + if len(preds)>0: + preds = np.array(preds) + cls_indexs = [int(x[5]) for x in preds] + #area_factors= np.array([np.sum(water[int(x[2]):int(x[4]), int(x[1]):int(x[3])] )*1.0/(1.0*(x[3]-x[1])*(x[4]-x[2])+0.00001) for x in preds] ) + area_factors= np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in preds] ) + slope_flag = np.array([x in slopeIndex for x in cls_indexs ] ) + water_flag = np.array(area_factors>riverIou) + det = preds[water_flag|slope_flag]##如果是水上目标,则需要与水的iou超过0.1;如果是岸坡目标,则直接保留。 + else: det=[] + #print('##'*20,det) + time1=time.time() + timeInfos = 'all: %.1f '%( (time1-time0) ) + return det ,timeInfos +def riverDetSegMixProcess_N(predList,pars={'slopeIndex':list(range(20)),'riverIou':0.1}): + preds, water = predList[0:2] + return riverDetSegMixProcess(preds,water,pars=pars) + +def getDetectionsFromPreds(pred,img,im0,conf_thres=0.2,iou_thres=0.45,ovlap_thres=0.6,padInfos=None): + ''' + 输入参数: + preds--检测模型输出的结果 + img--输入检测模型是的图像 + im0--原始图像 + conf_thres-- 一次NMS置信度的阈值 + iou_thres-- 一次NMS Iou 的阈值 + ovlap_thres-- 二次NMS Iou 的阈值 + padInfos--resize时候的填充信息. 
+ 输出: + img,im0--同输入 + det_xywh--二维list,存放检测结果,格式为[cls, x0,y0,x1,y1, score] + strout--时间信息 + ''' + + time0=time.time() + pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False) + if ovlap_thres: + pred = overlap_box_suppression(pred, ovlap_thres) + time1=time.time() + i=0;det=pred[0]###一次检测一张图片 + det_xywh=[] + if len(det)>0: + #将坐标恢复成原始尺寸的大小 + H,W = im0.shape[0:2] + det[:, :4] = scale_back( det[:, :4],padInfos).round() if padInfos else scale_coords(img.shape[2:], det[:, :4],im0.shape).round() + + #转换坐标格式,及tensor转换为cpu中的numpy格式。 + for *xyxy, conf, cls in reversed(det): + cls_c = cls.cpu().numpy() + conf_c = conf.cpu().numpy() + tt=[ int(x.cpu()) for x in xyxy] + x0,y0,x1,y1 = tt[0:4] + x0 = max(0,x0);y0 = max(0,y0); + x1 = min(W-1,x1);y1 = min(H-1,y1) + #line = [float(cls_c), *tt, float(conf_c)] # label format , + line = [ x0,y0,x1,y1, float(conf_c),float(cls_c)] # label format 2023.08.03--修改 + #print('###line305:',line) + det_xywh.append(line) + + time2=time.time() + strout='nms:%s scaleback:%s '%( get_ms(time0,time1),get_ms(time1,time2) ) + return [im0,im0,det_xywh,0],strout ###0,没有意义,只是为了和过去保持一致长度4个元素。 + + +def detectDraw(im0,dets,label_arraylist,rainbows,font): + for det in dets: + xyxy = det[1:5] + cls = det[0]; + conf = det[5] + im0 = draw_painting_joint(xyxy,im0,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font) + return im0 + + +def preprocess(par): + print('#####process:',par['name']) + ##负责读取视频,生成原图及供检测的使用图,numpy格式 + #source='rtmp://liveplay.yunhengzhizao.cn/live/demo_HD5M' + #img_size=640; stride=32 + while True: + cap = cv2.VideoCapture(par['source']) + iframe = 0 + if cap.isOpened(): + print( '#### read %s success!'%(par['source'])) + try: + dataset = LoadStreams(par['source'], img_size=640, stride=32) + for path, img, im0s, vid_cap in dataset: + datas=[path, img, im0s, vid_cap,iframe] + par['queOut'].put(datas) + iframe +=1 + except Exception as e: + print('###read error:%s '%(par['source'])) + 
time.sleep(10) + iframe = 0 + + else: + print('###read error:%s '%(par['source'] )) + time.sleep(10) + iframe = 0 + +def gpu_process(par): + print('#####process:',par['name']) + half=True + ##gpu运算,检测模型 + weights = par['weights'] + device = par['device'] + print('###line127:',par['device']) + model = attempt_load(par['weights'], map_location=par['device']) # load FP32 model + if half: + model.half() + + ##gpu运算,分割模型 + seg_nclass = par['seg_nclass'] + seg_weights = par['seg_weights'] + + #segmodel = SegModel(nclass=seg_nclass,weights=seg_weights,device=device) + + + nclass = [2,2] + Segmodel = BiSeNet_MultiOutput(nclass) + weights='weights/segmentation/WaterBuilding.pth' + segmodel = SegModel(model=Segmodel,nclass=nclass,weights=weights,device='cuda:0',multiOutput=True) + while True: + if not par['queIn'].empty(): + time0=time.time() + datas = par['queIn'].get() + path, img, im0s, vid_cap,iframe = datas[0:5] + time1=time.time() + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + time2 = time.time() + pred = model(img,augment=False)[0] + time3 = time.time() + seg_pred = segmodel.eval(im0s[0],outsize=None,smooth_kernel=20) + time4 = time.time() + fpStr= 'process:%s ,iframe:%d,getdata:%s,copygpu:%s,dettime:%s,segtime:%s , time:%s, queLen:%d '%( par['name'],iframe,get_ms(time0,time1) ,get_ms(time1,time2) ,get_ms(time2,time3) ,get_ms(time3,time4),get_ms(time0,time4) ,par['queIn'].qsize() ) + #FP_DEBUG.write( fpStr+'\n' ) + datasOut = [path, img, im0s, vid_cap,pred,seg_pred,iframe] + par['queOut'].put(datasOut) + if par['debug']: + print('#####process:',par['name'],' line107') + else: + time.sleep(1/300) +def get_cls(array): + dcs = Counter(array) + keys = list(dcs.keys()) + values = list(dcs.values()) + max_index = values.index(max(values)) + cls = int(keys[max_index]) + return cls +def 
save_problem_images(post_results,iimage_cnt,names,streamName='live-THSAHD5M',outImaDir='problems/images_tmp',imageTxtFile=False): + ## [cls, x,y,w,h, conf] + problem_image=[[] for i in range(6)] + + + dets_list = [x[2] for x in post_results] + + mean_scores=[ np.array(x)[:,5].mean() for x in dets_list ] ###mean conf + + best_index = mean_scores.index(max(mean_scores)) ##获取该批图片里,问题图片的index + best_frame = post_results[ best_index][3] ##获取绝对帧号 + img_send = post_results[best_index][1]##AI处理后的图 + img_bak = post_results[best_index][0]##原图 + cls_max = get_cls( x[5] for x in dets_list[best_index] ) + + + time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + uid=''.join(random.sample(string.ascii_letters + string.digits, 16)) + #ori_name = '2022-01-20-15-57-36_frame-368-720_type-漂浮物_qVh4zI08ZlwJN9on_s-live-THSAHD5M_OR.jpg' + #2022-01-13-15-07-57_frame-9999-9999_type-结束_9999999999999999_s-off-XJRW20220110115904_AI.jpg + outnameOR= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_AI.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName) + outnameAR= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_OR.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName) + + cv2.imwrite(outnameOR,img_send) + try: + cv2.imwrite(outnameAR,img_bak) + except: + print(outnameAR,type(img_bak),img_bak.size()) + if imageTxtFile: + outnameOR_txt = outnameOR.replace('.jpg','.txt') + fp=open(outnameOR_txt,'w');fp.write(outnameOR+'\n');fp.close() + outnameAI_txt = outnameAR.replace('.jpg','.txt') + fp=open(outnameAI_txt,'w');fp.write(outnameAR+'\n');fp.close() + + parOut = {}; parOut['imgOR'] = img_send; parOut['imgAR'] = img_send; parOut['uid']=uid + parOut['imgORname']=os.path.basename(outnameOR);parOut['imgARname']=os.path.basename(outnameAR); + parOut['time_str'] = time_str;parOut['type'] = names[cls_max] + return parOut + + + + +def post_process(par): + + print('#####process:',par['name']) + ###post-process参数 + 
conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes'] + labelnames=par['labelnames'] + rainbows=par['rainbows'] + fpsample = par['fpsample'] + names=get_labelnames(labelnames) + label_arraylist = get_label_arrays(names,rainbows,outfontsize=40) + iimage_cnt = 0 + post_results=[] + while True: + if not par['queIn'].empty(): + time0=time.time() + datas = par['queIn'].get() + iframe = datas[6] + if par['debug']: + print('#####process:',par['name'],' line129') + p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe) + par['queOut'].put(p_result) + ##输出结果 + + + + ##每隔 fpsample帧处理一次,如果有问题就保存图片 + if (iframe % fpsample == 0) and (len(post_results)>0) : + #print('####line204:',iframe,post_results) + save_problem_images(post_results,iframe,names) + post_results=[] + + if len(p_result[2] )>0: ## + #post_list = p_result.append(iframe) + post_results.append(p_result) + #print('####line201:',type(p_result)) + + time1=time.time() + outstr='process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() ) + #FP_DEBUG.write(outstr +'\n') + #print( 'process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() ) ) + else: + time.sleep(1/300) + + +def save_logfile(name,txt): + if os.path.exists(name): + fp=open(name,'r+') + else: + fp=open(name,'w') + + fp.write('%s %s \n'%(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),txt)) + fp.close() +def time_str(): + return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + + + +if __name__=='__main__': + jsonfile='config/queRiver.json' + #image_encode_decode() + work_stream(jsonfile) + #par={'name':'preprocess'} + #preprocess(par) diff --git a/utilsK/sendUtils.py b/utilsK/sendUtils.py new file mode 100644 index 0000000..4160bc1 --- /dev/null +++ b/utilsK/sendUtils.py @@ -0,0 +1,207 @@ +from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest +import 
json +import traceback +from aliyunsdkcore.client import AcsClient + +from PIL import Image +import numpy as np +import cv2 +import base64 +import io,os,copy +import requests +import time,json +import string,random +import glob,string,sys +from multiprocessing import Process,Queue +import oss2 +from kafka import KafkaProducer, KafkaConsumer +from voduploadsdk.UploadVideoRequest import UploadVideoRequest +from voduploadsdk.AliyunVodUtils import * +from voduploadsdk.AliyunVodUploader import AliyunVodUploader +from datetime import datetime, date, timedelta +def get_today(): + return date.today().strftime("%Y-%m-%d") +def get_yesterday(beforeday=-1): + return (date.today() + timedelta(days =beforeday)).strftime("%Y-%m-%d") + +def get_videoUurl(videoBakDir,filename): + ###七天时间内 + potentialUrls=[ os.path.join( videoBakDir,get_yesterday(beforeday=-x),filename) for x in range(7) ] + existsList=[os.path.exists(x ) for x in potentialUrls] + for i,flag in enumerate(existsList): + if flag: return potentialUrls[i] + return potentialUrls[0] + +def getNamedic(jsonfile): + with open(jsonfile) as fp: + dataDic=json.load(fp) + #"labelnames":["排口","排污口","水生植被","漂浮物","其它"], + #"labelIndexs":["SL014","SL011","SL013","SL001","SL001" ] + + assert 'labelnames' in dataDic.keys() , 'labelnames is not the key in %s'%(jsonfile) + assert 'labelIndexs' in dataDic.keys() , 'labelIndexs is not the key in %s'%(jsonfile) + assert len(dataDic['labelnames'])==len(dataDic['labelIndexs']) + nameDic={} + for key,value in zip(dataDic['labelnames'],dataDic['labelIndexs']): + nameDic[key]=value + return nameDic + + +def get_play_info(clt, videoId): + request = GetPlayInfoRequest.GetPlayInfoRequest() + request.set_accept_format('JSON') + request.set_VideoId(videoId) + request.set_AuthTimeout(3600*5) + response = json.loads(clt.do_action_with_exception(request)) + return response + +def create_status_msg(msg_dict_off,taskInfos,sts='waiting'): + msg= copy.deepcopy(msg_dict_off) + 
msg=update_json(taskInfos,msg,offkeys=["request_id"] ) + msg['status']=sts + msg = json.dumps(msg, ensure_ascii=False) + return msg +# 填入AccessKey信息 +def init_vod_client(accessKeyId, accessKeySecret): + regionId = 'cn-shanghai' # 点播服务接入地域 + connectTimeout = 3 # 连接超时,单位为秒 + return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout) +def update_json(jsonOri,jsonNew,offkeys=["request_id" ]): + #{'biz_id': 'hehuzhang', 'mod_id': 'ai', 'request_id': 'bblvgyntTsZCamqjuLArkiSYIbKXEeWx', 'offering_id': 'http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4', 'offering_type': 'mp4', 'results_base_dir': 'XJRW20220317153547', 'inSource': 'http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4', 'outSource': 'NO'} + for key in offkeys: + jsonNew[key] = jsonOri[key] + return jsonNew +def get_time(filename): + #2021-10-09-11-44-51_frame-598-720_type-水生植被.jpg + sps=filename.strip().split('_')[0] + tsps=sps.split('-') + return '%s-%s-%s %s:%s:%s'%(tsps[0],tsps[1],tsps[2],tsps[3],tsps[4],tsps[5]) +def get_ms(time0,time1): + str_time ='%.2f ms'%((time1-time0)*1000) + return str_time + +def get_urls( platform_query_url,fp_log ): + try: + if os.path.exists(platform_query_url): + #print('###line49') + with open('SendLog/platformQuery.json','r') as fp: + res = json.load(fp) + else: + res = requests.get(platform_query_url,timeout=10).json() + #print('###line54') + questionUrl = res['data']['questionUrl'] ###直播流时,问题图片的推送地址 + offlineUrl = res['data']['offlineUrl'] ###http离线视频时,问题图片的推送地址 + except Exception as ee: + timestr=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + print('###### %s: file:send_transfer: error %s ,url:%s #####'%(timestr,ee,platform_query_url)) + outstr = '\n %s ###### get url platform error : update error:%s , url:%s'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) 
,ee,platform_query_url) + fp_log.write(outstr);fp_log.flush() + questionUrl="http://47.96.182.154:9040/api/taskFile/submitUAVKHQuestion" + offlineUrl ="http://47.96.182.154:9040/api/taskFile/submitUAVKHQuestion" + return questionUrl,offlineUrl +def parse_filename(filename_base): + #etc:2022-01-13-16-04-17_frame-823-1440_type-水生植被_hgYFEulc0dPIrG1S_s-off-XJRW20220113154959_AI.jpg + uid =filename_base.split('.')[0].split('_')[3].strip() + sourceType=filename_base.split('_')[4].split('-')[1] + sourceId=filename_base.split('_')[4].split('-')[2] + typename=filename_base.split('.')[0].split('_')[2].split('-')[1].strip() + return uid,sourceType,sourceId,typename +def b64encode_function(filename, filename_OR): + if os.path.exists(filename): + image_ori=cv2.imread(filename) + image_ori_OR=cv2.imread(filename_OR) + else: + image_ori = filename.copy() + image_ori_OR = image_ori_OR.copy() + image_pngcode = cv2.imencode('.jpg',image_ori)[-1] + image_pngcode_OR = cv2.imencode('.jpg',image_ori_OR)[-1] + image_code = str(base64.b64encode(image_pngcode))[2:-1] + image_code_OR = str(base64.b64encode(image_pngcode_OR))[2:-1] + return image_code, image_code_OR +def JsonSend(parIn): + + fp_log = parIn['fp_log'] + try: + response=requests.post(parIn['api'],json=parIn['input_'],timeout=10).json() + t3 = time.time() + print('\n file:%s encodetime:%.5f request time:%.5f,send to %s ,return code:%s, size:%.2f M \n'%(parIn['filename_base'],parIn['t2']-parIn['t1'],t3-parIn['t2'],api,response['code'],parIn['sizeImage'])) + outstr = '%s file:%s encodetime:%.5f request time:%.5f,send to %s ,return code:%s,size:%.2f M ,%s\n'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),parIn['filename_base'],parIn['t2']-parIn['t1'],t3-parIn['t2'],parIn['api'],response['code'],parIn['sizeImage'],parIn['dic_str']) + fp_log.write(outstr);fp_log.flush() + + except Exception as ee: + print('\n ######file:%s: upload error:%s,size:%.2f M'%(parIn['filename_base'],ee, parIn['sizeImage'])) + outstr = '\n%s ###### 
file:%s: upload error:%s , size:%.2f M'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ,parIn['filename_base'],ee,parIn['sizeImage']) + fp_log.write(outstr);fp_log.flush() + + +def dic2str(dic): + st='' + for key in dic.keys(): + st='%s %s:%s,'%(st,key,dic[key]) + return st +def createJsonInput(filename,offlineUrl,questionUrl): + flag = True + filename_base = os.path.basename(filename) + filename_OR=filename.replace('_AI.','_OR.') + if not os.path.exists(filename_OR ): + return False + + uid,sourceType, sourceId,typename = parse_filename(filename_base) + if (typename not in name_dic.keys()) or (typename == '排口'): + return False + api = questionUrl if sourceType=='live' else offlineUrl + + time_str = get_time(filename_base) + input_ ={ + 'imgName':os.path.basename(filename), + 'imgNameOriginal':os.path.basename(filename_OR), + 'time':time_str, + 'fid':uid, ###随机16位字符 + 'type':name_dic[typename],###这次先采用 ["排口","污口","水生植被","漂浮物","其它"] + 'typeId':nameID_dic[typename] + + } + if sourceType!='live': + input_['code']=sourceId;###只有离线视频才需要code, + + dic_str = dic2str(input_) + t1 = time.time() + + image_code, image_code_OR = b64encode_function(filename, filename_OR) + input_['imgData']=image_code + input_['imgDataOriginal']=image_code_OR + + sizeImage = (len(image_code) + len(image_code_OR) )/1000000.0 + + parOut={};parOut['flag']=True;parOut['input_']=input_; + parOut['sizeImage']=sizeImage;parOut['dic_str']=dic_str; + parOut['filename']=filename;parOut['filename_OR']=filename_OR; + parOut['api']=api ; parOut['t1']=t1 ; parOut['filename_base']= filename_base + return parOut + +def getLogFileFp(streamName): + logname ='SendLog/'+ time.strftime("%Y-%m-%d", time.localtime())+'_%s.txt'%(streamName) + if os.path.exists(logname): + fp_log = open(logname,'a+') + else: + fp_log = open(logname,'w') + return + +def lodaMsgInfos(jsonDir,msgId): + jsonUrl = os.path.join(jsonDir,msgId+'.json') + with open(jsonUrl,'r') as fp: + data=json.load(fp) + return data + +def 
parse_filename_for_oss(name): + splts=name.split('_') + typename=splts[2].split('-')[1].strip() + msgId=splts[4].split('-')[3] + onLineType=splts[4].split('-')[1] + return typename,msgId,onLineType +def percentage(consumed_bytes, total_bytes): + if total_bytes: + rate = int(100 * (float(consumed_bytes) / float(total_bytes))) + print('\r{0}% '.format(rate), end='') + sys.stdout.flush() + diff --git a/utilsK/spillUtils.py b/utilsK/spillUtils.py new file mode 100644 index 0000000..c2c819c --- /dev/null +++ b/utilsK/spillUtils.py @@ -0,0 +1,118 @@ +import numpy as np +import time, cv2 +from loguru import logger + + +def ms(t1, t0): + return (t1 - t0) * 1000.0 + + +def center_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框重点坐标xy + ''' + boundbxs_x1 = boundbxs[0] + boundbxs_y1 = boundbxs[1] + boundbxs_x2 = boundbxs[2] + boundbxs_y2 = boundbxs[3] + center_x = 0.5 * (boundbxs_x1 + boundbxs_x2) + center_y = 0.5 * (boundbxs_y1 + boundbxs_y2) + return center_x, center_y + + + +def mixSpillage_postprocess(preds, _mask_cv,pars=None): + '''考虑船上人过滤''' + '''输入:抛洒物的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断抛洒物是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终抛洒物(坐标、类别、置信度), + ''' + '''1、最大分隔路面作为判断依据''' + # zoom_factor=4 #缩小因子设置为4,考虑到numpy中分别遍历xy进行缩放耗时大。 + # speedroad = _mask_cv.copy() + # speedroad = _mask_cv[speedroad==1] + # _mask_cv[0] = _mask_cv[1] + original_height = _mask_cv.shape[0] + original_width = _mask_cv.shape[1] + + zoom_factor = original_width / 480.0 + + zoom_height = int(original_height / zoom_factor) + zoom_width = int(original_width / zoom_factor) + + _mask_cv = cv2.resize(_mask_cv, (zoom_width, zoom_height)) # 缩小原图,宽在前,高在后 + t4 = time.time() + #print('+'*10,'_mask_cv shape信息',_mask_cv.shape) + img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape) == 3 else _mask_cv # + t5 = time.time() + contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, 
cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t6 = time.time() + + # print('+'*10,'路面分隔信息',len(contour_info)) + '''新增模块::如果路面为空,则返回原图、无抛洒物等。''' + if contour_info == []: + # final_img=_img_cv + final_spillage_filterroad = [] + timeInfos = 'road is empty' + + return final_spillage_filterroad, timeInfos + else: + # print(contour_info[0]) + max_contour = contour_info[0] + max_contour = max_contour[0] * zoom_factor # contours恢复原图尺寸 + max_contour = max_contour.astype(np.int32) + # print(max_contour) + t7 = time.time() + '''2.1、preds中spillage取出,car取出。''' + init_spillage = [] + # init_car_per = [] + for i in range(len(preds)): + if preds[i][5] == 0: + init_spillage.append(preds[i]) + # else: + # init_car_per.append(preds[i]) + #print('-'*10,'车辆',len(init_car_per)) + #print('+'*10,'抛洒物',len(init_spillage)) + #print('+'*10,'路面分隔信息',len(max_contour)) + # person + + # points = max_contour.reshape((-1, 1, 2)) + # cv2.polylines(image, [points], isClosed=True, color=(0, 255, 0), thickness=2) + + '''3、preds中spillage,通过1中路面过滤''' + init_spillage_filterroad = init_spillage + final_spillage_filterroad = [] + logger.info("车辆信息, max_contour: {}", max_contour) + logger.info("车辆信息, init_spillage: {}", init_spillage) + for i in range(len(init_spillage_filterroad)): + center_x, center_y = center_coordinate(init_spillage_filterroad[i]) + # print('#'*20,'line176:',len(max_contour),np.array(max_contour).shape,(center_x, center_y)) + # 返回 1、-1 或 0,分别对应点在多边形内部、外部或边界上的情况 + flag = cv2.pointPolygonTest(max_contour, (int(center_x), int(center_y)), + False) # 若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + logger.info("车辆信息, flag: {}",flag) + if flag == 1: + final_spillage_filterroad.append(init_spillage_filterroad[i]) + else: + pass + t9 = time.time() + + + timeInfos = ' findMaxroad:%.1f releJudge:%.1f' % (ms(t6, t4), ms(t9, t6)) + + return 
final_spillage_filterroad, timeInfos # 返回最终绘制的结果图、最高速搞萨物(坐标、类别、置信度) + + diff --git a/voduploadsdk/AliyunVodUploader.py b/voduploadsdk/AliyunVodUploader.py new file mode 100644 index 0000000..c0356c9 --- /dev/null +++ b/voduploadsdk/AliyunVodUploader.py @@ -0,0 +1,670 @@ +# -*- coding: UTF-8 -*- +import json +import oss2 +import base64 +import requests +from oss2 import compat +import time + +from aliyunsdkcore import client +from aliyunsdkvod.request.v20170321 import CreateUploadVideoRequest +from aliyunsdkvod.request.v20170321 import RefreshUploadVideoRequest +from aliyunsdkvod.request.v20170321 import CreateUploadImageRequest +from aliyunsdkvod.request.v20170321 import CreateUploadAttachedMediaRequest +from voduploadsdk.AliyunVodUtils import * +from voduploadsdk.UploadVideoRequest import UploadVideoRequest + +VOD_MAX_TITLE_LENGTH = 128 +VOD_MAX_DESCRIPTION_LENGTH = 1024 + +class AliyunVodUploader: + + def __init__(self, accessKeyId, accessKeySecret, ecsRegionId=None): + """ + constructor for VodUpload + :param accessKeyId: string, access key id + :param accessKeySecret: string, access key secret + :param ecsRegion: string, 部署迁移脚本的ECS所在的Region,详细参考:https://help.aliyun.com/document_detail/40654.html,如:cn-beijing + :return + """ + self.__accessKeyId = accessKeyId + self.__accessKeySecret = accessKeySecret + self.__ecsRegion = ecsRegionId + self.__vodApiRegion = None + self.__connTimeout = 3 + self.__bucketClient = None + self.__maxRetryTimes = 3 + self.__vodClient = None + self.__EnableCrc = True + + # 分片上传参数 + self.__multipartThreshold = 10 * 1024 * 1024 # 分片上传的阈值,超过此值开启分片上传 + self.__multipartPartSize = 10 * 1024 * 1024 # 分片大小,单位byte + self.__multipartThreadsNum = 3 # 分片上传时并行上传的线程数,暂时为串行上传,不支持并行,后续会支持。 + + self.setApiRegion('cn-shanghai') + + + def setApiRegion(self, apiRegion): + """ + 设置VoD的接入地址,中国大陆为cn-shanghai,海外支持ap-southeast-1(新加坡)等区域,详情参考:https://help.aliyun.com/document_detail/98194.html + :param apiRegion: 接入地址的Region英文表示 + :return: + """ + 
self.__vodApiRegion = apiRegion + self.__vodClient = self.__initVodClient() + + + def setMultipartUpload(self, multipartThreshold=10*1024*1024, multipartPartSize=10*1024*1024, multipartThreadsNum=1): + if multipartThreshold > 0: + self.__multipartThreshold = multipartThreshold + if multipartPartSize > 0: + self.__multipartPartSize = multipartPartSize + if multipartThreadsNum > 0: + self.__multipartThreadsNum = multipartThreadsNum + + def setEnableCrc(self, isEnable=False): + self.__EnableCrc = True if isEnable else False + + @catch_error + def uploadLocalVideo(self, uploadVideoRequest, startUploadCallback=None): + """ + 上传本地视频或音频文件到点播,最大支持48.8TB的单个文件,暂不支持断点续传 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为本地文件的绝对路径 + :param startUploadCallback为获取到上传地址和凭证(uploadInfo)后开始进行文件上传时的回调,可用于记录上传日志等;uploadId为设置的上传ID,可用于关联导入视频。 + :return + """ + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + if startUploadCallback: + startUploadCallback(uploadVideoRequest.uploadId, uploadInfo) + headers = self.__getUploadHeaders(uploadVideoRequest) + self.__uploadOssObjectWithRetry(uploadVideoRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, headers) + return uploadInfo + + @catch_error + def uploadWebVideo(self, uploadVideoRequest, startUploadCallback=None): + """ + 上传网络视频或音频文件到点播,最大支持48.8TB的单个文件(需本地磁盘空间足够);会先下载到本地临时目录,再上传到点播存储 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为网络文件的URL地址 + :return + """ + # 下载文件 + uploadVideoRequest = self.__downloadWebMedia(uploadVideoRequest) + + # 上传到点播 + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + if startUploadCallback: + startUploadCallback(uploadVideoRequest.uploadId, uploadInfo) + headers = self.__getUploadHeaders(uploadVideoRequest) + self.__uploadOssObjectWithRetry(uploadVideoRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, headers) + + # 删除本地临时文件 + os.remove(uploadVideoRequest.filePath) + + return uploadInfo['VideoId'] + + @catch_error + def 
uploadLocalM3u8(self, uploadVideoRequest, sliceFilePaths=None): + """ + 上传本地m3u8视频或音频文件到点播,m3u8文件和分片文件默认在同一目录 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为本地m3u8索引文件的绝对路径, + 且m3u8文件的分片信息必须是相对地址,不能含有URL或本地绝对路径 + :param sliceFilePaths: list, 分片文件的本地路径列表,例如:['/opt/m3u8_video/sample_001.ts', '/opt/m3u8_video/sample_002.ts'] + sliceFilePaths为None时,会按照同一目录去解析分片地址;如不在同一目录等原因导致解析有误,可自行组装分片地址 + :return + """ + + if sliceFilePaths is None: + sliceFilePaths = self.parseLocalM3u8(uploadVideoRequest.filePath) + + if (not isinstance(sliceFilePaths, list)) or len(sliceFilePaths) <= 0: + raise AliyunVodException('InvalidM3u8SliceFile', 'M3u8 slice files invalid', 'sliceFilePaths invalid or m3u8 index file error') + + # 上传到点播的m3u8索引文件会重写,以此确保分片地址都为相对地址 + downloader = AliyunVodDownloader() + m3u8LocalDir = downloader.getSaveLocalDir() + '/' + AliyunVodUtils.getStringMd5(uploadVideoRequest.fileName) + downloader.setSaveLocalDir(m3u8LocalDir) + m3u8LocalPath = m3u8LocalDir + '/' + os.path.basename(uploadVideoRequest.fileName) + self.__rewriteM3u8File(uploadVideoRequest.filePath, m3u8LocalPath, True) + + # 获取上传凭证 + uploadVideoRequest.setFilePath(m3u8LocalPath) + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + uploadAddress = uploadInfo['UploadAddress'] + headers = self.__getUploadHeaders(uploadVideoRequest) + + # 依次上传分片文件 + for sliceFilePath in sliceFilePaths: + tempFilePath, sliceFileName = AliyunVodUtils.getFileBriefPath(sliceFilePath) + self.__uploadOssObjectWithRetry(sliceFilePath, uploadAddress['ObjectPrefix'] + sliceFileName, uploadInfo, headers) + + # 上传m3u8文件 + self.__uploadOssObjectWithRetry(m3u8LocalPath, uploadAddress['FileName'], uploadInfo, headers) + + # 删除重写到本地的m3u8文件 + if os.path.exists(m3u8LocalPath): + os.remove(m3u8LocalPath) + if not os.listdir(m3u8LocalDir): + os.rmdir(m3u8LocalDir) + + return uploadInfo['VideoId'] + + @catch_error + def uploadWebM3u8(self, uploadVideoRequest, sliceFileUrls=None): + """ + 
上传网络m3u8视频或音频文件到点播,需本地磁盘空间足够,会先下载到本地临时目录,再上传到点播存储 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为m3u8网络文件的URL地址 + :param sliceFileUrls: list, 分片文件的url,例如:['http://host/sample_001.ts', 'http://host/sample_002.ts'] + sliceFileUrls为None时,会按照同一前缀解析分片地址;如分片路径和m3u8索引文件前缀不同等原因导致解析有误,可自行组装分片地址 + :return + """ + if sliceFileUrls is None: + sliceFileUrls = self.parseWebM3u8(uploadVideoRequest.filePath) + + if (not isinstance(sliceFileUrls, list)) or len(sliceFileUrls) <= 0: + raise AliyunVodException('InvalidM3u8SliceFile', 'M3u8 slice urls invalid', + 'sliceFileUrls invalid or m3u8 index file error') + + # 下载m3u8文件和所有ts分片文件到本地;上传到点播的m3u8索引文件会重写,以此确保分片地址都为相对地址 + downloader = AliyunVodDownloader() + m3u8LocalDir = downloader.getSaveLocalDir() + '/' + AliyunVodUtils.getStringMd5(uploadVideoRequest.fileName) + downloader.setSaveLocalDir(m3u8LocalDir) + m3u8LocalPath = m3u8LocalDir + '/' + os.path.basename(uploadVideoRequest.fileName) + self.__rewriteM3u8File(uploadVideoRequest.filePath, m3u8LocalPath, False) + + sliceList = [] + for sliceFileUrl in sliceFileUrls: + tempFilePath, sliceFileName = AliyunVodUtils.getFileBriefPath(sliceFileUrl) + err, sliceLocalPath = downloader.downloadFile(sliceFileUrl, sliceFileName) + if sliceLocalPath is None: + raise AliyunVodException('FileDownloadError', 'Download M3u8 File Error', '') + sliceList.append((sliceLocalPath, sliceFileName)) + + # 获取上传凭证 + uploadVideoRequest.setFilePath(m3u8LocalPath) + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + uploadAddress = uploadInfo['UploadAddress'] + headers = self.__getUploadHeaders(uploadVideoRequest) + + # 依次上传分片文件 + for sliceFile in sliceList: + self.__uploadOssObjectWithRetry(sliceFile[0], uploadAddress['ObjectPrefix'] + sliceFile[1], uploadInfo, headers) + + # 上传m3u8文件 + self.__uploadOssObjectWithRetry(m3u8LocalPath, uploadAddress['FileName'], uploadInfo, headers) + + # 删除下载到本地的m3u8文件和分片文件 + if os.path.exists(m3u8LocalPath): + os.remove(m3u8LocalPath) + for sliceFile 
in sliceList: + if os.path.exists(sliceFile[0]): + os.remove(sliceFile[0]) + if not os.listdir(m3u8LocalDir): + os.rmdir(m3u8LocalDir) + + return uploadInfo['VideoId'] + + + @catch_error + def uploadImage(self, uploadImageRequest, isLocalFile=True): + """ + 上传图片文件到点播,不支持断点续传;该接口可支持上传本地图片或网络图片 + :param uploadImageRequest: UploadImageRequest,注意filePath为本地文件的绝对路径或网络文件的URL地址 + :param isLocalFile: bool, 是否为本地文件。True:本地文件,False:网络文件 + :return + """ + # 网络图片需要先下载到本地 + if not isLocalFile: + uploadImageRequest = self.__downloadWebMedia(uploadImageRequest) + + # 上传到点播 + uploadInfo = self.__createUploadImage(uploadImageRequest) + self.__uploadOssObject(uploadImageRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, None) + + # 删除本地临时文件 + if not isLocalFile: + os.remove(uploadImageRequest.filePath) + + return uploadInfo['ImageId'], uploadInfo['ImageURL'] + + @catch_error + def uploadAttachedMedia(self, uploadAttachedRequest, isLocalFile=True): + """ + 上传辅助媒资文件(如水印、字幕文件)到点播,不支持断点续传;该接口可支持上传本地或网络文件 + :param uploadAttachedRequest: UploadAttachedMediaRequest,注意filePath为本地文件的绝对路径或网络文件的URL地址 + :param isLocalFile: bool, 是否为本地文件。True:本地文件,False:网络文件 + :return + """ + # 网络文件需要先下载到本地 + if not isLocalFile: + uploadAttachedRequest = self.__downloadWebMedia(uploadAttachedRequest) + + # 上传到点播 + uploadInfo = self.__createUploadAttachedMedia(uploadAttachedRequest) + self.__uploadOssObject(uploadAttachedRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, None) + + # 删除本地临时文件 + if not isLocalFile: + os.remove(uploadAttachedRequest.filePath) + + result = {'MediaId': uploadInfo['MediaId'], 'MediaURL': uploadInfo['MediaURL'], 'FileURL': uploadInfo['FileURL']} + return result + + @catch_error + def parseWebM3u8(self, m3u8FileUrl): + """ + 解析网络m3u8文件得到所有分片文件地址,原理是将m3u8地址前缀拼接ts分片名称作为后者的下载url,适用于url不带签名或分片与m3u8文件签名相同的情况 + 本函数解析时会默认分片文件和m3u8文件位于同一目录,如不是则请自行拼接分片文件的地址列表 + :param m3u8FileUrl: string, m3u8网络文件url,例如:http://host/sample.m3u8 + :return sliceFileUrls + """ 
+ sliceFileUrls = [] + res = requests.get(m3u8FileUrl) + res.raise_for_status() + for line in res.iter_lines(): + if line.startswith('#'): + continue + sliceFileUrl = AliyunVodUtils.replaceFileName(m3u8FileUrl, line.strip()) + sliceFileUrls.append(sliceFileUrl) + + return sliceFileUrls + + @catch_error + def parseLocalM3u8(self, m3u8FilePath): + """ + 解析本地m3u8文件得到所有分片文件地址,原理是将m3u8地址前缀拼接ts分片名称作为后者的本地路径 + 本函数解析时会默认分片文件和m3u8文件位于同一目录,如不是则请自行拼接分片文件的地址列表 + :param m3u8FilePath: string, m3u8本地文件路径,例如:/opt/videos/sample.m3u8 + :return sliceFilePaths + """ + sliceFilePaths = [] + m3u8FilePath = AliyunVodUtils.toUnicode(m3u8FilePath) + for line in open(m3u8FilePath): + if line.startswith('#'): + continue + sliceFileName = line.strip() + sliceFilePath = AliyunVodUtils.replaceFileName(m3u8FilePath, sliceFileName) + sliceFilePaths.append(sliceFilePath) + + return sliceFilePaths + + + # 定义进度条回调函数;consumedBytes: 已经上传的数据量,totalBytes:总数据量 + def uploadProgressCallback(self, consumedBytes, totalBytes): + + if totalBytes: + rate = int(100 * (float(consumedBytes) / float(totalBytes))) + else: + rate = 0 + + print ("[%s]uploaded %s bytes, percent %s%s" % (AliyunVodUtils.getCurrentTimeStr(), consumedBytes, format(rate), '%')) + sys.stdout.flush() + + + def __initVodClient(self): + return client.AcsClient(self.__accessKeyId, self.__accessKeySecret, self.__vodApiRegion, + auto_retry=True, max_retry_time=self.__maxRetryTimes, timeout=self.__connTimeout) + + def __downloadWebMedia(self, request): + + # 下载媒体文件到本地临时目录 + downloader = AliyunVodDownloader() + localFileName = "%s.%s" % (AliyunVodUtils.getStringMd5(request.fileName), request.mediaExt) + fileUrl = request.filePath + err, localFilePath = downloader.downloadFile(fileUrl, localFileName) + if err < 0: + raise AliyunVodException('FileDownloadError', 'Download File Error', '') + + # 重新设置上传请求对象 + request.setFilePath(localFilePath) + return request + + def __rewriteM3u8File(self, srcM3u8File, dstM3u8File, isSrcLocal=True): + newM3u8Text = '' 
+ if isSrcLocal: + for line in open(AliyunVodUtils.toUnicode(srcM3u8File)): + item = self.__processM3u8Line(line) + if item is not None: + newM3u8Text += item + "\n" + else: + res = requests.get(srcM3u8File) + res.raise_for_status() + for line in res.iter_lines(): + item = self.__processM3u8Line(line) + if item is not None: + newM3u8Text += item + "\n" + + AliyunVodUtils.mkDir(dstM3u8File) + with open(dstM3u8File, 'w') as f: + f.write(newM3u8Text) + + + def __processM3u8Line(self, line): + item = line.strip() + if len(item) <= 0: + return None + + if item.startswith('#'): + return item + + tempFilePath, fileName = AliyunVodUtils.getFileBriefPath(item) + return fileName + + + def __requestUploadInfo(self, request, mediaType): + request.set_accept_format('JSON') + result = json.loads(self.__vodClient.do_action_with_exception(request).decode('utf-8')) + result['OriUploadAddress'] = result['UploadAddress'] + result['OriUploadAuth'] = result['UploadAuth'] + + result['UploadAddress'] = json.loads(base64.b64decode(result['OriUploadAddress']).decode('utf-8')) + result['UploadAuth'] = json.loads(base64.b64decode(result['OriUploadAuth']).decode('utf-8')) + + result['MediaType'] = mediaType + if mediaType == 'video': + result['MediaId'] = result['VideoId'] + elif mediaType == 'image': + result['MediaId'] = result['ImageId'] + result['MediaURL'] = result['ImageURL'] + + return result + + + # 获取视频上传地址和凭证 + def __createUploadVideo(self, uploadVideoRequest): + request = CreateUploadVideoRequest.CreateUploadVideoRequest() + + title = AliyunVodUtils.subString(uploadVideoRequest.title, VOD_MAX_TITLE_LENGTH) + request.set_Title(title) + request.set_FileName(uploadVideoRequest.fileName) + + if uploadVideoRequest.description: + description = AliyunVodUtils.subString(uploadVideoRequest.description, VOD_MAX_DESCRIPTION_LENGTH) + request.set_Description(description) + if uploadVideoRequest.coverURL: + request.set_CoverURL(uploadVideoRequest.coverURL) + if uploadVideoRequest.tags: + 
request.set_Tags(uploadVideoRequest.tags) + if uploadVideoRequest.cateId: + request.set_CateId(uploadVideoRequest.cateId) + if uploadVideoRequest.templateGroupId: + request.set_TemplateGroupId(uploadVideoRequest.templateGroupId) + if uploadVideoRequest.storageLocation: + request.set_StorageLocation(uploadVideoRequest.storageLocation) + if uploadVideoRequest.userData: + request.set_UserData(uploadVideoRequest.userData) + if uploadVideoRequest.appId: + request.set_AppId(uploadVideoRequest.appId) + if uploadVideoRequest.workflowId: + request.set_WorkflowId(uploadVideoRequest.workflowId) + + result = self.__requestUploadInfo(request, 'video') + logger.info("CreateUploadVideo, FilePath: %s, VideoId: %s" % (uploadVideoRequest.filePath, result['VideoId'])) + return result + + # 刷新上传凭证 + def __refresh_upload_video(self, videoId): + request = RefreshUploadVideoRequest.RefreshUploadVideoRequest(); + request.set_VideoId(videoId) + + result = self.__requestUploadInfo(request, 'video') + logger.info("RefreshUploadVideo, VideoId %s" % (result['VideoId'])) + return result + + # 获取图片上传地址和凭证 + def __createUploadImage(self, uploadImageRequest): + request = CreateUploadImageRequest.CreateUploadImageRequest() + + request.set_ImageType(uploadImageRequest.imageType) + request.set_ImageExt(uploadImageRequest.imageExt) + if uploadImageRequest.title: + title = AliyunVodUtils.subString(uploadImageRequest.title, VOD_MAX_TITLE_LENGTH) + request.set_Title(title) + if uploadImageRequest.description: + description = AliyunVodUtils.subString(uploadImageRequest.description, VOD_MAX_DESCRIPTION_LENGTH) + request.set_Description(description) + if uploadImageRequest.tags: + request.set_Tags(uploadImageRequest.tags) + if uploadImageRequest.cateId: + request.set_CateId(uploadImageRequest.cateId) + if uploadImageRequest.storageLocation: + request.set_StorageLocation(uploadImageRequest.storageLocation) + if uploadImageRequest.userData: + request.set_UserData(uploadImageRequest.userData) + if 
uploadImageRequest.appId: + request.set_AppId(uploadImageRequest.appId) + if uploadImageRequest.workflowId: + request.set_WorkflowId(uploadImageRequest.workflowId) + + result = self.__requestUploadInfo(request, 'image') + logger.info("CreateUploadImage, FilePath: %s, ImageId: %s, ImageUrl: %s" % ( + uploadImageRequest.filePath, result['ImageId'], result['ImageURL'])) + return result + + def __createUploadAttachedMedia(self, uploadAttachedRequest): + request = CreateUploadAttachedMediaRequest.CreateUploadAttachedMediaRequest() + request.set_BusinessType(uploadAttachedRequest.businessType) + request.set_MediaExt(uploadAttachedRequest.mediaExt) + + if uploadAttachedRequest.title: + title = AliyunVodUtils.subString(uploadAttachedRequest.title, VOD_MAX_TITLE_LENGTH) + request.set_Title(title) + if uploadAttachedRequest.description: + description = AliyunVodUtils.subString(uploadAttachedRequest.description, VOD_MAX_DESCRIPTION_LENGTH) + request.set_Description(description) + if uploadAttachedRequest.tags: + request.set_Tags(uploadAttachedRequest.tags) + if uploadAttachedRequest.cateId: + request.set_CateId(uploadAttachedRequest.cateId) + if uploadAttachedRequest.storageLocation: + request.set_StorageLocation(uploadAttachedRequest.storageLocation) + if uploadAttachedRequest.userData: + request.set_UserData(uploadAttachedRequest.userData) + if uploadAttachedRequest.appId: + request.set_AppId(uploadAttachedRequest.appId) + if uploadAttachedRequest.workflowId: + request.set_WorkflowId(uploadAttachedRequest.workflowId) + + result = self.__requestUploadInfo(request, 'attached') + logger.info("CreateUploadImage, FilePath: %s, MediaId: %s, MediaURL: %s" % ( + uploadAttachedRequest.filePath, result['MediaId'], result['MediaURL'])) + return result + + + def __getUploadHeaders(self, uploadVideoRequest): + if uploadVideoRequest.isShowWatermark is None: + return None + else: + userData = "{\"Vod\":{\"UserData\":{\"IsShowWaterMark\": \"%s\"}}}" % (uploadVideoRequest.isShowWatermark) + 
return {'x-oss-notification': base64.b64encode(userData, 'utf-8')} + + # uploadType,可选:multipart, put, web + def __uploadOssObjectWithRetry(self, filePath, object, uploadInfo, headers=None): + retryTimes = 0 + while retryTimes < self.__maxRetryTimes: + try: + return self.__uploadOssObject(filePath, object, uploadInfo, headers) + except OssError as e: + # 上传凭证过期需要重新获取凭证 + if e.code == 'SecurityTokenExpired' or e.code == 'InvalidAccessKeyId': + uploadInfo = self.__refresh_upload_video(uploadInfo['MediaId']) + except Exception as e: + raise e + except: + raise AliyunVodException('UnkownError', repr(e), traceback.format_exc()) + finally: + retryTimes += 1 + + + def __uploadOssObject(self, filePath, object, uploadInfo, headers=None): + self.__createOssClient(uploadInfo['UploadAuth'], uploadInfo['UploadAddress']) + """ + p = os.path.dirname(os.path.realpath(__file__)) + store = os.path.dirname(p) + '/osstmp' + return oss2.resumable_upload(self.__bucketClient, object, filePath, + store=oss2.ResumableStore(root=store), headers=headers, + multipart_threshold=self.__multipartThreshold, part_size=self.__multipartPartSize, + num_threads=self.__multipartThreadsNum, progress_callback=self.uploadProgressCallback) + """ + uploader = _VodResumableUploader(self.__bucketClient, filePath, object, uploadInfo, headers, + self.uploadProgressCallback, self.__refreshUploadAuth) + uploader.setMultipartInfo(self.__multipartThreshold, self.__multipartPartSize, self.__multipartThreadsNum) + uploader.setClientId(self.__accessKeyId) + res = uploader.upload() + + uploadAddress = uploadInfo['UploadAddress'] + bucketHost = uploadAddress['Endpoint'].replace('://', '://' + uploadAddress['Bucket'] + ".") + logger.info("UploadFile %s Finish, MediaId: %s, FilePath: %s, Destination: %s/%s" % ( + uploadInfo['MediaType'], uploadInfo['MediaId'], filePath, bucketHost, object)) + return res + + # 使用上传凭证和地址信息初始化OSS客户端(注意需要先Base64解码并Json Decode再传入) + # 如果上传的ECS位于点播相同的存储区域(如上海),则可以指定internal为True,通过内网上传更快且免费 + 
def __createOssClient(self, uploadAuth, uploadAddress): + auth = oss2.StsAuth(uploadAuth['AccessKeyId'], uploadAuth['AccessKeySecret'], uploadAuth['SecurityToken']) + endpoint = AliyunVodUtils.convertOssInternal(uploadAddress['Endpoint'], self.__ecsRegion) + self.__bucketClient = oss2.Bucket(auth, endpoint, uploadAddress['Bucket'], + connect_timeout=self.__connTimeout, enable_crc=self.__EnableCrc) + return self.__bucketClient + + def __refreshUploadAuth(self, videoId): + uploadInfo = self.__refresh_upload_video(videoId) + uploadAuth = uploadInfo['UploadAuth'] + uploadAddress = uploadInfo['UploadAddress'] + return self.__createOssClient(uploadAuth, uploadAddress) + + +from oss2 import SizedFileAdapter, determine_part_size +from oss2.models import PartInfo +from aliyunsdkcore.utils import parameter_helper as helper +class _VodResumableUploader: + def __init__(self, bucket, filePath, object, uploadInfo, headers, progressCallback, refreshAuthCallback): + self.__bucket = bucket + self.__filePath = filePath + self.__object = object + self.__uploadInfo = uploadInfo + self.__totalSize = None + self.__headers = headers + self.__mtime = os.path.getmtime(filePath) + self.__progressCallback = progressCallback + self.__refreshAuthCallback = refreshAuthCallback + + self.__threshold = None + self.__partSize = None + self.__threadsNum = None + self.__uploadId = 0 + + self.__record = {} + self.__finishedSize = 0 + self.__finishedParts = [] + self.__filePartHash = None + self.__clientId = None + + def setMultipartInfo(self, threshold, partSize, threadsNum): + self.__threshold = threshold + self.__partSize = partSize + self.__threadsNum = threadsNum + + + def setClientId(self, clientId): + self.__clientId = clientId + + + def upload(self): + self.__totalSize = os.path.getsize(self.__filePath) + if self.__threshold and self.__totalSize <= self.__threshold: + return self.simpleUpload() + else: + return self.multipartUpload() + + + def simpleUpload(self): + with 
open(AliyunVodUtils.toUnicode(self.__filePath), 'rb') as f: + result = self.__bucket.put_object(self.__object, f, headers=self.__headers, progress_callback=None) + if self.__uploadInfo['MediaType'] == 'video': + self.__reportUploadProgress('put', 1, self.__totalSize) + + return result + + def multipartUpload(self): + psize = oss2.determine_part_size(self.__totalSize, preferred_size=self.__partSize) + + # 初始化分片 + self.__uploadId = self.__bucket.init_multipart_upload(self.__object).upload_id + + startTime = time.time() + expireSeconds = 2500 # 上传凭证有效期3000秒,提前刷新 + # 逐个上传分片 + with open(AliyunVodUtils.toUnicode(self.__filePath), 'rb') as fileObj: + partNumber = 1 + offset = 0 + + while offset < self.__totalSize: + uploadSize = min(psize, self.__totalSize - offset) + #logger.info("UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s" % (self.__fileName, self.__videoId, self.__uploadId, partNumber, uploadSize)) + result = self.__bucket.upload_part(self.__object, self.__uploadId, partNumber, SizedFileAdapter(fileObj,uploadSize)) + #print(result.request_id) + self.__finishedParts.append(PartInfo(partNumber, result.etag)) + offset += uploadSize + partNumber += 1 + + # 上传进度回调 + self.__progressCallback(offset, self.__totalSize) + + if self.__uploadInfo['MediaType'] == 'video': + # 上报上传进度 + self.__reportUploadProgress('multipart', partNumber - 1, offset) + + # 检测上传凭证是否过期 + nowTime = time.time() + if nowTime - startTime >= expireSeconds: + self.__bucket = self.__refreshAuthCallback(self.__uploadInfo['MediaId']) + startTime = nowTime + + + # 完成分片上传 + self.__bucket.complete_multipart_upload(self.__object, self.__uploadId, self.__finishedParts, headers=self.__headers) + + return result + + + def __reportUploadProgress(self, uploadMethod, donePartsCount, doneBytes): + reportHost = 'vod.cn-shanghai.aliyuncs.com' + sdkVersion = '1.3.1' + reportKey = 'HBL9nnSwhtU2$STX' + + uploadPoint = {'upMethod': uploadMethod, 'partSize': self.__partSize, 'doneBytes': 
doneBytes} + timestamp = int(time.time()) + authInfo = AliyunVodUtils.getStringMd5("%s|%s|%s" % (self.__clientId, reportKey, timestamp)) + + fields = {'Action': 'ReportUploadProgress', 'Format': 'JSON', 'Version': '2017-03-21', + 'Timestamp': helper.get_iso_8061_date(), 'SignatureNonce': helper.get_uuid(), + 'VideoId': self.__uploadInfo['MediaId'], 'Source': 'PythonSDK', 'ClientId': self.__clientId, + 'BusinessType': 'UploadVideo', 'TerminalType': 'PC', 'DeviceModel': 'Server', + 'AppVersion': sdkVersion, 'AuthTimestamp': timestamp, 'AuthInfo': authInfo, + + 'FileName': self.__filePath, 'FileHash': self.__getFilePartHash(self.__clientId, self.__filePath, self.__totalSize), + 'FileSize': self.__totalSize, 'FileCreateTime': timestamp, 'UploadRatio': 0, 'UploadId': self.__uploadId, + 'DonePartsCount': donePartsCount, 'PartSize': self.__partSize, 'UploadPoint': json.dumps(uploadPoint), + 'UploadAddress': self.__uploadInfo['OriUploadAddress'] + } + requests.post('http://' + reportHost, fields, timeout=1) + + + def __getFilePartHash(self, clientId, filePath, fileSize): + if self.__filePartHash: + return self.__filePartHash + + length = 1 * 1024 * 1024 + if fileSize < length: + length = fileSize + + try: + fp = open(AliyunVodUtils.toUnicode(filePath), 'rb') + strVal = fp.read(length) + self.__filePartHash = AliyunVodUtils.getStringMd5(strVal, False) + fp.close() + except: + self.__filePartHash = "%s|%s|%s" % (clientId, filePath, self.__mtime) + + return self.__filePartHash diff --git a/voduploadsdk/AliyunVodUtils.py b/voduploadsdk/AliyunVodUtils.py new file mode 100644 index 0000000..5a477b9 --- /dev/null +++ b/voduploadsdk/AliyunVodUtils.py @@ -0,0 +1,325 @@ +# -*- coding: UTF-8 -*- +import os,sys +import hashlib +import datetime +import functools +import logging +from oss2.exceptions import OssError +from aliyunsdkcore.acs_exception.exceptions import ServerException +from aliyunsdkcore.acs_exception.exceptions import ClientException +import traceback +import requests + 
if sys.version_info[0] == 3:
    import urllib.parse
else:
    # BUG FIX: urlEncode() references `urllib.urlencode` on Python 2, but only
    # `unquote` was imported, so the module name `urllib` was unbound.
    import urllib
    from urllib import unquote


VOD_PRINT_INFO_LOG_SWITCH = 1

class AliyunVodLog:
    """
    VOD logging helper built on `logging`; optionally echoes to stdout when
    VOD_PRINT_INFO_LOG_SWITCH is set.
    """
    @staticmethod
    def printLogStr(msg, *args, **kwargs):
        if VOD_PRINT_INFO_LOG_SWITCH:
            print("[%s]%s" % (AliyunVodUtils.getCurrentTimeStr(), msg))

    @staticmethod
    def info(msg, *args, **kwargs):
        logging.info(msg, *args, **kwargs)
        AliyunVodLog.printLogStr(msg, *args, **kwargs)

    @staticmethod
    def error(msg, *args, **kwargs):
        logging.error(msg, *args, **kwargs)
        AliyunVodLog.printLogStr(msg, *args, **kwargs)

    @staticmethod
    def warning(msg, *args, **kwargs):
        logging.warning(msg, *args, **kwargs)
        AliyunVodLog.printLogStr(msg, *args, **kwargs)

logger = AliyunVodLog


class AliyunVodUtils:
    """
    Static utilities for the VOD upload SDK: string truncation, extension and
    file-name extraction, URL (de)coding, MD5, endpoint conversion, etc.
    """

    # Truncate to at most maxBytes encoded bytes without cutting a multi-byte
    # character in half (shrink the unicode substring until it fits).
    @staticmethod
    def subString(strVal, maxBytes, charSet='utf-8'):
        i = maxBytes
        if sys.version_info[0] == 3:
            while len(strVal.encode(charSet)) > maxBytes:
                if i < 0:
                    return ''
                strVal = strVal[:i]
                i -= 1
        else:
            while len(strVal) > maxBytes:
                if i < 0:
                    return ''
                strVal = strVal.decode(charSet)[:i].encode(charSet)
                i -= 1
        return strVal

    @staticmethod
    def getFileExtension(fileName):
        """Lower-cased extension before any '?' query, or None when there is no dot."""
        end = fileName.rfind('?')
        if end <= 0:
            end = len(fileName)

        i = fileName.rfind('.')
        if i >= 0:
            return fileName[i+1:end].lower()
        else:
            return None

    # urldecode
    @staticmethod
    def urlDecode(fileUrl):
        if sys.version_info[0] == 3:
            return urllib.parse.unquote(fileUrl)
        else:
            return unquote(fileUrl)

    # urlencode
    # NOTE(review): urlencode() expects a mapping/sequence of pairs, not a URL
    # string -- possibly `quote` was intended; confirm against callers.
    @staticmethod
    def urlEncode(fileUrl):
        if sys.version_info[0] == 3:
            return urllib.parse.urlencode(fileUrl)
        else:
            return urllib.urlencode(fileUrl)

    # Strip any '?...' query from a URL and return (brief path, decoded base name).
    @staticmethod
    def getFileBriefPath(fileUrl):
        i = fileUrl.rfind('?')
        if i > 0:
            briefPath = fileUrl[:i]
        else:
            briefPath = fileUrl

        briefName = os.path.basename(briefPath)
        return briefPath, AliyunVodUtils.urlDecode(briefName)

    @staticmethod
    def getStringMd5(strVal, isEncode=True):
        """Hex MD5 of strVal; isEncode=False means strVal is already bytes."""
        m = hashlib.md5()
        m.update(strVal.encode('utf-8') if isEncode else strVal)
        return m.hexdigest()

    @staticmethod
    def getCurrentTimeStr():
        now = datetime.datetime.now()
        return now.strftime("%Y-%m-%d %H:%M:%S")

    # Convert an OSS URL to its internal endpoint when this ECS is in the same
    # region as the bucket (faster and traffic-free); unknown regions pass through.
    @staticmethod
    def convertOssInternal(ossUrl, ecsRegion=None, isVpc=False):
        if (not ossUrl) or (not ecsRegion):
            return ossUrl

        availableRegions = ['cn-qingdao', 'cn-beijing', 'cn-zhangjiakou', 'cn-huhehaote', 'cn-hangzhou', 'cn-shanghai', 'cn-shenzhen',
                            'cn-hongkong', 'ap-southeast-1', 'ap-southeast-2', 'ap-southeast-3',
                            'ap-northeast-1', 'us-west-1', 'us-east-1', 'eu-central-1', 'me-east-1']
        if ecsRegion not in availableRegions:
            return ossUrl

        ossUrl = ossUrl.replace("https:", "http:")
        if isVpc:
            return ossUrl.replace("oss-%s.aliyuncs.com" % (ecsRegion), "vpc100-oss-%s.aliyuncs.com" % (ecsRegion))
        else:
            return ossUrl.replace("oss-%s.aliyuncs.com" % (ecsRegion), "oss-%s-internal.aliyuncs.com" % (ecsRegion))

    # Coerce bytes to unicode text; other inputs pass through unchanged.
    @staticmethod
    def toUnicode(data):
        if isinstance(data, bytes):
            return data.decode('utf-8')
        else:
            return data

    # Replace the file-name component of a path/URL; handles both '/' and '\\'
    # (Windows) separators. Returns None when no separator is present.
    @staticmethod
    def replaceFileName(filePath, replace):
        if len(filePath) <= 0 or len(replace) <= 0:
            return filePath

        filePath = AliyunVodUtils.urlDecode(filePath)
        separator = '/'
        start = filePath.rfind(separator)
        if start < 0:
            separator = '\\'
            start = filePath.rfind(separator)
            if start < 0:
                return None

        result = "%s%s%s" % (filePath[0:start], separator, replace)
        return result

    # Create the directory part of a file path.
    # NOTE(review): this definition is truncated at the end of the visible chunk;
    # the remainder continues beyond this view and is reproduced as-is.
    @staticmethod
    def mkDir(filePath):
        if len(filePath) <= 0:
            return -1

        separator = '/'
        i = filePath.rfind(separator)
        if i < 0:
            separator = '\\'
i = filePath.rfind(separator) + if i < 0: + return -2 + + dirs = filePath[:i] + if os.path.exists(dirs) and os.path.isdir(dirs): + return 0 + + os.makedirs(dirs) + return 1 + + + +class AliyunVodException(Exception): + """ + VOD上传SDK的异常类,做统一的异常处理,外部捕获此异常即可 + """ + + def __init__(self, type, code, msg, http_status=None, request_id=None): + Exception.__init__(self) + self.type = type or 'UnkownError' + self.code = code + self.message = msg + self.http_status = http_status or 'NULL' + self.request_id = request_id or 'NULL' + + def __str__(self): + return "Type: %s, Code: %s, Message: %s, HTTPStatus: %s, RequestId: %s" % ( + self.type, self.code, self.message, str(self.http_status), self.request_id) + +def catch_error(method): + """ + 装饰器,将内部异常转换成统一的异常类AliyunVodException + """ + + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + try: + return method(self, *args, **kwargs) + except ServerException as e: + # 可能原因:AK错误、账号无权限、参数错误等 + raise AliyunVodException('ServerException', e.get_error_code(), e.get_error_msg(), e.get_http_status(), e.get_request_id()) + logger.error("ServerException: %s", e) + except ClientException as e: + # 可能原因:本地网络故障(如不能连接外网)等 + raise AliyunVodException('ClientException', e.get_error_code(), e.get_error_msg()) + logger.error("ClientException: %s", e) + except OssError as e: + # 可能原因:上传凭证过期等 + raise AliyunVodException('OssError', e.code, e.message, e.status, e.request_id) + logger.error("OssError: %s", e) + except IOError as e: + # 可能原因:文件URL不能访问、本地文件无法读取等 + raise AliyunVodException('IOError', repr(e), traceback.format_exc()) + logger.error("IOError: %s", traceback.format_exc()) + except OSError as e: + # 可能原因:本地文件不存在等 + raise AliyunVodException('OSError', repr(e), traceback.format_exc()) + logger.error("OSError: %s", traceback.format_exc()) + except AliyunVodException as e: + # 可能原因:参数错误 + raise e + logger.error("VodException: %s", e) + except Exception as e: + raise AliyunVodException('UnkownException', repr(e), 
traceback.format_exc()) + logger.error("UnkownException: %s", traceback.format_exc()) + except: + raise AliyunVodException('UnkownError', 'UnkownError', traceback.format_exc()) + logger.error("UnkownError: %s", traceback.format_exc()) + + return wrapper + + +class AliyunVodDownloader: + """ + VOD网络文件的下载类,上传网络文件时会先下载到本地临时目录,再上传到点播 + """ + + def __init__(self, localDir=None): + if localDir: + self.__localDir = localDir + else: + p = os.path.dirname(os.path.realpath(__file__)) + self.__localDir = os.path.dirname(p) + '/dlfiles' + + def setSaveLocalDir(self, localDir): + self.__localDir = localDir + + def getSaveLocalDir(self): + return self.__localDir + + def downloadFile(self, fileUrl, localFileName, fileSize=None): + localPath = self.__localDir + '/' + localFileName + logger.info("Download %s To %s" % (fileUrl, localPath)) + try: + lsize = self.getFileSize(localPath) + if fileSize and lsize == fileSize: + logger.info('Download OK, File Exists') + return 0, localPath + + AliyunVodUtils.mkDir(self.__localDir) + + err, webPage = self.__openWebFile(fileUrl, lsize) + if err == 0: + logger.info('Download OK, File Exists') + webPage.close() + return 0, localPath + + fileObj = open(localPath, 'ab+') + for chunk in webPage.iter_content(chunk_size=8 * 1024): + if chunk: + fileObj.write(chunk) + except Exception as e: + logger.error("Download fail: %s" % (e)) + return -1, None + + fileObj.close() + webPage.close() + logger.info('Download OK') + return 1, localPath + + def getFileSize(self, filePath): + try: + lsize = os.stat(filePath).st_size + except: + lsize = 0 + + return lsize + + + def __openWebFile(self, fileUrl, offset): + webPage = None + try: + headers = {'Range': 'bytes=%d-' % offset} + webPage = requests.get(fileUrl, stream=True, headers=headers, timeout=120, verify=False) + status_code = webPage.status_code + err = -1 + if status_code in [200, 206]: + err = 1 + elif status_code == 416: + err = 0 + else: + logger.error("Download offset %s fail, invalid url, status: 
%s" % (offset, status_code)) + except Exception as e: + logger.error("Download offset %s fail: %s" % (offset, e)) + err = -2 + finally: + return err, webPage + diff --git a/voduploadsdk/ChangeLog.txt b/voduploadsdk/ChangeLog.txt new file mode 100755 index 0000000..084c65b --- /dev/null +++ b/voduploadsdk/ChangeLog.txt @@ -0,0 +1,20 @@ +2019-04-12 Version: 1.3.1 +1. 上传时可指定应用ID,以实现多应用体系的资源隔离 +2. 支持上传时指定工作流ID,可自动化媒体处理 + +2019-02-12 Version: 1.3.0 +1. 可指定点播中心(默认为上海)和存储区域,便于海外上传 +2. 支持辅助媒资(水印、字幕文件等)的上传 +3. 支持上传时设置UserData等个性化配置和更多元数据 +4. 上传网络文件调整为先下载到本地,再上传到点播,以支持大文件上传(最大48.8TB) +5. 改进m3u8视频的上传,提供默认解析m3u8分片信息的接口,也可自定义分片列表 + +2018-07-05 Version: 1.2.1 +1. 支持设置视频存储区域UploadVideoRequest.setStorageLocation +2. 修复上传大文件时上传凭证过期未刷新的问题 + +2017-12-21 Version: 1.1.1 +1. 支持上传本地单个视频、m3u8视频(含ts分片)、图片文件到点播 +2. 支持上传网络上的(HTTP/HTTPS链接,含OSS链接)的单个视频、m3u8视频(含ts分片)、图片文件到点播. +3. 支持Python2、Python3版本 + diff --git a/voduploadsdk/UploadAttachedMediaRequest.py b/voduploadsdk/UploadAttachedMediaRequest.py new file mode 100644 index 0000000..a09cfe9 --- /dev/null +++ b/voduploadsdk/UploadAttachedMediaRequest.py @@ -0,0 +1,87 @@ +# -*- coding: UTF-8 -*- +""" + # Class UploadAttachedMediaRequest + # + # Aliyun VoD's Upload Attached Media(such as watermark,subtitle files) Request class, which wraps parameters to upload an media file into VoD. + # Users could pass parameters to AliyunVodUploader, including File Path,Title,etc. via an UploadAttachedMediaRequest instance. 
+ # For more details, please check out the VoD API document: https://help.aliyun.com/document_detail/98467.html
+"""
+
+from voduploadsdk.AliyunVodUtils import *
+class UploadAttachedMediaRequest:
+    def __init__(self, filePath, businessType, title=None, fileExt=None):
+        """
+        Constructor for UploadAttachedMediaRequest.
+        :param filePath: string, absolute local path or network URL of the file; must carry an extension
+        :return
+        """
+        self.businessType = businessType
+        self.filePath = None
+        self.fileName = None
+        self.mediaExt = None
+        self.title = None
+        self.setFilePath(filePath, title, fileExt)
+
+        self.fileSize = None
+        self.cateId = None
+        self.tags = None
+        self.description = None
+        self.userData = None
+        self.storageLocation = None
+        self.appId = None
+        self.workflowId = None
+
+
+    def setFilePath(self, filePath, title=None, fileExt=None):
+        # Derive the extension from the path when not given explicitly;
+        # an extension is mandatory for attached media.
+        if fileExt is None:
+            fileExt = AliyunVodUtils.getFileExtension(filePath)
+            if not fileExt:
+                raise AliyunVodException('ParameterError', 'InvalidParameter', 'filePath has no Extension')
+
+        fileExt = fileExt.lstrip('.')
+        self.mediaExt = fileExt
+        self.filePath = AliyunVodUtils.toUnicode(filePath)
+
+        # fileName is the brief path (query string stripped) with the extension ensured.
+        briefPath, briefName = AliyunVodUtils.getFileBriefPath(self.filePath)
+        self.fileName = briefPath
+        if fileExt and (not self.fileName.endswith('.' + fileExt)):
+            self.fileName = self.fileName + '.' + fileExt
+
+        # An explicit title always wins; otherwise default to the file name once.
+        if title:
+            self.title = title
+        else:
+            if self.title is None:
+                self.title = briefName
+
+
+    def setBusinessType(self, businessType):
+        self.businessType = businessType
+
+    def setTitle(self, title):
+        self.title = title
+
+    def setFileSize(self, fileSize):
+        self.fileSize = fileSize
+
+    def setCateId(self, cateId):
+        self.cateId = cateId
+
+    def setTags(self, tags):
+        self.tags = tags
+
+    def setDescription(self, description):
+        self.description = description
+
+    def setStorageLocation(self, storageLocation):
+        self.storageLocation = storageLocation
+
+    def setUserData(self, userData):
+        self.userData = userData
+
+    def setAppId(self, appId):
+        self.appId = appId
+
+    def setWorkflowId(self, workflowId):
+        self.workflowId = workflowId
+
+
diff --git a/voduploadsdk/UploadImageRequest.py b/voduploadsdk/UploadImageRequest.py
new file mode 100644
index 0000000..73f6baa
--- /dev/null
+++ b/voduploadsdk/UploadImageRequest.py
@@ -0,0 +1,84 @@
+# -*- coding: UTF-8 -*-
+"""
+ # Class UploadImageRequest
+ #
+ # Aliyun VoD's Upload Image Request class, which wraps parameters to upload an image into VoD.
+ # Users could pass parameters to AliyunVodUploader, including File Path,Title,etc. via an UploadImageRequest instance.
+ # For more details, please check out the VoD API document: https://help.aliyun.com/document_detail/55619.html
+"""
+
+from voduploadsdk.AliyunVodUtils import *
+class UploadImageRequest:
+    def __init__(self, filePath, title=None, fileExt=None):
+        """
+        Constructor for UploadImageRequest.
+        (FIX: docstring previously said "constructor for UploadVideoRequest" — copy-paste error.)
+        :param filePath: string, absolute local path or network URL of the file; must carry an extension
+        :param title: string, image title
+        :return
+        """
+        self.filePath = None
+        self.fileName = None
+        self.imageExt = None
+        self.mediaExt = None
+        self.title = None
+        self.setFilePath(filePath, title, fileExt)
+
+        self.imageType = 'default'
+        self.cateId = None
+        self.tags = None
+        self.description = None
+        self.userData = None
+        self.storageLocation = None
+        self.appId = None
+        self.workflowId = None
+
+    def setFilePath(self, filePath, title=None, fileExt=None):
+        # Derive the extension from the path when not given explicitly;
+        # an extension is mandatory for images.
+        if fileExt is None:
+            fileExt = AliyunVodUtils.getFileExtension(filePath)
+            if not fileExt:
+                raise AliyunVodException('ParameterError', 'InvalidParameter', 'filePath has no Extension')
+
+        fileExt = fileExt.lstrip('.')
+        self.imageExt = fileExt
+        self.mediaExt = fileExt
+        self.filePath = AliyunVodUtils.toUnicode(filePath)
+
+        # fileName is the brief path (query string stripped) with the extension ensured.
+        briefPath, briefName = AliyunVodUtils.getFileBriefPath(self.filePath)
+        self.fileName = briefPath
+
+        if fileExt and (not self.fileName.endswith('.' + fileExt)):
+            self.fileName = self.fileName + '.' + fileExt
+
+        # An explicit title always wins; otherwise default to the file name once.
+        if title:
+            self.title = title
+        else:
+            if self.title is None:
+                self.title = briefName
+
+
+    def setImageType(self, imageType):
+        self.imageType = imageType
+
+    def setTitle(self, title):
+        self.title = title
+
+    def setCateId(self, cateId):
+        self.cateId = cateId
+
+    def setTags(self, tags):
+        self.tags = tags
+
+    def setDescription(self, description):
+        self.description = description
+
+    def setStorageLocation(self, storageLocation):
+        self.storageLocation = storageLocation
+
+    def setUserData(self, userData):
+        self.userData = userData
+
+    def setAppId(self, appId):
+        self.appId = appId
+
+    def setWorkflowId(self, workflowId):
+        self.workflowId = workflowId
diff --git a/voduploadsdk/UploadVideoRequest.py b/voduploadsdk/UploadVideoRequest.py
new file mode 100644
index 0000000..afe3b35
--- /dev/null
+++ b/voduploadsdk/UploadVideoRequest.py
@@ -0,0 +1,86 @@
+# -*- coding: UTF-8 -*-
+"""
+ # Class UploadVideoRequest
+ #
+ # Aliyun VoD's Upload Video Request class, which wraps parameters to upload a video into VoD.
+ # Users could pass parameters to AliyunVodUploader, including File Path,Title,etc. via an UploadVideoRequest instance.
+ # For more details, please check out the VoD API document: https://help.aliyun.com/document_detail/55407.html
+"""
+
+from voduploadsdk.AliyunVodUtils import *
+class UploadVideoRequest:
+    def __init__(self, filePath, title=None, fileExt=None):
+        """
+        Constructor for UploadVideoRequest.
+        :param filePath: string, absolute local path or network URL of the file; must carry an extension
+        :param title: string, video title (max 128 bytes); defaults to the file name when omitted
+        :return
+        """
+        self.filePath = None
+        self.fileName = None
+        self.mediaExt = None
+        self.title = None
+        self.setFilePath(filePath, title, fileExt)
+
+        self.cateId = None
+        self.tags = None
+        self.description = None
+        self.coverURL = None
+        self.templateGroupId = None
+        self.isShowWatermark = None
+        self.userData = None
+        self.storageLocation = None
+        self.uploadId = None
+        self.appId = None
+        self.workflowId = None
+
+    def setFilePath(self, filePath, title=None, fileExt=None):
+        # Derive the extension from the path when not given explicitly;
+        # an extension is mandatory for videos.
+        if fileExt is None:
+            fileExt = AliyunVodUtils.getFileExtension(filePath)
+            if not fileExt:
+                raise AliyunVodException('ParameterError', 'InvalidParameter', 'filePath has no Extension')
+
+        fileExt = fileExt.lstrip('.')
+        self.mediaExt = fileExt
+        self.filePath = AliyunVodUtils.toUnicode(filePath)
+
+        # fileName is the brief path (query string stripped) with the extension ensured.
+        briefPath, briefName = AliyunVodUtils.getFileBriefPath(self.filePath)
+        self.fileName = briefPath
+        if fileExt and (not self.fileName.endswith('.' + fileExt)):
+            self.fileName = self.fileName + '.' + fileExt
+
+        # An explicit title always wins; otherwise default to the file name once.
+        if title:
+            self.title = title
+        else:
+            if self.title is None:
+                self.title = briefName
+
+
+    def setCateId(self, cateId):
+        self.cateId = cateId
+
+    def setTags(self, tags):
+        self.tags = tags
+
+    def setDescription(self, description):
+        self.description = description
+
+    def setCoverURL(self, coverURL):
+        self.coverURL = coverURL
+
+    def setTemplateGroupId(self, templateGroupId):
+        self.templateGroupId = templateGroupId
+
+    # Disable the watermark for this single upload (only meaningful when a
+    # global watermark is configured and enabled by the transcode template).
+    def shutdownWatermark(self):
+        self.isShowWatermark = False
+
+    # Set the upload ID; can be used to correlate imported videos.
+    def setUploadId(self, uploadId):
+        self.uploadId = uploadId
+
+    def setAppId(self, appId):
+        self.appId = appId
+
+    def setWorkflowId(self, workflowId):
+        self.workflowId = workflowId
diff --git a/voduploadsdk/__init__.py b/voduploadsdk/__init__.py
new file mode 100644
index 0000000..7bd766b
--- /dev/null
+++ b/voduploadsdk/__init__.py
@@ -0,0 +1,3 @@
+__version__ = '1.3.1'
+
+
diff --git a/yolov5.py b/yolov5.py
new file mode 100644
index 0000000..4973922
--- /dev/null
+++ b/yolov5.py
@@ -0,0 +1,107 @@
+from models.experimental import attempt_load
+import tensorrt as trt
+import sys
+from segutils.trtUtils import yolov5Trtforward
+from utilsK.queRiver import getDetectionsFromPreds,img_pad
+from utils.datasets import letterbox
+import numpy as np
+import torch,time
+import os
+def score_filter_byClass(pdetections, score_para_2nd):
+    # Keep detections whose score exceeds the per-class threshold; the
+    # threshold table may be keyed by int or str class id, default 0.7.
+    ret = []
+    for det in pdetections:
+        score, cls = det[4], det[5]
+        if int(cls) in score_para_2nd.keys():
+            score_th = score_para_2nd[int(cls)]
+        elif str(int(cls)) in score_para_2nd.keys():
+            score_th = score_para_2nd[str(int(cls))]
+        else:
+            score_th = 0.7
+        if score > score_th:
+            ret.append(det)
+    return ret
+
+class yolov5Model(object):
+    def __init__(self, weights=None, par={}):
+        """Load a yolov5 detector from .engine (TensorRT), .pt/.pth (torch) or .jit (torchscript) weights."""
+        self.par = par
+        self.device = par['device']
+        self.half = par['half']
+
+        # Pick the inference backend from the weights file extension.
+        if weights.endswith('.engine'):
+            self.infer_type = 'trt'
+        elif weights.endswith('.pth') or weights.endswith('.pt'):
+            self.infer_type = 'pth'
+        elif weights.endswith('.jit'):
+            self.infer_type = 'jit'
+        else:
+            print('#########ERROR:', weights, ': no registered inference type, exit')
+            sys.exit(0)
+
+        if self.infer_type == 'trt':
+            logger = trt.Logger(trt.Logger.ERROR)
+            with open(weights, "rb") as f, trt.Runtime(logger) as runtime:
+                self.model = runtime.deserialize_cuda_engine(f.read())  # deserialize a local trt file into an ICudaEngine
+        elif self.infer_type == 'pth':
+            self.model = attempt_load(weights, map_location=self.device)  # load FP32 model
+            if self.half: self.model.half()
+        elif self.infer_type == 'jit':
+            # FIX: the assert message previously lacked the '%' interpolation
+            # and always printed the literal "%s not exists".
+            assert os.path.exists(weights), "%s not exists" % (weights)
+            self.model = torch.jit.load(weights, map_location=self.device)  # load FP32 model
+
+        # Optional second-stage per-class score filtering.
+        if 'score_byClass' in par.keys():
+            self.score_byClass = par['score_byClass']
+        else:
+            self.score_byClass = None
+
+        print('#########加载模型:', weights, ' 类型:', self.infer_type)
+
+    def eval(self, image):
+        # Run detection on a single BGR image; returns (detections, timing string).
+        t0 = time.time()
+        if self.infer_type != 'jit':
+            img = self.preprocess_image(image)
+            t1 = time.time()
+            if self.infer_type == 'trt':
+                pred = yolov5Trtforward(self.model, img)
+            else:
+                pred = self.model(img, augment=False)[0]
+        else:
+            # jit models consume the raw image and return early.
+            # NOTE(review): the "pre-process" figure here is the total time, kept as-is.
+            pred = self.model(image)
+            t3 = time.time()
+            timeOut = 'yolov5 :%.1f (pre-process:%.1f, ) ' % (self.get_ms(t3, t0), self.get_ms(t3, t0))
+            return pred, timeOut
+
+        t2 = time.time()
+        if 'ovlap_thres_crossCategory' in self.par.keys():
+            ovlap_thres = self.par['ovlap_thres_crossCategory']
+        else:
+            ovlap_thres = None
+
+        p_result, timeOut = getDetectionsFromPreds(pred, img, image, conf_thres=self.par['conf_thres'], iou_thres=self.par['iou_thres'], ovlap_thres=ovlap_thres, padInfos=self.padInfos)
+        if self.score_byClass:
+            p_result[2] = score_filter_byClass(p_result[2], self.score_byClass)
+
+        t3 = time.time()
+        timeOut = 'yolov5 :%.1f (pre-process:%.1f, inference:%.1f, post-process:%.1f) ' % (self.get_ms(t3, t0), self.get_ms(t1, t0), self.get_ms(t2, t1), self.get_ms(t3, t2))
+        return p_result[2], timeOut
+
+    def get_ms(self, t1, t0):
+        return (t1 - t0) * 1000.0
+
+    def preprocess_image(self, image):
+        # Resize/pad to 640, BGR->RGB, HWC->CHW, normalize to [0,1] on self.device.
+        if self.infer_type == 'trt':
+            # TRT engines need a fixed 640x640 input; img_pad also returns the pad info
+            # required later to map boxes back to the original image.
+            img, padInfos = img_pad(image, size=(640, 640, 3)); img = [img]
+            self.padInfos = padInfos
+        else:
+            img = [letterbox(x, 640, auto=True, stride=32)[0] for x in [image]]
+            self.padInfos = None
+        # Stack
+        img = np.stack(img, 0)
+        # Convert
+        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
+        img = np.ascontiguousarray(img)
+        img = torch.from_numpy(img).to(self.device)
+        img = img.half() if self.half else img.float()  # uint8 to fp16/32
+        img /= 255.0
+        return img