commit 37f133a465d147b2a4e101f88f46fe68d9b7ab02 Author: thsw Date: Mon Aug 1 17:10:06 2022 +0800 demo diff --git a/conf/bak/model_5class.json b/conf/bak/model_5class.json new file mode 100644 index 0000000..e1c111d --- /dev/null +++ b/conf/bak/model_5class.json @@ -0,0 +1,16 @@ +{ + + "gpu_process":{"det_weights":"../yolov5/weights/best_5classes.pt","seg_nclass":2,"seg_weights": "../yolov5/weights/segmentation/BiSeNet/checkpoint.pth" }, + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"labelnames":"../yolov5/config/labelnames.json","fpsample":240,"debug":false , "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]],"outImaDir":"problems/images_tmp","outVideoDir":"problems/videos_save" }, + + "push_process":{ "OutVideoW":1920, "OutVideoH":1080 }, + "AI_video_save": {"onLine":false,"offLine":true }, + "imageTxtFile":true, + "logChildProcessOffline":"logs/logChildProcess/offline", + "logChildProcessOnline":"logs/logChildProcess/online", + "StreamWaitingTime":240, + "StreamRecoveringTime":180 + + +} diff --git a/conf/bak/model_9class.json b/conf/bak/model_9class.json new file mode 100644 index 0000000..2b60325 --- /dev/null +++ b/conf/bak/model_9class.json @@ -0,0 +1,16 @@ +{ + + "gpu_process":{"det_weights":"../weights/yolov5/class9/weights/best.pt","seg_nclass":2,"seg_weights": "../yolov5/weights/segmentation/BiSeNet/checkpoint.pth" }, + + "post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"labelnames":"../weights/yolov5/class9/labelnames.json","fpsample":240,"debug":false , "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]],"outImaDir":"problems/images_tmp","outVideoDir":"problems/videos_save" }, + + "push_process":{ "OutVideoW":1920, "OutVideoH":1080 }, + "AI_video_save": {"onLine":false,"offLine":true }, + "imageTxtFile":true, + "logChildProcessOffline":"logs/logChildProcess/offline", + "logChildProcessOnline":"logs/logChildProcess/online", + "StreamWaitingTime":240, + "StreamRecoveringTime":180 + + +} diff --git a/conf/errorDic.json b/conf/errorDic.json new file mode 100644 index 0000000..5809a4f --- /dev/null +++ b/conf/errorDic.json @@ -0,0 +1,5 @@ +{ + "101":"video uploading failure", + "102":"Stream or video ERROR" + +} diff --git a/conf/master.json b/conf/master.json new file mode 100644 index 0000000..0cf452d --- /dev/null +++ b/conf/master.json @@ -0,0 +1,14 @@ +{ +"par":{ + "server":"212.129.223.66:19092", + "server2":"101.132.127.1:19092", + "server3":"192.168.11.242:9092", + "topic": ["dsp-alg-online-tasks","dsp-alg-offline-tasks","dsp-alg-task-results"], + "group_id":"testWw", + "kafka":"mintors/kafka", + "modelJson":"conf/model.json", + "logDir":"logs/master", + "StreamWaitingTime":240, + "logPrintInterval":60 + } +} diff --git a/conf/model.json b/conf/model.json new file mode 100644 index 0000000..b35e451 --- /dev/null +++ b/conf/model.json @@ -0,0 +1,17 @@ +{ + + "gpu_process":{"det_weights":"weights/yolov5/class5/best_5classes.pt","seg_nclass":2,"seg_weights": "weights/BiSeNet/checkpoint.pth" }, + + "post_process":{ 
"name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"labelnames":"weights/yolov5/class5/labelnames.json","fpsample":240,"debug":false , "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]],"outImaDir":"problems/images_tmp","outVideoDir":"problems/videos_save" }, + + "push_process":{ "OutVideoW":1920, "OutVideoH":1080 }, + "AI_video_save": {"onLine":false,"offLine":true }, + "imageTxtFile":true, + "logChildProcessOffline":"logs/logChildProcess/offline", + "logChildProcessOnline":"logs/logChildProcess/online", + "TaskStatusQueryUrl":"http://192.168.11.241:1011/api/web/serviceInst", + "StreamWaitingTime":240, + "StreamRecoveringTime":600 + + +} diff --git a/conf/platech.ttf b/conf/platech.ttf new file mode 100644 index 0000000..d66a970 Binary files /dev/null and b/conf/platech.ttf differ diff --git a/conf/send_oss.json b/conf/send_oss.json new file mode 100644 index 0000000..2600528 --- /dev/null +++ b/conf/send_oss.json @@ -0,0 +1,20 @@ +{ + "indir":"problems/images_tmp", + "outdir":"problems/images_save", + "jsonDir" : "mintors/kafka/", + "hearBeatTimeMs":30, + "logdir":"logs/send", + "videoBakDir":"problems/videos_save", + "ossPar":{"Epoint":"http://oss-cn-shanghai.aliyuncs.com", + "AId":"LTAI5tSJ62TLMUb4SZuf285A", + "ASt":"MWYynm30filZ7x0HqSHlU3pdLVNeI7", + "bucketName":"ta-tech-image" + }, + "vodPar":{ + "AId":"LTAI5tE7KWN9fsuGU7DyfYF4", + "ASt":"yPPCyfsqWgrTuoz5H4sisY0COclx8E" + }, + "kafkaPar":{"boostServer1":["192.168.11.242:9092"] ,"boostServer2":["101.132.127.1:19092"], "boostServer":["212.129.223.66:19092"] ,"topic":"dsp-alg-task-results"}, + "labelnamesFile":"weights/yolov5/class5/labelnames.json" + +} diff --git a/demo.py b/demo.py new file mode 100644 index 0000000..03225fa --- /dev/null +++ b/demo.py @@ -0,0 +1,90 @@ + +import cv2,os,time +from models.experimental import attempt_load +from segutils.segmodel import SegModel,get_largest_contours +from utils.torch_utils import select_device +from utilsK.queRiver import get_labelnames,get_label_arrays,post_process_ +from utils.datasets import letterbox +import numpy as np +import torch + +def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3]): + #输入参数 + # im0s---原始图像列表 + # model---检测模型,segmodel---分割模型 + #输出:两个元素(列表,字符)构成的元组,[im0s[0],im0,det_xywh,iframe],strout + # [im0s[0],im0,det_xywh,iframe]中, + # im0s[0]--原始图像,im0--AI处理后的图像,iframe--帧号/暂时不需用到。 + # det_xywh--检测结果,是一个列表。 + # 其中每一个元素表示一个目标构成如:[float(cls_c), xc,yc,w,h, float(conf_c)] + # #cls_c--类别,如0,1,2,3; xc,yc,w,h--中心点坐标及宽;conf_c--得分, 取值范围在0-1之间 + # #strout---统计AI处理个环节的时间 + + # Letterbox + img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s] + # Stack + img = np.stack(img, 0) + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + + seg_pred,segstr = segmodel.eval(im0s[0] ) + pred = model(img,augment=False)[0] + datas = [[''], img, im0s, None,pred,seg_pred,10] + + p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList) + + return p_result,timeOut + +def main(): + ##预先设置的参数 + 
device_='1' ## select the device; options: 'cpu','0','1' + + ## The following parameters are fixed for now + Detweights = "weights/yolov5/class5/best_5classes.pt" + seg_nclass = 2 + Segweights = "weights/BiSeNet/checkpoint.pth" + conf_thres,iou_thres,classes= 0.25,0.45,5 + labelnames = "weights/yolov5/class5/labelnames.json" + rainbows = [ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] + allowedList=[0,1,2,3] + + + ## Load the models and prepare the label arrays for display + device = select_device(device_) + names=get_labelnames(labelnames) + label_arraylist = get_label_arrays(names,rainbows,outfontsize=40,fontpath="conf/platech.ttf") + half = device.type != 'cpu' # half precision only supported on CUDA + model = attempt_load(Detweights, map_location=device) # load FP32 model + if half: model.half() + segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device) + + + + ## Image test + #url='images/examples/20220624_响水河_12300_1621.jpg' + impth = 'images/examples/' + outpth = 'images/results/' + folders = os.listdir(impth) + for i in range(len(folders)): + imgpath = os.path.join(impth, folders[i]) + im0s=[cv2.imread(imgpath)] + time00 = time.time() + p_result,timeOut = AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,half,device,conf_thres, iou_thres,allowedList) + time11 = time.time() + image_array = p_result[1] + cv2.imwrite( os.path.join( outpth,folders[i] ) ,image_array ) + print('----process:%s'%(folders[i]), (time11 - time00) * 1000) # AI processing time in ms + + + + + +if __name__=="__main__": + main() \ No newline at end of file diff --git a/images/examples/2022-07-27-13-35-23_frame-160-240_type-排口_UfNOMh78smt0wezB_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-35-23_frame-160-240_type-排口_UfNOMh78smt0wezB_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..0480431 Binary files /dev/null and b/images/examples/2022-07-27-13-35-23_frame-160-240_type-排口_UfNOMh78smt0wezB_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-35-32_frame-428-480_type-排污口_jFuIUO357Z6kBxXg_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-35-32_frame-428-480_type-排污口_jFuIUO357Z6kBxXg_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..bf276b7 Binary files /dev/null and b/images/examples/2022-07-27-13-35-32_frame-428-480_type-排污口_jFuIUO357Z6kBxXg_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-35-41_frame-516-720_type-排污口_o2QNRX4xrpcziV9w_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-35-41_frame-516-720_type-排污口_o2QNRX4xrpcziV9w_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..f2034de Binary files /dev/null and b/images/examples/2022-07-27-13-35-41_frame-516-720_type-排污口_o2QNRX4xrpcziV9w_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-35-50_frame-931-960_type-排污口_h6Xr2u0T4Y5ZkFUA_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-35-50_frame-931-960_type-排污口_h6Xr2u0T4Y5ZkFUA_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..a6e6e2e Binary
files /dev/null and b/images/examples/2022-07-27-13-35-50_frame-931-960_type-排污口_h6Xr2u0T4Y5ZkFUA_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-35-59_frame-1068-1200_type-排污口_epkNQdPqMcgsC8Du_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-35-59_frame-1068-1200_type-排污口_epkNQdPqMcgsC8Du_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..e1ce01b Binary files /dev/null and b/images/examples/2022-07-27-13-35-59_frame-1068-1200_type-排污口_epkNQdPqMcgsC8Du_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-36-08_frame-1315-1440_type-排污口_28X6uDsAYzi0VmaG_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-36-08_frame-1315-1440_type-排污口_28X6uDsAYzi0VmaG_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..e95310b Binary files /dev/null and b/images/examples/2022-07-27-13-36-08_frame-1315-1440_type-排污口_28X6uDsAYzi0VmaG_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-36-16_frame-1519-1680_type-排污口_LUiuGqmd10XMTtKY_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-36-16_frame-1519-1680_type-排污口_LUiuGqmd10XMTtKY_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..5f273c0 Binary files /dev/null and b/images/examples/2022-07-27-13-36-16_frame-1519-1680_type-排污口_LUiuGqmd10XMTtKY_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-36-25_frame-1750-1920_type-排口_mVkyctd4HIuzNreF_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-36-25_frame-1750-1920_type-排口_mVkyctd4HIuzNreF_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..eaf9827 Binary files /dev/null and b/images/examples/2022-07-27-13-36-25_frame-1750-1920_type-排口_mVkyctd4HIuzNreF_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-36-34_frame-2116-2160_type-水生植被_QuXwfae6vck3j4qJ_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-36-34_frame-2116-2160_type-水生植被_QuXwfae6vck3j4qJ_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..f828240 Binary files /dev/null and b/images/examples/2022-07-27-13-36-34_frame-2116-2160_type-水生植被_QuXwfae6vck3j4qJ_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-36-43_frame-2329-2400_type-水生植被_EqPuHXg2hsxUA5Y7_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-36-43_frame-2329-2400_type-水生植被_EqPuHXg2hsxUA5Y7_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..331e08a Binary files /dev/null and b/images/examples/2022-07-27-13-36-43_frame-2329-2400_type-水生植被_EqPuHXg2hsxUA5Y7_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-36-52_frame-2604-2640_type-水生植被_QjZ4FYy3twCdr5ga_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg 
b/images/examples/2022-07-27-13-36-52_frame-2604-2640_type-水生植被_QjZ4FYy3twCdr5ga_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..e7db7cb Binary files /dev/null and b/images/examples/2022-07-27-13-36-52_frame-2604-2640_type-水生植被_QjZ4FYy3twCdr5ga_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-37-00_frame-2728-2880_type-水生植被_RC5sTu1qOdXH2WBh_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-00_frame-2728-2880_type-水生植被_RC5sTu1qOdXH2WBh_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..2da7850 Binary files /dev/null and b/images/examples/2022-07-27-13-37-00_frame-2728-2880_type-水生植被_RC5sTu1qOdXH2WBh_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-37-10_frame-3117-3120_type-水生植被_genKJUQ2XFxYGSA3_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-10_frame-3117-3120_type-水生植被_genKJUQ2XFxYGSA3_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..f375678 Binary files /dev/null and b/images/examples/2022-07-27-13-37-10_frame-3117-3120_type-水生植被_genKJUQ2XFxYGSA3_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-37-19_frame-3131-3360_type-排口_AC6d4PocQ7W2DzrS_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-19_frame-3131-3360_type-排口_AC6d4PocQ7W2DzrS_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..ededab0 Binary files /dev/null and b/images/examples/2022-07-27-13-37-19_frame-3131-3360_type-排口_AC6d4PocQ7W2DzrS_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-37-28_frame-3517-3600_type-排污口_BPJyiMcu5X1mZfGw_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-28_frame-3517-3600_type-排污口_BPJyiMcu5X1mZfGw_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..3b59f36 Binary files /dev/null and b/images/examples/2022-07-27-13-37-28_frame-3517-3600_type-排污口_BPJyiMcu5X1mZfGw_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-37-37_frame-3792-3840_type-水生植被_HvUOAF1CncGgRiqJ_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-37_frame-3792-3840_type-水生植被_HvUOAF1CncGgRiqJ_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..8274f22 Binary files /dev/null and b/images/examples/2022-07-27-13-37-37_frame-3792-3840_type-水生植被_HvUOAF1CncGgRiqJ_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-37-46_frame-3992-4080_type-排污口_bdoy79MHKY6SqJz4_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-46_frame-3992-4080_type-排污口_bdoy79MHKY6SqJz4_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..2c11e7a Binary files /dev/null and b/images/examples/2022-07-27-13-37-46_frame-3992-4080_type-排污口_bdoy79MHKY6SqJz4_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git 
a/images/examples/2022-07-27-13-37-55_frame-4174-4320_type-排污口_fiKbtqDLXPZ5ANoe_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-37-55_frame-4174-4320_type-排污口_fiKbtqDLXPZ5ANoe_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..faba308 Binary files /dev/null and b/images/examples/2022-07-27-13-37-55_frame-4174-4320_type-排污口_fiKbtqDLXPZ5ANoe_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-38-05_frame-4485-4560_type-排口_ZPxLFlRT9ds1crzh_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-38-05_frame-4485-4560_type-排口_ZPxLFlRT9ds1crzh_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..a2d0068 Binary files /dev/null and b/images/examples/2022-07-27-13-38-05_frame-4485-4560_type-排口_ZPxLFlRT9ds1crzh_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-38-14_frame-4631-4800_type-排口_pyq8su0Y4xZvGNAc_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-38-14_frame-4631-4800_type-排口_pyq8su0Y4xZvGNAc_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..ac34829 Binary files /dev/null and b/images/examples/2022-07-27-13-38-14_frame-4631-4800_type-排口_pyq8su0Y4xZvGNAc_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-38-23_frame-4857-5040_type-水生植被_gB3oJ7ivRfOe0Lrj_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-38-23_frame-4857-5040_type-水生植被_gB3oJ7ivRfOe0Lrj_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..a1c8df4 Binary files /dev/null and b/images/examples/2022-07-27-13-38-23_frame-4857-5040_type-水生植被_gB3oJ7ivRfOe0Lrj_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/images/examples/2022-07-27-13-38-32_frame-5124-5280_type-水生植被_f836RQ9PDzvH5qIK_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg b/images/examples/2022-07-27-13-38-32_frame-5124-5280_type-水生植被_f836RQ9PDzvH5qIK_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg new file mode 100644 index 0000000..fce80eb Binary files /dev/null and b/images/examples/2022-07-27-13-38-32_frame-5124-5280_type-水生植被_f836RQ9PDzvH5qIK_s-off-P20220727133456697-30e25a4f15eb4756abd4571a2fcad2de_OR.jpg differ diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/__pycache__/__init__.cpython-37.pyc b/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..2d10de3 Binary files /dev/null and b/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/models/__pycache__/__init__.cpython-38.pyc b/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..2e39af9 Binary files /dev/null and b/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/models/__pycache__/common.cpython-37.pyc b/models/__pycache__/common.cpython-37.pyc new file mode 100644 index 0000000..8a36cab Binary files /dev/null and b/models/__pycache__/common.cpython-37.pyc differ diff --git a/models/__pycache__/common.cpython-38.pyc b/models/__pycache__/common.cpython-38.pyc new file mode 100644 index 0000000..3d5e7bf Binary files /dev/null and 
b/models/__pycache__/common.cpython-38.pyc differ diff --git a/models/__pycache__/experimental.cpython-37.pyc b/models/__pycache__/experimental.cpython-37.pyc new file mode 100644 index 0000000..75ca459 Binary files /dev/null and b/models/__pycache__/experimental.cpython-37.pyc differ diff --git a/models/__pycache__/experimental.cpython-38.pyc b/models/__pycache__/experimental.cpython-38.pyc new file mode 100644 index 0000000..f79b863 Binary files /dev/null and b/models/__pycache__/experimental.cpython-38.pyc differ diff --git a/models/__pycache__/yolo.cpython-38.pyc b/models/__pycache__/yolo.cpython-38.pyc new file mode 100644 index 0000000..32fcf56 Binary files /dev/null and b/models/__pycache__/yolo.cpython-38.pyc differ diff --git a/models/common.py b/models/common.py new file mode 100644 index 0000000..028dedd --- /dev/null +++ b/models/common.py @@ -0,0 +1,405 @@ +# YOLOv5 common modules + +import math +from copy import copy +from pathlib import Path + +import numpy as np +import pandas as pd +import requests +import torch +import torch.nn as nn +from PIL import Image +from torch.cuda import amp + +from utils.datasets import letterbox +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh +from utils.plots import color_list, plot_one_box +from utils.torch_utils import time_synchronized + +import warnings + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +def DWConv(c1, c2, k=1, s=1, act=True): + # Depthwise convolution + return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Conv, self).__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + 
super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2) + p = p.unsqueeze(0) + p = p.transpose(0, 3) + p = p.squeeze(3) + e = self.linear(p) + x = p + e + + x = self.tr(x) + x = x.unsqueeze(3) + x = x.transpose(0, 3) + x = x.reshape(b, self.c2, w, h) + return x + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSP, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.LeakyReLU(0.1, inplace=True) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(C3, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + + +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + +class SPP(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13)): + super(SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Focus, self).__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 
1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + # return self.conv(self.contract(x)) + + +class Contract(nn.Module): + # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super(Concat, self).__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class NMS(nn.Module): + # Non-Maximum Suppression (NMS) module + conf = 0.25 # confidence threshold + iou = 0.45 # IoU threshold + classes = None # (optional list) filter by class + + def __init__(self): + super(NMS, self).__init__() + + def forward(self, x): + return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) + + +class autoShape(nn.Module): + # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + classes = None # (optional list) filter by class + + def __init__(self, model): + super(autoShape, self).__init__() + self.model = model.eval() + + def autoshape(self): + print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() + return self + + @torch.no_grad() + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # filename: imgs = 'data/images/zidane.jpg' + # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + t = [time_synchronized()] + p = next(self.model.parameters()) # for device and type + if isinstance(imgs, torch.Tensor): # torch + with amp.autocast(enabled=p.device.type != 'cpu'): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + f = f'image{i}' # filename + if isinstance(im, str): # filename or uri + im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(im), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + t.append(time_synchronized()) + + with amp.autocast(enabled=p.device.type != 'cpu'): + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) + + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + t.append(time_synchronized()) + return Detections(imgs, y, files, t, self.names, x.shape) + + +class Detections: + # detections class for YOLOv5 inference results + def __init__(self, imgs, pred, files, times=None, names=None, shape=None): + super(Detections, self).__init__() + d = pred[0].device # device + gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape + + def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): + colors = color_list() + for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + if pred is not None: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if show or save or render: + for *box, conf, cls in pred: # xyxy, confidence, 
class + label = f'{self.names[int(cls)]} {conf:.2f}' + plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + if pprint: + print(str.rstrip(', ')) + if show: + img.show(self.files[i]) # show + if save: + f = self.files[i] + img.save(Path(save_dir) / f) # save + print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') + if render: + self.imgs[i] = np.asarray(img) + + def print(self): + self.display(pprint=True) # print results + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + + def show(self): + self.display(show=True) # show results + + def save(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir + Path(save_dir).mkdir(parents=True, exist_ok=True) + self.display(save=True, save_dir=save_dir) # save results + + def render(self): + self.display(render=True) # render results + return self.imgs + + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] + for d in x: + for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def __len__(self): + return self.n + + +class Classify(nn.Module): + # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super(Classify, self).__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/models/experimental.py b/models/experimental.py new file mode 100644 index 0000000..548353c --- /dev/null +++ b/models/experimental.py @@ -0,0 +1,134 @@ +# YOLOv5 experimental modules + +import numpy as np +import torch +import torch.nn as nn + +from models.common import Conv, DWConv +from utils.google_utils import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super(CrossConv, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super(Sum, self).__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super(GhostConv, self).__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super(GhostBottleneck, self).__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class MixConv2d(nn.Module): + # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + super(MixConv2d, self).__init__() + groups = len(k) + if equal_ch: # equal c_ per group + i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * groups + a = np.eye(groups + 1, groups, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # 
solve for equal weight indices, ax = b + + self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.LeakyReLU(0.1, inplace=True) + + def forward(self, x): + return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super(Ensemble, self).__init__() + + def forward(self, x, augment=False): + y = [] + for module in self: + y.append(module(x, augment)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, map_location=None): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + attempt_download(w) + ckpt = torch.load(w, map_location=map_location) # load + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True # pytorch 1.7.0 compatibility + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + return model[-1] # return model + else: + print('Ensemble created with %s\n' % weights) + for k in ['names', 'stride']: + setattr(model, k, getattr(model[-1], k)) + return model # return ensemble diff --git a/models/export.py b/models/export.py new file mode 100644 index 0000000..c527a47 --- /dev/null +++ b/models/export.py @@ -0,0 +1,123 @@ +"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats + +Usage: + $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1 +""" + +import argparse +import sys +import time + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +import torch +import torch.nn as nn + +import models +from models.experimental import attempt_load +from utils.activations import Hardswish, SiLU +from utils.general import colorstr, check_img_size, check_requirements, set_logging +from utils.torch_utils import select_device + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only + parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only + opt = parser.parse_args() + opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + print(opt) + set_logging() + t = time.time() + + # Load PyTorch model + device = select_device(opt.device) + model = attempt_load(opt.weights, map_location=device) # load FP32 model + labels = model.names + + # Checks + gs = int(max(model.stride)) # grid size (max stride) + opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples + + # Input + img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection + + # Update model + for k, m in model.named_modules(): + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m.act, nn.Hardswish): + m.act = Hardswish() + elif isinstance(m.act, nn.SiLU): + m.act = SiLU() + # elif isinstance(m, models.yolo.Detect): + # m.forward = m.forward_export # assign forward (optional) + model.model[-1].export = not opt.grid # set Detect() layer grid export + y = model(img) # dry run + + # TorchScript export ----------------------------------------------------------------------------------------------- + prefix = colorstr('TorchScript:') + try: + print(f'\n{prefix} starting export with torch {torch.__version__}...') + f = opt.weights.replace('.pt', '.torchscript.pt') # filename + ts = torch.jit.trace(model, img, strict=False) + ts.save(f) + print(f'{prefix} export success, saved as {f}') + except Exception as e: + print(f'{prefix} export failure: {e}') + + # ONNX export ------------------------------------------------------------------------------------------------------ + prefix = colorstr('ONNX:') + try: + import onnx + + print(f'{prefix} starting export with onnx {onnx.__version__}...') + f = opt.weights.replace('.pt', '.onnx') # filename + torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], + output_names=['classes', 'boxes'] if y is None else ['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # print(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + if opt.simplify: + try: + check_requirements(['onnx-simplifier']) + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx, + dynamic_input_shape=opt.dynamic, + input_shapes={'images': list(img.shape)} if opt.dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') + print(f'{prefix} export success, saved as {f}') + except Exception as e: + print(f'{prefix} export failure: {e}') + + # CoreML export ---------------------------------------------------------------------------------------------------- + prefix = colorstr('CoreML:') + try: + import coremltools as ct + + print(f'{prefix} starting export with coremltools {ct.__version__}...') + # convert model from torchscript and apply pixel scaling as per detect.py + model = ct.convert(ts, 
inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + f = opt.weights.replace('.pt', '.mlmodel') # filename + model.save(f) + print(f'{prefix} export success, saved as {f}') + except Exception as e: + print(f'{prefix} export failure: {e}') + + # Finish + print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml new file mode 100644 index 0000000..a07a4dc --- /dev/null +++ b/models/hub/anchors.yaml @@ -0,0 +1,58 @@ +# Default YOLOv5 anchors for COCO data + + +# P5 ------------------------------------------------------------------------------------------------------------------- +# P5-640: +anchors_p5_640: + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 + + +# P6 ------------------------------------------------------------------------------------------------------------------- +# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 +anchors_p6_640: + - [ 9,11, 21,19, 17,41 ] # P3/8 + - [ 43,32, 39,70, 86,64 ] # P4/16 + - [ 65,131, 134,130, 120,265 ] # P5/32 + - [ 282,180, 247,354, 512,387 ] # P6/64 + +# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 +anchors_p6_1280: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 +anchors_p6_1920: + - [ 28,41, 67,59, 57,141 ] # P3/8 + - [ 144,103, 129,227, 270,205 ] # P4/16 + - [ 209,452, 455,396, 358,812 ] # P5/32 + - [ 653,922, 1109,570, 1387,1187 ] # P6/64 + + +# P7 ------------------------------------------------------------------------------------------------------------------- +# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 +anchors_p7_640: + - [ 11,11, 13,30, 29,20 ] # P3/8 + - [ 30,46, 61,38, 39,92 ] # P4/16 + - [ 78,80, 146,66, 79,163 ] # P5/32 + - [ 149,150, 321,143, 157,303 ] # P6/64 + - [ 257,402, 359,290, 524,372 ] # P7/128 + +# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 +anchors_p7_1280: + - [ 19,22, 54,36, 32,77 ] # P3/8 + - [ 70,83, 138,71, 75,173 ] # P4/16 + - [ 165,159, 148,334, 375,151 ] # P5/32 + - [ 334,317, 251,626, 499,474 ] # P6/64 + - [ 750,326, 534,814, 1079,818 ] # P7/128 + +# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 
1618,1227 +anchors_p7_1920: + - [ 29,34, 81,55, 47,115 ] # P3/8 + - [ 105,124, 207,107, 113,259 ] # P4/16 + - [ 247,238, 222,500, 563,227 ] # P5/32 + - [ 501,476, 376,939, 749,711 ] # P6/64 + - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml new file mode 100644 index 0000000..38dcc44 --- /dev/null +++ b/models/hub/yolov3-spp.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml new file mode 100644 index 0000000..ff7638c --- /dev/null +++ b/models/hub/yolov3-tiny.yaml @@ -0,0 +1,41 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + ] diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml new file mode 100644 index 0000000..f2e7613 --- /dev/null +++ b/models/hub/yolov3.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer 
channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml new file mode 100644 index 0000000..e772bff --- /dev/null +++ b/models/hub/yolov5-fpn.yaml @@ -0,0 +1,42 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 6, BottleneckCSP, [1024]], # 9 + ] + +# YOLOv5 FPN head +head: + [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml new file mode 100644 index 0000000..0633a90 --- /dev/null +++ b/models/hub/yolov5-p2.yaml @@ -0,0 +1,54 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: 3 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 9 + ] 
+
+# YOLOv5 head
+head:
+  [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, C3, [ 512, False ] ],  # 13
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, C3, [ 256, False ] ],  # 17 (P3/8-small)
+
+    [ -1, 1, Conv, [ 128, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 2 ], 1, Concat, [ 1 ] ],  # cat backbone P2
+    [ -1, 1, C3, [ 128, False ] ],  # 21 (P2/4-xsmall)
+
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],
+    [ [ -1, 18 ], 1, Concat, [ 1 ] ],  # cat head P3
+    [ -1, 3, C3, [ 256, False ] ],  # 24 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 14 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, C3, [ 512, False ] ],  # 27 (P4/16-medium)
+
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],
+    [ [ -1, 10 ], 1, Concat, [ 1 ] ],  # cat head P5
+    [ -1, 3, C3, [ 1024, False ] ],  # 30 (P5/32-large)
+
+    [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5)
+  ]
diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml
new file mode 100644
index 0000000..3728a11
--- /dev/null
+++ b/models/hub/yolov5-p6.yaml
@@ -0,0 +1,56 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+
+# anchors
+anchors: 3
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, C3, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 9, C3, [ 256 ] ],
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],  # 5-P4/16
+    [ -1, 9, C3, [ 512 ] ],
+    [ -1, 1, Conv, [ 768, 3, 2 ] ],  # 7-P5/32
+    [ -1, 3, C3, [ 768 ] ],
+    [ -1, 1, Conv, [ 1024, 3, 2 ] ],  # 9-P6/64
+    [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
+    [ -1, 3, C3, [ 1024, False ] ],  # 11
+  ]
+
+# YOLOv5 head
+head:
+  [ [ -1, 1, Conv, [ 768, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 8 ], 1, Concat, [ 1 ] ],  # cat backbone P5
+    [ -1, 3, C3, [ 768, False ] ],  # 15
+
+    [ -1, 1, Conv, [ 512, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 6 ], 1, Concat, [ 1 ] ],  # cat backbone P4
+    [ -1, 3, C3, [ 512, False ] ],  # 19
+
+    [ -1, 1, Conv, [ 256, 1, 1 ] ],
+    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
+    [ [ -1, 4 ], 1, Concat, [ 1 ] ],  # cat backbone P3
+    [ -1, 3, C3, [ 256, False ] ],  # 23 (P3/8-small)
+
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],
+    [ [ -1, 20 ], 1, Concat, [ 1 ] ],  # cat head P4
+    [ -1, 3, C3, [ 512, False ] ],  # 26 (P4/16-medium)
+
+    [ -1, 1, Conv, [ 512, 3, 2 ] ],
+    [ [ -1, 16 ], 1, Concat, [ 1 ] ],  # cat head P5
+    [ -1, 3, C3, [ 768, False ] ],  # 29 (P5/32-large)
+
+    [ -1, 1, Conv, [ 768, 3, 2 ] ],
+    [ [ -1, 12 ], 1, Concat, [ 1 ] ],  # cat head P6
+    [ -1, 3, C3, [ 1024, False ] ],  # 32 (P6/64-xlarge)
+
+    [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ],  # Detect(P3, P4, P5, P6)
+  ]
diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml
new file mode 100644
index 0000000..ca8f849
--- /dev/null
+++ b/models/hub/yolov5-p7.yaml
@@ -0,0 +1,67 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.0  # model depth multiple
+width_multiple: 1.0  # layer channel multiple
+
+# anchors
+anchors: 3
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [ [ -1, 1, Focus, [ 64, 3 ] ],  # 0-P1/2
+    [ -1, 1, Conv, [ 128, 3, 2 ] ],  # 1-P2/4
+    [ -1, 3, C3, [ 128 ] ],
+    [ -1, 1, Conv, [ 256, 3, 2 ] ],  # 3-P3/8
+    [ -1, 9, C3,
[ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 + [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], + [ -1, 3, C3, [ 1280, False ] ], # 13 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 + [ -1, 3, C3, [ 1024, False ] ], # 17 + + [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 21 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 25 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) + + [ -1, 1, Conv, [ 1024, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 + [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) + + [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) + ] diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml new file mode 100644 index 0000000..340f95a --- /dev/null +++ b/models/hub/yolov5-panet.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 + ] + +# YOLOv5 PANet head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml new file mode 100644 index 0000000..11298b0 --- /dev/null +++ b/models/hub/yolov5l6.yaml @@ -0,0 +1,60 @@ +# parameters 
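+# A quick shape check implied by the settings below (a sketch; the canonical
+# computation is Detect() and parse_model() in models/yolo.py later in this
+# commit): with four anchor rows, Detect receives na = len(anchors[0]) // 2 = 3
+# anchors per level, so each of its 1x1 output convolutions emits
+# no = na * (nc + 5) = 3 * (80 + 5) = 255 channels per grid cell, i.e. one
+# (x, y, w, h, objectness) tuple plus 80 class scores for each anchor.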
+nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml new file mode 100644 index 0000000..48afc86 --- /dev/null +++ b/models/hub/yolov5m6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 
512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml new file mode 100644 index 0000000..f2d6667 --- /dev/null +++ b/models/hub/yolov5s-transformer.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml new file mode 100644 index 0000000..1df577a --- /dev/null +++ b/models/hub/yolov5s6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ 
-1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml new file mode 100644 index 0000000..5ebc021 --- /dev/null +++ b/models/hub/yolov5x6.yaml @@ -0,0 +1,60 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 768 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 + [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], + [ -1, 3, C3, [ 1024, False ] ], # 11 + ] + +# YOLOv5 head +head: + [ [ -1, 1, Conv, [ 768, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 + [ -1, 3, C3, [ 768, False ] ], # 15 + + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 19 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) + + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) + + [ -1, 1, Conv, [ 768, 3, 2 ] ], + [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 + [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) + + [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + ] diff --git a/models/yolo.py b/models/yolo.py new file mode 100644 index 0000000..f730a1e --- /dev/null +++ b/models/yolo.py @@ -0,0 +1,277 @@ +# YOLOv5 YOLO-specific modules + +import argparse +import logging +import sys +from copy import deepcopy + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(Detect, self).__init__() + self.nc = nc 
# number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super(Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.anchors /= m.stride.view(-1, 1, 1) + check_anchor_order(m) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip 
lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if profile: + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + t = time_synchronized() + for _ in range(10): + _ = m(x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... ') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... 
') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, + C3, C3TR]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3TR]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[x] for x in f]) + elif m is Detect: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # y = model(img, profile=True) + + # Tensorboard + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter() + # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml new file mode 100644 index 0000000..71ebf86 --- /dev/null +++ b/models/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml new file mode 100644 index 0000000..3c749c9 --- /dev/null +++ b/models/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, 
Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml
new file mode 100644
index 0000000..aca669d
--- /dev/null
+++ b/models/yolov5s.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 0.33  # model depth multiple
+width_multiple: 0.50  # layer channel multiple
+
+# anchors
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 9, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 1, SPP, [1024, [5, 9, 13]]],
+   [-1, 3, C3, [1024, False]],  # 9
+  ]
+
+# YOLOv5 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml
new file mode 100644
index 0000000..d3babdf
--- /dev/null
+++ b/models/yolov5x.yaml
@@ -0,0 +1,48 @@
+# parameters
+nc: 80  # number of classes
+depth_multiple: 1.33  # model depth multiple
+width_multiple: 1.25  # layer channel multiple
+
+# anchors
+anchors:
+  - [10,13, 16,30, 33,23]  # P3/8
+  - [30,61, 62,45, 59,119]  # P4/16
+  - [116,90, 156,198, 373,326]  # P5/32
+
+# YOLOv5 backbone
+backbone:
+  # [from, number, module, args]
+  [[-1, 1, Focus, [64, 3]],  # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+   [-1, 9, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+   [-1, 1, SPP, [1024, [5, 9, 13]]],
+   [-1, 3, C3, [1024, False]],  # 9
+  ]
+
+# YOLOv5 head
+head:
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+   [-1, 3, C3, [512, False]],  # 13
+
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14], 1, Concat, [1]],  # cat head P4
+   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]],  # cat head P5
+   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+  ]
diff --git a/readme.md b/readme.md
new file mode 100644
index 0000000..dd0a19d
--- /dev/null
+++ b/readme.md
@@ -0,0 +1,4 @@
+This demo was developed for DSP testing; the core piece is the AI_process helper function.
+With the environment configured correctly, run: python demo.py
+It processes the test images under images/examples
+and writes the output to images/results
diff --git a/segutils/GPUtils.py b/segutils/GPUtils.py
new file mode 100644
index 0000000..72d8088
--- /dev/null
+++ b/segutils/GPUtils.py
@@ -0,0 +1,501 @@
+# GPUtil - GPU utilization
+#
+# A Python module for programmatically getting the GPU utilization from NVIDIA GPUs using nvidia-smi
+#
+# Author: Anders Krogh Mortensen (anderskm)
+# Date: 16 January 2017
+# Web: https://github.com/anderskm/gputil
+#
+# LICENSE
+#
+# MIT License
+#
+# Copyright (c) 2017 anderskm
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from subprocess import Popen, PIPE
+from distutils import spawn
+import os
+import math
+import random
+import time
+import sys
+import platform
+import subprocess
+import numpy as np
+
+
+__version__ = '1.4.0'
+class GPU:
+    def __init__(self, ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu):
+        self.id = ID
+        self.uuid = uuid
+        self.load = load
+        self.memoryUtil = float(memoryUsed)/float(memoryTotal)
+        self.memoryTotal = memoryTotal
+        self.memoryUsed = memoryUsed
+        self.memoryFree = memoryFree
+        self.driver = driver
+        self.name = gpu_name
+        self.serial = serial
+        self.display_mode = display_mode
+        self.display_active = display_active
+        self.temperature = temp_gpu
+
+    def __str__(self):
+        return str(self.__dict__)
+
+
+class GPUProcess:
+    def __init__(self, pid, processName, gpuId, gpuUuid, gpuName, usedMemory,
+                 uid, uname):
+        self.pid = pid
+        self.processName = processName
+        self.gpuId = gpuId
+        self.gpuUuid = gpuUuid
+        self.gpuName = gpuName
+        self.usedMemory = usedMemory
+        self.uid = uid
+        self.uname = uname
+
+    def __str__(self):
+        return str(self.__dict__)
+
+def safeFloatCast(strNumber):
+    try:
+        number = float(strNumber)
+    except ValueError:
+        number = float('nan')
+    return number
+
+def getNvidiaSmiCmd():
+    if platform.system() == "Windows":
+        # If the platform is Windows and nvidia-smi
+        # could not be found from the environment path,
+        # try to find it from system drive with default installation path
+        nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive']
+    else:
+        nvidia_smi = "nvidia-smi"
+    return nvidia_smi
+
+
+def getGPUs():
+    # Get ID, processing and memory utilization for all GPUs
+    # via a single nvidia-smi query; returns [] if the call fails.
+    nvidia_smi = getNvidiaSmiCmd()
+    try:
+        p = subprocess.run([
+            nvidia_smi,
+            "--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu",
+            "--format=csv,noheader,nounits"
+        ], stdout=subprocess.PIPE, encoding='utf8')
+        stdout, stderror = p.stdout, p.stderr
+    except Exception:
+        return []
+    output = stdout
+    # Parse output: split on line break, one CSV row per device
+    lines = output.split(os.linesep)
+    numDevices = len(lines) - 1
+    GPUs = []
+    for g in range(numDevices):
+        line = lines[g]
+        vals = line.split(', ')
+        deviceIds = int(vals[0])
+        uuid = vals[1]
+        gpuUtil = safeFloatCast(vals[2]) / 100
+        memTotal = safeFloatCast(vals[3])
+        memUsed = safeFloatCast(vals[4])
+        memFree = safeFloatCast(vals[5])
+        driver = vals[6]
+        gpu_name = vals[7]
+        serial = vals[8]
+        display_active = vals[9]
+        display_mode = vals[10]
+        temp_gpu = safeFloatCast(vals[11])
+        GPUs.append(GPU(deviceIds, uuid, gpuUtil, memTotal, memUsed, memFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu))
+    return GPUs  # (deviceIds, gpuUtil, memUtil)
+
+
+
+def getGPUProcesses():
+    """Get all gpu compute processes."""
+
+    global gpuUuidToIdMap
+    gpuUuidToIdMap = {}
+    try:
+        gpus = getGPUs()
+        for gpu in gpus:
+            gpuUuidToIdMap[gpu.uuid] = gpu.id
+        del gpus
+    except Exception:
+        pass
+
+
+    nvidia_smi = getNvidiaSmiCmd()
+    try:
+        p = subprocess.run([
+            nvidia_smi,
+            "--query-compute-apps=pid,process_name,gpu_uuid,gpu_name,used_memory",
+            "--format=csv,noheader,nounits"
+        ], stdout=subprocess.PIPE, encoding='utf8')
+        stdout, stderror = p.stdout, p.stderr
+    except Exception:
+        return []
+    output = stdout
+    # Parse output: split on line break, one CSV row per compute process
+    lines = output.split(os.linesep)
+    numProcesses = len(lines) - 1
+    processes = []
+    for g in range(numProcesses):
+        line = lines[g]
+        vals = line.split(', ')
+        pid = int(vals[0])
+        processName = vals[1]
+        gpuUuid = vals[2]
+        gpuName = vals[3]
+        usedMemory = safeFloatCast(vals[4])
+        gpuId = gpuUuidToIdMap.get(gpuUuid)  # may be None if the UUID was not seen by getGPUs()
+        if gpuId is None:
+            gpuId = -1
+
+        # get uid and uname owner of the pid
+        try:
+            p = subprocess.run(['ps', f'-p{pid}', '-oruid=,ruser='],
+                               stdout=subprocess.PIPE, encoding='utf8')
+            uid, uname = p.stdout.split()
+            uid = int(uid)
+        except Exception:
+            uid, uname = -1, ''
+
+        processes.append(GPUProcess(pid, processName, gpuId, gpuUuid,
+                                    gpuName, usedMemory, uid, uname))
+    return processes
+
+
+def getAvailable(order = 'first', limit=1, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]):
+    # order = first | last | random | load | memory
+    # first --> select the GPU with the lowest ID (DEFAULT)
+    # last --> select the GPU with the highest ID
+    # random --> select a random available GPU
+    # load --> select the GPU with the lowest load
+    # memory --> select the GPU with the most memory
available + # limit = 1 (DEFAULT), 2, ..., Inf + # Limit sets the upper limit for the number of GPUs to return. E.g. if limit = 2, but only one is available, only one is returned. + # Get device IDs, load and memory usage + GPUs = getGPUs() + # Determine, which GPUs are available + GPUavailability = getAvailability(GPUs, maxLoad=maxLoad, maxMemory=maxMemory, memoryFree=memoryFree, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID) + availAbleGPUindex = [idx for idx in range(0,len(GPUavailability)) if (GPUavailability[idx] == 1)] + # Discard unavailable GPUs + GPUs = [GPUs[g] for g in availAbleGPUindex] + # Sort available GPUs according to the order argument + if (order == 'first'): + GPUs.sort(key=lambda x: float('inf') if math.isnan(x.id) else x.id, reverse=False) + elif (order == 'last'): + GPUs.sort(key=lambda x: float('-inf') if math.isnan(x.id) else x.id, reverse=True) + elif (order == 'random'): + GPUs = [GPUs[g] for g in random.sample(range(0,len(GPUs)),len(GPUs))] + elif (order == 'load'): + GPUs.sort(key=lambda x: float('inf') if math.isnan(x.load) else x.load, reverse=False) + elif (order == 'memory'): + GPUs.sort(key=lambda x: float('inf') if math.isnan(x.memoryUtil) else x.memoryUtil, reverse=False) + # Extract the number of desired GPUs, but limited to the total number of available GPUs + GPUs = GPUs[0:min(limit, len(GPUs))] + # Extract the device IDs from the GPUs and return them + deviceIds = [gpu.id for gpu in GPUs] + return deviceIds +#def getAvailability(GPUs, maxLoad = 0.5, maxMemory = 0.5, includeNan = False): +# # Determine, which GPUs are available +# GPUavailability = np.zeros(len(GPUs)) +# for i in range(len(GPUs)): +# if (GPUs[i].load < maxLoad or (includeNan and np.isnan(GPUs[i].load))) and (GPUs[i].memoryUtil < maxMemory or (includeNan and np.isnan(GPUs[i].memoryUtil))): +# GPUavailability[i] = 1 +def getAvailability(GPUs, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]): + # Determine, which GPUs are available + GPUavailability = [1 if (gpu.memoryFree>=memoryFree) and (gpu.load < maxLoad or (includeNan and math.isnan(gpu.load))) and (gpu.memoryUtil < maxMemory or (includeNan and math.isnan(gpu.memoryUtil))) and ((gpu.id not in excludeID) and (gpu.uuid not in excludeUUID)) else 0 for gpu in GPUs] + return GPUavailability +def getFirstAvailable(order = 'first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900, verbose=False, includeNan=False, excludeID=[], excludeUUID=[]): + #GPUs = getGPUs() + #firstAvailableGPU = np.NaN + #for i in range(len(GPUs)): + # if (GPUs[i].load < maxLoad) & (GPUs[i].memory < maxMemory): + # firstAvailableGPU = GPUs[i].id + # break + #return firstAvailableGPU + for i in range(attempts): + if (verbose): + print('Attempting (' + str(i+1) + '/' + str(attempts) + ') to locate available GPU.') + # Get first available GPU + available = getAvailable(order=order, limit=1, maxLoad=maxLoad, maxMemory=maxMemory, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID) + # If an available GPU was found, break for loop. + if (available): + if (verbose): + print('GPU ' + str(available) + ' located!') + break + # If this is not the last attempt, sleep for 'interval' seconds + if (i != attempts-1): + time.sleep(interval) + # Check if an GPU was found, or if the attempts simply ran out. 
Throw error, if no GPU was found + if (not(available)): + raise RuntimeError('Could not find an available GPU after ' + str(attempts) + ' attempts with ' + str(interval) + ' seconds interval.') + # Return found GPU + return available +def showUtilization(all=False, attrList=None, useOldCode=False): + GPUs = getGPUs() + if (all): + if (useOldCode): + print(' ID | Name | Serial | UUID || GPU util. | Memory util. || Memory total | Memory used | Memory free || Display mode | Display active |') + print('------------------------------------------------------------------------------------------------------------------------------') + for gpu in GPUs: + print(' {0:2d} | {1:s} | {2:s} | {3:s} || {4:3.0f}% | {5:3.0f}% || {6:.0f}MB | {7:.0f}MB | {8:.0f}MB || {9:s} | {10:s}'.format(gpu.id,gpu.name,gpu.serial,gpu.uuid,gpu.load*100,gpu.memoryUtil*100,gpu.memoryTotal,gpu.memoryUsed,gpu.memoryFree,gpu.display_mode,gpu.display_active)) + else: + attrList = [[{'attr':'id','name':'ID'}, + {'attr':'name','name':'Name'}, + {'attr':'serial','name':'Serial'}, + {'attr':'uuid','name':'UUID'}], + [{'attr':'temperature','name':'GPU temp.','suffix':'C','transform': lambda x: x,'precision':0}, + {'attr':'load','name':'GPU util.','suffix':'%','transform': lambda x: x*100,'precision':0}, + {'attr':'memoryUtil','name':'Memory util.','suffix':'%','transform': lambda x: x*100,'precision':0}], + [{'attr':'memoryTotal','name':'Memory total','suffix':'MB','precision':0}, + {'attr':'memoryUsed','name':'Memory used','suffix':'MB','precision':0}, + {'attr':'memoryFree','name':'Memory free','suffix':'MB','precision':0}], + [{'attr':'display_mode','name':'Display mode'}, + {'attr':'display_active','name':'Display active'}]] + + else: + if (useOldCode): + print(' ID GPU MEM') + print('--------------') + for gpu in GPUs: + print(' {0:2d} {1:3.0f}% {2:3.0f}%'.format(gpu.id, gpu.load*100, gpu.memoryUtil*100)) + else: + attrList = [[{'attr':'id','name':'ID'}, + {'attr':'load','name':'GPU','suffix':'%','transform': lambda x: x*100,'precision':0}, + {'attr':'memoryUtil','name':'MEM','suffix':'%','transform': lambda x: x*100,'precision':0}], + ] + + if (not useOldCode): + if (attrList is not None): + headerString = '' + GPUstrings = ['']*len(GPUs) + for attrGroup in attrList: + #print(attrGroup) + for attrDict in attrGroup: + headerString = headerString + '| ' + attrDict['name'] + ' ' + headerWidth = len(attrDict['name']) + minWidth = len(attrDict['name']) + + attrPrecision = '.' 
+ str(attrDict['precision']) if ('precision' in attrDict.keys()) else ''
+                attrSuffix = str(attrDict['suffix']) if ('suffix' in attrDict.keys()) else ''
+                attrTransform = attrDict['transform'] if ('transform' in attrDict.keys()) else lambda x : x
+                for gpu in GPUs:
+                    attr = getattr(gpu,attrDict['attr'])
+
+                    attr = attrTransform(attr)
+
+                    if (isinstance(attr,float)):
+                        attrStr = ('{0:' + attrPrecision + 'f}').format(attr)
+                    elif (isinstance(attr,int)):
+                        attrStr = ('{0:d}').format(attr)
+                    elif (isinstance(attr,str)):
+                        attrStr = attr;
+                    elif (sys.version_info[0] == 2):
+                        if (isinstance(attr,unicode)):
+                            attrStr = attr.encode('ascii','ignore')
+                    else:
+                        raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
+
+                    attrStr += attrSuffix
+
+                    minWidth = max(minWidth,len(attrStr))
+
+                headerString += ' '*max(0,minWidth-headerWidth)
+
+                minWidthStr = str(minWidth - len(attrSuffix))
+
+                for gpuIdx,gpu in enumerate(GPUs):
+                    attr = getattr(gpu,attrDict['attr'])
+
+                    attr = attrTransform(attr)
+
+                    if (isinstance(attr,float)):
+                        attrStr = ('{0:'+ minWidthStr + attrPrecision + 'f}').format(attr)
+                    elif (isinstance(attr,int)):
+                        attrStr = ('{0:' + minWidthStr + 'd}').format(attr)
+                    elif (isinstance(attr,str)):
+                        attrStr = ('{0:' + minWidthStr + 's}').format(attr);
+                    elif (sys.version_info[0] == 2):
+                        if (isinstance(attr,unicode)):
+                            attrStr = ('{0:' + minWidthStr + 's}').format(attr.encode('ascii','ignore'))
+                    else:
+                        raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
+
+                    attrStr += attrSuffix
+
+                    GPUstrings[gpuIdx] += '| ' + attrStr + ' '
+
+            headerString = headerString + '|'
+            for gpuIdx,gpu in enumerate(GPUs):
+                GPUstrings[gpuIdx] += '|'
+
+            headerSpacingString = '-' * len(headerString)
+            print(headerString)
+            print(headerSpacingString)
+            for GPUstring in GPUstrings:
+                print(GPUstring)
+
+
+# Generate gpu uuid to id map
+gpuUuidToIdMap = {}
+try:
+    gpus = getGPUs()
+    for gpu in gpus:
+        gpuUuidToIdMap[gpu.uuid] = gpu.id
+    del gpus
+except Exception:
+    pass
+def getGPUInfos():
+    ### Returns gpus: a list, one object per GPU,
+    ### with the attributes 'id', 'load', 'memoryFree',
+    ### 'memoryTotal', 'memoryUsed', 'memoryUtil', 'name', 'serial', 'temperature', 'uuid', process.
+    ### process is a list, one object per compute process on that GPU,
+    ### with the attributes 'gpuId', 'gpuName', 'gpuUuid',
+    ### 'gpuid', 'pid', 'processName', 'uid', 'uname', 'usedMemory'.
+    gpus = getGPUs()
+    gpuUuidToIdMap={}
+    for gpu in gpus:
+        gpuUuidToIdMap[gpu.uuid] = gpu.id
+        gpu.process=[]
+    indexx = [x.id for x in gpus ]
+
+    process = getGPUProcesses()
+    for pre in process:
+        pre.gpuid = gpuUuidToIdMap[pre.gpuUuid]
+        gpuId = indexx.index(pre.gpuid )
+        gpus[gpuId].process.append(pre )
+    return gpus
+
+def get_available_gpu(gpuStatus):
+    ## Check whether any GPU is idle; return its id if so, otherwise return None.
+    cuda = None
+    for gpus in gpuStatus:
+        if len(gpus.process) == 0:
+            cuda = gpus.id
+            return cuda
+    return cuda
+def get_whether_gpuProcess():
+    ## Return True if no GPU is running any compute process, False otherwise.
+    gpuStatus = getGPUInfos()
+    gpuProcess = True
+    for gpus in gpuStatus:
+        if len(gpus.process) != 0:
+            gpuProcess = False
+    return gpuProcess
+
+def get_offlineProcess_gpu(gpuStatus,pidInfos):
+    gpu_onLine = []
+    for gpu in gpuStatus:
+        for gpuProcess in gpu.process:
+            pid = gpuProcess.pid
+            if pid in pidInfos.keys():
+                pidType = pidInfos[pid]['type']
+                if pidType == 'onLine':
+                    gpu_onLine.append(gpu)
+    gpu_offLine = set(gpuStatus) - set(gpu_onLine)
+    return list(gpu_offLine)
+def arrange_offlineProcess(gpuStatus,pidInfos,modelMemory=1500):
+    cudaArrange=[]
+    gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos)
+    for gpu in gpu_offLine:
+        leftMemory = gpu.memoryTotal*0.9 - gpu.memoryUsed
+        modelCnt = int(leftMemory // modelMemory)
+
+        cudaArrange.extend( [gpu.id] * modelCnt )
+    return cudaArrange
+def get_potential_gpu(gpuStatus,pidInfos):
+    ### Every GPU is busy with some computation; free up one card for an "online" task.
+    ### step1: check whether any card is free of "online" tasks
+
+    gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos)
+    if len(gpu_offLine) == 0 :
+        return False
+
+    ### step2: find the card with the fewest offline processes
+    offLineCnt = [ len(gpu.process) for gpu in gpu_offLine ]
+    minCntIndex = offLineCnt.index( min(offLineCnt))
+
+    pids = [x.pid for x in gpu_offLine[minCntIndex].process]
+    return {'cuda':gpu_offLine[minCntIndex].id,'pids':pids }
+if __name__=='__main__':
+    #pres = getGPUProcesses()
+    #print('###line404:',pres)
+    gpus = getGPUs()
+    for gpu in gpus:
+        gpuUuidToIdMap[gpu.uuid] = gpu.id
+        print(gpu)
+    print(gpuUuidToIdMap)
+    pres = getGPUProcesses()
+    print('###line404:',pres)
+    for pre in pres:
+        print('#'*20)
+        for ken in ['gpuName','gpuUuid','pid','processName','uid','uname','usedMemory' ]:
+            print(ken,' ',pre.__getattribute__(ken ))
+        print(' ')
+
+
diff --git a/segutils/__pycache__/GPUtils.cpython-38.pyc b/segutils/__pycache__/GPUtils.cpython-38.pyc
new file mode 100644
index 0000000..0873cfb
Binary files /dev/null and b/segutils/__pycache__/GPUtils.cpython-38.pyc differ
diff --git a/segutils/__pycache__/segWaterBuilding.cpython-38.pyc b/segutils/__pycache__/segWaterBuilding.cpython-38.pyc
new file mode 100644
index 0000000..05ab2a2
Binary files /dev/null and b/segutils/__pycache__/segWaterBuilding.cpython-38.pyc differ
diff --git a/segutils/__pycache__/segmodel.cpython-38.pyc b/segutils/__pycache__/segmodel.cpython-38.pyc
new file mode 100644
index 0000000..0c61eaf
Binary files /dev/null and b/segutils/__pycache__/segmodel.cpython-38.pyc differ
diff --git a/segutils/core/__init__.py b/segutils/core/__init__.py
new file mode 100644
index 0000000..453f410
--- /dev/null
+++ b/segutils/core/__init__.py
@@ -0,0 +1 @@
+from . import nn, models, utils, data
\ No newline at end of file
diff --git a/segutils/core/__pycache__/__init__.cpython-36.pyc b/segutils/core/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..6ab6e7d
Binary files /dev/null and b/segutils/core/__pycache__/__init__.cpython-36.pyc differ
diff --git a/segutils/core/__pycache__/__init__.cpython-38.pyc b/segutils/core/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000..e5e05bd
Binary files /dev/null and b/segutils/core/__pycache__/__init__.cpython-38.pyc differ
diff --git a/segutils/core/data/__init__.py b/segutils/core/data/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/segutils/core/data/__pycache__/__init__.cpython-36.pyc b/segutils/core/data/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..c18d558
Binary files /dev/null and b/segutils/core/data/__pycache__/__init__.cpython-36.pyc differ
diff --git a/segutils/core/data/__pycache__/__init__.cpython-38.pyc b/segutils/core/data/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000..55e4fe5
Binary files /dev/null and b/segutils/core/data/__pycache__/__init__.cpython-38.pyc differ
diff --git a/segutils/core/data/dataloader/__init__.py b/segutils/core/data/dataloader/__init__.py
new file mode 100644
index 0000000..b22f962
--- /dev/null
+++ b/segutils/core/data/dataloader/__init__.py
@@ -0,0 +1,23 @@
+"""
+This module provides data loaders and transformers for popular vision datasets.
+""" +from .mscoco import COCOSegmentation +from .cityscapes import CitySegmentation +from .ade import ADE20KSegmentation +from .pascal_voc import VOCSegmentation +from .pascal_aug import VOCAugSegmentation +from .sbu_shadow import SBUSegmentation + +datasets = { + 'ade20k': ADE20KSegmentation, + 'pascal_voc': VOCSegmentation, + 'pascal_aug': VOCAugSegmentation, + 'coco': COCOSegmentation, + 'citys': CitySegmentation, + 'sbu': SBUSegmentation, +} + + +def get_segmentation_dataset(name, **kwargs): + """Segmentation Datasets""" + return datasets[name.lower()](**kwargs) diff --git a/segutils/core/data/dataloader/__pycache__/__init__.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..5029d98 Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/__init__.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/ade.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/ade.cpython-36.pyc new file mode 100644 index 0000000..34b59dd Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/ade.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/cityscapes.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/cityscapes.cpython-36.pyc new file mode 100644 index 0000000..bdff2fc Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/cityscapes.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/mscoco.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/mscoco.cpython-36.pyc new file mode 100644 index 0000000..2c0d2e3 Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/mscoco.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/pascal_aug.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/pascal_aug.cpython-36.pyc new file mode 100644 index 0000000..fa3e95f Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/pascal_aug.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/pascal_voc.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/pascal_voc.cpython-36.pyc new file mode 100644 index 0000000..1743082 Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/pascal_voc.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/sbu_shadow.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/sbu_shadow.cpython-36.pyc new file mode 100644 index 0000000..1c3b63f Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/sbu_shadow.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/__pycache__/segbase.cpython-36.pyc b/segutils/core/data/dataloader/__pycache__/segbase.cpython-36.pyc new file mode 100644 index 0000000..e5e253b Binary files /dev/null and b/segutils/core/data/dataloader/__pycache__/segbase.cpython-36.pyc differ diff --git a/segutils/core/data/dataloader/ade.py b/segutils/core/data/dataloader/ade.py new file mode 100644 index 0000000..522ecbd --- /dev/null +++ b/segutils/core/data/dataloader/ade.py @@ -0,0 +1,172 @@ +"""Pascal ADE20K Semantic Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class ADE20KSegmentation(SegmentationDataset): + """ADE20K Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to ADE20K folder. 
Default is './datasets/ade' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = ADE20KSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'ADEChallengeData2016' + NUM_CLASS = 150 + + def __init__(self, root='../datasets/ade', split='test', mode=None, transform=None, **kwargs): + super(ADE20KSegmentation, self).__init__(root, split, mode, transform, **kwargs) + root = os.path.join(root, self.BASE_DIR) + assert os.path.exists(root), "Please setup the dataset using ../datasets/ade20k.py" + self.images, self.masks = _get_ade20k_pairs(root, split) + assert (len(self.images) == len(self.masks)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + print('Found {} images in the folder {}'.format(len(self.images), root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and to Tensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + return torch.LongTensor(np.array(mask).astype('int32') - 1) + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 1 + + @property + def classes(self): + """Category names.""" + return ("wall", "building, edifice", "sky", "floor, flooring", "tree", + "ceiling", "road, route", "bed", "windowpane, window", "grass", + "cabinet", "sidewalk, pavement", + "person, individual, someone, somebody, mortal, soul", + "earth, ground", "door, double door", "table", "mountain, mount", + "plant, flora, plant life", "curtain, drape, drapery, mantle, pall", + "chair", "car, auto, automobile, machine, motorcar", + "water", "painting, picture", "sofa, couch, lounge", "shelf", + "house", "sea", "mirror", "rug, carpet, carpeting", "field", "armchair", + "seat", "fence, fencing", "desk", "rock, stone", "wardrobe, closet, press", + "lamp", "bathtub, bathing tub, bath, tub", "railing, rail", "cushion", + "base, pedestal, stand", "box", "column, pillar", "signboard, sign", + "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", + "skyscraper", "fireplace, hearth, open fireplace", "refrigerator, icebox", + "grandstand, covered stand", "path", "stairs, steps", "runway", + "case, display case, showcase, vitrine", + "pool table, billiard table, snooker table", "pillow", + "screen door, screen", "stairway, staircase", "river", "bridge, span", + "bookcase", "blind, screen", "coffee table, cocktail table", + "toilet, 
can, commode, crapper, pot, potty, stool, throne", + "flower", "book", "hill", "bench", "countertop", + "stove, kitchen stove, range, kitchen range, cooking stove", + "palm, palm tree", "kitchen island", + "computer, computing machine, computing device, data processor, " + "electronic computer, information processing system", + "swivel chair", "boat", "bar", "arcade machine", + "hovel, hut, hutch, shack, shanty", + "bus, autobus, coach, charabanc, double-decker, jitney, motorbus, " + "motorcoach, omnibus, passenger vehicle", + "towel", "light, light source", "truck, motortruck", "tower", + "chandelier, pendant, pendent", "awning, sunshade, sunblind", + "streetlight, street lamp", "booth, cubicle, stall, kiosk", + "television receiver, television, television set, tv, tv set, idiot " + "box, boob tube, telly, goggle box", + "airplane, aeroplane, plane", "dirt track", + "apparel, wearing apparel, dress, clothes", + "pole", "land, ground, soil", + "bannister, banister, balustrade, balusters, handrail", + "escalator, moving staircase, moving stairway", + "ottoman, pouf, pouffe, puff, hassock", + "bottle", "buffet, counter, sideboard", + "poster, posting, placard, notice, bill, card", + "stage", "van", "ship", "fountain", + "conveyer belt, conveyor belt, conveyer, conveyor, transporter", + "canopy", "washer, automatic washer, washing machine", + "plaything, toy", "swimming pool, swimming bath, natatorium", + "stool", "barrel, cask", "basket, handbasket", "waterfall, falls", + "tent, collapsible shelter", "bag", "minibike, motorbike", "cradle", + "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", + "trade name, brand name, brand, marque", "microwave, microwave oven", + "pot, flowerpot", "animal, animate being, beast, brute, creature, fauna", + "bicycle, bike, wheel, cycle", "lake", + "dishwasher, dish washer, dishwashing machine", + "screen, silver screen, projection screen", + "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", + "traffic light, traffic signal, stoplight", "tray", + "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, " + "dustbin, trash barrel, trash bin", + "fan", "pier, wharf, wharfage, dock", "crt screen", + "plate", "monitor, monitoring device", "bulletin board, notice board", + "shower", "radiator", "glass, drinking glass", "clock", "flag") + + +def _get_ade20k_pairs(folder, mode='train'): + img_paths = [] + mask_paths = [] + if mode == 'train': + img_folder = os.path.join(folder, 'images/training') + mask_folder = os.path.join(folder, 'annotations/training') + else: + img_folder = os.path.join(folder, 'images/validation') + mask_folder = os.path.join(folder, 'annotations/validation') + for filename in os.listdir(img_folder): + basename, _ = os.path.splitext(filename) + if filename.endswith(".jpg"): + imgpath = os.path.join(img_folder, filename) + maskname = basename + '.png' + maskpath = os.path.join(mask_folder, maskname) + if os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask:', maskpath) + + return img_paths, mask_paths + + +if __name__ == '__main__': + train_dataset = ADE20KSegmentation() diff --git a/segutils/core/data/dataloader/cityscapes.py b/segutils/core/data/dataloader/cityscapes.py new file mode 100644 index 0000000..7d5de71 --- /dev/null +++ b/segutils/core/data/dataloader/cityscapes.py @@ -0,0 +1,137 @@ +"""Prepare Cityscapes dataset""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import 
SegmentationDataset + + +class CitySegmentation(SegmentationDataset): + """Cityscapes Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to Cityscapes folder. Default is './datasets/citys' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = CitySegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'cityscapes' + NUM_CLASS = 19 + + def __init__(self, root='../datasets/citys', split='train', mode=None, transform=None, **kwargs): + super(CitySegmentation, self).__init__(root, split, mode, transform, **kwargs) + # self.root = os.path.join(root, self.BASE_DIR) + assert os.path.exists(self.root), "Please setup the dataset using ../datasets/cityscapes.py" + self.images, self.mask_paths = _get_city_pairs(self.root, self.split) + assert (len(self.images) == len(self.mask_paths)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + self._key = np.array([-1, -1, -1, -1, -1, -1, + -1, -1, 0, 1, -1, -1, + 2, 3, 4, -1, -1, -1, + 5, -1, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, + -1, -1, 16, 17, 18]) + self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32') + + def _class_to_index(self, mask): + # assert the value + values = np.unique(mask) + for value in values: + assert (value in self._mapping) + index = np.digitize(mask.ravel(), self._mapping, right=True) + return self._key[index].reshape(mask.shape) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.mask_paths[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + target = self._class_to_index(np.array(mask).astype('int32')) + return torch.LongTensor(np.array(target).astype('int32')) + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 0 + + +def _get_city_pairs(folder, split='train'): + def get_path_pairs(img_folder, mask_folder): + img_paths = [] + mask_paths = [] + for root, _, files in os.walk(img_folder): + for filename in files: + if filename.endswith('.png'): + imgpath = os.path.join(root, filename) + foldername = os.path.basename(os.path.dirname(imgpath)) + maskname = filename.replace('leftImg8bit', 'gtFine_labelIds') + maskpath = os.path.join(mask_folder, foldername, maskname) + if os.path.isfile(imgpath) and os.path.isfile(maskpath): + img_paths.append(imgpath) + 
mask_paths.append(maskpath) + else: + print('cannot find the mask or image:', imgpath, maskpath) + print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) + return img_paths, mask_paths + + if split in ('train', 'val'): + img_folder = os.path.join(folder, 'leftImg8bit/' + split) + mask_folder = os.path.join(folder, 'gtFine/' + split) + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + return img_paths, mask_paths + else: + assert split == 'trainval' + print('trainval set') + train_img_folder = os.path.join(folder, 'leftImg8bit/train') + train_mask_folder = os.path.join(folder, 'gtFine/train') + val_img_folder = os.path.join(folder, 'leftImg8bit/val') + val_mask_folder = os.path.join(folder, 'gtFine/val') + train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder) + val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder) + img_paths = train_img_paths + val_img_paths + mask_paths = train_mask_paths + val_mask_paths + return img_paths, mask_paths + + +if __name__ == '__main__': + dataset = CitySegmentation() diff --git a/segutils/core/data/dataloader/lip_parsing.py b/segutils/core/data/dataloader/lip_parsing.py new file mode 100644 index 0000000..245beda --- /dev/null +++ b/segutils/core/data/dataloader/lip_parsing.py @@ -0,0 +1,90 @@ +"""Look into Person Dataset""" +import os +import torch +import numpy as np + +from PIL import Image +from core.data.dataloader.segbase import SegmentationDataset + + +class LIPSegmentation(SegmentationDataset): + """Look into person parsing dataset """ + + BASE_DIR = 'LIP' + NUM_CLASS = 20 + + def __init__(self, root='../datasets/LIP', split='train', mode=None, transform=None, **kwargs): + super(LIPSegmentation, self).__init__(root, split, mode, transform, **kwargs) + _trainval_image_dir = os.path.join(root, 'TrainVal_images') + _testing_image_dir = os.path.join(root, 'Testing_images') + _trainval_mask_dir = os.path.join(root, 'TrainVal_parsing_annotations') + if split == 'train': + _image_dir = os.path.join(_trainval_image_dir, 'train_images') + _mask_dir = os.path.join(_trainval_mask_dir, 'train_segmentations') + _split_f = os.path.join(_trainval_image_dir, 'train_id.txt') + elif split == 'val': + _image_dir = os.path.join(_trainval_image_dir, 'val_images') + _mask_dir = os.path.join(_trainval_mask_dir, 'val_segmentations') + _split_f = os.path.join(_trainval_image_dir, 'val_id.txt') + elif split == 'test': + _image_dir = os.path.join(_testing_image_dir, 'testing_images') + _split_f = os.path.join(_testing_image_dir, 'test_id.txt') + else: + raise RuntimeError('Unknown dataset split.') + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), 'r') as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + '.jpg') + assert os.path.isfile(_image) + self.images.append(_image) + if split != 'test': + _mask = os.path.join(_mask_dir, line.rstrip('\n') + '.png') + assert os.path.isfile(_mask) + self.masks.append(_mask) + + if split != 'test': + assert (len(self.images) == len(self.masks)) + print('Found {} {} images in the folder {}'.format(len(self.images), split, root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchronized transform + if self.mode == 'train': + img, mask = 
self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + + return img, mask, os.path.basename(self.images[index]) + + def __len__(self): + return len(self.images) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + return torch.from_numpy(target).long() + + @property + def classes(self): + """Category name.""" + return ('background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes', + 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt', + 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', + 'rightShoe') + + +if __name__ == '__main__': + dataset = LIPSegmentation(base_size=280, crop_size=256) \ No newline at end of file diff --git a/segutils/core/data/dataloader/mscoco.py b/segutils/core/data/dataloader/mscoco.py new file mode 100644 index 0000000..6e280c8 --- /dev/null +++ b/segutils/core/data/dataloader/mscoco.py @@ -0,0 +1,136 @@ +"""MSCOCO Semantic Segmentation pretraining for VOC.""" +import os +import pickle +import torch +import numpy as np + +from tqdm import trange +from PIL import Image +from .segbase import SegmentationDataset + + +class COCOSegmentation(SegmentationDataset): + """COCO Semantic Segmentation Dataset for VOC Pre-training. + + Parameters + ---------- + root : string + Path to ADE20K folder. Default is './datasets/coco' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = COCOSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, + 1, 64, 20, 63, 7, 72] + NUM_CLASS = 21 + + def __init__(self, root='../datasets/coco', split='train', mode=None, transform=None, **kwargs): + super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs) + # lazy import pycocotools + from pycocotools.coco import COCO + from pycocotools import mask + if split == 'train': + print('train set') + ann_file = os.path.join(root, 'annotations/instances_train2017.json') + ids_file = os.path.join(root, 'annotations/train_ids.mx') + self.root = os.path.join(root, 'train2017') + else: + print('val set') + ann_file = os.path.join(root, 'annotations/instances_val2017.json') + ids_file = os.path.join(root, 'annotations/val_ids.mx') + self.root = os.path.join(root, 'val2017') + self.coco = COCO(ann_file) + self.coco_mask = mask + if os.path.exists(ids_file): + with open(ids_file, 'rb') as f: + self.ids = pickle.load(f) + else: + ids = list(self.coco.imgs.keys()) + self.ids = self._preprocess(ids, ids_file) + self.transform = transform + + def __getitem__(self, index): + coco = self.coco + img_id = self.ids[index] + img_metadata = coco.loadImgs(img_id)[0] + path = img_metadata['file_name'] + img = Image.open(os.path.join(self.root, path)).convert('RGB') + cocotarget = 
coco.loadAnns(coco.getAnnIds(imgIds=img_id))
+        mask = Image.fromarray(self._gen_seg_mask(
+            cocotarget, img_metadata['height'], img_metadata['width']))
+        # synchronized transform
+        if self.mode == 'train':
+            img, mask = self._sync_transform(img, mask)
+        elif self.mode == 'val':
+            img, mask = self._val_sync_transform(img, mask)
+        else:
+            assert self.mode == 'testval'
+            img, mask = self._img_transform(img), self._mask_transform(mask)
+        # general resize, normalize and toTensor
+        if self.transform is not None:
+            img = self.transform(img)
+        return img, mask, os.path.basename(path)
+
+    def _mask_transform(self, mask):
+        return torch.LongTensor(np.array(mask).astype('int32'))
+
+    def _gen_seg_mask(self, target, h, w):
+        mask = np.zeros((h, w), dtype=np.uint8)
+        coco_mask = self.coco_mask
+        for instance in target:
+            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
+            m = coco_mask.decode(rle)
+            cat = instance['category_id']
+            if cat in self.CAT_LIST:
+                c = self.CAT_LIST.index(cat)
+            else:
+                continue
+            # only write where the mask is still empty, so earlier instances
+            # keep priority over later, overlapping ones
+            if len(m.shape) < 3:
+                mask[:, :] += (mask == 0) * (m * c)
+            else:
+                mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
+        return mask
+
+    def _preprocess(self, ids, ids_file):
+        print("Preprocessing mask, this will take a while. " +
+              "But don't worry, it only runs once for each split.")
+        tbar = trange(len(ids))
+        new_ids = []
+        for i in tbar:
+            img_id = ids[i]
+            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
+            img_metadata = self.coco.loadImgs(img_id)[0]
+            mask = self._gen_seg_mask(cocotarget, img_metadata['height'], img_metadata['width'])
+            # keep only images whose masks cover more than 1k pixels
+            if (mask > 0).sum() > 1000:
+                new_ids.append(img_id)
+            tbar.set_description('Doing: {}/{}, got {} qualified images'.
+                                 format(i, len(ids), len(new_ids)))
+        print('Found number of qualified images: ', len(new_ids))
+        with open(ids_file, 'wb') as f:
+            pickle.dump(new_ids, f)
+        return new_ids
+
+    @property
+    def classes(self):
+        """Category names."""
+        return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
+                'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
+                'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
+                'tv')
diff --git a/segutils/core/data/dataloader/pascal_aug.py b/segutils/core/data/dataloader/pascal_aug.py
new file mode 100644
index 0000000..1cbe238
--- /dev/null
+++ b/segutils/core/data/dataloader/pascal_aug.py
@@ -0,0 +1,104 @@
+"""Pascal Augmented VOC Semantic Segmentation Dataset."""
+import os
+import torch
+import scipy.io as sio
+import numpy as np
+
+from PIL import Image
+from .segbase import SegmentationDataset
+
+
+class VOCAugSegmentation(SegmentationDataset):
+    """Pascal VOC Augmented Semantic Segmentation Dataset.
+
+    Parameters
+    ----------
+    root : string
+        Path to VOCdevkit folder.
Default is './datasets/voc' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]), + >>> ]) + >>> # Create Dataset + >>> trainset = VOCAugSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'VOCaug/dataset/' + NUM_CLASS = 21 + + def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs): + super(VOCAugSegmentation, self).__init__(root, split, mode, transform, **kwargs) + # train/val/test splits are pre-cut + _voc_root = os.path.join(root, self.BASE_DIR) + _mask_dir = os.path.join(_voc_root, 'cls') + _image_dir = os.path.join(_voc_root, 'img') + if split == 'train': + _split_f = os.path.join(_voc_root, 'trainval.txt') + elif split == 'val': + _split_f = os.path.join(_voc_root, 'val.txt') + else: + raise RuntimeError('Unknown dataset split: {}'.format(split)) + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), "r") as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg") + assert os.path.isfile(_image) + self.images.append(_image) + _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".mat") + assert os.path.isfile(_mask) + self.masks.append(_mask) + + assert (len(self.images) == len(self.masks)) + print('Found {} images in the folder {}'.format(len(self.images), _voc_root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + target = self._load_mat(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, target = self._sync_transform(img, target) + elif self.mode == 'val': + img, target = self._val_sync_transform(img, target) + else: + raise RuntimeError('unknown mode for dataloader: {}'.format(self.mode)) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, target, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + return torch.LongTensor(np.array(mask).astype('int32')) + + def _load_mat(self, filename): + mat = sio.loadmat(filename, mat_dtype=True, squeeze_me=True, struct_as_record=False) + mask = mat['GTcls'].Segmentation + return Image.fromarray(mask) + + def __len__(self): + return len(self.images) + + @property + def classes(self): + """Category names.""" + return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train', + 'tv') + + +if __name__ == '__main__': + dataset = VOCAugSegmentation() \ No newline at end of file diff --git a/segutils/core/data/dataloader/pascal_voc.py b/segutils/core/data/dataloader/pascal_voc.py new file mode 100644 index 0000000..94db82c --- /dev/null +++ b/segutils/core/data/dataloader/pascal_voc.py @@ -0,0 +1,112 @@ +"""Pascal VOC Semantic Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class VOCSegmentation(SegmentationDataset): + """Pascal VOC Semantic Segmentation Dataset. 
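+
+    Expects the standard VOCdevkit layout: images in VOC2012/JPEGImages,
+    masks in VOC2012/SegmentationClass, and split files in
+    VOC2012/ImageSets/Segmentation.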
+ + Parameters + ---------- + root : string + Path to VOCdevkit folder. Default is './datasets/VOCdevkit' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]), + >>> ]) + >>> # Create Dataset + >>> trainset = VOCSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'VOC2012' + NUM_CLASS = 21 + + def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs): + super(VOCSegmentation, self).__init__(root, split, mode, transform, **kwargs) + _voc_root = os.path.join(root, self.BASE_DIR) + _mask_dir = os.path.join(_voc_root, 'SegmentationClass') + _image_dir = os.path.join(_voc_root, 'JPEGImages') + # train/val/test splits are pre-cut + _splits_dir = os.path.join(_voc_root, 'ImageSets/Segmentation') + if split == 'train': + _split_f = os.path.join(_splits_dir, 'train.txt') + elif split == 'val': + _split_f = os.path.join(_splits_dir, 'val.txt') + elif split == 'test': + _split_f = os.path.join(_splits_dir, 'test.txt') + else: + raise RuntimeError('Unknown dataset split.') + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), "r") as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg") + assert os.path.isfile(_image) + self.images.append(_image) + if split != 'test': + _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".png") + assert os.path.isfile(_mask) + self.masks.append(_mask) + + if split != 'test': + assert (len(self.images) == len(self.masks)) + print('Found {} images in the folder {}'.format(len(self.images), _voc_root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchronized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + + return img, mask, os.path.basename(self.images[index]) + + def __len__(self): + return len(self.images) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + target[target == 255] = -1 + return torch.from_numpy(target).long() + + @property + def classes(self): + """Category names.""" + return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle', + 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', + 'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train', + 'tv') + + +if __name__ == '__main__': + dataset = VOCSegmentation() \ No newline at end of file diff --git a/segutils/core/data/dataloader/sbu_shadow.py b/segutils/core/data/dataloader/sbu_shadow.py new file mode 100644 index 0000000..0cf4ca9 --- /dev/null +++ b/segutils/core/data/dataloader/sbu_shadow.py @@ 
-0,0 +1,88 @@ +"""SBU Shadow Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class SBUSegmentation(SegmentationDataset): + """SBU Shadow Segmentation Dataset + """ + NUM_CLASS = 2 + + def __init__(self, root='../datasets/sbu', split='train', mode=None, transform=None, **kwargs): + super(SBUSegmentation, self).__init__(root, split, mode, transform, **kwargs) + assert os.path.exists(self.root) + self.images, self.masks = _get_sbu_pairs(self.root, self.split) + assert (len(self.images) == len(self.masks)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + target[target > 0] = 1 + return torch.from_numpy(target).long() + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 0 + + +def _get_sbu_pairs(folder, split='train'): + def get_path_pairs(img_folder, mask_folder): + img_paths = [] + mask_paths = [] + for root, _, files in os.walk(img_folder): + print(root) + for filename in files: + if filename.endswith('.jpg'): + imgpath = os.path.join(root, filename) + maskname = filename.replace('.jpg', '.png') + maskpath = os.path.join(mask_folder, maskname) + if os.path.isfile(imgpath) and os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask or image:', imgpath, maskpath) + print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) + return img_paths, mask_paths + + if split == 'train': + img_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowImages') + mask_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowMasks') + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + else: + assert split in ('val', 'test') + img_folder = os.path.join(folder, 'SBU-Test/ShadowImages') + mask_folder = os.path.join(folder, 'SBU-Test/ShadowMasks') + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + return img_paths, mask_paths + + +if __name__ == '__main__': + dataset = SBUSegmentation(base_size=280, crop_size=256) \ No newline at end of file diff --git a/segutils/core/data/dataloader/segbase.py b/segutils/core/data/dataloader/segbase.py new file mode 100644 index 0000000..823436d --- /dev/null +++ b/segutils/core/data/dataloader/segbase.py @@ -0,0 +1,93 @@ +"""Base segmentation dataset""" +import random +import numpy as np + +from PIL import Image, ImageOps, ImageFilter + +__all__ = ['SegmentationDataset'] + + +class SegmentationDataset(object): + """Segmentation Base Dataset""" + + def __init__(self, root, split, mode, transform, base_size=520, crop_size=480): + super(SegmentationDataset, self).__init__() + self.root = root + 
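# base_size is the reference short edge for the random rescale in
+        # _sync_transform below (sampled from [0.5*base_size, 2.0*base_size]);
+        # crop_size is the side of the square patch the transforms return. +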
self.transform = transform
+        self.split = split
+        self.mode = mode if mode is not None else split
+        self.base_size = base_size
+        self.crop_size = crop_size
+
+    # deterministic resize + center crop used for validation
+    def _val_sync_transform(self, img, mask):
+        outsize = self.crop_size
+        short_size = outsize
+        w, h = img.size
+        if w > h:
+            oh = short_size
+            ow = int(1.0 * w * oh / h)
+        else:
+            ow = short_size
+            oh = int(1.0 * h * ow / w)
+        img = img.resize((ow, oh), Image.BILINEAR)
+        mask = mask.resize((ow, oh), Image.NEAREST)
+        # center crop
+        w, h = img.size
+        x1 = int(round((w - outsize) / 2.))
+        y1 = int(round((h - outsize) / 2.))
+        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
+        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
+        # final transform
+        img, mask = self._img_transform(img), self._mask_transform(mask)
+        return img, mask
+
+    # random mirror / rescale / crop / blur augmentation used for training
+    def _sync_transform(self, img, mask):
+        # random mirror
+        if random.random() < 0.5:
+            img = img.transpose(Image.FLIP_LEFT_RIGHT)
+            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
+        crop_size = self.crop_size
+        # random scale (short edge)
+        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
+        w, h = img.size
+        if h > w:
+            ow = short_size
+            oh = int(1.0 * h * ow / w)
+        else:
+            oh = short_size
+            ow = int(1.0 * w * oh / h)
+        img = img.resize((ow, oh), Image.BILINEAR)
+        mask = mask.resize((ow, oh), Image.NEAREST)
+        # pad crop
+        if short_size < crop_size:
+            padh = crop_size - oh if oh < crop_size else 0
+            padw = crop_size - ow if ow < crop_size else 0
+            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
+            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
+        # random crop crop_size
+        w, h = img.size
+        x1 = random.randint(0, w - crop_size)
+        y1 = random.randint(0, h - crop_size)
+        img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
+        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
+        # gaussian blur as in PSP
+        if random.random() < 0.5:
+            img = img.filter(ImageFilter.GaussianBlur(radius=random.random()))
+        # final transform
+        img, mask = self._img_transform(img), self._mask_transform(mask)
+        return img, mask
+
+    def _img_transform(self, img):
+        return np.array(img)
+
+    def _mask_transform(self, mask):
+        return np.array(mask).astype('int32')
+
+    @property
+    def num_class(self):
+        """Number of categories."""
+        return self.NUM_CLASS
+
+    @property
+    def pred_offset(self):
+        return 0
diff --git a/segutils/core/data/dataloader/utils.py b/segutils/core/data/dataloader/utils.py
new file mode 100644
index 0000000..c0bd1ad
--- /dev/null
+++ b/segutils/core/data/dataloader/utils.py
@@ -0,0 +1,69 @@
+import os
+import hashlib
+import errno
+import tarfile
+from six.moves import urllib
+from torch.utils.model_zoo import tqdm
+
+def gen_bar_updater():
+    pbar = tqdm(total=None)
+
+    def bar_update(count, block_size, total_size):
+        if pbar.total is None and total_size:
+            pbar.total = total_size
+        progress_bytes = count * block_size
+        pbar.update(progress_bytes - pbar.n)
+
+    return bar_update
+
+def check_integrity(fpath, md5=None):
+    if md5 is None:
+        return True
+    if not os.path.isfile(fpath):
+        return False
+    md5o = hashlib.md5()
+    with open(fpath, 'rb') as f:
+        # read in 1MB chunks
+        for chunk in iter(lambda: f.read(1024 * 1024), b''):
+            md5o.update(chunk)
+    md5c = md5o.hexdigest()
+    if md5c != md5:
+        return False
+    return True
+
+def makedir_exist_ok(dirpath):
+    try:
+        os.makedirs(dirpath)
+    except OSError as e:
+        if e.errno == errno.EEXIST:
+            pass
+        else:
+            raise
+
+def download_url(url, root, filename=None, md5=None):
+    """Download a file from a url
and place it in root.""" + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + makedir_exist_ok(root) + + # downloads file + if os.path.isfile(fpath) and check_integrity(fpath, md5): + print('Using downloaded and verified file: ' + fpath) + else: + try: + print('Downloading ' + url + ' to ' + fpath) + urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater()) + except OSError: + if url[:5] == 'https': + url = url.replace('https:', 'http:') + print('Failed download. Trying https -> http instead.' + ' Downloading ' + url + ' to ' + fpath) + urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater()) + +def download_extract(url, root, filename, md5): + download_url(url, root, filename, md5) + with tarfile.open(os.path.join(root, filename), "r") as tar: + tar.extractall(path=root) \ No newline at end of file diff --git a/segutils/core/data/downloader/__init__.py b/segutils/core/data/downloader/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/data/downloader/ade20k.py b/segutils/core/data/downloader/ade20k.py new file mode 100644 index 0000000..8187c48 --- /dev/null +++ b/segutils/core/data/downloader/ade20k.py @@ -0,0 +1,51 @@ +"""Prepare ADE20K dataset""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/ade') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize ADE20K dataset.', + epilog='Example: python setup_ade20k.py', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', default=None, help='dataset directory on disk') + args = parser.parse_args() + return args + + +def download_ade(path, overwrite=False): + _AUG_DOWNLOAD_URLS = [ + ('http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip', + '219e1696abb36c8ba3a3afe7fb2f4b4606a897c7'), + ( + 'http://data.csail.mit.edu/places/ADEchallenge/release_test.zip', + 'e05747892219d10e9243933371a497e905a4860c'), ] + download_dir = os.path.join(path, 'downloads') + makedirs(download_dir) + for url, checksum in _AUG_DOWNLOAD_URLS: + filename = download(url, path=download_dir, overwrite=overwrite, sha1_hash=checksum) + # extract + with zipfile.ZipFile(filename, "r") as zip_ref: + zip_ref.extractall(path=path) + + +if __name__ == '__main__': + args = parse_args() + makedirs(os.path.expanduser('~/.torch/datasets')) + if args.download_dir is not None: + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + # make symlink + os.symlink(args.download_dir, _TARGET_DIR) + download_ade(_TARGET_DIR, overwrite=False) diff --git a/segutils/core/data/downloader/cityscapes.py b/segutils/core/data/downloader/cityscapes.py new file mode 100644 index 0000000..3b65b88 --- /dev/null +++ b/segutils/core/data/downloader/cityscapes.py @@ -0,0 +1,54 @@ +"""Prepare Cityscapes dataset""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs, check_sha1 + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/citys') + + +def parse_args(): + parser = 
argparse.ArgumentParser(
+        description='Initialize Cityscapes dataset.',
+        epilog='Example: python prepare_cityscapes.py',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--download-dir', default=None, help='dataset directory on disk')
+    args = parser.parse_args()
+    return args
+
+
+def download_city(path, overwrite=False):
+    _CITY_DOWNLOAD_URLS = [
+        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
+        ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
+    download_dir = os.path.join(path, 'downloads')
+    makedirs(download_dir)
+    # Cityscapes requires registration, so the two archives above must be
+    # downloaded manually; this loop only verifies their checksums and extracts them.
+    for filename, checksum in _CITY_DOWNLOAD_URLS:
+        if not check_sha1(filename, checksum):
+            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
+                              'The repo may be outdated or download may be incomplete. ' \
+                              'If the "repo_url" is overridden, consider switching to ' \
+                              'the default repo.'.format(filename))
+        # extract
+        with zipfile.ZipFile(filename, "r") as zip_ref:
+            zip_ref.extractall(path=path)
+        print("Extracted", filename)
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    makedirs(os.path.expanduser('~/.torch/datasets'))
+    if args.download_dir is not None:
+        if os.path.isdir(_TARGET_DIR):
+            os.remove(_TARGET_DIR)
+        # make symlink
+        os.symlink(args.download_dir, _TARGET_DIR)
+    else:
+        download_city(_TARGET_DIR, overwrite=False)
diff --git a/segutils/core/data/downloader/mscoco.py b/segutils/core/data/downloader/mscoco.py
new file mode 100644
index 0000000..6d509b6
--- /dev/null
+++ b/segutils/core/data/downloader/mscoco.py
@@ -0,0 +1,69 @@
+"""Prepare MS COCO datasets"""
+import os
+import sys
+import argparse
+import zipfile
+
+# TODO: optim code
+cur_path = os.path.abspath(os.path.dirname(__file__))
+root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0]
+sys.path.append(root_path)
+
+from core.utils import download, makedirs, try_import_pycocotools
+
+_TARGET_DIR = os.path.expanduser('~/.torch/datasets/coco')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Initialize MS COCO dataset.',
+        epilog='Example: python mscoco.py --download-dir ~/mscoco',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--download-dir', type=str, default='~/mscoco/', help='dataset directory on disk')
+    parser.add_argument('--no-download', action='store_true', help='disable automatic download if set')
+    parser.add_argument('--overwrite', action='store_true',
+                        help='overwrite downloaded files if set, in case they are corrupted')
+    args = parser.parse_args()
+    return args
+
+
+def download_coco(path, overwrite=False):
+    _DOWNLOAD_URLS = [
+        ('http://images.cocodataset.org/zips/train2017.zip',
+         '10ad623668ab00c62c096f0ed636d6aff41faca5'),
+        ('http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
+         '8551ee4bb5860311e79dace7e79cb91e432e78b3'),
+        ('http://images.cocodataset.org/zips/val2017.zip',
+         '4950dc9d00dbe1c933ee0170f5797584351d2a41'),
+        # ('http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',
+        #  '46cdcf715b6b4f67e980b529534e79c2edffe084'),
+        # test2017.zip, for those who want to attend the competition.
+ # ('http://images.cocodataset.org/zips/test2017.zip', + # '4e443f8a2eca6b1dac8a6c57641b67dd40621a49'), + ] + makedirs(path) + for url, checksum in _DOWNLOAD_URLS: + filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum) + # extract + with zipfile.ZipFile(filename) as zf: + zf.extractall(path=path) + + +if __name__ == '__main__': + args = parse_args() + path = os.path.expanduser(args.download_dir) + if not os.path.isdir(path) or not os.path.isdir(os.path.join(path, 'train2017')) \ + or not os.path.isdir(os.path.join(path, 'val2017')) \ + or not os.path.isdir(os.path.join(path, 'annotations')): + if args.no_download: + raise ValueError(('{} is not a valid directory, make sure it is present.' + ' Or you should not disable "--no-download" to grab it'.format(path))) + else: + download_coco(path, overwrite=args.overwrite) + + # make symlink + makedirs(os.path.expanduser('~/.torch/datasets')) + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + os.symlink(path, _TARGET_DIR) + try_import_pycocotools() diff --git a/segutils/core/data/downloader/pascal_voc.py b/segutils/core/data/downloader/pascal_voc.py new file mode 100644 index 0000000..849c95b --- /dev/null +++ b/segutils/core/data/downloader/pascal_voc.py @@ -0,0 +1,100 @@ +"""Prepare PASCAL VOC datasets""" +import os +import sys +import shutil +import argparse +import tarfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/voc') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize PASCAL VOC dataset.', + epilog='Example: python pascal_voc.py --download-dir ~/VOCdevkit', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', type=str, default='~/VOCdevkit/', help='dataset directory on disk') + parser.add_argument('--no-download', action='store_true', help='disable automatic download if set') + parser.add_argument('--overwrite', action='store_true', + help='overwrite downloaded files if set, in case they are corrupted') + args = parser.parse_args() + return args + + +##################################################################################### +# Download and extract VOC datasets into ``path`` + +def download_voc(path, overwrite=False): + _DOWNLOAD_URLS = [ + ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', + '34ed68851bce2a36e2a223fa52c661d592c66b3c'), + ('http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', + '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'), + ('http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar', + '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')] + makedirs(path) + for url, checksum in _DOWNLOAD_URLS: + filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum) + # extract + with tarfile.open(filename) as tar: + tar.extractall(path=path) + + +##################################################################################### +# Download and extract the VOC augmented segmentation dataset into ``path`` + +def download_aug(path, overwrite=False): + _AUG_DOWNLOAD_URLS = [ + ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz', + '7129e0a480c2d6afb02b517bb18ac54283bfaa35')] + makedirs(path) + for url, checksum in _AUG_DOWNLOAD_URLS: + filename = download(url, 
path=path, overwrite=overwrite, sha1_hash=checksum)
+        # extract
+        with tarfile.open(filename) as tar:
+            tar.extractall(path=path)
+        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
+                    os.path.join(path, 'VOCaug'))
+        filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
+        # generate trainval.txt
+        with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
+            for fname in filenames:
+                fname = os.path.join(path, fname)
+                with open(fname) as infile:
+                    for line in infile:
+                        outfile.write(line)
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    path = os.path.expanduser(args.download_dir)
+    if not os.path.isdir(path) or not os.path.isdir(os.path.join(path, 'VOC2007')) \
+            or not os.path.isdir(os.path.join(path, 'VOC2012')):
+        if args.no_download:
+            raise ValueError(('{} is not a valid directory, make sure it is present.'
+                              ' Or you should not disable "--no-download" to grab it'.format(path)))
+        else:
+            download_voc(path, overwrite=args.overwrite)
+            shutil.move(os.path.join(path, 'VOCdevkit', 'VOC2007'), os.path.join(path, 'VOC2007'))
+            shutil.move(os.path.join(path, 'VOCdevkit', 'VOC2012'), os.path.join(path, 'VOC2012'))
+            shutil.rmtree(os.path.join(path, 'VOCdevkit'))
+
+    if not os.path.isdir(os.path.join(path, 'VOCaug')):
+        if args.no_download:
+            raise ValueError(('{} is not a valid directory, make sure it is present.'
+                              ' Or you should not disable "--no-download" to grab it'.format(path)))
+        else:
+            download_aug(path, overwrite=args.overwrite)
+
+    # make symlink
+    makedirs(os.path.expanduser('~/.torch/datasets'))
+    if os.path.isdir(_TARGET_DIR):
+        os.remove(_TARGET_DIR)
+    os.symlink(path, _TARGET_DIR)
diff --git a/segutils/core/data/downloader/sbu_shadow.py b/segutils/core/data/downloader/sbu_shadow.py
new file mode 100644
index 0000000..cdcbdde
--- /dev/null
+++ b/segutils/core/data/downloader/sbu_shadow.py
@@ -0,0 +1,56 @@
+"""Prepare SBU Shadow datasets"""
+import os
+import sys
+import argparse
+import zipfile
+
+# TODO: optim code
+cur_path = os.path.abspath(os.path.dirname(__file__))
+root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0]
+sys.path.append(root_path)
+
+from core.utils import download, makedirs
+
+_TARGET_DIR = os.path.expanduser('~/.torch/datasets/sbu')
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Initialize SBU Shadow dataset.',
+        epilog='Example: python sbu_shadow.py --download-dir ~/SBU-shadow',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--download-dir', type=str, default=None, help='dataset directory on disk')
+    parser.add_argument('--no-download', action='store_true', help='disable automatic download if set')
+    parser.add_argument('--overwrite', action='store_true',
+                        help='overwrite downloaded files if set, in case they are corrupted')
+    args = parser.parse_args()
+    return args
+
+
+#####################################################################################
+# Download and extract SBU shadow datasets into ``path``
+
+def download_sbu(path, overwrite=False):
+    _DOWNLOAD_URLS = [
+        ('http://www3.cs.stonybrook.edu/~cvl/content/datasets/shadow_db/SBU-shadow.zip'),
+    ]
+    download_dir = os.path.join(path, 'downloads')
+    makedirs(download_dir)
+    for url in _DOWNLOAD_URLS:
+        filename = download(url, path=path, overwrite=overwrite)
+        # extract
+        with zipfile.ZipFile(filename, "r") as zf:
+            zf.extractall(path=path)
+        print("Extracted", filename)
+
+
+if __name__ == '__main__':
+    args = parse_args()
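+    # Mirror the other downloader scripts: either symlink an existing copy of
+    # the dataset into ~/.torch/datasets/sbu, or download and extract
+    # SBU-shadow.zip into the target directory. +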
makedirs(os.path.expanduser('~/.torch/datasets')) + if args.download_dir is not None: + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + # make symlink + os.symlink(args.download_dir, _TARGET_DIR) + else: + download_sbu(_TARGET_DIR, overwrite=False) diff --git a/segutils/core/lib/psa/__pycache__/functional.cpython-36.pyc b/segutils/core/lib/psa/__pycache__/functional.cpython-36.pyc new file mode 100644 index 0000000..bf41206 Binary files /dev/null and b/segutils/core/lib/psa/__pycache__/functional.cpython-36.pyc differ diff --git a/segutils/core/lib/psa/functional.py b/segutils/core/lib/psa/functional.py new file mode 100644 index 0000000..8e66088 --- /dev/null +++ b/segutils/core/lib/psa/functional.py @@ -0,0 +1,5 @@ +from . import functions + + +def psa_mask(input, psa_type=0, mask_H_=None, mask_W_=None): + return functions.psa_mask(input, psa_type, mask_H_, mask_W_) diff --git a/segutils/core/lib/psa/functions/__init__.py b/segutils/core/lib/psa/functions/__init__.py new file mode 100644 index 0000000..1b4726b --- /dev/null +++ b/segutils/core/lib/psa/functions/__init__.py @@ -0,0 +1 @@ +from .psamask import * diff --git a/segutils/core/lib/psa/functions/__pycache__/__init__.cpython-36.pyc b/segutils/core/lib/psa/functions/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..f6e5711 Binary files /dev/null and b/segutils/core/lib/psa/functions/__pycache__/__init__.cpython-36.pyc differ diff --git a/segutils/core/lib/psa/functions/__pycache__/psamask.cpython-36.pyc b/segutils/core/lib/psa/functions/__pycache__/psamask.cpython-36.pyc new file mode 100644 index 0000000..ba6b6f1 Binary files /dev/null and b/segutils/core/lib/psa/functions/__pycache__/psamask.cpython-36.pyc differ diff --git a/segutils/core/lib/psa/functions/psamask.py b/segutils/core/lib/psa/functions/psamask.py new file mode 100644 index 0000000..26f34a2 --- /dev/null +++ b/segutils/core/lib/psa/functions/psamask.py @@ -0,0 +1,39 @@ +import torch +from torch.autograd import Function +from .. 
import src + + +class PSAMask(Function): + @staticmethod + def forward(ctx, input, psa_type=0, mask_H_=None, mask_W_=None): + assert psa_type in [0, 1] # 0-col, 1-dis + assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) + num_, channels_, feature_H_, feature_W_ = input.size() + if mask_H_ is None and mask_W_ is None: + mask_H_, mask_W_ = 2 * feature_H_ - 1, 2 * feature_W_ - 1 + assert (mask_H_ % 2 == 1) and (mask_W_ % 2 == 1) + assert channels_ == mask_H_ * mask_W_ + half_mask_H_, half_mask_W_ = (mask_H_ - 1) // 2, (mask_W_ - 1) // 2 + output = torch.zeros([num_, feature_H_ * feature_W_, feature_H_, feature_W_], dtype=input.dtype, device=input.device) + if not input.is_cuda: + src.cpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + else: + output = output.cuda() + src.gpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ = psa_type, num_, channels_, feature_H_, feature_W_ + ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ = mask_H_, mask_W_, half_mask_H_, half_mask_W_ + return output + + @staticmethod + def backward(ctx, grad_output): + psa_type, num_, channels_, feature_H_, feature_W_ = ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ + mask_H_, mask_W_, half_mask_H_, half_mask_W_ = ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ + grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device) + if not grad_output.is_cuda: + src.cpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + else: + src.gpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + return grad_input, None, None, None + + +psa_mask = PSAMask.apply diff --git a/segutils/core/lib/psa/modules/__init__.py b/segutils/core/lib/psa/modules/__init__.py new file mode 100644 index 0000000..1b4726b --- /dev/null +++ b/segutils/core/lib/psa/modules/__init__.py @@ -0,0 +1 @@ +from .psamask import * diff --git a/segutils/core/lib/psa/modules/psamask.py b/segutils/core/lib/psa/modules/psamask.py new file mode 100644 index 0000000..58ea4d9 --- /dev/null +++ b/segutils/core/lib/psa/modules/psamask.py @@ -0,0 +1,15 @@ +from torch import nn +from .. 
import functional as F


class PSAMask(nn.Module):
    def __init__(self, psa_type=0, mask_H_=None, mask_W_=None):
        super(PSAMask, self).__init__()
        assert psa_type in [0, 1]  # 0: collect (col), 1: distribute (dis)
        assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None)
        self.psa_type = psa_type
        self.mask_H_ = mask_H_
        self.mask_W_ = mask_W_

    def forward(self, input):
        return F.psa_mask(input, self.psa_type, self.mask_H_, self.mask_W_)
diff --git a/segutils/core/lib/psa/src/__init__.py b/segutils/core/lib/psa/src/__init__.py
new file mode 100644
index 0000000..ead1cfe
--- /dev/null
+++ b/segutils/core/lib/psa/src/__init__.py
@@ -0,0 +1,18 @@
+import os
+import torch
+from torch.utils.cpp_extension import load
+
+cwd = os.path.dirname(os.path.realpath(__file__))
+cpu_path = os.path.join(cwd, 'cpu')
+gpu_path = os.path.join(cwd, 'gpu')
+print(cpu_path, gpu_path)
+# JIT-compile the C++ (and, when available, CUDA) extensions on first import
+cpu = load('psamask_cpu', [
+    os.path.join(cpu_path, 'operator.cpp'),
+    os.path.join(cpu_path, 'psamask.cpp'),
+], build_directory=cpu_path, verbose=False)
+
+if torch.cuda.is_available():
+    gpu = load('psamask_gpu', [
+        os.path.join(gpu_path, 'operator.cpp'),
+        os.path.join(gpu_path, 'psamask_cuda.cu'),
+    ], build_directory=gpu_path, verbose=False)
\ No newline at end of file
diff --git a/segutils/core/lib/psa/src/__pycache__/__init__.cpython-36.pyc b/segutils/core/lib/psa/src/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..2532ecb
Binary files /dev/null and b/segutils/core/lib/psa/src/__pycache__/__init__.cpython-36.pyc differ
diff --git a/segutils/core/lib/psa/src/cpu/operator.cpp b/segutils/core/lib/psa/src/cpu/operator.cpp
new file mode 100644
index 0000000..e7b9f6c
--- /dev/null
+++ b/segutils/core/lib/psa/src/cpu/operator.cpp
@@ -0,0 +1,6 @@
+#include "operator.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("psamask_forward", &psamask_forward_cpu, "PSAMASK forward (CPU)");
+  m.def("psamask_backward", &psamask_backward_cpu, "PSAMASK backward (CPU)");
+}
diff --git a/segutils/core/lib/psa/src/cpu/operator.h b/segutils/core/lib/psa/src/cpu/operator.h
new file mode 100644
index 0000000..abc43cb
--- /dev/null
+++ b/segutils/core/lib/psa/src/cpu/operator.h
@@ -0,0 +1,4 @@
+#include <torch/extension.h>
+
+void psamask_forward_cpu(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_);
+void psamask_backward_cpu(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_);
\ No newline at end of file
diff --git a/segutils/core/lib/psa/src/cpu/psamask.cpp b/segutils/core/lib/psa/src/cpu/psamask.cpp
new file mode 100644
index 0000000..eb33694
--- /dev/null
+++ b/segutils/core/lib/psa/src/cpu/psamask.cpp
@@ -0,0 +1,133 @@
+#include <torch/extension.h>
+
+#ifndef min
+#define min(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a,b) (((a) > (b)) ?
(a) : (b)) +#endif + +void psamask_collect_forward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_distribute_forward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_collect_backward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_distribute_backward(const int num_, + const int feature_H_, const int feature_W_, + 
const int mask_H_, const int mask_W_,
+                                  const int half_mask_H_, const int half_mask_W_,
+                                  const float* buffer_diff, float* mask_diff) {
+  for(int n = 0; n < num_; n++) {
+    for(int h = 0; h < feature_H_; h++) {
+      for(int w = 0; w < feature_W_; w++) {
+        // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
+        const int hstart = max(0, half_mask_H_ - h);
+        const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
+        const int wstart = max(0, half_mask_W_ - w);
+        const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
+        // (hidx, widx ) with mask-indexed
+        // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
+        for (int hidx = hstart; hidx < hend; hidx++) {
+          for (int widx = wstart; widx < wend; widx++) {
+            mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
+                buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)];
+          }
+        }
+      }
+    }
+  }
+}
+
+void psamask_forward_cpu(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_)
+{
+    const float* input_data = input.data_ptr<float>();
+    float* output_data = output.data_ptr<float>();
+    if(psa_type == 0)
+        psamask_collect_forward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data);
+    else
+        psamask_distribute_forward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data);
+}
+
+void psamask_backward_cpu(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_)
+{
+    const float* grad_output_data = grad_output.data_ptr<float>();
+    float* grad_input_data = grad_input.data_ptr<float>();
+    if(psa_type == 0)
+        psamask_collect_backward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data);
+    else
+        psamask_distribute_backward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data);
+}
diff --git a/segutils/core/lib/psa/src/gpu/operator.cpp b/segutils/core/lib/psa/src/gpu/operator.cpp
new file mode 100644
index 0000000..5a52f4a
--- /dev/null
+++ b/segutils/core/lib/psa/src/gpu/operator.cpp
@@ -0,0 +1,6 @@
+#include "operator.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("psamask_forward", &psamask_forward_cuda, "PSAMASK forward (GPU)");
+    m.def("psamask_backward", &psamask_backward_cuda, "PSAMASK backward (GPU)");
+}
diff --git a/segutils/core/lib/psa/src/gpu/operator.h b/segutils/core/lib/psa/src/gpu/operator.h
new file mode 100644
index 0000000..235a9e1
--- /dev/null
+++ b/segutils/core/lib/psa/src/gpu/operator.h
@@ -0,0 +1,4 @@
+#include <torch/extension.h>
+
+void psamask_forward_cuda(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_);
+void psamask_backward_cuda(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_);
diff --git a/segutils/core/lib/psa/src/gpu/psamask_cuda.cu b/segutils/core/lib/psa/src/gpu/psamask_cuda.cu
new file mode 100644
index 0000000..f3fcb93
--- /dev/null
+++ b/segutils/core/lib/psa/src/gpu/psamask_cuda.cu
@@ -0,0 +1,128 @@
+#include <torch/extension.h>
+
+// CUDA: grid stride looping
+#ifndef CUDA_KERNEL_LOOP
+#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
+#endif
+
+__global__ void psamask_collect_forward_cuda(const int nthreads,
+    const int feature_H_, const int feature_W_,
+    const int mask_H_, const int mask_W_,
+    const int half_mask_H_, const int half_mask_W_,
+    const float* mask_data, float* buffer_data) {
+  CUDA_KERNEL_LOOP(index, nthreads) {
+    const int w = index % feature_W_;
+    const int h = (index / feature_W_) % feature_H_;
+    const int n = index / feature_W_ / feature_H_;
+    // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
+    const int hstart = max(0, half_mask_H_ - h);
+    const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
+    const int wstart = max(0, half_mask_W_ - w);
+    const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
+    // (hidx, widx ) with mask-indexed
+    // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
+    for (int hidx = hstart; hidx < hend; hidx++) {
+      for (int widx = wstart; widx < wend; widx++) {
+        buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] =
+            mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
+      }
+    }
+  }
+}
+
+__global__ void psamask_distribute_forward_cuda(const int nthreads,
+    const int feature_H_, const int feature_W_,
+    const int mask_H_, const int mask_W_,
+    const int half_mask_H_, const int half_mask_W_,
+    const float* mask_data, float* buffer_data) {
+  CUDA_KERNEL_LOOP(index, nthreads) {
+    const int w = index % feature_W_;
+    const int h = (index / feature_W_) % feature_H_;
+    const int n = index / feature_W_ / feature_H_;
+    // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
+    const int hstart = max(0, half_mask_H_ - h);
+    const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
+    const int wstart = max(0, half_mask_W_ - w);
+    const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
+    // (hidx, widx ) with mask-indexed
+    // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
+    for (int hidx = hstart; hidx < hend; hidx++) {
+      for (int widx = wstart; widx < wend; widx++) {
+        buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] =
+            mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w];
+      }
+    }
+  }
+}
+
+__global__ void psamask_collect_backward_cuda(const int nthreads,
+    const int feature_H_, const int feature_W_,
+    const int mask_H_, const int mask_W_,
+    const int half_mask_H_, const int half_mask_W_,
+    const float* buffer_diff, float* mask_diff) {
+  CUDA_KERNEL_LOOP(index, nthreads) {
+    const int w = index % feature_W_;
+    const int h = (index / feature_W_) % feature_H_;
+    const int n = index / feature_W_ / feature_H_;
+    // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
+    const int hstart = max(0, half_mask_H_ - h);
+    const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
+    const int wstart = max(0, half_mask_W_ - w);
+    const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
+    // (hidx, widx ) with mask-indexed
+    // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
+    for (int hidx = hstart; hidx < hend; hidx++) {
+      for (int widx = wstart; widx < wend; widx++) {
+        mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
+            buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w];
+      }
+    }
+  }
+}
+
+__global__ void psamask_distribute_backward_cuda(const int nthreads,
+    const int feature_H_, const int feature_W_,
+    const int mask_H_, const int mask_W_,
+    const int half_mask_H_, const int half_mask_W_,
+    const float* buffer_diff, float* mask_diff) {
+  CUDA_KERNEL_LOOP(index, nthreads) {
+    const int w = index % feature_W_;
+    const int h = (index / feature_W_) % feature_H_;
+    const int n = index / feature_W_ / feature_H_;
+    // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed
+    const int hstart = max(0, half_mask_H_ - h);
+    const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h);
+    const int wstart = max(0, half_mask_W_ - w);
+    const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w);
+    // (hidx, widx ) with mask-indexed
+    // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed
+    for (int hidx = hstart; hidx < hend; hidx++) {
+      for (int widx = wstart; widx < wend; widx++) {
+        mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] =
+            buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)];
+      }
+    }
+  }
+}
+
+void psamask_forward_cuda(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_)
+{
+    int nthreads = num_ * feature_H_ * feature_W_;
+    const float* input_data = input.data_ptr<float>();
+    float* output_data = output.data_ptr<float>();
+    // block/grid sizes assumed; the grid-stride CUDA_KERNEL_LOOP is correct for any launch config
+    if(psa_type == 0)
+        psamask_collect_forward_cuda<<<(nthreads + 511) / 512, 512>>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data);
+    else
+        psamask_distribute_forward_cuda<<<(nthreads + 511) / 512, 512>>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data);
+}
+
+void psamask_backward_cuda(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_)
+{
+    int nthreads = num_ * feature_H_ * feature_W_;
+    const float* grad_output_data = grad_output.data_ptr<float>();
+    float* grad_input_data = grad_input.data_ptr<float>();
+    // block/grid sizes assumed; the grid-stride CUDA_KERNEL_LOOP is correct for any launch config
+    if(psa_type == 0)
+        psamask_collect_backward_cuda<<<(nthreads + 511) / 512, 512>>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data);
+    else
+        psamask_distribute_backward_cuda<<<(nthreads + 511) / 512, 512>>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data);
+}
diff --git a/segutils/core/models/__init__.py b/segutils/core/models/__init__.py
new file mode 100644
index 0000000..2a8b222
--- /dev/null
+++ b/segutils/core/models/__init__.py
@@ -0,0 +1,2 @@
+"""Model Zoo"""
+from .model_zoo import get_model, get_model_list
\ No newline at end of file
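A quick usage sketch for the PSA op defined above (import path and dispatch are assumptions from this commit's layout: the extensions are JIT-built by src/__init__.py on first import, and the PSAMask wrapper calls functional.psa_mask). psa_type=0 ('collect') expands an over-complete mask with mask_H_ * mask_W_ channels into a pixel-pairwise attention tensor of shape (N, H*W, H, W), matching the buffer indexing in psamask.cpp:

    import torch
    from segutils.core.lib.psa.modules import PSAMask  # import path assumed

    mask = torch.randn(2, 9 * 9, 32, 32)  # channel count must equal mask_H_ * mask_W_
    collect = PSAMask(psa_type=0, mask_H_=9, mask_W_=9)
    attn = collect(mask)
    print(attn.shape)  # torch.Size([2, 1024, 32, 32]), i.e. (N, H*W, H, W)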
diff --git a/segutils/core/models/base_models.zip b/segutils/core/models/base_models.zip
new file mode 100644
index 0000000..b7fb6b1
Binary files /dev/null and b/segutils/core/models/base_models.zip differ
diff --git a/segutils/core/models/base_models/__init__.py b/segutils/core/models/base_models/__init__.py
new file mode 100644
index 0000000..562aa28
--- /dev/null
+++ b/segutils/core/models/base_models/__init__.py
@@ -0,0 +1,6 @@
+from .densenet import *
+from .resnet import *
+from .resnetv1b import *
+from .vgg import *
+from .eespnet import *
+from .xception import *
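The base_models package re-exports the backbone constructors that the segmentation heads build on. A minimal sketch of instantiating one (import path assumed; densenet121 is defined in the file that follows):

    import torch
    from segutils.core.models.base_models import densenet121  # import path assumed

    backbone = densenet121(pretrained=False)
    logits = backbone(torch.randn(1, 3, 224, 224))  # (1, 1000) from the default classifier head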
diff --git a/segutils/core/models/base_models/densenet.py b/segutils/core/models/base_models/densenet.py
new file mode 100644
index 0000000..733f21d
--- /dev/null
+++ b/segutils/core/models/base_models/densenet.py
@@ -0,0 +1,237 @@
+import re
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.model_zoo as model_zoo
+
+from collections import OrderedDict
+
+__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201',
+           'dilated_densenet121', 'dilated_densenet161', 'dilated_densenet169', 'dilated_densenet201']
+
+model_urls = {
+    'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth',
+    'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth',
+    'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth',
+    'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth',
+}
+
+
+class _DenseLayer(nn.Sequential):
+    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dilation=1, norm_layer=nn.BatchNorm2d):
+        super(_DenseLayer, self).__init__()
+        self.add_module('norm1', norm_layer(num_input_features)),
+        self.add_module('relu1', nn.ReLU(True)),
+        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate, 1, 1, bias=False)),
+        self.add_module('norm2', norm_layer(bn_size * growth_rate)),
+        self.add_module('relu2', nn.ReLU(True)),
+        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, 3, 1, dilation, dilation, bias=False)),
+        self.drop_rate = drop_rate
+
+    def forward(self, x):
+        new_features = super(_DenseLayer, self).forward(x)
+        if self.drop_rate > 0:
+            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
+        return torch.cat([x, new_features], 1)
+
+
+class _DenseBlock(nn.Sequential):
+    def __init__(self, num_layers, num_input_features, bn_size,
+                 growth_rate, drop_rate, dilation=1, norm_layer=nn.BatchNorm2d):
+        super(_DenseBlock, self).__init__()
+        for i in range(num_layers):
+            layer = _DenseLayer(num_input_features + i * growth_rate,
+                                growth_rate, bn_size, drop_rate, dilation, norm_layer)
+            self.add_module('denselayer%d' % (i + 1), layer)
+
+
+class _Transition(nn.Sequential):
+    def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d):
+        super(_Transition, self).__init__()
+        self.add_module('norm', norm_layer(num_input_features))
+        self.add_module('relu', nn.ReLU(True))
+        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, 1, 1, bias=False))
+        self.add_module('pool', nn.AvgPool2d(2, 2))
+
+
+# Net
+class DenseNet(nn.Module):  # builds the model in two steps: construct all modules first, then route data through features and classifier in forward; the other common pattern is to construct and forward interleaved
+
+    def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16), num_init_features=64,
+                 bn_size=4, drop_rate=0, num_classes=1000, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(DenseNet, self).__init__()
+
+        # First convolution
+        self.features = nn.Sequential(OrderedDict([
+            ('conv0', nn.Conv2d(3, num_init_features, 7, 2, 3, bias=False)),
+            ('norm0', norm_layer(num_init_features)),
+            ('relu0', nn.ReLU(True)),
+            ('pool0', nn.MaxPool2d(3, 2, 1)),
+        ]))
+
+        # Each denseblock
+        num_features = num_init_features
+        for i, num_layers in enumerate(block_config):
+            block = _DenseBlock(num_layers, num_features, bn_size, growth_rate, drop_rate, norm_layer=norm_layer)
+            self.features.add_module('denseblock%d' % (i + 1), block)
+            num_features = num_features + num_layers * growth_rate
+            if i != len(block_config) - 1:
+                trans = _Transition(num_features, num_features // 2, norm_layer=norm_layer)
+                self.features.add_module('transition%d' % (i + 1), trans)
+                num_features = num_features // 2
+        self.num_features = num_features
+
+        # Final batch norm
+        self.features.add_module('norm5', norm_layer(num_features))
+
+        # Linear layer
+        self.classifier = nn.Linear(num_features, num_classes)
+
+        # Official init from torch repo.
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        features = self.features(x)
+        out = F.relu(features, True)
+        out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
+        out = self.classifier(out)
+        return out
+
+
+class DilatedDenseNet(DenseNet):
+    def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16), num_init_features=64,
+                 bn_size=4, drop_rate=0, num_classes=1000, dilate_scale=8, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(DilatedDenseNet, self).__init__(growth_rate, block_config, num_init_features,
+                                              bn_size, drop_rate, num_classes, norm_layer)
+        assert (dilate_scale == 8 or dilate_scale == 16), "dilate_scale can only be set to 8 or 16"
+        from functools import partial
+        if dilate_scale == 8:  # output_stride
+            self.features.denseblock3.apply(partial(self._conv_dilate, dilate=2))  # partial binds the dilation rate for apply()
+            self.features.denseblock4.apply(partial(self._conv_dilate, dilate=4))
+            del self.features.transition2.pool
+            del self.features.transition3.pool
+        elif dilate_scale == 16:
+            self.features.denseblock4.apply(partial(self._conv_dilate, dilate=2))
+            del self.features.transition3.pool
+
+    def _conv_dilate(self, m, dilate):
+        classname = m.__class__.__name__
+        if classname.find('Conv') != -1:
+            if m.kernel_size == (3, 3):
+                m.padding = (dilate, dilate)
+                m.dilation = (dilate, dilate)
+
+
+# Specification
+densenet_spec = {121: (64, 32, [6, 12, 24, 16]),
+                 161: (96, 48, [6, 12, 36, 24]),
+                 169: (64, 32, [6, 12, 32, 32]),
+                 201: (64, 32, [6, 12, 48, 32])}
+
+
+# Constructor
+def get_densenet(num_layers, pretrained=False, **kwargs):
+    r"""Densenet-BC model from the
+    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
+
+    Parameters
+    ----------
+    num_layers : int
+        Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
+    pretrained : bool or str
+        Boolean value controls whether to load the default pretrained weights for model.
+        String value represents the hashtag for a certain version of pretrained weights.
+    root : str, default $TORCH_HOME/models
+        Location for keeping the model parameters.
+    """
+    num_init_features, growth_rate, block_config = densenet_spec[num_layers]
+    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
+    if pretrained:
+        # '.'s are no longer allowed in module names, but previous _DenseLayer
+        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
+        # They are also in the checkpoints in model_urls. This pattern is used
+        # to find such keys.
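+        # e.g. 'features.denseblock1.denselayer1.norm.1.weight' is remapped to
+        #      'features.denseblock1.denselayer1.norm1.weight' before loading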
+        pattern = re.compile(
+            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+        state_dict = model_zoo.load_url(model_urls['densenet%d' % num_layers])
+        for key in list(state_dict.keys()):
+            res = pattern.match(key)
+            if res:
+                new_key = res.group(1) + res.group(2)
+                state_dict[new_key] = state_dict[key]
+                del state_dict[key]
+        model.load_state_dict(state_dict)  # load the remapped pretrained weights
+    return model
+
+
+def get_dilated_densenet(num_layers, dilate_scale, pretrained=False, **kwargs):
+    num_init_features, growth_rate, block_config = densenet_spec[num_layers]
+    model = DilatedDenseNet(growth_rate, block_config, num_init_features, dilate_scale=dilate_scale)
+    if pretrained:
+        # '.'s are no longer allowed in module names, but previous _DenseLayer
+        # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
+        # They are also in the checkpoints in model_urls. This pattern is used
+        # to find such keys.
+        pattern = re.compile(
+            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+        state_dict = model_zoo.load_url(model_urls['densenet%d' % num_layers])
+        for key in list(state_dict.keys()):
+            res = pattern.match(key)
+            if res:
+                new_key = res.group(1) + res.group(2)
+                state_dict[new_key] = state_dict[key]
+                del state_dict[key]
+        model.load_state_dict(state_dict)
+    return model
+
+
+def densenet121(**kwargs):
+    return get_densenet(121, **kwargs)
+
+
+def densenet161(**kwargs):
+    return get_densenet(161, **kwargs)
+
+
+def densenet169(**kwargs):
+    return get_densenet(169, **kwargs)
+
+
+def densenet201(**kwargs):
+    return get_densenet(201, **kwargs)
+
+
+def dilated_densenet121(dilate_scale, **kwargs):
+    return get_dilated_densenet(121, dilate_scale, **kwargs)
+
+
+def dilated_densenet161(dilate_scale, **kwargs):
+    return get_dilated_densenet(161, dilate_scale, **kwargs)
+
+
+def dilated_densenet169(dilate_scale, **kwargs):
+    return get_dilated_densenet(169, dilate_scale, **kwargs)
+
+
+def dilated_densenet201(dilate_scale, **kwargs):
+    return get_dilated_densenet(201, dilate_scale, **kwargs)
+
+
+if __name__ == '__main__':
+    img = torch.randn(2, 3, 512, 512).cuda()
+    model = dilated_densenet121(8).cuda()
+    outputs = model(img)
+    print(outputs.shape)
+    from torchsummary import summary
+
+    summary(model, (3, 224, 224))  # print a table with each layer's output shape and parameter count, in order
+    for name, parameters in model.named_parameters():
+        print(name, ':', parameters.size())
diff --git a/segutils/core/models/base_models/eespnet.py b/segutils/core/models/base_models/eespnet.py
new file mode 100644
index 0000000..7d087fd
--- /dev/null
+++ b/segutils/core/models/base_models/eespnet.py
@@ -0,0 +1,202 @@
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from core.nn import _ConvBNPReLU, _ConvBN, _BNPReLU
+
+__all__ = ['EESP', 'EESPNet', 'eespnet']
+
+
+class EESP(nn.Module):
+
+    def __init__(self, in_channels, out_channels, stride=1, k=4, r_lim=7, down_method='esp', norm_layer=nn.BatchNorm2d):
+        super(EESP, self).__init__()
+        self.stride = stride
+        n = int(out_channels / k)
+        n1 = out_channels - (k - 1) * n
+        assert down_method in ['avg', 'esp'], 'One of these is supported (avg or esp)'
+        assert n == n1, "n(={}) and n1(={}) should be equal for Depth-wise Convolution ".format(n, n1)
+        self.proj_1x1 = _ConvBNPReLU(in_channels, n, 1, stride=1, groups=k, norm_layer=norm_layer)
+
+        map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8}
+        self.k_sizes = list()
+        for i in range(k):
+            ksize = int(3 + 2 * i)
+            ksize
= ksize if ksize <= r_lim else 3 + self.k_sizes.append(ksize) + self.k_sizes.sort() + self.spp_dw = nn.ModuleList() + for i in range(k): + dilation = map_receptive_ksize[self.k_sizes[i]] + self.spp_dw.append(nn.Conv2d(n, n, 3, stride, dilation, dilation=dilation, groups=n, bias=False)) + self.conv_1x1_exp = _ConvBN(out_channels, out_channels, 1, 1, groups=k, norm_layer=norm_layer) + self.br_after_cat = _BNPReLU(out_channels, norm_layer) + self.module_act = nn.PReLU(out_channels) + self.downAvg = True if down_method == 'avg' else False + + def forward(self, x): + output1 = self.proj_1x1(x) + output = [self.spp_dw[0](output1)] + for k in range(1, len(self.spp_dw)): + out_k = self.spp_dw[k](output1) + out_k = out_k + output[k - 1] + output.append(out_k) + expanded = self.conv_1x1_exp(self.br_after_cat(torch.cat(output, 1))) + del output + if self.stride == 2 and self.downAvg: + return expanded + + if expanded.size() == x.size(): + expanded = expanded + x + + return self.module_act(expanded) + + +class DownSampler(nn.Module): + + def __init__(self, in_channels, out_channels, k=4, r_lim=9, reinf=True, inp_reinf=3, norm_layer=None): + super(DownSampler, self).__init__() + channels_diff = out_channels - in_channels + self.eesp = EESP(in_channels, channels_diff, stride=2, k=k, + r_lim=r_lim, down_method='avg', norm_layer=norm_layer) + self.avg = nn.AvgPool2d(kernel_size=3, padding=1, stride=2) + if reinf: + self.inp_reinf = nn.Sequential( + _ConvBNPReLU(inp_reinf, inp_reinf, 3, 1, 1), + _ConvBN(inp_reinf, out_channels, 1, 1)) + self.act = nn.PReLU(out_channels) + + def forward(self, x, x2=None): + avg_out = self.avg(x) + eesp_out = self.eesp(x) + output = torch.cat([avg_out, eesp_out], 1) + if x2 is not None: + w1 = avg_out.size(2) + while True: + x2 = F.avg_pool2d(x2, kernel_size=3, padding=1, stride=2) + w2 = x2.size(2) + if w2 == w1: + break + output = output + self.inp_reinf(x2) + + return self.act(output) + + +class EESPNet(nn.Module): + def __init__(self, num_classes=1000, scale=1, reinf=True, norm_layer=nn.BatchNorm2d): + super(EESPNet, self).__init__() + inp_reinf = 3 if reinf else None + reps = [0, 3, 7, 3] + r_lim = [13, 11, 9, 7, 5] + K = [4] * len(r_lim) + + # set out_channels + base, levels, base_s = 32, 5, 0 + out_channels = [base] * levels + for i in range(levels): + if i == 0: + base_s = int(base * scale) + base_s = math.ceil(base_s / K[0]) * K[0] + out_channels[i] = base if base_s > base else base_s + else: + out_channels[i] = base_s * pow(2, i) + if scale <= 1.5: + out_channels.append(1024) + elif scale in [1.5, 2]: + out_channels.append(1280) + else: + raise ValueError("Unknown scale value.") + + self.level1 = _ConvBNPReLU(3, out_channels[0], 3, 2, 1, norm_layer=norm_layer) + + self.level2_0 = DownSampler(out_channels[0], out_channels[1], k=K[0], r_lim=r_lim[0], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + + self.level3_0 = DownSampler(out_channels[1], out_channels[2], k=K[1], r_lim=r_lim[1], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level3 = nn.ModuleList() + for i in range(reps[1]): + self.level3.append(EESP(out_channels[2], out_channels[2], k=K[2], r_lim=r_lim[2], + norm_layer=norm_layer)) + + self.level4_0 = DownSampler(out_channels[2], out_channels[3], k=K[2], r_lim=r_lim[2], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level4 = nn.ModuleList() + for i in range(reps[2]): + self.level4.append(EESP(out_channels[3], out_channels[3], k=K[3], r_lim=r_lim[3], + norm_layer=norm_layer)) + + self.level5_0 = 
DownSampler(out_channels[3], out_channels[4], k=K[3], r_lim=r_lim[3],
+                                    reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer)
+        self.level5 = nn.ModuleList()
+        for i in range(reps[2]):
+            self.level5.append(EESP(out_channels[4], out_channels[4], k=K[4], r_lim=r_lim[4],
+                                    norm_layer=norm_layer))
+
+        self.level5.append(_ConvBNPReLU(out_channels[4], out_channels[4], 3, 1, 1,
+                                        groups=out_channels[4], norm_layer=norm_layer))
+        self.level5.append(_ConvBNPReLU(out_channels[4], out_channels[5], 1, 1, 0,
+                                        groups=K[4], norm_layer=norm_layer))
+
+        self.fc = nn.Linear(out_channels[5], num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, std=0.001)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+
+    def forward(self, x, seg=True):
+        out_l1 = self.level1(x)
+
+        out_l2 = self.level2_0(out_l1, x)
+
+        out_l3_0 = self.level3_0(out_l2, x)
+        for i, layer in enumerate(self.level3):
+            if i == 0:
+                out_l3 = layer(out_l3_0)
+            else:
+                out_l3 = layer(out_l3)
+
+        out_l4_0 = self.level4_0(out_l3, x)
+        for i, layer in enumerate(self.level4):
+            if i == 0:
+                out_l4 = layer(out_l4_0)
+            else:
+                out_l4 = layer(out_l4)
+
+        if not seg:
+            out_l5_0 = self.level5_0(out_l4)  # down-sampled
+            for i, layer in enumerate(self.level5):
+                if i == 0:
+                    out_l5 = layer(out_l5_0)
+                else:
+                    out_l5 = layer(out_l5)
+
+            output_g = F.adaptive_avg_pool2d(out_l5, output_size=1)
+            output_g = F.dropout(output_g, p=0.2, training=self.training)
+            output_1x1 = output_g.view(output_g.size(0), -1)
+
+            return self.fc(output_1x1)
+        return out_l1, out_l2, out_l3, out_l4
+
+
+def eespnet(pretrained=False, **kwargs):
+    model = EESPNet(**kwargs)
+    if pretrained:
+        raise ValueError("Pretrained weights are not supported")
+    return model
+
+
+if __name__ == '__main__':
+    img = torch.randn(1, 3, 224, 224)
+    model = eespnet()
+    out = model(img)
diff --git a/segutils/core/models/base_models/hrnet.py b/segutils/core/models/base_models/hrnet.py
new file mode 100644
index 0000000..775b809
--- /dev/null
+++ b/segutils/core/models/base_models/hrnet.py
@@ -0,0 +1,371 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):
+        super(BasicBlock, self).__init__()
+        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, padding=1, bias=False)
+        self.bn1 = norm_layer(planes)
+        self.relu = nn.ReLU(True)
+        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
+        self.bn2 = norm_layer(planes)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d):
+        super(Bottleneck, self).__init__()
+        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
+        self.bn1 = norm_layer(planes)
+        self.conv2 = nn.Conv2d(planes, planes, 3, stride, 1, bias=False)
+        self.bn2 = norm_layer(planes)
+        self.conv3 = nn.Conv2d(planes, planes *
self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, + fuse_method, multi_scale_output=True, norm_layer=nn.BatchNorm2d): + super(HighResolutionModule, self).__init__() + assert num_branches == len(num_blocks) + assert num_branches == len(num_channels) + assert num_branches == len(num_inchannels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels, norm_layer=norm_layer) + self.fuse_layers = self._make_fuse_layers(norm_layer) + self.relu = nn.ReLU(True) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, + stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, + 1, stride, bias=False), + norm_layer(num_channels[branch_index] * block.expansion)) + + layers = list() + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], + stride, downsample, norm_layer=norm_layer)) + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels, norm_layer=nn.BatchNorm2d): + branches = list() + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels, norm_layer=norm_layer)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self, norm_layer=nn.BatchNorm2d): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = list() + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, bias=False), + norm_layer(num_inchannels[i]), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = list() + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + norm_layer(num_outchannels_conv3x3))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + norm_layer(num_outchannels_conv3x3), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + 
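+                # fuse_layers[i][j] adapts branch j to branch i's scale: 1x1 conv plus
+                # nearest upsample when j > i, identity when j == i, and a chain of
+                # stride-2 3x3 convs when j < i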
fuse_layers.append(nn.ModuleList(fuse_layer))
+
+        return nn.ModuleList(fuse_layers)
+
+    def get_num_inchannels(self):
+        return self.num_inchannels
+
+    def forward(self, x):
+        if self.num_branches == 1:
+            return [self.branches[0](x[0])]
+
+        for i in range(self.num_branches):
+            x[i] = self.branches[i](x[i])
+
+        x_fuse = list()
+        for i in range(len(self.fuse_layers)):
+            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
+            for j in range(1, self.num_branches):
+                if i == j:
+                    y = y + x[j]
+                else:
+                    y = y + self.fuse_layers[i][j](x[j])
+            x_fuse.append(self.relu(y))
+
+        return x_fuse
+
+
+class HighResolutionNet(nn.Module):
+    def __init__(self, blocks, num_channels, num_modules, num_branches, num_blocks,
+                 fuse_method, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(HighResolutionNet, self).__init__()
+        self.num_branches = num_branches
+
+        # deep stem
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(3, 64, 3, 2, 1, bias=False),
+            norm_layer(64),
+            nn.ReLU(True),
+            nn.Conv2d(64, 64, 3, 2, 1, bias=False),
+            norm_layer(64),
+            nn.ReLU(True))
+
+        self.layer1 = self._make_layer(Bottleneck, 64, 64, 4, norm_layer=norm_layer)
+
+        # stage 2
+        num_channel, block = num_channels[0], blocks[0]
+        channels = [channel * block.expansion for channel in num_channel]
+        self.transition1 = self._make_transition_layer([256], channels, norm_layer)
+        self.stage2, pre_stage_channels = self._make_stage(num_modules[0], num_branches[0],
+                                                           num_blocks[0], channels, block,
+                                                           fuse_method[0], channels,
+                                                           norm_layer=norm_layer)
+
+        # stage 3
+        num_channel, block = num_channels[1], blocks[1]
+        channels = [channel * block.expansion for channel in num_channel]
+        self.transition2 = self._make_transition_layer(pre_stage_channels, channels, norm_layer)
+        self.stage3, pre_stage_channels = self._make_stage(num_modules[1], num_branches[1],
+                                                           num_blocks[1], channels, block,
+                                                           fuse_method[1], channels,
+                                                           norm_layer=norm_layer)
+
+        # stage 4
+        num_channel, block = num_channels[2], blocks[2]
+        channels = [channel * block.expansion for channel in num_channel]
+        self.transition3 = self._make_transition_layer(pre_stage_channels, channels, norm_layer)
+        self.stage4, pre_stage_channels = self._make_stage(num_modules[2], num_branches[2],
+                                                           num_blocks[2], channels, block,
+                                                           fuse_method[2], channels,
+                                                           norm_layer=norm_layer)
+
+        self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels, norm_layer)
+
+        self.classifier = nn.Linear(2048, 1000)
+
+    def _make_layer(self, block, inplanes, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d):
+        downsample = None
+        if stride != 1 or inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(inplanes, planes * block.expansion, 1, stride, bias=False),
+                norm_layer(planes * block.expansion))
+
+        layers = list()
+        layers.append(block(inplanes, planes, stride, downsample=downsample, norm_layer=norm_layer))
+        inplanes = planes * block.expansion
+        for _ in range(1, blocks):
+            layers.append(block(inplanes, planes, norm_layer=norm_layer))
+
+        return nn.Sequential(*layers)
+
+    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer, norm_layer=nn.BatchNorm2d):
+        num_branches_cur = len(num_channels_cur_layer)
+        num_branches_pre = len(num_channels_pre_layer)
+
+        transition_layers = list()
+        for i in range(num_branches_cur):
+            if i < num_branches_pre:
+                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
+                    transition_layers.append(nn.Sequential(
+                        nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, padding=1, bias=False),
+                        norm_layer(num_channels_cur_layer[i]),
+                        nn.ReLU(True)))
+                else:
+                    transition_layers.append(None)
+            else:
+                conv3x3s = list()
+                for j in range(i + 1 - num_branches_pre):
+                    in_channels = num_channels_pre_layer[-1]
+                    out_channels = num_channels_cur_layer[i] if j == i - num_branches_pre else in_channels
+                    conv3x3s.append(nn.Sequential(
+                        nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=False),
+                        norm_layer(out_channels),
+                        nn.ReLU(True)))
+                transition_layers.append(nn.Sequential(*conv3x3s))
+
+        return nn.ModuleList(transition_layers)
+
+    def _make_stage(self, num_modules, num_branches, num_blocks, num_channels, block,
+                    fuse_method, num_inchannels, multi_scale_output=True, norm_layer=nn.BatchNorm2d):
+        modules = list()
+        for i in range(num_modules):
+            # multi_scale_output is only used for the last module
+            if not multi_scale_output and i == num_modules - 1:
+                reset_multi_scale_output = False
+            else:
+                reset_multi_scale_output = True
+
+            modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels,
+                                                fuse_method, reset_multi_scale_output, norm_layer=norm_layer))
+            num_inchannels = modules[-1].get_num_inchannels()
+
+        return nn.Sequential(*modules), num_inchannels
+
+    def _make_head(self, pre_stage_channels, norm_layer=nn.BatchNorm2d):
+        head_block = Bottleneck
+        head_channels = [32, 64, 128, 256]
+
+        # Increasing the #channels on each resolution
+        # from C, 2C, 4C, 8C to 128, 256, 512, 1024
+        incre_modules = list()
+        for i, channels in enumerate(pre_stage_channels):
+            incre_module = self._make_layer(head_block, channels, head_channels[i], 1)
+            incre_modules.append(incre_module)
+        incre_modules = nn.ModuleList(incre_modules)
+
+        # downsampling modules
+        downsamp_modules = []
+        for i in range(len(pre_stage_channels) - 1):
+            in_channels = head_channels[i] * head_block.expansion
+            out_channels = head_channels[i + 1] * head_block.expansion
+
+            downsamp_module = nn.Sequential(
+                nn.Conv2d(in_channels, out_channels, 3, 2, 1),
+                norm_layer(out_channels),
+                nn.ReLU(True))
+
+            downsamp_modules.append(downsamp_module)
+        downsamp_modules = nn.ModuleList(downsamp_modules)
+
+        final_layer = nn.Sequential(
+            nn.Conv2d(head_channels[3] * head_block.expansion, 2048, 1),
+            norm_layer(2048),
+            nn.ReLU(True))
+
+        return incre_modules, downsamp_modules, final_layer
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.layer1(x)
+
+        x_list = list()
+        for i in range(self.num_branches[0]):
+            if self.transition1[i] is not None:
+                x_list.append(self.transition1[i](x))
+            else:
+                x_list.append(x)
+        y_list = self.stage2(x_list)
+
+        x_list = []
+        for i in range(self.num_branches[1]):
+            if self.transition2[i] is not None:
+                x_list.append(self.transition2[i](y_list[-1]))
+            else:
+                x_list.append(y_list[i])
+        y_list = self.stage3(x_list)
+
+        x_list = []
+        for i in range(self.num_branches[2]):
+            if self.transition3[i] is not None:
+                x_list.append(self.transition3[i](y_list[-1]))
+            else:
+                x_list.append(y_list[i])
+        y_list = self.stage4(x_list)
+
+        # Classification Head
+        y = self.incre_modules[0](y_list[0])
+        for i in range(len(self.downsamp_modules)):
+            y = self.incre_modules[i + 1](y_list[i + 1]) + self.downsamp_modules[i](y)
+
+        y = self.final_layer(y)
+
+        y = F.avg_pool2d(y, kernel_size=y.size()[2:]).view(y.size(0), -1)
+
+        y = self.classifier(y)
+
+        return y
+
+
+blocks = [BasicBlock, BasicBlock, BasicBlock]
+num_modules = [1, 1, 1]
+num_branches = [2, 3, 4]
+num_blocks = [[4, 4], [4, 4, 4], [4, 4, 4, 4]]
+num_channels = [[256, 256],
[32, 64, 128], [32, 64, 128, 256]] +fuse_method = ['sum', 'sum', 'sum'] + +if __name__ == '__main__': + img = torch.randn(1, 3, 256, 256) + model = HighResolutionNet(blocks, num_channels, num_modules, num_branches, num_blocks, fuse_method) + output = model(img) diff --git a/segutils/core/models/base_models/mobilenetv2.py b/segutils/core/models/base_models/mobilenetv2.py new file mode 100644 index 0000000..4e4c093 --- /dev/null +++ b/segutils/core/models/base_models/mobilenetv2.py @@ -0,0 +1,158 @@ +"""MobileNet and MobileNetV2.""" +import torch +import torch.nn as nn + +from core.nn import _ConvBNReLU, _DepthwiseConv, InvertedResidual + +__all__ = ['MobileNet', 'MobileNetV2', 'get_mobilenet', 'get_mobilenet_v2', + 'mobilenet1_0', 'mobilenet_v2_1_0', 'mobilenet0_75', 'mobilenet_v2_0_75', + 'mobilenet0_5', 'mobilenet_v2_0_5', 'mobilenet0_25', 'mobilenet_v2_0_25'] + + +class MobileNet(nn.Module): + def __init__(self, num_classes=1000, multiplier=1.0, norm_layer=nn.BatchNorm2d, **kwargs): + super(MobileNet, self).__init__() + conv_dw_setting = [ + [64, 1, 1], + [128, 2, 2], + [256, 2, 2], + [512, 6, 2], + [1024, 2, 2]] + input_channels = int(32 * multiplier) if multiplier > 1.0 else 32 + features = [_ConvBNReLU(3, input_channels, 3, 2, 1, norm_layer=norm_layer)] + + for c, n, s in conv_dw_setting: + out_channels = int(c * multiplier) + for i in range(n): + stride = s if i == 0 else 1 + features.append(_DepthwiseConv(input_channels, out_channels, stride, norm_layer)) + input_channels = out_channels + features.append(nn.AdaptiveAvgPool2d(1)) + self.features = nn.Sequential(*features) + + self.classifier = nn.Linear(int(1024 * multiplier), num_classes) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0), x.size(1))) + return x + + +class MobileNetV2(nn.Module): + def __init__(self, num_classes=1000, multiplier=1.0, norm_layer=nn.BatchNorm2d, **kwargs): + super(MobileNetV2, self).__init__() + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1]] + # building first layer + input_channels = int(32 * multiplier) if multiplier > 1.0 else 32 + last_channels = int(1280 * multiplier) if multiplier > 1.0 else 1280 + features = [_ConvBNReLU(3, input_channels, 3, 2, 1, relu6=True, norm_layer=norm_layer)] + + # building inverted residual blocks + for t, c, n, s in inverted_residual_setting: + out_channels = int(c * multiplier) + for i in range(n): + stride = s if i == 0 else 1 + features.append(InvertedResidual(input_channels, out_channels, stride, t, norm_layer)) + input_channels = out_channels + + # building last several layers + features.append(_ConvBNReLU(input_channels, last_channels, 1, relu6=True, norm_layer=norm_layer)) + features.append(nn.AdaptiveAvgPool2d(1)) + self.features = nn.Sequential(*features) + + self.classifier = nn.Sequential( + nn.Dropout2d(0.2), + nn.Linear(last_channels, num_classes)) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + 
elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0), x.size(1))) + return x + + +# Constructor +def get_mobilenet(multiplier=1.0, pretrained=False, root='~/.torch/models', **kwargs): + model = MobileNet(multiplier=multiplier, **kwargs) + + if pretrained: + raise ValueError("Not support pretrained") + return model + + +def get_mobilenet_v2(multiplier=1.0, pretrained=False, root='~/.torch/models', **kwargs): + model = MobileNetV2(multiplier=multiplier, **kwargs) + + if pretrained: + raise ValueError("Not support pretrained") + return model + + +def mobilenet1_0(**kwargs): + return get_mobilenet(1.0, **kwargs) + + +def mobilenet_v2_1_0(**kwargs): + return get_mobilenet_v2(1.0, **kwargs) + + +def mobilenet0_75(**kwargs): + return get_mobilenet(0.75, **kwargs) + + +def mobilenet_v2_0_75(**kwargs): + return get_mobilenet_v2(0.75, **kwargs) + + +def mobilenet0_5(**kwargs): + return get_mobilenet(0.5, **kwargs) + + +def mobilenet_v2_0_5(**kwargs): + return get_mobilenet_v2(0.5, **kwargs) + + +def mobilenet0_25(**kwargs): + return get_mobilenet(0.25, **kwargs) + + +def mobilenet_v2_0_25(**kwargs): + return get_mobilenet_v2(0.25, **kwargs) + + +if __name__ == '__main__': + model = mobilenet0_5() diff --git a/segutils/core/models/base_models/resnet.py b/segutils/core/models/base_models/resnet.py new file mode 100644 index 0000000..b95b8c6 --- /dev/null +++ b/segutils/core/models/base_models/resnet.py @@ -0,0 +1,226 @@ +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(BasicBlock, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(Bottleneck, self).__init__() + # 
Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = norm_layer(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = norm_layer(planes) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=nn.BatchNorm2d): + super(ResNet, self).__init__() + self.inplanes = 64 + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) + return model + + +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) + return model + + +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) + return model + + +if __name__ == '__main__': + import torch + img = torch.randn(4, 3, 224, 224) + model = resnet50(True) + output = model(img) \ No newline at end of file diff --git a/segutils/core/models/base_models/resnetv1b.py b/segutils/core/models/base_models/resnetv1b.py new file mode 100644 index 0000000..21d67b7 --- /dev/null +++ b/segutils/core/models/base_models/resnetv1b.py @@ -0,0 +1,264 @@ +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNetV1b', 'resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', + 'resnet101_v1b', 'resnet152_v1b', 'resnet152_v1s', 'resnet101_v1s', 'resnet50_v1s'] + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +class BasicBlockV1b(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, + previous_dilation=1, norm_layer=nn.BatchNorm2d): + super(BasicBlockV1b, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, + dilation, dilation, bias=False) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(True) + self.conv2 = nn.Conv2d(planes, planes, 3, 1, previous_dilation, + dilation=previous_dilation, bias=False) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class BottleneckV1b(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, + previous_dilation=1, norm_layer=nn.BatchNorm2d): + super(BottleneckV1b, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = norm_layer(planes) + self.conv2 = nn.Conv2d(planes, planes, 3, stride, + dilation, dilation, bias=False) + self.bn2 = norm_layer(planes) + self.conv3 = nn.Conv2d(planes, 
planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNetV1b(nn.Module): + + def __init__(self, block, layers, num_classes=1000, dilated=True, deep_stem=False, + zero_init_residual=False, norm_layer=nn.BatchNorm2d): + self.inplanes = 128 if deep_stem else 64 + super(ResNetV1b, self).__init__() + if deep_stem: + self.conv1 = nn.Sequential( + nn.Conv2d(3, 64, 3, 2, 1, bias=False), + norm_layer(64), + nn.ReLU(True), + nn.Conv2d(64, 64, 3, 1, 1, bias=False), + norm_layer(64), + nn.ReLU(True), + nn.Conv2d(64, 128, 3, 1, 1, bias=False) + ) + else: + self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(True) + self.maxpool = nn.MaxPool2d(3, 2, 1) + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + if dilated: + self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer) + else: + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, BottleneckV1b): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlockV1b): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False), + norm_layer(planes * block.expansion), + ) + + layers = [] + if dilation in (1, 2): + layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample, + previous_dilation=dilation, norm_layer=norm_layer)) + elif dilation == 4: + layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample, + previous_dilation=dilation, norm_layer=norm_layer)) + else: + raise RuntimeError("=> unknown dilation size: {}".format(dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, dilation=dilation, + previous_dilation=dilation, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return 
x + + +def resnet18_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet18']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet34_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet34']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet50_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet50']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet101_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet101']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet152_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet152']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet50_v1s(pretrained=False, root='~/.torch/models', **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, **kwargs) + if pretrained: + from ..model_store import get_resnet_file + model.load_state_dict(torch.load(get_resnet_file('resnet50', root=root)), strict=False) + return model + + +def resnet101_v1s(pretrained=False, root='~/.torch/models', **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, **kwargs) + if pretrained: + from ..model_store import get_resnet_file + model.load_state_dict(torch.load(get_resnet_file('resnet101', root=root)), strict=False) + return model + + +def resnet152_v1s(pretrained=False, root='~/.torch/models', **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], deep_stem=True, **kwargs) + if pretrained: + from ..model_store import get_resnet_file + model.load_state_dict(torch.load(get_resnet_file('resnet152', root=root)), strict=False) + return model + + +if __name__ == '__main__': + import torch + + img = torch.randn(4, 3, 224, 224) + model = resnet50_v1b(True) + output = model(img) diff --git a/segutils/core/models/base_models/resnext.py b/segutils/core/models/base_models/resnext.py new file mode 100644 index 0000000..8daf287 --- /dev/null +++ b/segutils/core/models/base_models/resnext.py @@ -0,0 +1,154 @@ +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNext', 'resnext50_32x4d', 'resnext101_32x8d'] + +model_urls = { + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', +} + + +class Bottleneck(nn.Module): + expansion = 4 + + 
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, **kwargs): + super(Bottleneck, self).__init__() + width = int(planes * (base_width / 64.)) * groups + + self.conv1 = nn.Conv2d(inplanes, width, 1, bias=False) + self.bn1 = norm_layer(width) + self.conv2 = nn.Conv2d(width, width, 3, stride, dilation, dilation, groups, bias=False) + self.bn2 = norm_layer(width) + self.conv3 = nn.Conv2d(width, planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNext(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, + width_per_group=64, dilated=False, norm_layer=nn.BatchNorm2d, **kwargs): + super(ResNext, self).__init__() + self.inplanes = 64 + self.groups = groups + self.base_width = width_per_group + + self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(True) + self.maxpool = nn.MaxPool2d(3, 2, 1) + + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + if dilated: + self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer) + else: + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False), + norm_layer(planes * block.expansion) + ) + + layers = list() + if dilation in (1, 2): + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, norm_layer=norm_layer)) + elif dilation == 4: + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, dilation=2, norm_layer=norm_layer)) + else: + raise RuntimeError("=> unknown dilation size: {}".format(dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, + dilation=dilation, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = 
self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnext50_32x4d(pretrained=False, **kwargs): + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + model = ResNext(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + state_dict = model_zoo.load_url(model_urls['resnext50_32x4d']) + model.load_state_dict(state_dict) + return model + + +def resnext101_32x8d(pretrained=False, **kwargs): + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + model = ResNext(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + state_dict = model_zoo.load_url(model_urls['resnext101_32x8d']) + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + model = resnext101_32x8d() diff --git a/segutils/core/models/base_models/vgg.py b/segutils/core/models/base_models/vgg.py new file mode 100644 index 0000000..fe5c163 --- /dev/null +++ b/segutils/core/models/base_models/vgg.py @@ -0,0 +1,191 @@ +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + +model_urls = { + 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth', + 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth', + 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', + 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', + 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth', + 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth', + 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth', + 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth', +} + + +class VGG(nn.Module): + def __init__(self, features, num_classes=1000, init_weights=True): + super(VGG, self).__init__() + self.features = features + self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes) + ) + if init_weights: + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def make_layers(cfg, batch_norm=False): + layers = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += (conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)) + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + +cfg = { + 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'E': 
[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +def vgg11(pretrained=False, **kwargs): + """VGG 11-layer model (configuration "A") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['A']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg11'])) + return model + + +def vgg11_bn(pretrained=False, **kwargs): + """VGG 11-layer model (configuration "A") with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn'])) + return model + + +def vgg13(pretrained=False, **kwargs): + """VGG 13-layer model (configuration "B") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['B']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg13'])) + return model + + +def vgg13_bn(pretrained=False, **kwargs): + """VGG 13-layer model (configuration "B") with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn'])) + return model + + +def vgg16(pretrained=False, **kwargs): + """VGG 16-layer model (configuration "D") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['D']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg16'])) + return model + + +def vgg16_bn(pretrained=False, **kwargs): + """VGG 16-layer model (configuration "D") with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn'])) + return model + + +def vgg19(pretrained=False, **kwargs): + """VGG 19-layer model (configuration "E") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['E']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg19'])) + return model + + +def vgg19_bn(pretrained=False, **kwargs): + """VGG 19-layer model (configuration 'E') with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn'])) + return model + + +if __name__ == '__main__': + img = torch.randn((4, 3, 480, 480)) + model = vgg16(pretrained=False) + out = model(img) diff --git a/segutils/core/models/base_models/xception.py b/segutils/core/models/base_models/xception.py new file mode 100644 index 0000000..51832f1 --- /dev/null +++ 
b/segutils/core/models/base_models/xception.py @@ -0,0 +1,411 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['Enc', 'FCAttention', 'Xception65', 'Xception71', 'get_xception', 'get_xception_71', 'get_xception_a'] + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, 0, dilation, groups=in_channels, + bias=bias) + self.bn = norm_layer(in_channels) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias) + + def forward(self, x): + x = self.fix_padding(x, self.kernel_size, self.dilation) + x = self.conv1(x) + x = self.bn(x) + x = self.pointwise(x) + + return x + + def fix_padding(self, x, kernel_size, dilation): + kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end)) + return padded_inputs + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, stride=1, dilation=1, norm_layer=None, + start_with_relu=True, grow_first=True, is_last=False): + super(Block, self).__init__() + if out_channels != in_channels or stride != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False) + self.skipbn = norm_layer(out_channels) + else: + self.skip = None + self.relu = nn.ReLU(True) + rep = list() + filters = in_channels + if grow_first: + if start_with_relu: + rep.append(self.relu) + rep.append(SeparableConv2d(in_channels, out_channels, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + filters = out_channels + for i in range(reps - 1): + if grow_first or start_with_relu: + rep.append(self.relu) + rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(filters)) + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(in_channels, out_channels, 3, 1, dilation, norm_layer=norm_layer)) + if stride != 1: + rep.append(self.relu) + rep.append(SeparableConv2d(out_channels, out_channels, 3, stride, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + elif is_last: + rep.append(self.relu) + rep.append(SeparableConv2d(out_channels, out_channels, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + self.rep = nn.Sequential(*rep) + + def forward(self, x): + out = self.rep(x) + if self.skip is not None: + skip = self.skipbn(self.skip(x)) + else: + skip = x + out = out + skip + return out + + +class Xception65(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, num_classes=1000, output_stride=32, norm_layer=nn.BatchNorm2d): + super(Xception65, self).__init__() + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + else: + raise NotImplementedError + # Entry flow + self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False) + self.bn1 = norm_layer(32) + self.relu = 
nn.ReLU(True) + + self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False) + self.bn2 = norm_layer(64) + + self.block1 = Block(64, 128, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False) + self.block2 = Block(128, 256, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True) + self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, norm_layer=norm_layer, + start_with_relu=True, grow_first=True, is_last=True) + + # Middle flow + midflow = list() + for i in range(4, 20): + midflow.append(Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, + start_with_relu=True, grow_first=True)) + self.midflow = nn.Sequential(*midflow) + + # Exit flow + self.block20 = Block(728, 1024, reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], + norm_layer=norm_layer, start_with_relu=True, grow_first=False, is_last=True) + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn3 = norm_layer(1536) + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn4 = norm_layer(1536) + self.conv5 = SeparableConv2d(1536, 2048, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn5 = norm_layer(2048) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(2048, num_classes) + + def forward(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + x = self.relu(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.midflow(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.relu(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.relu(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class Xception71(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, num_classes=1000, output_stride=32, norm_layer=nn.BatchNorm2d): + super(Xception71, self).__init__() + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + else: + raise NotImplementedError + # Entry flow + self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False) + self.bn1 = norm_layer(32) + self.relu = nn.ReLU(True) + + self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False) + self.bn2 = norm_layer(64) + + self.block1 = Block(64, 128, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False) + self.block2 = nn.Sequential( + Block(128, 256, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True), + Block(256, 728, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True)) + self.block3 = Block(728, 728, reps=2, stride=entry_block3_stride, norm_layer=norm_layer, + start_with_relu=True, grow_first=True, is_last=True) + + # Middle flow + midflow = list() + for i in range(4, 20): + midflow.append(Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, + start_with_relu=True, 
grow_first=True)) + self.midflow = nn.Sequential(*midflow) + + # Exit flow + self.block20 = Block(728, 1024, reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], + norm_layer=norm_layer, start_with_relu=True, grow_first=False, is_last=True) + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn3 = norm_layer(1536) + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn4 = norm_layer(1536) + self.conv5 = SeparableConv2d(1536, 2048, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn5 = norm_layer(2048) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(2048, num_classes) + + def forward(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + x = self.relu(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.midflow(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.relu(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.relu(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +# ------------------------------------------------- +# For DFANet +# ------------------------------------------------- +class BlockA(nn.Module): + def __init__(self, in_channels, out_channels, stride=1, dilation=1, norm_layer=None, start_with_relu=True): + super(BlockA, self).__init__() + if out_channels != in_channels or stride != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False) + self.skipbn = norm_layer(out_channels) + else: + self.skip = None + self.relu = nn.ReLU(False) + rep = list() + inter_channels = out_channels // 4 + + if start_with_relu: + rep.append(self.relu) + rep.append(SeparableConv2d(in_channels, inter_channels, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(inter_channels)) + + rep.append(self.relu) + rep.append(SeparableConv2d(inter_channels, inter_channels, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(inter_channels)) + + if stride != 1: + rep.append(self.relu) + rep.append(SeparableConv2d(inter_channels, out_channels, 3, stride, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + else: + rep.append(self.relu) + rep.append(SeparableConv2d(inter_channels, out_channels, 3, 1, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + self.rep = nn.Sequential(*rep) + + def forward(self, x): + out = self.rep(x) + if self.skip is not None: + skip = self.skipbn(self.skip(x)) + else: + skip = x + out = out + skip + return out + + +class Enc(nn.Module): + def __init__(self, in_channels, out_channels, blocks, norm_layer=nn.BatchNorm2d): + super(Enc, self).__init__() + block = list() + block.append(BlockA(in_channels, out_channels, 2, norm_layer=norm_layer)) + for i in range(blocks - 1): + block.append(BlockA(out_channels, out_channels, 1, norm_layer=norm_layer)) + self.block = nn.Sequential(*block) + + def forward(self, x): + return self.block(x) + + +class FCAttention(nn.Module): + def __init__(self, in_channels, norm_layer=nn.BatchNorm2d): + super(FCAttention, self).__init__() + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(in_channels, 1000) + self.conv = nn.Sequential( + nn.Conv2d(1000, in_channels, 1, bias=False), + 
norm_layer(in_channels), + nn.ReLU(False)) + + def forward(self, x): + n, c, _, _ = x.size() + att = self.avgpool(x).view(n, c) + att = self.fc(att).view(n, 1000, 1, 1) + att = self.conv(att) + return x * att.expand_as(x) + + +class XceptionA(nn.Module): + def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d): + super(XceptionA, self).__init__() + self.conv1 = nn.Sequential(nn.Conv2d(3, 8, 3, 2, 1, bias=False), + norm_layer(8), + nn.ReLU(True)) + + self.enc2 = Enc(8, 48, 4, norm_layer=norm_layer) + self.enc3 = Enc(48, 96, 6, norm_layer=norm_layer) + self.enc4 = Enc(96, 192, 4, norm_layer=norm_layer) + + self.fca = FCAttention(192, norm_layer=norm_layer) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(192, num_classes) + + def forward(self, x): + x = self.conv1(x) + + x = self.enc2(x) + x = self.enc3(x) + x = self.enc4(x) + x = self.fca(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +# Constructor +def get_xception(pretrained=False, root='~/.torch/models', **kwargs): + model = Xception65(**kwargs) + if pretrained: + from ..model_store import get_model_file + model.load_state_dict(torch.load(get_model_file('xception', root=root))) + return model + + +def get_xception_71(pretrained=False, root='~/.torch/models', **kwargs): + model = Xception71(**kwargs) + if pretrained: + from ..model_store import get_model_file + model.load_state_dict(torch.load(get_model_file('xception71', root=root))) + return model + + +def get_xception_a(pretrained=False, root='~/.torch/models', **kwargs): + model = XceptionA(**kwargs) + if pretrained: + from ..model_store import get_model_file + model.load_state_dict(torch.load(get_model_file('xception_a', root=root))) + return model + + +if __name__ == '__main__': + model = get_xception_a() diff --git a/segutils/core/models/bisenet.py b/segutils/core/models/bisenet.py new file mode 100644 index 0000000..09d335d --- /dev/null +++ b/segutils/core/models/bisenet.py @@ -0,0 +1,298 @@ +"""Bilateral Segmentation Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from core.models.base_models.resnet import resnet18,resnet50 +from core.nn import _ConvBNReLU + +__all__ = ['BiSeNet', 'get_bisenet', 'get_bisenet_resnet18_citys'] + + +class BiSeNet(nn.Module): + def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(BiSeNet, self).__init__() + self.aux = aux + self.spatial_path = SpatialPath(3, 128, **kwargs) + self.context_path = ContextPath(backbone, pretrained_base, **kwargs) + self.ffm = FeatureFusion(256, 256, 4, **kwargs) + self.head = _BiSeHead(256, 64, nclass, **kwargs) + if aux: + self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs) + self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs) + + self.__setattr__('exclusive', + ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [ + 'spatial_path', 'context_path', 'ffm', 'head']) + + def forward(self, x,outsize=None,test_flag=False): + size = x.size()[2:] + spatial_out = self.spatial_path(x) + context_out = self.context_path(x) + fusion_out = self.ffm(spatial_out, context_out[-1]) + outputs = [] + x = self.head(fusion_out) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + + + if outsize: + print('######using torch resize#######',outsize) + x = F.interpolate(x, outsize, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout1 = self.auxlayer1(context_out[0]) + auxout1 = 
F.interpolate(auxout1, size, mode='bilinear', align_corners=True) + outputs.append(auxout1) + auxout2 = self.auxlayer2(context_out[1]) + auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True) + outputs.append(auxout2) + if test_flag: + outputs = [torch.argmax(outputx ,axis=1) for outputx in outputs] + #return tuple(outputs) + return outputs[0] +class BiSeNet_MultiOutput(nn.Module): + def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(BiSeNet_MultiOutput, self).__init__() + self.aux = aux + self.spatial_path = SpatialPath(3, 128, **kwargs) + self.context_path = ContextPath(backbone, pretrained_base, **kwargs) + self.ffm = FeatureFusion(256, 256, 4, **kwargs) + assert isinstance(nclass,list) + self.outCnt = len(nclass) + for ii,nclassii in enumerate(nclass): + setattr(self,'head%d'%(ii) , _BiSeHead(256, 64, nclassii, **kwargs)) + + if aux: + self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs) + self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs) + + self.__setattr__('exclusive', + ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [ + 'spatial_path', 'context_path', 'ffm', 'head']) + + def forward(self, x,outsize=None,test_flag=False,smooth_kernel=0): + size = x.size()[2:] + spatial_out = self.spatial_path(x) + context_out = self.context_path(x) + fusion_out = self.ffm(spatial_out, context_out[-1]) + outputs = [] + for ii in range(self.outCnt): + x = getattr(self,'head%d'%(ii))(fusion_out) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout1 = self.auxlayer1(context_out[0]) + auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True) + outputs.append(auxout1) + auxout2 = self.auxlayer2(context_out[1]) + auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True) + outputs.append(auxout2) + if test_flag: + outputs = [torch.argmax(outputx ,axis=1) for outputx in outputs] + if smooth_kernel>0: + gaussian_kernel = torch.from_numpy(np.ones((1,1,smooth_kernel,smooth_kernel)) ) + + pad = int((smooth_kernel - 1)/2) + if not gaussian_kernel.is_cuda: + gaussian_kernel = gaussian_kernel.to(x.device) + #print(gaussian_kernel.dtype,gaussian_kernel,outputs[0].dtype) + outputs = [ x.unsqueeze(1).double() for x in outputs] + outputs = [torch.conv2d(x, gaussian_kernel, padding=pad) for x in outputs ] + outputs = [ x.squeeze(1).long() for x in outputs] + #return tuple(outputs) + return outputs + +class _BiSeHead(nn.Module): + def __init__(self, in_channels, inter_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_BiSeHead, self).__init__() + self.block = nn.Sequential( + _ConvBNReLU(in_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + + def forward(self, x): + x = self.block(x) + return x + + +class SpatialPath(nn.Module): + """Spatial path""" + + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(SpatialPath, self).__init__() + inter_channels = 64 + self.conv7x7 = _ConvBNReLU(in_channels, inter_channels, 7, 2, 3, norm_layer=norm_layer) + self.conv3x3_1 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer) + self.conv3x3_2 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer) + self.conv1x1 = _ConvBNReLU(inter_channels, out_channels, 1, 1, 0, norm_layer=norm_layer) + + def forward(self, x): + x = self.conv7x7(x) + x = 
self.conv3x3_1(x) + x = self.conv3x3_2(x) + x = self.conv1x1(x) + + return x + + +class _GlobalAvgPooling(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, **kwargs): + super(_GlobalAvgPooling, self).__init__() + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True) + ) + + def forward(self, x): + size = x.size()[2:] + pool = self.gap(x) + out = F.interpolate(pool, size, mode='bilinear', align_corners=True) + return out + + +class AttentionRefinmentModule(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(AttentionRefinmentModule, self).__init__() + self.conv3x3 = _ConvBNReLU(in_channels, out_channels, 3, 1, 1, norm_layer=norm_layer) + self.channel_attention = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + _ConvBNReLU(out_channels, out_channels, 1, 1, 0, norm_layer=norm_layer), + nn.Sigmoid() + ) + + def forward(self, x): + x = self.conv3x3(x) + attention = self.channel_attention(x) + x = x * attention + return x + + +class ContextPath(nn.Module): + def __init__(self, backbone='resnet18', pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(ContextPath, self).__init__() + if backbone == 'resnet18': + pretrained = resnet18(pretrained=pretrained_base, **kwargs) + elif backbone=='resnet50': + pretrained = resnet50(pretrained=pretrained_base, **kwargs) + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.conv1 = pretrained.conv1 + self.bn1 = pretrained.bn1 + self.relu = pretrained.relu + self.maxpool = pretrained.maxpool + self.layer1 = pretrained.layer1 + self.layer2 = pretrained.layer2 + self.layer3 = pretrained.layer3 + self.layer4 = pretrained.layer4 + + inter_channels = 128 + self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer) + + self.arms = nn.ModuleList( + [AttentionRefinmentModule(512, inter_channels, norm_layer, **kwargs), + AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)] + ) + self.refines = nn.ModuleList( + [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer), + _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)] + ) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + x = self.layer1(x) + + context_blocks = [] + context_blocks.append(x) + x = self.layer2(x) + context_blocks.append(x) + c3 = self.layer3(x) + context_blocks.append(c3) + c4 = self.layer4(c3) + context_blocks.append(c4) + context_blocks.reverse() + + global_context = self.global_context(c4) + last_feature = global_context + context_outputs = [] + for i, (feature, arm, refine) in enumerate(zip(context_blocks[:2], self.arms, self.refines)): + feature = arm(feature) + feature += last_feature + last_feature = F.interpolate(feature, size=context_blocks[i + 1].size()[2:], + mode='bilinear', align_corners=True) + last_feature = refine(last_feature) + context_outputs.append(last_feature) + + return context_outputs + + +class FeatureFusion(nn.Module): + def __init__(self, in_channels, out_channels, reduction=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(FeatureFusion, self).__init__() + self.conv1x1 = _ConvBNReLU(in_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs) + self.channel_attention = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + _ConvBNReLU(out_channels, out_channels // reduction, 1, 1, 0, norm_layer=norm_layer), + _ConvBNReLU(out_channels // reduction, 
out_channels, 1, 1, 0, norm_layer=norm_layer),
+            nn.Sigmoid()
+        )
+
+    def forward(self, x1, x2):
+        fusion = torch.cat([x1, x2], dim=1)
+        out = self.conv1x1(fusion)
+        attention = self.channel_attention(out)
+        out = out + out * attention
+        return out
+
+
+def get_bisenet(dataset='citys', backbone='resnet18', pretrained=False, root='~/.torch/models',
+                pretrained_base=True, **kwargs):
+    acronyms = {
+        'pascal_voc': 'pascal_voc',
+        'pascal_aug': 'pascal_aug',
+        'ade20k': 'ade',
+        'coco': 'coco',
+        'citys': 'citys',
+    }
+    from ..data.dataloader import datasets
+    model = BiSeNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
+    if pretrained:
+        from .model_store import get_model_file
+        device = torch.device(kwargs['local_rank'])
+        model.load_state_dict(torch.load(get_model_file('bisenet_%s_%s' % (backbone, acronyms[dataset]), root=root),
+                                         map_location=device))
+    return model
+
+
+def get_bisenet_resnet18_citys(**kwargs):
+    return get_bisenet('citys', 'resnet18', **kwargs)
+
+
+if __name__ == '__main__':
+    # img = torch.randn(2, 3, 224, 224)
+    # model = BiSeNet(19, backbone='resnet18')
+    # print(model.exclusive)
+    input = torch.rand(2, 3, 224, 224)
+    model = BiSeNet(4, pretrained_base=True)
+    # target = torch.zeros(4, 512, 512).cuda()
+    # model.eval()
+    # print(model)
+    loss = model(input)
+    print(loss, loss.shape)
+
+    # from torchsummary import summary
+    #
+    # summary(model, (3, 224, 224))  # prints a table with each layer's output shape and parameter count, in order
+    import torch
+    from thop import profile
+    from torchsummary import summary
+
+    flop, params = profile(model, input_size=(1, 3, 512, 512))
+    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
diff --git a/segutils/core/models/ccnet.py b/segutils/core/models/ccnet.py
new file mode 100644
index 0000000..b06ca03
--- /dev/null
+++ b/segutils/core/models/ccnet.py
@@ -0,0 +1,166 @@
+"""Criss-Cross Network"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from core.nn import CrissCrossAttention
+from core.models.segbase import SegBaseModel
+from core.models.fcn import _FCNHead
+
+# Fails with: NameError: name '_C' is not defined
+
+__all__ = ['CCNet', 'get_ccnet', 'get_ccnet_resnet50_citys', 'get_ccnet_resnet101_citys',
+           'get_ccnet_resnet152_citys', 'get_ccnet_resnet50_ade', 'get_ccnet_resnet101_ade',
+           'get_ccnet_resnet152_ade']
+
+
+class CCNet(SegBaseModel):
+    r"""CCNet
+
+    Parameters
+    ----------
+    nclass : int
+        Number of categories for the training dataset.
+    backbone : string
+        Pre-trained dilated backbone network type (default: 'resnet50'; 'resnet50',
+        'resnet101' or 'resnet152').
+    norm_layer : object
+        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
+        for Synchronized Cross-GPU BatchNormalization).
+    aux : bool
+        Auxiliary loss.
+
+    Reference:
+        Zilong Huang, et al. "CCNet: Criss-Cross Attention for Semantic Segmentation."
+        arXiv preprint arXiv:1811.11721 (2018).
+ """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(CCNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _CCHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _CCHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_CCHead, self).__init__() + self.rcca = _RCCAModule(2048, 512, norm_layer, **kwargs) + self.out = nn.Conv2d(512, nclass, 1) + + def forward(self, x): + x = self.rcca(x) + x = self.out(x) + return x + + +class _RCCAModule(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, **kwargs): + super(_RCCAModule, self).__init__() + inter_channels = in_channels // 4 + self.conva = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + self.cca = CrissCrossAttention(inter_channels) + self.convb = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.bottleneck = nn.Sequential( + nn.Conv2d(in_channels + inter_channels, out_channels, 3, padding=1, bias=False), + norm_layer(out_channels), + nn.Dropout2d(0.1)) + + def forward(self, x, recurrence=1): + out = self.conva(x) + for i in range(recurrence): + out = self.cca(out) + out = self.convb(out) + out = torch.cat([x, out], dim=1) + out = self.bottleneck(out) + + return out + + +def get_ccnet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = CCNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('ccnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_ccnet_resnet50_citys(**kwargs): + return get_ccnet('citys', 'resnet50', **kwargs) + + +def get_ccnet_resnet101_citys(**kwargs): + return get_ccnet('citys', 'resnet101', **kwargs) + + +def get_ccnet_resnet152_citys(**kwargs): + return get_ccnet('citys', 'resnet152', **kwargs) + + +def get_ccnet_resnet50_ade(**kwargs): + return get_ccnet('ade20k', 'resnet50', **kwargs) + + +def get_ccnet_resnet101_ade(**kwargs): + return get_ccnet('ade20k', 'resnet101', **kwargs) + + +def get_ccnet_resnet152_ade(**kwargs): + return get_ccnet('ade20k', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # model = get_ccnet_resnet50_citys() + # img = torch.randn(1, 3, 480, 480) + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = CCNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, 
loss.shape)
+
+    # from torchsummary import summary
+    #
+    # summary(model, (3, 224, 224))  # prints a table with each layer's output shape and parameter count, in order
+    import torch
+    from thop import profile
+    from torchsummary import summary
+
+    flop, params = profile(model, input_size=(1, 3, 512, 512))
+    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
\ No newline at end of file
diff --git a/segutils/core/models/cgnet.py b/segutils/core/models/cgnet.py
new file mode 100644
index 0000000..85cb4e6
--- /dev/null
+++ b/segutils/core/models/cgnet.py
@@ -0,0 +1,228 @@
+"""Context Guided Network for Semantic Segmentation"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from core.nn import _ConvBNPReLU, _BNPReLU
+
+__all__ = ['CGNet', 'get_cgnet', 'get_cgnet_citys']
+
+
+class CGNet(nn.Module):
+    r"""CGNet
+
+    Parameters
+    ----------
+    nclass : int
+        Number of categories for the training dataset.
+    norm_layer : object
+        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
+        for Synchronized Cross-GPU BatchNormalization).
+    aux : bool
+        Auxiliary loss.
+
+    Reference:
+        Tianyi Wu, et al. "CGNet: A Light-weight Context Guided Network for Semantic Segmentation."
+        arXiv preprint arXiv:1811.08201 (2018).
+    """
+
+    def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=True, M=3, N=21, **kwargs):
+        super(CGNet, self).__init__()
+        # stage 1
+        self.stage1_0 = _ConvBNPReLU(3, 32, 3, 2, 1, **kwargs)
+        self.stage1_1 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs)
+        self.stage1_2 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs)
+
+        self.sample1 = _InputInjection(1)
+        self.sample2 = _InputInjection(2)
+        self.bn_prelu1 = _BNPReLU(32 + 3, **kwargs)
+
+        # stage 2
+        self.stage2_0 = ContextGuidedBlock(32 + 3, 64, dilation=2, reduction=8, down=True, residual=False, **kwargs)
+        self.stage2 = nn.ModuleList()
+        for i in range(0, M - 1):
+            self.stage2.append(ContextGuidedBlock(64, 64, dilation=2, reduction=8, **kwargs))
+        self.bn_prelu2 = _BNPReLU(128 + 3, **kwargs)
+
+        # stage 3
+        self.stage3_0 = ContextGuidedBlock(128 + 3, 128, dilation=4, reduction=16, down=True, residual=False, **kwargs)
+        self.stage3 = nn.ModuleList()
+        for i in range(0, N - 1):
+            self.stage3.append(ContextGuidedBlock(128, 128, dilation=4, reduction=16, **kwargs))
+        self.bn_prelu3 = _BNPReLU(256, **kwargs)
+
+        self.head = nn.Sequential(
+            nn.Dropout2d(0.1, False),
+            nn.Conv2d(256, nclass, 1))
+
+        self.__setattr__('exclusive', ['stage1_0', 'stage1_1', 'stage1_2', 'sample1', 'sample2',
+                                       'bn_prelu1', 'stage2_0', 'stage2', 'bn_prelu2', 'stage3_0',
+                                       'stage3', 'bn_prelu3', 'head'])
+
+    def forward(self, x):
+        size = x.size()[2:]
+        # stage 1
+        out0 = self.stage1_0(x)
+        out0 = self.stage1_1(out0)
+        out0 = self.stage1_2(out0)
+
+        inp1 = self.sample1(x)
+        inp2 = self.sample2(x)
+
+        # stage 2
+        out0_cat = self.bn_prelu1(torch.cat([out0, inp1], dim=1))
+        out1_0 = self.stage2_0(out0_cat)
+        for i, layer in enumerate(self.stage2):
+            if i == 0:
+                out1 = layer(out1_0)
+            else:
+                out1 = layer(out1)
+        out1_cat = self.bn_prelu2(torch.cat([out1, out1_0, inp2], dim=1))
+
+        # stage 3
+        out2_0 = self.stage3_0(out1_cat)
+        for i, layer in enumerate(self.stage3):
+            if i == 0:
+                out2 = layer(out2_0)
+            else:
+                out2 = layer(out2)
+        out2_cat = self.bn_prelu3(torch.cat([out2_0, out2], dim=1))
+
+        outputs = []
+        out = self.head(out2_cat)
+        out = F.interpolate(out, size, mode='bilinear', align_corners=True)
+        outputs.append(out)
+        # return tuple(outputs)
+        return outputs[0]
+
+
+class _ChannelWiseConv(nn.Module):
+    def __init__(self, in_channels, 
out_channels, dilation=1, **kwargs): + super(_ChannelWiseConv, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, 3, 1, dilation, dilation, groups=in_channels, bias=False) + + def forward(self, x): + x = self.conv(x) + return x + + +class _FGlo(nn.Module): + def __init__(self, in_channels, reduction=16, **kwargs): + super(_FGlo, self).__init__() + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels // reduction), + nn.ReLU(True), + nn.Linear(in_channels // reduction, in_channels), + nn.Sigmoid()) + + def forward(self, x): + n, c, _, _ = x.size() + out = self.gap(x).view(n, c) + out = self.fc(out).view(n, c, 1, 1) + return x * out + + +class _InputInjection(nn.Module): + def __init__(self, ratio): + super(_InputInjection, self).__init__() + self.pool = nn.ModuleList() + for i in range(0, ratio): + self.pool.append(nn.AvgPool2d(3, 2, 1)) + + def forward(self, x): + for pool in self.pool: + x = pool(x) + return x + + +class _ConcatInjection(nn.Module): + def __init__(self, in_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConcatInjection, self).__init__() + self.bn = norm_layer(in_channels) + self.prelu = nn.PReLU(in_channels) + + def forward(self, x1, x2): + out = torch.cat([x1, x2], dim=1) + out = self.bn(out) + out = self.prelu(out) + return out + + +class ContextGuidedBlock(nn.Module): + def __init__(self, in_channels, out_channels, dilation=2, reduction=16, down=False, + residual=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(ContextGuidedBlock, self).__init__() + inter_channels = out_channels // 2 if not down else out_channels + if down: + self.conv = _ConvBNPReLU(in_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer, **kwargs) + self.reduce = nn.Conv2d(inter_channels * 2, out_channels, 1, bias=False) + else: + self.conv = _ConvBNPReLU(in_channels, inter_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs) + self.f_loc = _ChannelWiseConv(inter_channels, inter_channels, **kwargs) + self.f_sur = _ChannelWiseConv(inter_channels, inter_channels, dilation, **kwargs) + self.bn = norm_layer(inter_channels * 2) + self.prelu = nn.PReLU(inter_channels * 2) + self.f_glo = _FGlo(out_channels, reduction, **kwargs) + self.down = down + self.residual = residual + + def forward(self, x): + out = self.conv(x) + loc = self.f_loc(out) + sur = self.f_sur(out) + + joi_feat = torch.cat([loc, sur], dim=1) + joi_feat = self.prelu(self.bn(joi_feat)) + if self.down: + joi_feat = self.reduce(joi_feat) + + out = self.f_glo(joi_feat) + if self.residual: + out = out + x + + return out + + +def get_cgnet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from core.data.dataloader import datasets + model = CGNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('cgnet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_cgnet_citys(**kwargs): + return get_cgnet('citys', '', **kwargs) + + +if __name__ == '__main__': + # model = get_cgnet_citys() + # print(model) + input = torch.rand(2, 3, 224, 224) + model = CGNet(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + 
loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/segutils/core/models/danet.py b/segutils/core/models/danet.py new file mode 100644 index 0000000..7dae5d3 --- /dev/null +++ b/segutils/core/models/danet.py @@ -0,0 +1,232 @@ +"""Dual Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel + +__all__ = ['DANet', 'get_danet', 'get_danet_resnet50_citys', + 'get_danet_resnet101_citys', 'get_danet_resnet152_citys'] + + +class DANet(SegBaseModel): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + Reference: + Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu. + "Dual Attention Network for Scene Segmentation." *CVPR*, 2019 + """ + + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(DANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DAHead(2048, nclass, aux, **kwargs) + + self.__setattr__('exclusive', ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x0 = F.interpolate(x[0], size, mode='bilinear', align_corners=True) + outputs.append(x0) + + if self.aux: + x1 = F.interpolate(x[1], size, mode='bilinear', align_corners=True) + x2 = F.interpolate(x[2], size, mode='bilinear', align_corners=True) + outputs.append(x1) + outputs.append(x2) + #return outputs + return outputs[0] + +class _PositionAttentionModule(nn.Module): + """ Position attention module""" + + def __init__(self, in_channels, **kwargs): + super(_PositionAttentionModule, self).__init__() + self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1) + self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1) + self.conv_d = nn.Conv2d(in_channels, in_channels, 1) + self.alpha = nn.Parameter(torch.zeros(1)) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x): + batch_size, _, height, width = x.size() + feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1) + feat_c = self.conv_c(x).view(batch_size, -1, height * width) + attention_s = self.softmax(torch.bmm(feat_b, feat_c)) + feat_d = self.conv_d(x).view(batch_size, -1, height * width) + feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width) + out = self.alpha * feat_e + x + + return out + + +class _ChannelAttentionModule(nn.Module): + """Channel attention module""" + + def __init__(self, **kwargs): + super(_ChannelAttentionModule, self).__init__() + self.beta = nn.Parameter(torch.zeros(1)) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x): + batch_size, _, height, width = x.size() + feat_a = x.view(batch_size, -1, height * width) + feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1) + 
attention = torch.bmm(feat_a, feat_a_transpose) + attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention + attention = self.softmax(attention_new) + + feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width) + out = self.beta * feat_e + x + + return out + + +class _DAHead(nn.Module): + def __init__(self, in_channels, nclass, aux=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DAHead, self).__init__() + self.aux = aux + inter_channels = in_channels // 4 + self.conv_p1 = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.conv_c1 = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.pam = _PositionAttentionModule(inter_channels, **kwargs) + self.cam = _ChannelAttentionModule(**kwargs) + self.conv_p2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.conv_c2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.out = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + if aux: + self.conv_p3 = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + self.conv_c3 = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(inter_channels, nclass, 1) + ) + + def forward(self, x): + feat_p = self.conv_p1(x) + feat_p = self.pam(feat_p) + feat_p = self.conv_p2(feat_p) + + feat_c = self.conv_c1(x) + feat_c = self.cam(feat_c) + feat_c = self.conv_c2(feat_c) + + feat_fusion = feat_p + feat_c + + outputs = [] + fusion_out = self.out(feat_fusion) + outputs.append(fusion_out) + if self.aux: + p_out = self.conv_p3(feat_p) + c_out = self.conv_c3(feat_c) + outputs.append(p_out) + outputs.append(c_out) + + return tuple(outputs) + + +def get_danet(dataset='citys', backbone='resnet50', pretrained=False, + root='~/.torch/models', pretrained_base=True, **kwargs): + r"""Dual Attention Network + + Parameters + ---------- + dataset : str, default pascal_voc + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
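+
+    Notes
+    -----
+    ``_DAHead`` runs two attention branches in parallel: position attention
+    builds an (H*W) x (H*W) affinity between spatial locations, channel
+    attention a C x C affinity between feature maps, and the two refined
+    features are summed before the classifier. Note that ``forward`` as
+    written returns only the fused prediction, even when ``aux=True``.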
+ Examples + -------- + >>> model = get_danet(dataset='pascal_voc', backbone='resnet50', pretrained=False) + >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('danet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_danet_resnet50_citys(**kwargs): + return get_danet('citys', 'resnet50', **kwargs) + + +def get_danet_resnet101_citys(**kwargs): + return get_danet('citys', 'resnet101', **kwargs) + + +def get_danet_resnet152_citys(**kwargs): + return get_danet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 480, 480) + # model = get_danet_resnet50_citys() + # outputs = model(img) + input = torch.rand(2, 3,512,512) + model = DANet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/deeplabv3.py b/segutils/core/models/deeplabv3.py new file mode 100644 index 0000000..98d0c02 --- /dev/null +++ b/segutils/core/models/deeplabv3.py @@ -0,0 +1,185 @@ +"""Pyramid Scene Parsing Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel +from .fcn import _FCNHead + +__all__ = ['DeepLabV3', 'get_deeplabv3', 'get_deeplabv3_resnet50_voc', 'get_deeplabv3_resnet101_voc', + 'get_deeplabv3_resnet152_voc', 'get_deeplabv3_resnet50_ade', 'get_deeplabv3_resnet101_ade', + 'get_deeplabv3_resnet152_ade'] + + +class DeepLabV3(SegBaseModel): + r"""DeepLabV3 + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Chen, Liang-Chieh, et al. "Rethinking atrous convolution for semantic image segmentation." + arXiv preprint arXiv:1706.05587 (2017). 
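+
+    Examples
+    --------
+    Illustrative usage (nclass and input size are arbitrary):
+
+    >>> model = DeepLabV3(nclass=21, backbone='resnet50', pretrained_base=False)
+    >>> out = model(torch.randn(1, 3, 480, 480))[0]  # logits at input resolution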
+ """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(DeepLabV3, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DeepLabHead(nclass, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _DeepLabHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DeepLabHead, self).__init__() + self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs) + self.block = nn.Sequential( + nn.Conv2d(256, 256, 3, padding=1, bias=False), + norm_layer(256, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(256, nclass, 1) + ) + + def forward(self, x): + x = self.aspp(x) + return self.block(x) + + +class _ASPPConv(nn.Module): + def __init__(self, in_channels, out_channels, atrous_rate, norm_layer, norm_kwargs): + super(_ASPPConv, self).__init__() + self.block = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + def forward(self, x): + return self.block(x) + + +class _AsppPooling(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, norm_kwargs, **kwargs): + super(_AsppPooling, self).__init__() + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + def forward(self, x): + size = x.size()[2:] + pool = self.gap(x) + out = F.interpolate(pool, size, mode='bilinear', align_corners=True) + return out + + +class _ASPP(nn.Module): + def __init__(self, in_channels, atrous_rates, norm_layer, norm_kwargs, **kwargs): + super(_ASPP, self).__init__() + out_channels = 256 + self.b0 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + rate1, rate2, rate3 = tuple(atrous_rates) + self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer, norm_kwargs) + self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer, norm_kwargs) + self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer, norm_kwargs) + self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs) + + self.project = nn.Sequential( + nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.5) + ) + + def forward(self, x): + feat1 = self.b0(x) + feat2 = self.b1(x) + feat3 = self.b2(x) + feat4 = self.b3(x) + feat5 = self.b4(x) + x = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + x = self.project(x) + return x + + +def get_deeplabv3(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, 
**kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DeepLabV3(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_deeplabv3_resnet50_voc(**kwargs): + return get_deeplabv3('pascal_voc', 'resnet50', **kwargs) + + +def get_deeplabv3_resnet101_voc(**kwargs): + return get_deeplabv3('pascal_voc', 'resnet101', **kwargs) + + +def get_deeplabv3_resnet152_voc(**kwargs): + return get_deeplabv3('pascal_voc', 'resnet152', **kwargs) + + +def get_deeplabv3_resnet50_ade(**kwargs): + return get_deeplabv3('ade20k', 'resnet50', **kwargs) + + +def get_deeplabv3_resnet101_ade(**kwargs): + return get_deeplabv3('ade20k', 'resnet101', **kwargs) + + +def get_deeplabv3_resnet152_ade(**kwargs): + return get_deeplabv3('ade20k', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_deeplabv3_resnet50_voc() + img = torch.randn(2, 3, 480, 480) + output = model(img) diff --git a/segutils/core/models/deeplabv3_plus.py b/segutils/core/models/deeplabv3_plus.py new file mode 100644 index 0000000..9b5a703 --- /dev/null +++ b/segutils/core/models/deeplabv3_plus.py @@ -0,0 +1,142 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .base_models.xception import get_xception +from .deeplabv3 import _ASPP +from .fcn import _FCNHead +from ..nn import _ConvBNReLU + +__all__ = ['DeepLabV3Plus', 'get_deeplabv3_plus', 'get_deeplabv3_plus_xception_voc'] + + +class DeepLabV3Plus(nn.Module): + r"""DeepLabV3Plus + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'xception'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Chen, Liang-Chieh, et al. "Encoder-Decoder with Atrous Separable Convolution for Semantic + Image Segmentation." 
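+
+    Notes
+    -----
+    The decoder upsamples the 256-channel ASPP output and concatenates it with
+    a 48-channel projection of the low-level ``block1`` features, hence the
+    304-channel input of the fusion block below.
+
+    Examples
+    --------
+    Illustrative usage (nclass and input size are arbitrary):
+
+    >>> model = DeepLabV3Plus(nclass=21, pretrained_base=False)
+    >>> out = model(torch.randn(1, 3, 480, 480))[0]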
+ """ + + def __init__(self, nclass, backbone='xception', aux=True, pretrained_base=True, dilated=True, **kwargs): + super(DeepLabV3Plus, self).__init__() + self.aux = aux + self.nclass = nclass + output_stride = 8 if dilated else 32 + + self.pretrained = get_xception(pretrained=pretrained_base, output_stride=output_stride, **kwargs) + + # deeplabv3 plus + self.head = _DeepLabHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(728, nclass, **kwargs) + + def base_forward(self, x): + # Entry flow + x = self.pretrained.conv1(x) + x = self.pretrained.bn1(x) + x = self.pretrained.relu(x) + + x = self.pretrained.conv2(x) + x = self.pretrained.bn2(x) + x = self.pretrained.relu(x) + + x = self.pretrained.block1(x) + # add relu here + x = self.pretrained.relu(x) + low_level_feat = x + + x = self.pretrained.block2(x) + x = self.pretrained.block3(x) + + # Middle flow + x = self.pretrained.midflow(x) + mid_level_feat = x + + # Exit flow + x = self.pretrained.block20(x) + x = self.pretrained.relu(x) + x = self.pretrained.conv3(x) + x = self.pretrained.bn3(x) + x = self.pretrained.relu(x) + + x = self.pretrained.conv4(x) + x = self.pretrained.bn4(x) + x = self.pretrained.relu(x) + + x = self.pretrained.conv5(x) + x = self.pretrained.bn5(x) + x = self.pretrained.relu(x) + return low_level_feat, mid_level_feat, x + + def forward(self, x): + size = x.size()[2:] + c1, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4, c1) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _DeepLabHead(nn.Module): + def __init__(self, nclass, c1_channels=128, norm_layer=nn.BatchNorm2d, **kwargs): + super(_DeepLabHead, self).__init__() + self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, **kwargs) + self.c1_block = _ConvBNReLU(c1_channels, 48, 3, padding=1, norm_layer=norm_layer) + self.block = nn.Sequential( + _ConvBNReLU(304, 256, 3, padding=1, norm_layer=norm_layer), + nn.Dropout(0.5), + _ConvBNReLU(256, 256, 3, padding=1, norm_layer=norm_layer), + nn.Dropout(0.1), + nn.Conv2d(256, nclass, 1)) + + def forward(self, x, c1): + size = c1.size()[2:] + c1 = self.c1_block(c1) + x = self.aspp(x) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + return self.block(torch.cat([x, c1], dim=1)) + + +def get_deeplabv3_plus(dataset='pascal_voc', backbone='xception', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DeepLabV3Plus(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict( + torch.load(get_model_file('deeplabv3_plus_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_deeplabv3_plus_xception_voc(**kwargs): + return get_deeplabv3_plus('pascal_voc', 'xception', **kwargs) + + +if __name__ == '__main__': + model = get_deeplabv3_plus_xception_voc() diff --git a/segutils/core/models/denseaspp.py b/segutils/core/models/denseaspp.py new file mode 100644 index 0000000..1582375 --- /dev/null +++ b/segutils/core/models/denseaspp.py @@ -0,0 +1,198 @@ 
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from core.models.base_models.densenet import *
+from core.models.fcn import _FCNHead
+
+__all__ = ['DenseASPP', 'get_denseaspp', 'get_denseaspp_densenet121_citys',
+           'get_denseaspp_densenet161_citys', 'get_denseaspp_densenet169_citys', 'get_denseaspp_densenet201_citys']
+
+
+class DenseASPP(nn.Module):
+    def __init__(self, nclass, backbone='densenet121', aux=False, jpu=False,
+                 pretrained_base=True, dilate_scale=8, **kwargs):
+        super(DenseASPP, self).__init__()
+        self.nclass = nclass
+        self.aux = aux
+        self.dilate_scale = dilate_scale
+        if backbone == 'densenet121':
+            self.pretrained = dilated_densenet121(dilate_scale, pretrained=pretrained_base, **kwargs)
+        elif backbone == 'densenet161':
+            self.pretrained = dilated_densenet161(dilate_scale, pretrained=pretrained_base, **kwargs)
+        elif backbone == 'densenet169':
+            self.pretrained = dilated_densenet169(dilate_scale, pretrained=pretrained_base, **kwargs)
+        elif backbone == 'densenet201':
+            self.pretrained = dilated_densenet201(dilate_scale, pretrained=pretrained_base, **kwargs)
+        else:
+            raise RuntimeError('unknown backbone: {}'.format(backbone))
+        in_channels = self.pretrained.num_features
+
+        self.head = _DenseASPPHead(in_channels, nclass)
+
+        if aux:
+            self.auxlayer = _FCNHead(in_channels, nclass, **kwargs)
+
+        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])
+
+    def forward(self, x):
+        size = x.size()[2:]
+        # print('size', size)  # torch.Size([512, 512])
+        features = self.pretrained.features(x)
+        # print('22', features.shape)  # torch.Size([2, 1024, 64, 64])
+        if self.dilate_scale > 8:
+            features = F.interpolate(features, scale_factor=2, mode='bilinear', align_corners=True)
+        outputs = []
+        x = self.head(features)  # torch.Size([2, 4, 64, 64])
+        # print('x.shape', x.shape)
+        x = F.interpolate(x, size, mode='bilinear', align_corners=True)  # straight from 64 up to 512... and the results are still this good!
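+        # With dilate_scale=8 the backbone output is at 1/8 resolution, so this
+        # single bilinear resize maps e.g. 64x64 logits straight back to a
+        # 512x512 input; the dilate_scale > 8 branch above upsamples coarser
+        # backbone features to 1/8 scale first, so the head always runs at 1/8.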
+ outputs.append(x) + + if self.aux: + auxout = self.auxlayer(features) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _DenseASPPHead(nn.Module): + def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DenseASPPHead, self).__init__() + self.dense_aspp_block = _DenseASPPBlock(in_channels, 256, 64, norm_layer, norm_kwargs) + self.block = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(in_channels + 5 * 64, nclass, 1) + ) + + def forward(self, x): + x = self.dense_aspp_block(x) + return self.block(x) + + +class _DenseASPPConv(nn.Sequential): + def __init__(self, in_channels, inter_channels, out_channels, atrous_rate, + drop_rate=0.1, norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(_DenseASPPConv, self).__init__() + self.add_module('conv1', nn.Conv2d(in_channels, inter_channels, 1)), + self.add_module('bn1', norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs))), + self.add_module('relu1', nn.ReLU(True)), + self.add_module('conv2', nn.Conv2d(inter_channels, out_channels, 3, dilation=atrous_rate, padding=atrous_rate)), + self.add_module('bn2', norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs))), + self.add_module('relu2', nn.ReLU(True)), + self.drop_rate = drop_rate + + def forward(self, x): + features = super(_DenseASPPConv, self).forward(x) + if self.drop_rate > 0: + features = F.dropout(features, p=self.drop_rate, training=self.training) + return features + + +class _DenseASPPBlock(nn.Module): + def __init__(self, in_channels, inter_channels1, inter_channels2, + norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(_DenseASPPBlock, self).__init__() + self.aspp_3 = _DenseASPPConv(in_channels, inter_channels1, inter_channels2, 3, 0.1, + norm_layer, norm_kwargs) + self.aspp_6 = _DenseASPPConv(in_channels + inter_channels2 * 1, inter_channels1, inter_channels2, 6, 0.1, + norm_layer, norm_kwargs) + self.aspp_12 = _DenseASPPConv(in_channels + inter_channels2 * 2, inter_channels1, inter_channels2, 12, 0.1, + norm_layer, norm_kwargs) + self.aspp_18 = _DenseASPPConv(in_channels + inter_channels2 * 3, inter_channels1, inter_channels2, 18, 0.1, + norm_layer, norm_kwargs) + self.aspp_24 = _DenseASPPConv(in_channels + inter_channels2 * 4, inter_channels1, inter_channels2, 24, 0.1, + norm_layer, norm_kwargs) + + def forward(self, x): + aspp3 = self.aspp_3(x) + x = torch.cat([aspp3, x], dim=1) + + aspp6 = self.aspp_6(x) + x = torch.cat([aspp6, x], dim=1) + + aspp12 = self.aspp_12(x) + x = torch.cat([aspp12, x], dim=1) + + aspp18 = self.aspp_18(x) + x = torch.cat([aspp18, x], dim=1) + + aspp24 = self.aspp_24(x) + x = torch.cat([aspp24, x], dim=1) + + return x + + +def get_denseaspp(dataset='citys', backbone='densenet121', pretrained=False, + root='~/.torch/models', pretrained_base=True, **kwargs): + r"""DenseASPP + + Parameters + ---------- + dataset : str, default citys + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
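+
+    Notes
+    -----
+    ``_DenseASPPBlock`` chains five dilated branches (rates 3, 6, 12, 18, 24);
+    each branch sees the backbone features concatenated with every earlier
+    branch output, so the final head classifier receives
+    ``in_channels + 5 * 64`` channels.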
+ Examples + -------- + # >>> model = get_denseaspp(dataset='citys', backbone='densenet121', pretrained=False) + # >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DenseASPP(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('denseaspp_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_denseaspp_densenet121_citys(**kwargs): + return get_denseaspp('citys', 'densenet121', **kwargs) + + +def get_denseaspp_densenet161_citys(**kwargs): + return get_denseaspp('citys', 'densenet161', **kwargs) + + +def get_denseaspp_densenet169_citys(**kwargs): + return get_denseaspp('citys', 'densenet169', **kwargs) + + +def get_denseaspp_densenet201_citys(**kwargs): + return get_denseaspp('citys', 'densenet201', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 480, 480) + # model = get_denseaspp_densenet121_citys() + # outputs = model(img) + input = torch.rand(2, 3, 512, 512) + model = DenseASPP(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/dfanet.py b/segutils/core/models/dfanet.py new file mode 100644 index 0000000..15e3be0 --- /dev/null +++ b/segutils/core/models/dfanet.py @@ -0,0 +1,129 @@ +""" Deep Feature Aggregation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.base_models import Enc, FCAttention, get_xception_a +from core.nn import _ConvBNReLU + +__all__ = ['DFANet', 'get_dfanet', 'get_dfanet_citys'] + + +class DFANet(nn.Module): + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=False, **kwargs): + super(DFANet, self).__init__() + self.pretrained = get_xception_a(pretrained_base, **kwargs) + + self.enc2_2 = Enc(240, 48, 4, **kwargs) + self.enc3_2 = Enc(144, 96, 6, **kwargs) + self.enc4_2 = Enc(288, 192, 4, **kwargs) + self.fca_2 = FCAttention(192, **kwargs) + + self.enc2_3 = Enc(240, 48, 4, **kwargs) + self.enc3_3 = Enc(144, 96, 6, **kwargs) + self.enc3_4 = Enc(288, 192, 4, **kwargs) + self.fca_3 = FCAttention(192, **kwargs) + + self.enc2_1_reduce = _ConvBNReLU(48, 32, 1, **kwargs) + self.enc2_2_reduce = _ConvBNReLU(48, 32, 1, **kwargs) + self.enc2_3_reduce = _ConvBNReLU(48, 32, 1, **kwargs) + self.conv_fusion = _ConvBNReLU(32, 32, 1, **kwargs) + + self.fca_1_reduce = _ConvBNReLU(192, 32, 1, **kwargs) + self.fca_2_reduce = _ConvBNReLU(192, 32, 1, **kwargs) + self.fca_3_reduce = _ConvBNReLU(192, 32, 1, **kwargs) + self.conv_out = nn.Conv2d(32, nclass, 1) + + self.__setattr__('exclusive', ['enc2_2', 'enc3_2', 'enc4_2', 'fca_2', 'enc2_3', 'enc3_3', 'enc3_4', 'fca_3', + 'enc2_1_reduce', 'enc2_2_reduce', 'enc2_3_reduce', 'conv_fusion', 'fca_1_reduce', + 'fca_2_reduce', 'fca_3_reduce', 'conv_out']) + + def forward(self, x): + # backbone + 
stage1_conv1 = self.pretrained.conv1(x) + stage1_enc2 = self.pretrained.enc2(stage1_conv1) + stage1_enc3 = self.pretrained.enc3(stage1_enc2) + stage1_enc4 = self.pretrained.enc4(stage1_enc3) + stage1_fca = self.pretrained.fca(stage1_enc4) + stage1_out = F.interpolate(stage1_fca, scale_factor=4, mode='bilinear', align_corners=True) + + # stage2 + stage2_enc2 = self.enc2_2(torch.cat([stage1_enc2, stage1_out], dim=1)) + stage2_enc3 = self.enc3_2(torch.cat([stage1_enc3, stage2_enc2], dim=1)) + stage2_enc4 = self.enc4_2(torch.cat([stage1_enc4, stage2_enc3], dim=1)) + stage2_fca = self.fca_2(stage2_enc4) + stage2_out = F.interpolate(stage2_fca, scale_factor=4, mode='bilinear', align_corners=True) + + # stage3 + stage3_enc2 = self.enc2_3(torch.cat([stage2_enc2, stage2_out], dim=1)) + stage3_enc3 = self.enc3_3(torch.cat([stage2_enc3, stage3_enc2], dim=1)) + stage3_enc4 = self.enc3_4(torch.cat([stage2_enc4, stage3_enc3], dim=1)) + stage3_fca = self.fca_3(stage3_enc4) + + stage1_enc2_decoder = self.enc2_1_reduce(stage1_enc2) + stage2_enc2_docoder = F.interpolate(self.enc2_2_reduce(stage2_enc2), scale_factor=2, + mode='bilinear', align_corners=True) + stage3_enc2_decoder = F.interpolate(self.enc2_3_reduce(stage3_enc2), scale_factor=4, + mode='bilinear', align_corners=True) + fusion = stage1_enc2_decoder + stage2_enc2_docoder + stage3_enc2_decoder + fusion1 = self.conv_fusion(fusion) + + stage1_fca_decoder = F.interpolate(self.fca_1_reduce(stage1_fca), scale_factor=4, + mode='bilinear', align_corners=True) + stage2_fca_decoder = F.interpolate(self.fca_2_reduce(stage2_fca), scale_factor=8, + mode='bilinear', align_corners=True) + stage3_fca_decoder = F.interpolate(self.fca_3_reduce(stage3_fca), scale_factor=16, + mode='bilinear', align_corners=True) + #print(fusion.shape,stage1_fca_decoder.shape,stage2_fca_decoder.shape,stage3_fca_decoder.shape) + fusion2 = fusion1 + stage1_fca_decoder + stage2_fca_decoder + stage3_fca_decoder + + outputs = list() + out = self.conv_out(fusion2) + out1 = F.interpolate(out, scale_factor=4, mode='bilinear', align_corners=True) + outputs.append(out1) + + #return tuple(outputs) + return outputs[0] + +def get_dfanet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DFANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('dfanet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_dfanet_citys(**kwargs): + return get_dfanet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_dfanet_citys() + input = torch.rand(2, 3, 512, 512) + model = DFANet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/segutils/core/models/dinknet.py b/segutils/core/models/dinknet.py new file mode 
100644
index 0000000..a36b90c
--- /dev/null
+++ b/segutils/core/models/dinknet.py
@@ -0,0 +1,359 @@
+"""
+Codes of LinkNet based on https://github.com/snakers4/spacenet-three
+"""
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+from torchvision import models
+import torch.nn.functional as F
+
+from functools import partial
+
+nonlinearity = partial(F.relu, inplace=True)
+
+class Dblock_more_dilate(nn.Module):
+    def __init__(self, channel):
+        super(Dblock_more_dilate, self).__init__()
+        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
+        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=2, padding=2)
+        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=4, padding=4)
+        self.dilate4 = nn.Conv2d(channel, channel, kernel_size=3, dilation=8, padding=8)
+        self.dilate5 = nn.Conv2d(channel, channel, kernel_size=3, dilation=16, padding=16)
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
+                if m.bias is not None:
+                    m.bias.data.zero_()
+
+    def forward(self, x):
+        dilate1_out = nonlinearity(self.dilate1(x))
+        dilate2_out = nonlinearity(self.dilate2(dilate1_out))
+        dilate3_out = nonlinearity(self.dilate3(dilate2_out))
+        dilate4_out = nonlinearity(self.dilate4(dilate3_out))
+        dilate5_out = nonlinearity(self.dilate5(dilate4_out))
+        out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out + dilate5_out
+        return out
+
+class Dblock(nn.Module):
+    def __init__(self, channel):
+        super(Dblock, self).__init__()
+        self.dilate1 = nn.Conv2d(channel, channel, kernel_size=3, dilation=1, padding=1)
+        self.dilate2 = nn.Conv2d(channel, channel, kernel_size=3, dilation=2, padding=2)
+        self.dilate3 = nn.Conv2d(channel, channel, kernel_size=3, dilation=4, padding=4)
+        self.dilate4 = nn.Conv2d(channel, channel, kernel_size=3, dilation=8, padding=8)
+        # self.dilate5 = nn.Conv2d(channel, channel, kernel_size=3, dilation=16, padding=16)
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
+                if m.bias is not None:
+                    m.bias.data.zero_()
+
+    def forward(self, x):
+        dilate1_out = nonlinearity(self.dilate1(x))
+        dilate2_out = nonlinearity(self.dilate2(dilate1_out))
+        dilate3_out = nonlinearity(self.dilate3(dilate2_out))
+        dilate4_out = nonlinearity(self.dilate4(dilate3_out))
+        # dilate5_out = nonlinearity(self.dilate5(dilate4_out))
+        out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out  # + dilate5_out
+        return out
+
+class DecoderBlock(nn.Module):
+    def __init__(self, in_channels, n_filters):
+        super(DecoderBlock, self).__init__()
+
+        self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
+        self.norm1 = nn.BatchNorm2d(in_channels // 4)
+        self.relu1 = nonlinearity
+
+        self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3, stride=2, padding=1, output_padding=1)
+        self.norm2 = nn.BatchNorm2d(in_channels // 4)
+        self.relu2 = nonlinearity
+
+        self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
+        self.norm3 = nn.BatchNorm2d(n_filters)
+        self.relu3 = nonlinearity
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.norm1(x)
+        x = self.relu1(x)
+        x = self.deconv2(x)
+        x = self.norm2(x)
+        x = self.relu2(x)
+        x = self.conv3(x)
+        x = self.norm3(x)
+        x = self.relu3(x)
+        return x
+
+class DinkNet34_less_pool(nn.Module):
+    def __init__(self, num_classes=1):
+        super(DinkNet34_less_pool, self).__init__()
+
+        filters = [64, 128, 256, 512]
+        resnet = models.resnet34(pretrained=True)
+
+        self.firstconv = resnet.conv1
+        self.firstbn = resnet.bn1
+        self.firstrelu = resnet.relu
+        self.firstmaxpool = resnet.maxpool
+        self.encoder1 = resnet.layer1
+        self.encoder2 = resnet.layer2
+        self.encoder3 = resnet.layer3
+
+        self.dblock = Dblock_more_dilate(256)
+
+        self.decoder3 = DecoderBlock(filters[2], filters[1])
+        self.decoder2 = DecoderBlock(filters[1], filters[0])
+        self.decoder1 = DecoderBlock(filters[0], filters[0])
+
+        self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)
+        self.finalrelu1 = nonlinearity
+        self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1)
+        self.finalrelu2 = nonlinearity
+        self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1)
+
+    def forward(self, x):
+        # Encoder
+        x = self.firstconv(x)
+        x = self.firstbn(x)
+        x = self.firstrelu(x)
+        x = self.firstmaxpool(x)
+        e1 = self.encoder1(x)
+        e2 = self.encoder2(e1)
+        e3 = self.encoder3(e2)
+
+        # Center
+        e3 = self.dblock(e3)
+
+        # Decoder
+        d3 = self.decoder3(e3) + e2
+        d2 = self.decoder2(d3) + e1
+        d1 = self.decoder1(d2)
+
+        # Final Classification
+        out = self.finaldeconv1(d1)
+        out = self.finalrelu1(out)
+        out = self.finalconv2(out)
+        out = self.finalrelu2(out)
+        out = self.finalconv3(out)
+
+        # return F.sigmoid(out)
+        return out
+
+class DinkNet34(nn.Module):
+    def __init__(self, num_classes=1, num_channels=3):
+        super(DinkNet34, self).__init__()
+
+        filters = [64, 128, 256, 512]
+        resnet = models.resnet34(pretrained=True)
+        self.firstconv = resnet.conv1
+        self.firstbn = 
resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + + self.dblock = Dblock_more_dilate(256) + + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + + #Center + e3 = self.dblock(e3) + + # Decoder + d3 = self.decoder3(e3) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + + # Final Classification + out = self.finaldeconv1(d1) + out = self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out + +class DinkNet34(nn.Module): + def __init__(self, num_classes=1, num_channels=3): + super(DinkNet34, self).__init__() + + filters = [64, 128, 256, 512] + resnet = models.resnet34(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.dblock = Dblock(512) + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + e4 = self.encoder4(e3) + + # Center + e4 = self.dblock(e4) + + # Decoder + d4 = self.decoder4(e4) + e3 + d3 = self.decoder3(d4) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + + out = self.finaldeconv1(d1) + out = self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out + +class DinkNet50(nn.Module): + def __init__(self, num_classes=1): + super(DinkNet50, self).__init__() + + filters = [256, 512, 1024, 2048] + resnet = models.resnet50(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.dblock = Dblock_more_dilate(2048) + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = 
nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + e4 = self.encoder4(e3) + + # Center + e4 = self.dblock(e4) + + # Decoder + d4 = self.decoder4(e4) + e3 + d3 = self.decoder3(d4) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + out = self.finaldeconv1(d1) + out = self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out + +class DinkNet101(nn.Module): + def __init__(self, num_classes=1): + super(DinkNet101, self).__init__() + + filters = [256, 512, 1024, 2048] + resnet = models.resnet101(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.dblock = Dblock_more_dilate(2048) + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3, padding=1) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 3, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + e4 = self.encoder4(e3) + + # Center + e4 = self.dblock(e4) + + # Decoder + d4 = self.decoder4(e4) + e3 + d3 = self.decoder3(d4) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + out = self.finaldeconv1(d1) + out = self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out + +class LinkNet34(nn.Module): + def __init__(self, num_classes=1): + super(LinkNet34, self).__init__() + + filters = [64, 128, 256, 512] + resnet = models.resnet34(pretrained=True) + self.firstconv = resnet.conv1 + self.firstbn = resnet.bn1 + self.firstrelu = resnet.relu + self.firstmaxpool = resnet.maxpool + self.encoder1 = resnet.layer1 + self.encoder2 = resnet.layer2 + self.encoder3 = resnet.layer3 + self.encoder4 = resnet.layer4 + + self.decoder4 = DecoderBlock(filters[3], filters[2]) + self.decoder3 = DecoderBlock(filters[2], filters[1]) + self.decoder2 = DecoderBlock(filters[1], filters[0]) + self.decoder1 = DecoderBlock(filters[0], filters[0]) + + self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 3, stride=2) + self.finalrelu1 = nonlinearity + self.finalconv2 = nn.Conv2d(32, 32, 3) + self.finalrelu2 = nonlinearity + self.finalconv3 = nn.Conv2d(32, num_classes, 2, padding=1) + + def forward(self, x): + # Encoder + x = self.firstconv(x) + x = self.firstbn(x) + x = self.firstrelu(x) + x = self.firstmaxpool(x) + e1 = self.encoder1(x) + e2 = self.encoder2(e1) + e3 = self.encoder3(e2) + e4 = self.encoder4(e3) + + # Decoder + d4 = self.decoder4(e4) + e3 + d3 = self.decoder3(d4) + e2 + d2 = self.decoder2(d3) + e1 + d1 = self.decoder1(d2) + out = self.finaldeconv1(d1) + out = 
self.finalrelu1(out) + out = self.finalconv2(out) + out = self.finalrelu2(out) + out = self.finalconv3(out) + + #return F.sigmoid(out) + return out \ No newline at end of file diff --git a/segutils/core/models/dunet.py b/segutils/core/models/dunet.py new file mode 100644 index 0000000..affc476 --- /dev/null +++ b/segutils/core/models/dunet.py @@ -0,0 +1,172 @@ +"""Decoders Matter for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['DUNet', 'get_dunet', 'get_dunet_resnet50_pascal_voc', + 'get_dunet_resnet101_pascal_voc', 'get_dunet_resnet152_pascal_voc'] + + +# The model may be wrong because lots of details missing in paper. +class DUNet(SegBaseModel): + """Decoders Matter for Semantic Segmentation + + Reference: + Zhi Tian, Tong He, Chunhua Shen, and Youliang Yan. + "Decoders Matter for Semantic Segmentation: + Data-Dependent Decoding Enables Flexible Feature Aggregation." CVPR, 2019 + """ + + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(DUNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DUHead(2144, **kwargs) + self.dupsample = DUpsampling(256, nclass, scale_factor=8, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, 256, **kwargs) + self.aux_dupsample = DUpsampling(256, nclass, scale_factor=8, **kwargs) + + self.__setattr__('exclusive', + ['dupsample', 'head', 'auxlayer', 'aux_dupsample'] if aux else ['dupsample', 'head']) + + def forward(self, x): + c1, c2, c3, c4 = self.base_forward(x)#继承自SegBaseModel;返回的是resnet的layer1,2,3,4的输出 + outputs = [] + x = self.head(c2, c3, c4) + x = self.dupsample(x) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = self.aux_dupsample(auxout) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class FeatureFused(nn.Module): + """Module for fused features""" + + def __init__(self, inter_channels=48, norm_layer=nn.BatchNorm2d, **kwargs): + super(FeatureFused, self).__init__() + self.conv2 = nn.Sequential( + nn.Conv2d(512, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True) + ) + self.conv3 = nn.Sequential( + nn.Conv2d(1024, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.ReLU(True) + ) + + def forward(self, c2, c3, c4): + size = c4.size()[2:] + c2 = self.conv2(F.interpolate(c2, size, mode='bilinear', align_corners=True)) + c3 = self.conv3(F.interpolate(c3, size, mode='bilinear', align_corners=True)) + fused_feature = torch.cat([c4, c3, c2], dim=1) + return fused_feature + + +class _DUHead(nn.Module): + def __init__(self, in_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_DUHead, self).__init__() + self.fuse = FeatureFused(norm_layer=norm_layer, **kwargs) + self.block = nn.Sequential( + nn.Conv2d(in_channels, 256, 3, padding=1, bias=False), + norm_layer(256), + nn.ReLU(True), + nn.Conv2d(256, 256, 3, padding=1, bias=False), + norm_layer(256), + nn.ReLU(True) + ) + + def forward(self, c2, c3, c4): + fused_feature = self.fuse(c2, c3, c4) + out = self.block(fused_feature) + return out + + +class DUpsampling(nn.Module): + """DUsampling module""" + + def __init__(self, in_channels, out_channels, scale_factor=2, **kwargs): + super(DUpsampling, self).__init__() + self.scale_factor = scale_factor + self.conv_w = nn.Conv2d(in_channels, out_channels * scale_factor * scale_factor, 1, bias=False) + + def forward(self, x): 
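+        # conv_w expands the channels to out_channels * scale_factor**2; the
+        # view/permute sequence below then unfolds each position's channel
+        # vector into a scale_factor x scale_factor spatial patch, i.e. a
+        # learned, pixel-shuffle-style upsampling.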
+ x = self.conv_w(x) + n, c, h, w = x.size() + + # N, C, H, W --> N, W, H, C + x = x.permute(0, 3, 2, 1).contiguous() + + # N, W, H, C --> N, W, H * scale, C // scale + x = x.view(n, w, h * self.scale_factor, c // self.scale_factor) + + # N, W, H * scale, C // scale --> N, H * scale, W, C // scale + x = x.permute(0, 2, 1, 3).contiguous() + + # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2) + x = x.view(n, h * self.scale_factor, w * self.scale_factor, c // (self.scale_factor * self.scale_factor)) + + # N, H * scale, W * scale, C // (scale ** 2) -- > N, C // (scale ** 2), H * scale, W * scale + x = x.permute(0, 3, 1, 2) + + return x + +def get_dunet(dataset='pascal_voc', backbone='resnet50', pretrained=False, + root='~/.torch/models', pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = DUNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('dunet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_dunet_resnet50_pascal_voc(**kwargs): + return get_dunet('pascal_voc', 'resnet50', **kwargs) + + +def get_dunet_resnet101_pascal_voc(**kwargs): + return get_dunet('pascal_voc', 'resnet101', **kwargs) + + +def get_dunet_resnet152_pascal_voc(**kwargs): + return get_dunet('pascal_voc', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(2, 3, 256, 256) + # model = get_dunet_resnet50_pascal_voc() + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = DUNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + input = torch.randn(1, 3, 512, 512) + flop, params = profile(model, inputs=(input, )) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/encnet.py b/segutils/core/models/encnet.py new file mode 100644 index 0000000..585557b --- /dev/null +++ b/segutils/core/models/encnet.py @@ -0,0 +1,212 @@ +"""Context Encoding for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel +from .fcn import _FCNHead + +__all__ = ['EncNet', 'EncModule', 'get_encnet', 'get_encnet_resnet50_ade', + 'get_encnet_resnet101_ade', 'get_encnet_resnet152_ade'] + + +class EncNet(SegBaseModel): + def __init__(self, nclass, backbone='resnet50', aux=True, se_loss=True, lateral=False, + pretrained_base=True, **kwargs): + super(EncNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _EncHead(2048, nclass, se_loss=se_loss, lateral=lateral, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + features = self.base_forward(x) + + x = list(self.head(*features)) + x[0] = F.interpolate(x[0], size, mode='bilinear', align_corners=True) + if 
self.aux: + auxout = self.auxlayer(features[2]) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + x.append(auxout) + return tuple(x) + + +class _EncHead(nn.Module): + def __init__(self, in_channels, nclass, se_loss=True, lateral=True, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_EncHead, self).__init__() + self.lateral = lateral + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + if lateral: + self.connect = nn.ModuleList([ + nn.Sequential( + nn.Conv2d(512, 512, 1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True)), + nn.Sequential( + nn.Conv2d(1024, 512, 1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True)), + ]) + self.fusion = nn.Sequential( + nn.Conv2d(3 * 512, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + self.encmodule = EncModule(512, nclass, ncodes=32, se_loss=se_loss, + norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs) + self.conv6 = nn.Sequential( + nn.Dropout(0.1, False), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, *inputs): + feat = self.conv5(inputs[-1]) + if self.lateral: + c2 = self.connect[0](inputs[1]) + c3 = self.connect[1](inputs[2]) + feat = self.fusion(torch.cat([feat, c2, c3], 1)) + outs = list(self.encmodule(feat)) + outs[0] = self.conv6(outs[0]) + return tuple(outs) + + +class EncModule(nn.Module): + def __init__(self, in_channels, nclass, ncodes=32, se_loss=True, + norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(EncModule, self).__init__() + self.se_loss = se_loss + self.encoding = nn.Sequential( + nn.Conv2d(in_channels, in_channels, 1, bias=False), + norm_layer(in_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + Encoding(D=in_channels, K=ncodes), + nn.BatchNorm1d(ncodes), + nn.ReLU(True), + Mean(dim=1) + ) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels), + nn.Sigmoid() + ) + if self.se_loss: + self.selayer = nn.Linear(in_channels, nclass) + + def forward(self, x): + en = self.encoding(x) + b, c, _, _ = x.size() + gamma = self.fc(en) + y = gamma.view(b, c, 1, 1) + outputs = [F.relu_(x + x * y)] + if self.se_loss: + outputs.append(self.selayer(en)) + return tuple(outputs) + + +class Encoding(nn.Module): + def __init__(self, D, K): + super(Encoding, self).__init__() + # init codewords and smoothing factor + self.D, self.K = D, K + self.codewords = nn.Parameter(torch.Tensor(K, D), requires_grad=True) + self.scale = nn.Parameter(torch.Tensor(K), requires_grad=True) + self.reset_params() + + def reset_params(self): + std1 = 1. 
/ ((self.K * self.D) ** (1 / 2)) + self.codewords.data.uniform_(-std1, std1) + self.scale.data.uniform_(-1, 0) + + def forward(self, X): + # input X is a 4D tensor + assert (X.size(1) == self.D) + B, D = X.size(0), self.D + if X.dim() == 3: + # BxDxN -> BxNxD + X = X.transpose(1, 2).contiguous() + elif X.dim() == 4: + # BxDxHxW -> Bx(HW)xD + X = X.view(B, D, -1).transpose(1, 2).contiguous() + else: + raise RuntimeError('Encoding Layer unknown input dims!') + # assignment weights BxNxK + A = F.softmax(self.scale_l2(X, self.codewords, self.scale), dim=2) + # aggregate + E = self.aggregate(A, X, self.codewords) + return E + + def __repr__(self): + return self.__class__.__name__ + '(' \ + + 'N x' + str(self.D) + '=>' + str(self.K) + 'x' \ + + str(self.D) + ')' + + @staticmethod + def scale_l2(X, C, S): + S = S.view(1, 1, C.size(0), 1) + X = X.unsqueeze(2).expand(X.size(0), X.size(1), C.size(0), C.size(1)) + C = C.unsqueeze(0).unsqueeze(0) + SL = S * (X - C) + SL = SL.pow(2).sum(3) + return SL + + @staticmethod + def aggregate(A, X, C): + A = A.unsqueeze(3) + X = X.unsqueeze(2).expand(X.size(0), X.size(1), C.size(0), C.size(1)) + C = C.unsqueeze(0).unsqueeze(0) + E = A * (X - C) + E = E.sum(1) + return E + + +class Mean(nn.Module): + def __init__(self, dim, keep_dim=False): + super(Mean, self).__init__() + self.dim = dim + self.keep_dim = keep_dim + + def forward(self, input): + return input.mean(self.dim, self.keep_dim) + + +def get_encnet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = EncNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('encnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_encnet_resnet50_ade(**kwargs): + return get_encnet('ade20k', 'resnet50', **kwargs) + + +def get_encnet_resnet101_ade(**kwargs): + return get_encnet('ade20k', 'resnet101', **kwargs) + + +def get_encnet_resnet152_ade(**kwargs): + return get_encnet('ade20k', 'resnet152', **kwargs) + + +if __name__ == '__main__': + img = torch.randn(2, 3, 224, 224) + model = get_encnet_resnet50_ade() + outputs = model(img) diff --git a/segutils/core/models/enet.py b/segutils/core/models/enet.py new file mode 100644 index 0000000..853fc65 --- /dev/null +++ b/segutils/core/models/enet.py @@ -0,0 +1,243 @@ +"""Efficient Neural Network""" +import torch +import torch.nn as nn + +__all__ = ['ENet', 'get_enet', 'get_enet_citys'] + + +class ENet(nn.Module): + """Efficient Neural Network""" + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=None, **kwargs): + super(ENet, self).__init__() + self.initial = InitialBlock(13, **kwargs) + + self.bottleneck1_0 = Bottleneck(16, 16, 64, downsampling=True, **kwargs) + self.bottleneck1_1 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck1_2 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck1_3 = Bottleneck(64, 16, 64, **kwargs) + self.bottleneck1_4 = Bottleneck(64, 16, 64, **kwargs) + + self.bottleneck2_0 = Bottleneck(64, 32, 128, downsampling=True, **kwargs) + self.bottleneck2_1 = Bottleneck(128, 32, 128, **kwargs) + self.bottleneck2_2 = Bottleneck(128, 
32, 128, dilation=2, **kwargs)
+        self.bottleneck2_3 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
+        self.bottleneck2_4 = Bottleneck(128, 32, 128, dilation=4, **kwargs)
+        self.bottleneck2_5 = Bottleneck(128, 32, 128, **kwargs)
+        self.bottleneck2_6 = Bottleneck(128, 32, 128, dilation=8, **kwargs)
+        self.bottleneck2_7 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
+        self.bottleneck2_8 = Bottleneck(128, 32, 128, dilation=16, **kwargs)
+
+        self.bottleneck3_1 = Bottleneck(128, 32, 128, **kwargs)
+        self.bottleneck3_2 = Bottleneck(128, 32, 128, dilation=2, **kwargs)
+        self.bottleneck3_3 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
+        self.bottleneck3_4 = Bottleneck(128, 32, 128, dilation=4, **kwargs)
+        self.bottleneck3_5 = Bottleneck(128, 32, 128, **kwargs)
+        self.bottleneck3_6 = Bottleneck(128, 32, 128, dilation=8, **kwargs)
+        self.bottleneck3_7 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
+        self.bottleneck3_8 = Bottleneck(128, 32, 128, dilation=16, **kwargs)
+
+        self.bottleneck4_0 = UpsamplingBottleneck(128, 16, 64, **kwargs)
+        self.bottleneck4_1 = Bottleneck(64, 16, 64, **kwargs)
+        self.bottleneck4_2 = Bottleneck(64, 16, 64, **kwargs)
+
+        self.bottleneck5_0 = UpsamplingBottleneck(64, 4, 16, **kwargs)
+        self.bottleneck5_1 = Bottleneck(16, 4, 16, **kwargs)
+
+        self.fullconv = nn.ConvTranspose2d(16, nclass, 2, 2, bias=False)
+
+        self.__setattr__('exclusive', ['bottleneck1_0', 'bottleneck1_1', 'bottleneck1_2', 'bottleneck1_3',
+                                       'bottleneck1_4', 'bottleneck2_0', 'bottleneck2_1', 'bottleneck2_2',
+                                       'bottleneck2_3', 'bottleneck2_4', 'bottleneck2_5', 'bottleneck2_6',
+                                       'bottleneck2_7', 'bottleneck2_8', 'bottleneck3_1', 'bottleneck3_2',
+                                       'bottleneck3_3', 'bottleneck3_4', 'bottleneck3_5', 'bottleneck3_6',
+                                       'bottleneck3_7', 'bottleneck3_8', 'bottleneck4_0', 'bottleneck4_1',
+                                       'bottleneck4_2', 'bottleneck5_0', 'bottleneck5_1', 'fullconv'])
+
+    def forward(self, x):
+        # init
+        x = self.initial(x)
+
+        # stage 1
+        x, max_indices1 = self.bottleneck1_0(x)
+        x = self.bottleneck1_1(x)
+        x = self.bottleneck1_2(x)
+        x = self.bottleneck1_3(x)
+        x = self.bottleneck1_4(x)
+
+        # stage 2
+        x, max_indices2 = self.bottleneck2_0(x)
+        x = self.bottleneck2_1(x)
+        x = self.bottleneck2_2(x)
+        x = self.bottleneck2_3(x)
+        x = self.bottleneck2_4(x)
+        x = self.bottleneck2_5(x)
+        x = self.bottleneck2_6(x)
+        x = self.bottleneck2_7(x)
+        x = self.bottleneck2_8(x)
+
+        # stage 3
+        x = self.bottleneck3_1(x)
+        x = self.bottleneck3_2(x)
+        x = self.bottleneck3_3(x)
+        x = self.bottleneck3_4(x)
+        x = self.bottleneck3_5(x)
+        x = self.bottleneck3_6(x)
+        x = self.bottleneck3_7(x)
+        x = self.bottleneck3_8(x)
+
+        # stage 4
+        x = self.bottleneck4_0(x, max_indices2)
+        x = self.bottleneck4_1(x)
+        x = self.bottleneck4_2(x)
+
+        # stage 5
+        x = self.bottleneck5_0(x, max_indices1)
+        x = self.bottleneck5_1(x)
+
+        # out
+        x = self.fullconv(x)
+        return tuple([x])
+
+
+class InitialBlock(nn.Module):
+    """ENet initial block"""
+
+    def __init__(self, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(InitialBlock, self).__init__()
+        self.conv = nn.Conv2d(3, out_channels, 3, 2, 1, bias=False)
+        self.maxpool = nn.MaxPool2d(2, 2)
+        self.bn = norm_layer(out_channels + 3)
+        self.act = nn.PReLU()
+
+    def forward(self, x):
+        x_conv = self.conv(x)
+        x_pool = self.maxpool(x)
+        x = torch.cat([x_conv, x_pool], dim=1)
+        x = self.bn(x)
+        x = self.act(x)
+        return x
+
+
+class Bottleneck(nn.Module):
+    """Bottlenecks include regular, asymmetric, downsampling, dilated"""
+
+    def __init__(self, in_channels, inter_channels, out_channels, dilation=1,
asymmetric=False, + downsampling=False, norm_layer=nn.BatchNorm2d, **kwargs): + super(Bottleneck, self).__init__() + self.downsamping = downsampling + if downsampling: + self.maxpool = nn.MaxPool2d(2, 2, return_indices=True) + self.conv_down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + + self.conv1 = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + + if downsampling: + self.conv2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 2, stride=2, bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + else: + if asymmetric: + self.conv2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (5, 1), padding=(2, 0), bias=False), + nn.Conv2d(inter_channels, inter_channels, (1, 5), padding=(0, 2), bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + else: + self.conv2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, 3, dilation=dilation, padding=dilation, bias=False), + norm_layer(inter_channels), + nn.PReLU() + ) + self.conv3 = nn.Sequential( + nn.Conv2d(inter_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.Dropout2d(0.1) + ) + self.act = nn.PReLU() + + def forward(self, x): + identity = x + if self.downsamping: + identity, max_indices = self.maxpool(identity) + identity = self.conv_down(identity) + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + out = self.act(out + identity) + + if self.downsamping: + return out, max_indices + else: + return out + + +class UpsamplingBottleneck(nn.Module): + """upsampling Block""" + + def __init__(self, in_channels, inter_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(UpsamplingBottleneck, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + self.upsampling = nn.MaxUnpool2d(2) + + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 1, bias=False), + norm_layer(inter_channels), + nn.PReLU(), + nn.ConvTranspose2d(inter_channels, inter_channels, 2, 2, bias=False), + norm_layer(inter_channels), + nn.PReLU(), + nn.Conv2d(inter_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.Dropout2d(0.1) + ) + self.act = nn.PReLU() + + def forward(self, x, max_indices): + out_up = self.conv(x) + out_up = self.upsampling(out_up, max_indices) + + out_ext = self.block(x) + out = self.act(out_up + out_ext) + return out + + +def get_enet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from core.data.dataloader import datasets + model = ENet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('enet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_enet_citys(**kwargs): + return get_enet('citys', '', **kwargs) + + +if __name__ == '__main__': + img = torch.randn(1, 3, 512, 512) + model = get_enet_citys() + output = model(img) diff --git a/segutils/core/models/espnet.py b/segutils/core/models/espnet.py new file mode 100644 index 0000000..82651f4 --- /dev/null +++ b/segutils/core/models/espnet.py @@ -0,0 
+1,134 @@ +"ESPNetv2: A Light-weight, Power Efficient, and General Purpose for Semantic Segmentation" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.base_models import eespnet, EESP +from core.nn import _ConvBNPReLU, _BNPReLU + + +class ESPNetV2(nn.Module): + r"""ESPNetV2 + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Sachin Mehta, et al. "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network." + arXiv preprint arXiv:1811.11431 (2018). + """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=False, **kwargs): + super(ESPNetV2, self).__init__() + self.pretrained = eespnet(pretrained=pretrained_base, **kwargs) + self.proj_L4_C = _ConvBNPReLU(256, 128, 1, **kwargs) + self.pspMod = nn.Sequential( + EESP(256, 128, stride=1, k=4, r_lim=7, **kwargs), + _PSPModule(128, 128, **kwargs)) + self.project_l3 = nn.Sequential( + nn.Dropout2d(0.1), + nn.Conv2d(128, nclass, 1, bias=False)) + self.act_l3 = _BNPReLU(nclass, **kwargs) + self.project_l2 = _ConvBNPReLU(64 + nclass, nclass, 1, **kwargs) + self.project_l1 = nn.Sequential( + nn.Dropout2d(0.1), + nn.Conv2d(32 + nclass, nclass, 1, bias=False)) + + self.aux = aux + + self.__setattr__('exclusive', ['proj_L4_C', 'pspMod', 'project_l3', 'act_l3', 'project_l2', 'project_l1']) + + def forward(self, x): + size = x.size()[2:] + out_l1, out_l2, out_l3, out_l4 = self.pretrained(x, seg=True) + out_l4_proj = self.proj_L4_C(out_l4) + up_l4_to_l3 = F.interpolate(out_l4_proj, scale_factor=2, mode='bilinear', align_corners=True) + merged_l3_upl4 = self.pspMod(torch.cat([out_l3, up_l4_to_l3], 1)) + proj_merge_l3_bef_act = self.project_l3(merged_l3_upl4) + proj_merge_l3 = self.act_l3(proj_merge_l3_bef_act) + out_up_l3 = F.interpolate(proj_merge_l3, scale_factor=2, mode='bilinear', align_corners=True) + merge_l2 = self.project_l2(torch.cat([out_l2, out_up_l3], 1)) + out_up_l2 = F.interpolate(merge_l2, scale_factor=2, mode='bilinear', align_corners=True) + merge_l1 = self.project_l1(torch.cat([out_l1, out_up_l2], 1)) + + outputs = list() + merge1_l1 = F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True) + outputs.append(merge1_l1) + if self.aux: + # different from paper + auxout = F.interpolate(proj_merge_l3_bef_act, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + + #return tuple(outputs) + return outputs[0] + +# different from PSPNet +class _PSPModule(nn.Module): + def __init__(self, in_channels, out_channels=1024, sizes=(1, 2, 4, 8), **kwargs): + super(_PSPModule, self).__init__() + self.stages = nn.ModuleList( + [nn.Conv2d(in_channels, in_channels, 3, 1, 1, groups=in_channels, bias=False) for _ in sizes]) + self.project = _ConvBNPReLU(in_channels * (len(sizes) + 1), out_channels, 1, 1, **kwargs) + + def forward(self, x): + size = x.size()[2:] + feats = [x] + for stage in self.stages: + x = F.avg_pool2d(x, kernel_size=3, stride=2, padding=1) + upsampled = F.interpolate(stage(x), size, mode='bilinear', align_corners=True) + feats.append(upsampled) + return self.project(torch.cat(feats, dim=1)) + + +def get_espnet(dataset='pascal_voc', backbone='', 
pretrained=False, root='~/.torch/models', + pretrained_base=False, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from core.data.dataloader import datasets + model = ESPNetV2(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('espnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_espnet_citys(**kwargs): + return get_espnet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_espnet_citys() + input = torch.rand(2, 3, 224, 224) + model =ESPNetV2(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/segutils/core/models/fcn.py b/segutils/core/models/fcn.py new file mode 100644 index 0000000..bc54fb4 --- /dev/null +++ b/segutils/core/models/fcn.py @@ -0,0 +1,235 @@ +import os +import torch +import torch.nn as nn +import torch.nn.functional as F +import sys +sys.path.extend(['/home/thsw2/WJ/src/yolov5/segutils/','../..','..' ]) +from core.models.base_models.vgg import vgg16 + +__all__ = ['get_fcn32s', 'get_fcn16s', 'get_fcn8s', + 'get_fcn32s_vgg16_voc', 'get_fcn16s_vgg16_voc', 'get_fcn8s_vgg16_voc'] + + +class FCN32s(nn.Module): + """There are some difference from original fcn""" + + def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, + norm_layer=nn.BatchNorm2d, **kwargs): + super(FCN32s, self).__init__() + self.aux = aux + if backbone == 'vgg16': + self.pretrained = vgg16(pretrained=pretrained_base).features + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.head = _FCNHead(512, nclass, norm_layer) + if aux: + self.auxlayer = _FCNHead(512, nclass, norm_layer) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + pool5 = self.pretrained(x) + + outputs = [] + out = self.head(pool5) + out = F.interpolate(out, size, mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + + return tuple(outputs) + + +class FCN16s(nn.Module): + def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(FCN16s, self).__init__() + self.aux = aux + if backbone == 'vgg16': + self.pretrained = vgg16(pretrained=pretrained_base).features + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.pool4 = nn.Sequential(*self.pretrained[:24]) + self.pool5 = nn.Sequential(*self.pretrained[24:]) + self.head = _FCNHead(512, nclass, norm_layer) + self.score_pool4 = nn.Conv2d(512, nclass, 1) + if aux: + self.auxlayer = _FCNHead(512, nclass, norm_layer) + + self.__setattr__('exclusive', ['head', 'score_pool4', 'auxlayer'] if aux else ['head', 'score_pool4']) + + def forward(self, x): + pool4 = 
self.pool4(x) + pool5 = self.pool5(pool4) + + outputs = [] + score_fr = self.head(pool5) + + score_pool4 = self.score_pool4(pool4) + + upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', align_corners=True) + fuse_pool4 = upscore2 + score_pool4 + + out = F.interpolate(fuse_pool4, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(auxout) + + #return tuple(outputs) + return outputs[0] + +class FCN8s(nn.Module): + def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs): + super(FCN8s, self).__init__() + self.aux = aux + if backbone == 'vgg16': + self.pretrained = vgg16(pretrained=pretrained_base).features + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + self.pool3 = nn.Sequential(*self.pretrained[:17]) + self.pool4 = nn.Sequential(*self.pretrained[17:24]) + self.pool5 = nn.Sequential(*self.pretrained[24:]) + self.head = _FCNHead(512, nclass, norm_layer) + self.score_pool3 = nn.Conv2d(256, nclass, 1) + self.score_pool4 = nn.Conv2d(512, nclass, 1) + if aux: + self.auxlayer = _FCNHead(512, nclass, norm_layer) + + self.__setattr__('exclusive', + ['head', 'score_pool3', 'score_pool4', 'auxlayer'] if aux else ['head', 'score_pool3', + 'score_pool4']) + + def forward(self, x): + pool3 = self.pool3(x) + pool4 = self.pool4(pool3) + pool5 = self.pool5(pool4) + + outputs = [] + score_fr = self.head(pool5) + + score_pool4 = self.score_pool4(pool4) + score_pool3 = self.score_pool3(pool3) + + upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', align_corners=True) + fuse_pool4 = upscore2 + score_pool4 + + upscore_pool4 = F.interpolate(fuse_pool4, score_pool3.size()[2:], mode='bilinear', align_corners=True) + fuse_pool3 = upscore_pool4 + score_pool3 + + out = F.interpolate(fuse_pool3, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(auxout) + + return tuple(outputs) + + +class _FCNHead(nn.Module): + def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_FCNHead, self).__init__() + inter_channels = in_channels // 4 + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(inplace=True), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ) + + def forward(self, x): + return self.block(x) + + +def get_fcn32s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN32s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn32s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn16s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 
'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN16s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn16s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn8s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN8s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn8s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn32s_vgg16_voc(**kwargs): + return get_fcn32s('pascal_voc', 'vgg16', **kwargs) + + +def get_fcn16s_vgg16_voc(**kwargs): + return get_fcn16s('pascal_voc', 'vgg16', **kwargs) + + +def get_fcn8s_vgg16_voc(**kwargs): + return get_fcn8s('pascal_voc', 'vgg16', **kwargs) + + +if __name__ == "__main__": + model = FCN16s(21) + print(model) + input = torch.rand(2, 3, 224,224) + #target = torch.zeros(4, 512, 512).cuda() + #model.eval() + #print(model) + loss = model(input) + print(loss) + print(loss.shape) + import torch + from thop import profile + from torchsummary import summary + flop,params=profile(model,input_size=(1,3,512,512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop/1e9, params/1e6)) diff --git a/segutils/core/models/fcnv2.py b/segutils/core/models/fcnv2.py new file mode 100644 index 0000000..6bc4954 --- /dev/null +++ b/segutils/core/models/fcnv2.py @@ -0,0 +1,82 @@ +"""Fully Convolutional Network with Stride of 8""" +from __future__ import division + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel + +__all__ = ['FCN', 'get_fcn', 'get_fcn_resnet50_voc', + 'get_fcn_resnet101_voc', 'get_fcn_resnet152_voc'] + + +class FCN(SegBaseModel): + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(FCN, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _FCNHead(2048, nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _FCNHead(nn.Module): + def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_FCNHead, self).__init__() + inter_channels = in_channels // 4 + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + 
nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ) + + def forward(self, x): + return self.block(x) + + +def get_fcn(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn_resnet50_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet50', **kwargs) + + +def get_fcn_resnet101_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet101', **kwargs) + + +def get_fcn_resnet152_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet152', **kwargs) diff --git a/segutils/core/models/hrnet.py b/segutils/core/models/hrnet.py new file mode 100644 index 0000000..8ad08e3 --- /dev/null +++ b/segutils/core/models/hrnet.py @@ -0,0 +1,29 @@ +"""High-Resolution Representations for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +class HRNet(nn.Module): + """HRNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + Reference: + Ke Sun. "High-Resolution Representations for Labeling Pixels and Regions." + arXiv preprint arXiv:1904.04514 (2019). 
+ """ + def __init__(self, nclass, backbone='', aux=False, pretrained_base=False, **kwargs): + super(HRNet, self).__init__() + + def forward(self, x): + pass \ No newline at end of file diff --git a/segutils/core/models/icnet.py b/segutils/core/models/icnet.py new file mode 100644 index 0000000..fed14a4 --- /dev/null +++ b/segutils/core/models/icnet.py @@ -0,0 +1,180 @@ +"""Image Cascade Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel + +__all__ = ['ICNet', 'get_icnet', 'get_icnet_resnet50_citys', + 'get_icnet_resnet101_citys', 'get_icnet_resnet152_citys'] + + +class ICNet(SegBaseModel): + """Image Cascade Network""" + + def __init__(self, nclass, backbone='resnet50', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(ICNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.conv_sub1 = nn.Sequential( + _ConvBNReLU(3, 32, 3, 2, **kwargs), + _ConvBNReLU(32, 32, 3, 2, **kwargs), + _ConvBNReLU(32, 64, 3, 2, **kwargs) + ) + + self.ppm = PyramidPoolingModule() + + self.head = _ICHead(nclass, **kwargs) + + self.__setattr__('exclusive', ['conv_sub1', 'head']) + + def forward(self, x): + # sub 1 + x_sub1 = self.conv_sub1(x) + + # sub 2 + x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=True) + _, x_sub2, _, _ = self.base_forward(x_sub2) + + # sub 4 + x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear', align_corners=True) + _, _, _, x_sub4 = self.base_forward(x_sub4) + # add PyramidPoolingModule + x_sub4 = self.ppm(x_sub4) + outputs = self.head(x_sub1, x_sub2, x_sub4) + + return tuple(outputs) + +class PyramidPoolingModule(nn.Module): + def __init__(self, pyramids=[1,2,3,6]): + super(PyramidPoolingModule, self).__init__() + self.pyramids = pyramids + + def forward(self, input): + feat = input + height, width = input.shape[2:] + for bin_size in self.pyramids: + x = F.adaptive_avg_pool2d(input, output_size=bin_size) + x = F.interpolate(x, size=(height, width), mode='bilinear', align_corners=True) + feat = feat + x + return feat + +class _ICHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ICHead, self).__init__() + #self.cff_12 = CascadeFeatureFusion(512, 64, 128, nclass, norm_layer, **kwargs) + self.cff_12 = CascadeFeatureFusion(128, 64, 128, nclass, norm_layer, **kwargs) + self.cff_24 = CascadeFeatureFusion(2048, 512, 128, nclass, norm_layer, **kwargs) + + self.conv_cls = nn.Conv2d(128, nclass, 1, bias=False) + + def forward(self, x_sub1, x_sub2, x_sub4): + outputs = list() + x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2) + outputs.append(x_24_cls) + #x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1) + x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1) + outputs.append(x_12_cls) + + up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear', align_corners=True) + up_x2 = self.conv_cls(up_x2) + outputs.append(up_x2) + up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear', align_corners=True) + outputs.append(up_x8) + # 1 -> 1/4 -> 1/8 -> 1/16 + outputs.reverse() + + return outputs + + +class _ConvBNReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, + groups=1, norm_layer=nn.BatchNorm2d, bias=False, **kwargs): + super(_ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) + self.bn = norm_layer(out_channels) + self.relu = nn.ReLU(True) + + def 
forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class CascadeFeatureFusion(nn.Module): + """CFF Unit""" + + def __init__(self, low_channels, high_channels, out_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(CascadeFeatureFusion, self).__init__() + self.conv_low = nn.Sequential( + nn.Conv2d(low_channels, out_channels, 3, padding=2, dilation=2, bias=False), + norm_layer(out_channels) + ) + self.conv_high = nn.Sequential( + nn.Conv2d(high_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + self.conv_low_cls = nn.Conv2d(out_channels, nclass, 1, bias=False) + + def forward(self, x_low, x_high): + x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True) + x_low = self.conv_low(x_low) + x_high = self.conv_high(x_high) + x = x_low + x_high + x = F.relu(x, inplace=True) + x_low_cls = self.conv_low_cls(x_low) + + return x, x_low_cls + + +def get_icnet(dataset='citys', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = ICNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('icnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_icnet_resnet50_citys(**kwargs): + return get_icnet('citys', 'resnet50', **kwargs) + + +def get_icnet_resnet101_citys(**kwargs): + return get_icnet('citys', 'resnet101', **kwargs) + + +def get_icnet_resnet152_citys(**kwargs): + return get_icnet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(1, 3, 256, 256) + # model = get_icnet_resnet50_citys() + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = ICNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + #print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/lednet.py b/segutils/core/models/lednet.py new file mode 100644 index 0000000..03c05bd --- /dev/null +++ b/segutils/core/models/lednet.py @@ -0,0 +1,211 @@ +"""LEDNet: A Lightweight Encoder-Decoder Network for Real-time Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNReLU + +__all__ = ['LEDNet', 'get_lednet', 'get_lednet_citys'] + +class LEDNet(nn.Module): + r"""LEDNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Yu Wang, et al. 
"LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation." + arXiv preprint arXiv:1905.02423 (2019). + """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(LEDNet, self).__init__() + self.encoder = nn.Sequential( + Downsampling(3, 32), + SSnbt(32, **kwargs), SSnbt(32, **kwargs), SSnbt(32, **kwargs), + Downsampling(32, 64), + SSnbt(64, **kwargs), SSnbt(64, **kwargs), + Downsampling(64, 128), + SSnbt(128, **kwargs), + SSnbt(128, 2, **kwargs), + SSnbt(128, 5, **kwargs), + SSnbt(128, 9, **kwargs), + SSnbt(128, 2, **kwargs), + SSnbt(128, 5, **kwargs), + SSnbt(128, 9, **kwargs), + SSnbt(128, 17, **kwargs), + ) + self.decoder = APNModule(128, nclass) + + self.__setattr__('exclusive', ['encoder', 'decoder']) + + def forward(self, x): + size = x.size()[2:] + x = self.encoder(x) + x = self.decoder(x) + outputs = list() + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + #return tuple(outputs) + return outputs[0] + +class Downsampling(nn.Module): + def __init__(self, in_channels, out_channels, **kwargs): + super(Downsampling, self).__init__() + self.conv1 = nn.Conv2d(in_channels, out_channels // 2, 3, 2, 2, bias=False) + self.conv2 = nn.Conv2d(in_channels, out_channels // 2, 3, 2, 2, bias=False) + self.pool = nn.MaxPool2d(kernel_size=2, stride=1) + + def forward(self, x): + x1 = self.conv1(x) + x1 = self.pool(x1) + + x2 = self.conv2(x) + x2 = self.pool(x2) + + return torch.cat([x1, x2], dim=1) + + +class SSnbt(nn.Module): + def __init__(self, in_channels, dilation=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(SSnbt, self).__init__() + inter_channels = in_channels // 2 + self.branch1 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(1, 0), bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, 1), bias=False), + norm_layer(inter_channels), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1), + bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation), + bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.branch2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, 1), bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(1, 0), bias=False), + norm_layer(inter_channels), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation), + bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1), + bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.relu = nn.ReLU(True) + + @staticmethod + def channel_shuffle(x, groups): + n, c, h, w = x.size() + + channels_per_group = c // groups + x = x.view(n, groups, channels_per_group, h, w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(n, -1, h, w) + + return x + + def forward(self, x): + # channels split + x1, x2 = x.split(x.size(1) // 2, 1) + + x1 = self.branch1(x1) + x2 = self.branch2(x2) + + out = torch.cat([x1, x2], dim=1) + out = self.relu(out + x) + out = self.channel_shuffle(out, groups=2) + + return out + + +class APNModule(nn.Module): + def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(APNModule, self).__init__() + self.conv1 = _ConvBNReLU(in_channels, in_channels, 3, 2, 1, 
norm_layer=norm_layer) + self.conv2 = _ConvBNReLU(in_channels, in_channels, 5, 2, 2, norm_layer=norm_layer) + self.conv3 = _ConvBNReLU(in_channels, in_channels, 7, 2, 3, norm_layer=norm_layer) + self.level1 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level2 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level3 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level4 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level5 = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + _ConvBNReLU(in_channels, nclass, 1)) + + def forward(self, x): + w, h = x.size()[2:] + branch3 = self.conv1(x) + branch2 = self.conv2(branch3) + branch1 = self.conv3(branch2) + + out = self.level1(branch1) + out = F.interpolate(out, ((w + 3) // 4, (h + 3) // 4), mode='bilinear', align_corners=True) + out = self.level2(branch2) + out + out = F.interpolate(out, ((w + 1) // 2, (h + 1) // 2), mode='bilinear', align_corners=True) + out = self.level3(branch3) + out + out = F.interpolate(out, (w, h), mode='bilinear', align_corners=True) + out = self.level4(x) * out + out = self.level5(x) + out + return out + + +def get_lednet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = LEDNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('lednet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_lednet_citys(**kwargs): + return get_lednet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_lednet_citys() + input = torch.rand(2, 3, 224, 224) + model =LEDNet(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/model_store.py b/segutils/core/models/model_store.py new file mode 100644 index 0000000..9e64675 --- /dev/null +++ b/segutils/core/models/model_store.py @@ -0,0 +1,68 @@ +"""Model store which provides pretrained models.""" +from __future__ import print_function + +import os +import zipfile + +from ..utils.download import download, check_sha1 + +__all__ = ['get_model_file', 'get_resnet_file'] + +_model_sha1 = {name: checksum for checksum, name in [ + ('25c4b50959ef024fcc050213a06b614899f94b3d', 'resnet50'), + ('2a57e44de9c853fa015b172309a1ee7e2d0e4e2a', 'resnet101'), + ('0d43d698c66aceaa2bc0309f55efdd7ff4b143af', 'resnet152'), +]} + +encoding_repo_url = 'https://hangzh.s3.amazonaws.com/' +_url_format = '{repo_url}encoding/models/{file_name}.zip' + + +def short_hash(name): + if name not in _model_sha1: + raise ValueError('Pretrained model for {name} is not available.'.format(name=name)) + return _model_sha1[name][:8] + + +def get_resnet_file(name, root='~/.torch/models'): + file_name = 
'{name}-{short_hash}'.format(name=name, short_hash=short_hash(name))
+    root = os.path.expanduser(root)
+
+    file_path = os.path.join(root, file_name + '.pth')
+    sha1_hash = _model_sha1[name]
+    if os.path.exists(file_path):
+        if check_sha1(file_path, sha1_hash):
+            return file_path
+        else:
+            print('Mismatch in the content of model file {} detected.'
+                  ' Downloading again.'.format(file_path))
+    else:
+        print('Model file {} is not found. Downloading.'.format(file_path))
+
+    if not os.path.exists(root):
+        os.makedirs(root)
+
+    zip_file_path = os.path.join(root, file_name + '.zip')
+    repo_url = os.environ.get('ENCODING_REPO', encoding_repo_url)
+    if repo_url[-1] != '/':
+        repo_url = repo_url + '/'
+    download(_url_format.format(repo_url=repo_url, file_name=file_name),
+             path=zip_file_path,
+             overwrite=True)
+    with zipfile.ZipFile(zip_file_path) as zf:
+        zf.extractall(root)
+    os.remove(zip_file_path)
+
+    if check_sha1(file_path, sha1_hash):
+        return file_path
+    else:
+        raise ValueError('Downloaded file has different hash. Please try again.')
+
+
+def get_model_file(name, root='~/.torch/models'):
+    root = os.path.expanduser(root)
+    file_path = os.path.join(root, name + '.pth')
+    if os.path.exists(file_path):
+        return file_path
+    else:
+        raise ValueError('Model file is not found. Download or train it first.')
diff --git a/segutils/core/models/model_zoo.py b/segutils/core/models/model_zoo.py
new file mode 100644
index 0000000..7f8cd11
--- /dev/null
+++ b/segutils/core/models/model_zoo.py
@@ -0,0 +1,122 @@
+"""Model store which handles pretrained models"""
+from .fcn import *
+from .fcnv2 import *
+from .pspnet import *
+from .deeplabv3 import *
+from .deeplabv3_plus import *
+from .danet import *
+from .denseaspp import *
+from .bisenet import *
+from .encnet import *
+from .dunet import *
+from .icnet import *
+from .enet import *
+from .ocnet import *
+from .ccnet import *
+from .psanet import *
+from .cgnet import *
+from .espnet import *
+from .lednet import *
+from .dfanet import *
+
+__all__ = ['get_model', 'get_model_list', 'get_segmentation_model']
+
+_models = {
+    'fcn32s_vgg16_voc': get_fcn32s_vgg16_voc,
+    'fcn16s_vgg16_voc': get_fcn16s_vgg16_voc,
+    'fcn8s_vgg16_voc': get_fcn8s_vgg16_voc,
+    'fcn_resnet50_voc': get_fcn_resnet50_voc,
+    'fcn_resnet101_voc': get_fcn_resnet101_voc,
+    'fcn_resnet152_voc': get_fcn_resnet152_voc,
+    'psp_resnet50_voc': get_psp_resnet50_voc,
+    'psp_resnet50_ade': get_psp_resnet50_ade,
+    'psp_resnet101_voc': get_psp_resnet101_voc,
+    'psp_resnet101_ade': get_psp_resnet101_ade,
+    'psp_resnet101_citys': get_psp_resnet101_citys,
+    'psp_resnet101_coco': get_psp_resnet101_coco,
+    'deeplabv3_resnet50_voc': get_deeplabv3_resnet50_voc,
+    'deeplabv3_resnet101_voc': get_deeplabv3_resnet101_voc,
+    'deeplabv3_resnet152_voc': get_deeplabv3_resnet152_voc,
+    'deeplabv3_resnet50_ade': get_deeplabv3_resnet50_ade,
+    'deeplabv3_resnet101_ade': get_deeplabv3_resnet101_ade,
+    'deeplabv3_resnet152_ade': get_deeplabv3_resnet152_ade,
+    'deeplabv3_plus_xception_voc': get_deeplabv3_plus_xception_voc,
+    'danet_resnet50_citys': get_danet_resnet50_citys,
+    'danet_resnet101_citys': get_danet_resnet101_citys,
+    'danet_resnet152_citys': get_danet_resnet152_citys,
+    'denseaspp_densenet121_citys': get_denseaspp_densenet121_citys,
+    'denseaspp_densenet161_citys': get_denseaspp_densenet161_citys,
+    'denseaspp_densenet169_citys': get_denseaspp_densenet169_citys,
+    'denseaspp_densenet201_citys': get_denseaspp_densenet201_citys,
+    'bisenet_resnet18_citys': get_bisenet_resnet18_citys,
+
'encnet_resnet50_ade': get_encnet_resnet50_ade, + 'encnet_resnet101_ade': get_encnet_resnet101_ade, + 'encnet_resnet152_ade': get_encnet_resnet152_ade, + 'dunet_resnet50_pascal_voc': get_dunet_resnet50_pascal_voc, + 'dunet_resnet101_pascal_voc': get_dunet_resnet101_pascal_voc, + 'dunet_resnet152_pascal_voc': get_dunet_resnet152_pascal_voc, + 'icnet_resnet50_citys': get_icnet_resnet50_citys, + 'icnet_resnet101_citys': get_icnet_resnet101_citys, + 'icnet_resnet152_citys': get_icnet_resnet152_citys, + 'enet_citys': get_enet_citys, + 'base_ocnet_resnet101_citys': get_base_ocnet_resnet101_citys, + 'pyramid_ocnet_resnet101_citys': get_pyramid_ocnet_resnet101_citys, + 'asp_ocnet_resnet101_citys': get_asp_ocnet_resnet101_citys, + 'ccnet_resnet50_citys': get_ccnet_resnet50_citys, + 'ccnet_resnet101_citys': get_ccnet_resnet101_citys, + 'ccnet_resnet152_citys': get_ccnet_resnet152_citys, + 'ccnet_resnet50_ade': get_ccnet_resnet50_ade, + 'ccnet_resnet101_ade': get_ccnet_resnet101_ade, + 'ccnet_resnet152_ade': get_ccnet_resnet152_ade, + 'psanet_resnet50_voc': get_psanet_resnet50_voc, + 'psanet_resnet101_voc': get_psanet_resnet101_voc, + 'psanet_resnet152_voc': get_psanet_resnet152_voc, + 'psanet_resnet50_citys': get_psanet_resnet50_citys, + 'psanet_resnet101_citys': get_psanet_resnet101_citys, + 'psanet_resnet152_citys': get_psanet_resnet152_citys, + 'cgnet_citys': get_cgnet_citys, + 'espnet_citys': get_espnet_citys, + 'lednet_citys': get_lednet_citys, + 'dfanet_citys': get_dfanet_citys, +} + + +def get_model(name, **kwargs): + name = name.lower() + if name not in _models: + err_str = '"%s" is not among the following model list:\n\t' % (name) + err_str += '%s' % ('\n\t'.join(sorted(_models.keys()))) + raise ValueError(err_str) + net = _models[name](**kwargs) + return net + + +def get_model_list(): + return _models.keys() + + +def get_segmentation_model(model, **kwargs): + models = { + 'fcn32s': get_fcn32s, + 'fcn16s': get_fcn16s, + 'fcn8s': get_fcn8s, + 'fcn': get_fcn, + 'psp': get_psp, + 'deeplabv3': get_deeplabv3, + 'deeplabv3_plus': get_deeplabv3_plus, + 'danet': get_danet, + 'denseaspp': get_denseaspp, + 'bisenet': get_bisenet, + 'encnet': get_encnet, + 'dunet': get_dunet, + 'icnet': get_icnet, + 'enet': get_enet, + 'ocnet': get_ocnet, + 'ccnet': get_ccnet, + 'psanet': get_psanet, + 'cgnet': get_cgnet, + 'espnet': get_espnet, + 'lednet': get_lednet, + 'dfanet': get_dfanet, + } + return models[model](**kwargs) diff --git a/segutils/core/models/ocnet.py b/segutils/core/models/ocnet.py new file mode 100644 index 0000000..1e1e85c --- /dev/null +++ b/segutils/core/models/ocnet.py @@ -0,0 +1,361 @@ +""" Object Context Network for Scene Parsing""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['OCNet', 'get_ocnet', 'get_base_ocnet_resnet101_citys', + 'get_pyramid_ocnet_resnet101_citys', 'get_asp_ocnet_resnet101_citys'] + + +class OCNet(SegBaseModel): + r"""OCNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + Reference: + Yuhui Yuan, Jingdong Wang. "OCNet: Object Context Network for Scene Parsing." 
+ arXiv preprint arXiv:1809.00916 (2018). + """ + + def __init__(self, nclass, backbone='resnet101', oc_arch='base', aux=False, pretrained_base=True, **kwargs): + super(OCNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _OCHead(nclass, oc_arch, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _OCHead(nn.Module): + def __init__(self, nclass, oc_arch, norm_layer=nn.BatchNorm2d, **kwargs): + super(_OCHead, self).__init__() + if oc_arch == 'base': + self.context = nn.Sequential( + nn.Conv2d(2048, 512, 3, 1, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + BaseOCModule(512, 512, 256, 256, scales=([1]), norm_layer=norm_layer, **kwargs)) + elif oc_arch == 'pyramid': + self.context = nn.Sequential( + nn.Conv2d(2048, 512, 3, 1, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + PyramidOCModule(512, 512, 256, 512, scales=([1, 2, 3, 6]), norm_layer=norm_layer, **kwargs)) + elif oc_arch == 'asp': + self.context = ASPOCModule(2048, 512, 256, 512, norm_layer=norm_layer, **kwargs) + else: + raise ValueError("Unknown OC architecture!") + + self.out = nn.Conv2d(512, nclass, 1) + + def forward(self, x): + x = self.context(x) + return self.out(x) + + +class BaseAttentionBlock(nn.Module): + """The basic implementation for self-attention block/non-local block.""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scale=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(BaseAttentionBlock, self).__init__() + self.scale = scale + self.key_channels = key_channels + self.value_channels = value_channels + if scale > 1: + self.pool = nn.MaxPool2d(scale) + + self.f_value = nn.Conv2d(in_channels, value_channels, 1) + self.f_key = nn.Sequential( + nn.Conv2d(in_channels, key_channels, 1), + norm_layer(key_channels), + nn.ReLU(True) + ) + self.f_query = self.f_key + self.W = nn.Conv2d(value_channels, out_channels, 1) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + def forward(self, x): + batch_size, c, w, h = x.size() + if self.scale > 1: + x = self.pool(x) + + value = self.f_value(x).view(batch_size, self.value_channels, -1).permute(0, 2, 1) + query = self.f_query(x).view(batch_size, self.key_channels, -1).permute(0, 2, 1) + key = self.f_key(x).view(batch_size, self.key_channels, -1) + + sim_map = torch.bmm(query, key) * (self.key_channels ** -.5) + sim_map = F.softmax(sim_map, dim=-1) + + context = torch.bmm(sim_map, value).permute(0, 2, 1).contiguous() + context = context.view(batch_size, self.value_channels, *x.size()[2:]) + context = self.W(context) + if self.scale > 1: + context = F.interpolate(context, size=(w, h), mode='bilinear', align_corners=True) + + return context + + +class BaseOCModule(nn.Module): + """Base-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scales=([1]), norm_layer=nn.BatchNorm2d, concat=True, **kwargs): + super(BaseOCModule, self).__init__() + self.stages = nn.ModuleList([ + BaseAttentionBlock(in_channels, 
out_channels, key_channels, value_channels, scale, norm_layer, **kwargs) + for scale in scales]) + in_channels = in_channels * 2 if concat else in_channels + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.05) + ) + self.concat = concat + + def forward(self, x): + priors = [stage(x) for stage in self.stages] + context = priors[0] + for i in range(1, len(priors)): + context += priors[i] + if self.concat: + context = torch.cat([context, x], 1) + out = self.project(context) + return out + + +class PyramidAttentionBlock(nn.Module): + """The basic implementation for pyramid self-attention block/non-local block""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scale=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(PyramidAttentionBlock, self).__init__() + self.scale = scale + self.value_channels = value_channels + self.key_channels = key_channels + + self.f_value = nn.Conv2d(in_channels, value_channels, 1) + self.f_key = nn.Sequential( + nn.Conv2d(in_channels, key_channels, 1), + norm_layer(key_channels), + nn.ReLU(True) + ) + self.f_query = self.f_key + self.W = nn.Conv2d(value_channels, out_channels, 1) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + def forward(self, x): + batch_size, c, w, h = x.size() + + local_x = list() + local_y = list() + step_w, step_h = w // self.scale, h // self.scale + for i in range(self.scale): + for j in range(self.scale): + start_x, start_y = step_w * i, step_h * j + end_x, end_y = min(start_x + step_w, w), min(start_y + step_h, h) + if i == (self.scale - 1): + end_x = w + if j == (self.scale - 1): + end_y = h + local_x += [start_x, end_x] + local_y += [start_y, end_y] + + value = self.f_value(x) + query = self.f_query(x) + key = self.f_key(x) + + local_list = list() + local_block_cnt = (self.scale ** 2) * 2 + for i in range(0, local_block_cnt, 2): + value_local = value[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + query_local = query[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + key_local = key[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + + w_local, h_local = value_local.size(2), value_local.size(3) + value_local = value_local.contiguous().view(batch_size, self.value_channels, -1).permute(0, 2, 1) + query_local = query_local.contiguous().view(batch_size, self.key_channels, -1).permute(0, 2, 1) + key_local = key_local.contiguous().view(batch_size, self.key_channels, -1) + + sim_map = torch.bmm(query_local, key_local) * (self.key_channels ** -.5) + sim_map = F.softmax(sim_map, dim=-1) + + context_local = torch.bmm(sim_map, value_local).permute(0, 2, 1).contiguous() + context_local = context_local.view(batch_size, self.value_channels, w_local, h_local) + local_list.append(context_local) + + context_list = list() + for i in range(0, self.scale): + row_tmp = list() + for j in range(self.scale): + row_tmp.append(local_list[j + i * self.scale]) + context_list.append(torch.cat(row_tmp, 3)) + + context = torch.cat(context_list, 2) + context = self.W(context) + + return context + + +class PyramidOCModule(nn.Module): + """Pyramid-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scales=([1]), norm_layer=nn.BatchNorm2d, **kwargs): + super(PyramidOCModule, self).__init__() + self.stages = nn.ModuleList([ + PyramidAttentionBlock(in_channels, out_channels, key_channels, value_channels, scale, norm_layer, **kwargs) + for scale in scales]) + 
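+        # up_dr (below) expands the identity branch from in_channels to
+        # in_channels * len(scales) channels with a 1x1 conv so that forward()
+        # can concatenate it channel-wise with the len(scales) pyramid
+        # attention outputs before the final projection.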
self.up_dr = nn.Sequential( + nn.Conv2d(in_channels, in_channels * len(scales), 1), + norm_layer(in_channels * len(scales)), + nn.ReLU(True) + ) + self.project = nn.Sequential( + nn.Conv2d(in_channels * len(scales) * 2, out_channels, 1), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.05) + ) + + def forward(self, x): + priors = [stage(x) for stage in self.stages] + context = [self.up_dr(x)] + for i in range(len(priors)): + context += [priors[i]] + context = torch.cat(context, 1) + out = self.project(context) + return out + + +class ASPOCModule(nn.Module): + """ASP-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + atrous_rates=(12, 24, 36), norm_layer=nn.BatchNorm2d, **kwargs): + super(ASPOCModule, self).__init__() + self.context = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + norm_layer(out_channels), + nn.ReLU(True), + BaseOCModule(out_channels, out_channels, key_channels, value_channels, ([2]), norm_layer, False, **kwargs)) + + rate1, rate2, rate3 = tuple(atrous_rates) + self.b1 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate1, dilation=rate1, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b2 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate2, dilation=rate2, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b3 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate3, dilation=rate3, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b4 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + + self.project = nn.Sequential( + nn.Conv2d(out_channels * 5, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.1) + ) + + def forward(self, x): + feat1 = self.context(x) + feat2 = self.b1(x) + feat3 = self.b2(x) + feat4 = self.b3(x) + feat5 = self.b4(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + out = self.project(out) + return out + + +def get_ocnet(dataset='citys', backbone='resnet50', oc_arch='base', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = OCNet(datasets[dataset].NUM_CLASS, backbone=backbone, oc_arch=oc_arch, + pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('%s_ocnet_%s_%s' % ( + oc_arch, backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_base_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'base', **kwargs) + + +def get_pyramid_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'pyramid', **kwargs) + + +def get_asp_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'asp', **kwargs) + + +if __name__ == '__main__': + #img = torch.randn(1, 3, 256, 256) + #model = get_asp_ocnet_resnet101_citys() + # outputs = model(img) + input = torch.rand(1, 3, 224,224) + model=OCNet(4,pretrained_base=False) + #target = torch.zeros(4, 512, 512).cuda() + #model.eval() + #print(model) + loss = model(input) + print(loss,loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 
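+    # NOTE: the profile() call below uses an old thop interface; recent thop
+    # releases expect profile(model, inputs=(input,)) rather than input_size=.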
+ import torch + from thop import profile + from torchsummary import summary + flop,params=profile(model,input_size=(1,3,512,512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop/1e9, params/1e6)) \ No newline at end of file diff --git a/segutils/core/models/psanet.py b/segutils/core/models/psanet.py new file mode 100644 index 0000000..82361f3 --- /dev/null +++ b/segutils/core/models/psanet.py @@ -0,0 +1,163 @@ +"""Point-wise Spatial Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNReLU +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['PSANet', 'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc', + 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys', + 'get_psanet_resnet152_citys'] + + +class PSANet(SegBaseModel): + r"""PSANet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network for Scene Parsing." + ECCV-2018. + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _PSAHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _PSAHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PSAHead, self).__init__() + # psa_out_channels = crop_size // 8 ** 2 + self.psa = _PointwiseSpatialAttention(2048, 3600, norm_layer) + + self.conv_post = _ConvBNReLU(1024, 2048, 1, norm_layer=norm_layer) + self.project = nn.Sequential( + _ConvBNReLU(4096, 512, 3, padding=1, norm_layer=norm_layer), + nn.Dropout2d(0.1, False), + nn.Conv2d(512, nclass, 1)) + + def forward(self, x): + global_feature = self.psa(x) + out = self.conv_post(global_feature) + out = torch.cat([x, out], dim=1) + out = self.project(out) + + return out + + +class _PointwiseSpatialAttention(nn.Module):# + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PointwiseSpatialAttention, self).__init__() + reduced_channels = 512 + self.collect_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer) + self.distribute_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer) + + def forward(self, x): + collect_fm = self.collect_attention(x) + distribute_fm = self.distribute_attention(x) + psa_fm = torch.cat([collect_fm, distribute_fm], dim=1) + return psa_fm + + +class _AttentionGeneration(nn.Module):#-->Z:(n,C2,H,W),不是原文over-completed的做法。 + def 
__init__(self, in_channels, reduced_channels, out_channels, norm_layer, **kwargs):
+        super(_AttentionGeneration, self).__init__()
+        self.conv_reduce = _ConvBNReLU(in_channels, reduced_channels, 1, norm_layer=norm_layer)
+        self.attention = nn.Sequential(
+            _ConvBNReLU(reduced_channels, reduced_channels, 1, norm_layer=norm_layer),
+            nn.Conv2d(reduced_channels, out_channels, 1, bias=False))
+
+        self.reduced_channels = reduced_channels
+
+    def forward(self, x):
+        reduce_x = self.conv_reduce(x)
+        attention = self.attention(reduce_x)
+        n, c, h, w = attention.size()  # c = out_channels = 3600
+        attention = attention.view(n, c, -1)  # (n, 3600, H*W)
+        reduce_x = reduce_x.view(n, self.reduced_channels, -1)  # (n, 512, H*W)
+        fm = torch.bmm(reduce_x, torch.softmax(attention, dim=1))
+        fm = fm.view(n, self.reduced_channels, h, w)  # (n, 512, 60, 60)
+
+        return fm
+
+
+def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models',
+               pretrained_base=False, **kwargs):
+    acronyms = {
+        'pascal_voc': 'pascal_voc',
+        'pascal_aug': 'pascal_aug',
+        'ade20k': 'ade',
+        'coco': 'coco',
+        'citys': 'citys',
+    }
+    from core.data.dataloader import datasets
+    model = PSANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
+    if pretrained:
+        from .model_store import get_model_file
+        device = torch.device(kwargs['local_rank'])
+        # 'psanet' checkpoint name assumed here; the model_store key is uncertain
+        model.load_state_dict(torch.load(get_model_file('psanet_%s_%s' % (backbone, acronyms[dataset]), root=root),
+                                         map_location=device))
+    return model
+
+
+def get_psanet_resnet50_voc(**kwargs):
+    return get_psanet('pascal_voc', 'resnet50', **kwargs)
+
+
+def get_psanet_resnet101_voc(**kwargs):
+    return get_psanet('pascal_voc', 'resnet101', **kwargs)
+
+
+def get_psanet_resnet152_voc(**kwargs):
+    return get_psanet('pascal_voc', 'resnet152', **kwargs)
+
+
+def get_psanet_resnet50_citys(**kwargs):
+    return get_psanet('citys', 'resnet50', **kwargs)
+
+
+def get_psanet_resnet101_citys(**kwargs):
+    return get_psanet('citys', 'resnet101', **kwargs)
+
+
+def get_psanet_resnet152_citys(**kwargs):
+    return get_psanet('citys', 'resnet152', **kwargs)
+
+
+if __name__ == '__main__':
+    model = get_psanet_resnet50_voc()
+    img = torch.randn(1, 3, 480, 480)
+    output = model(img)
diff --git a/segutils/core/models/psanet_offical.py b/segutils/core/models/psanet_offical.py
new file mode 100644
index 0000000..54531a3
--- /dev/null
+++ b/segutils/core/models/psanet_offical.py
@@ -0,0 +1,255 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+import core.lib.psa.functional as PF
+import modeling.backbone.resnet_real as models
+
+# Fails to run: the compact mode works, but the over-complete mode does not.
+# This again relates to the psamask implementation: it uses a custom
+# torch.autograd.Function (backed by cpp files, and the _C module fails to import).
+#
+# from . import functions
+#
+#
+# def psa_mask(input, psa_type=0, mask_H_=None, mask_W_=None):
+#     return functions.psa_mask(input, psa_type, mask_H_, mask_W_)
+#
+#
+# import torch
+# from torch.autograd import Function
+# from ..
import src + + +# class PSAMask(Function): +# @staticmethod +# def forward(ctx, input, psa_type=0, mask_H_=None, mask_W_=None): +# assert psa_type in [0, 1] # 0-col, 1-dis +# assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) +# num_, channels_, feature_H_, feature_W_ = input.size() +# if mask_H_ is None and mask_W_ is None: +# mask_H_, mask_W_ = 2 * feature_H_ - 1, 2 * feature_W_ - 1 +# assert (mask_H_ % 2 == 1) and (mask_W_ % 2 == 1) +# assert channels_ == mask_H_ * mask_W_ +# half_mask_H_, half_mask_W_ = (mask_H_ - 1) // 2, (mask_W_ - 1) // 2 +# output = torch.zeros([num_, feature_H_ * feature_W_, feature_H_, feature_W_], dtype=input.dtype, device=input.device) +# if not input.is_cuda: +# src.cpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# else: +# output = output.cuda() +# src.gpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ = psa_type, num_, channels_, feature_H_, feature_W_ +# ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ = mask_H_, mask_W_, half_mask_H_, half_mask_W_ +# return output +# +# @staticmethod +# def backward(ctx, grad_output): +# psa_type, num_, channels_, feature_H_, feature_W_ = ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ +# mask_H_, mask_W_, half_mask_H_, half_mask_W_ = ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ +# grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device) +# if not grad_output.is_cuda: +# src.cpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# else: +# src.gpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# return grad_input, None, None, None + + +# psa_mask = PSAMask.apply + + +class PSA(nn.Module): + def __init__(self, in_channels=2048, mid_channels=512, psa_type=2, compact=False, shrink_factor=2, mask_h=59, + mask_w=59, normalization_factor=1.0, psa_softmax=True): + super(PSA, self).__init__() + assert psa_type in [0, 1, 2] + self.psa_type = psa_type + self.compact = compact + self.shrink_factor = shrink_factor + self.mask_h = mask_h + self.mask_w = mask_w + self.psa_softmax = psa_softmax + if normalization_factor is None: + normalization_factor = mask_h * mask_w + self.normalization_factor = normalization_factor + + self.reduce = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + self.attention = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + nn.Conv2d(mid_channels, mask_h*mask_w, kernel_size=1, bias=False), + ) + if psa_type == 2: + self.reduce_p = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + self.attention_p = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + nn.Conv2d(mid_channels, mask_h*mask_w, kernel_size=1, bias=False), + ) + self.proj = nn.Sequential( + nn.Conv2d(mid_channels * (2 if psa_type == 2 else 1), in_channels, 
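+                      # psa_type 2 runs both attention branches ('collect' and
+                      # 'distribute') and concatenates them, so the projection
+                      # sees 2 * mid_channels input channels.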
kernel_size=1, bias=False), + nn.BatchNorm2d(in_channels), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + out = x + if self.psa_type in [0, 1]: + x = self.reduce(x) + n, c, h, w = x.size() + if self.shrink_factor != 1: + h = (h - 1) // self.shrink_factor + 1#可以理解为这样做的目的是向上取整。 + w = (w - 1) // self.shrink_factor + 1 + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + y = self.attention(x) + if self.compact: + if self.psa_type == 1: + y = y.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y = PF.psa_mask(y, self.psa_type, self.mask_h, self.mask_w) + if self.psa_softmax: + y = F.softmax(y, dim=1) + x = torch.bmm(x.view(n, c, h * w), y.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + elif self.psa_type == 2: + x_col = self.reduce(x) + x_dis = self.reduce_p(x) + n, c, h, w = x_col.size() + if self.shrink_factor != 1: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + x_col = F.interpolate(x_col, size=(h, w), mode='bilinear', align_corners=True) + x_dis = F.interpolate(x_dis, size=(h, w), mode='bilinear', align_corners=True) + y_col = self.attention(x_col) + y_dis = self.attention_p(x_dis) + if self.compact: + y_dis = y_dis.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y_col = PF.psa_mask(y_col, 0, self.mask_h, self.mask_w) + y_dis = PF.psa_mask(y_dis, 1, self.mask_h, self.mask_w) + if self.psa_softmax: + y_col = F.softmax(y_col, dim=1) + y_dis = F.softmax(y_dis, dim=1) + x_col = torch.bmm(x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + x_dis = torch.bmm(x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + x = torch.cat([x_col, x_dis], 1) + x = self.proj(x) + if self.shrink_factor != 1: + h = (h - 1) * self.shrink_factor + 1 + w = (w - 1) * self.shrink_factor + 1 + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + return torch.cat((out, x), 1) + + +class PSANet(nn.Module): + def __init__(self, layers=50, dropout=0.1, classes=2, zoom_factor=8, use_psa=True, psa_type=2, compact=False, + shrink_factor=2, mask_h=59, mask_w=59, normalization_factor=1.0, psa_softmax=True, + criterion=nn.CrossEntropyLoss(ignore_index=255), pretrained=True): + super(PSANet, self).__init__() + assert layers in [50, 101, 152] + assert classes > 1 + assert zoom_factor in [1, 2, 4, 8] + assert psa_type in [0, 1, 2] + self.zoom_factor = zoom_factor + self.use_psa = use_psa + self.criterion = criterion + + if layers == 50: + resnet = models.resnet50(pretrained=pretrained,deep_base=True) + elif layers == 101: + resnet = models.resnet101(pretrained=pretrained) + else: + resnet = models.resnet152(pretrained=pretrained) + self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool) + self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4 + + for n, m in self.layer3.named_modules(): + if 'conv2' in n: + m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) + elif 'downsample.0' in n: + m.stride = (1, 1) + for n, m in self.layer4.named_modules(): + if 'conv2' in n: + m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) + elif 'downsample.0' in n: + m.stride = (1, 1) + + fea_dim = 2048 + if use_psa: + self.psa = PSA(fea_dim, 512, psa_type, compact, shrink_factor, mask_h, mask_w, normalization_factor, 
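+            # PSA.forward returns torch.cat((out, x), 1), i.e. the input features
+            # concatenated with the attended ones, hence fea_dim *= 2 below.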
psa_softmax) + fea_dim *= 2 + self.cls = nn.Sequential( + nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(512), + nn.ReLU(inplace=True), + nn.Dropout2d(p=dropout), + nn.Conv2d(512, classes, kernel_size=1) + ) + if self.training: + self.aux = nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(inplace=True), + nn.Dropout2d(p=dropout), + nn.Conv2d(256, classes, kernel_size=1) + ) + + def forward(self, x, y=None): + x_size = x.size() + assert (x_size[2] - 1) % 8 == 0 and (x_size[3] - 1) % 8 == 0 + h = int((x_size[2] - 1) / 8 * self.zoom_factor + 1) + w = int((x_size[3] - 1) / 8 * self.zoom_factor + 1) + + x = self.layer0(x) + x = self.layer1(x) + x = self.layer2(x) + x_tmp = self.layer3(x) + x = self.layer4(x_tmp) + if self.use_psa: + x = self.psa(x) + x = self.cls(x) + if self.zoom_factor != 1: + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + + if self.training: + aux = self.aux(x_tmp) + if self.zoom_factor != 1: + aux = F.interpolate(aux, size=(h, w), mode='bilinear', align_corners=True) + main_loss = self.criterion(x, y) + aux_loss = self.criterion(aux, y) + return x.max(1)[1], main_loss, aux_loss + else: + return x + + +if __name__ == '__main__': + import os + os.environ["CUDA_VISIBLE_DEVICES"] = '0' + crop_h = crop_w = 465 + input = torch.rand(4, 3, crop_h, crop_w).cuda() + compact = False + mask_h, mask_w = None, None + shrink_factor = 2 + if compact: + mask_h = (crop_h - 1) // (8 * shrink_factor) + 1 + mask_w = (crop_w - 1) // (8 * shrink_factor) + 1 + else: + assert (mask_h is None and mask_w is None) or (mask_h is not None and mask_w is not None) + if mask_h is None and mask_w is None: + mask_h = 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1 + mask_w = 2 * ((crop_w - 1) // (8 * shrink_factor) + 1) - 1 + else: + assert (mask_h % 2 == 1) and (mask_h >= 3) and (mask_h <= 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1) + assert (mask_w % 2 == 1) and (mask_w >= 3) and (mask_w <= 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1) + + model = PSANet(layers=50, dropout=0.1, classes=21, zoom_factor=8, use_psa=True, psa_type=2, compact=compact, + shrink_factor=shrink_factor, mask_h=mask_h, mask_w=mask_w, psa_softmax=True, pretrained=False).cuda() + print(model) + model.eval() + output = model(input) + print('PSANet', output.size()) diff --git a/segutils/core/models/psanet_old.py b/segutils/core/models/psanet_old.py new file mode 100644 index 0000000..71a6db7 --- /dev/null +++ b/segutils/core/models/psanet_old.py @@ -0,0 +1,208 @@ +"""Point-wise Spatial Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import CollectAttention, DistributeAttention +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + + +#运行失败,name '_C' is not defined。也是跟psa_block模块的实现有关:用到了自定义的torch.autograd.Function(里面用到了cpp文件,找不到文件出错) + + +__all__ = ['PSANet', 'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc', + 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys', + 'get_psanet_resnet152_citys'] + + +class PSANet(SegBaseModel): + r"""PSANet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). 
+ norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network for Scene Parsing." + ECCV-2018. + """ + + def __init__(self, nclass, backbone='resnet', aux=False, pretrained_base=False, **kwargs): + super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base, **kwargs) + self.head = _PSAHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _PSAHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PSAHead, self).__init__() + self.collect = _CollectModule(2048, 512, 60, 60, norm_layer, **kwargs) + self.distribute = _DistributeModule(2048, 512, 60, 60, norm_layer, **kwargs) + + self.conv_post = nn.Sequential( + nn.Conv2d(1024, 2048, 1, bias=False), + norm_layer(2048), + nn.ReLU(True)) + self.project = nn.Sequential( + nn.Conv2d(4096, 512, 3, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, x): + global_feature_collect = self.collect(x) + global_feature_distribute = self.distribute(x) + + global_feature = torch.cat([global_feature_collect, global_feature_distribute], dim=1) + out = self.conv_post(global_feature) + out = F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=True) + out = torch.cat([x, out], dim=1) + out = self.project(out) + + return out + + +class _CollectModule(nn.Module): + def __init__(self, in_channels, reduced_channels, feat_w, feat_h, norm_layer, **kwargs): + super(_CollectModule, self).__init__() + self.conv_reduce = nn.Sequential( + nn.Conv2d(in_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True)) + self.conv_adaption = nn.Sequential( + nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True), + nn.Conv2d(reduced_channels, (feat_w - 1) * (feat_h), 1, bias=False)) + self.collect_attention = CollectAttention() + + self.reduced_channels = reduced_channels + self.feat_w = feat_w + self.feat_h = feat_h + + def forward(self, x): + x = self.conv_reduce(x) + # shrink + x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True) + x_adaption = self.conv_adaption(x_shrink) + ca = self.collect_attention(x_adaption) + global_feature_collect_list = list() + for i in range(x_shrink.shape[0]): + x_shrink_i = x_shrink[i].view(self.reduced_channels, -1) + ca_i = ca[i].view(ca.shape[1], -1) + global_feature_collect_list.append( + torch.mm(x_shrink_i, ca_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2)) + global_feature_collect = torch.cat(global_feature_collect_list) + + return global_feature_collect + + +class _DistributeModule(nn.Module): + def __init__(self, in_channels, reduced_channels, feat_w, feat_h, norm_layer, **kwargs): + super(_DistributeModule, self).__init__() + self.conv_reduce = nn.Sequential( + nn.Conv2d(in_channels, 
reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True)) + self.conv_adaption = nn.Sequential( + nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True), + nn.Conv2d(reduced_channels, (feat_w - 1) * (feat_h), 1, bias=False)) + self.distribute_attention = DistributeAttention() + + self.reduced_channels = reduced_channels + self.feat_w = feat_w + self.feat_h = feat_h + + def forward(self, x): + x = self.conv_reduce(x) + x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True) + x_adaption = self.conv_adaption(x_shrink) + da = self.distribute_attention(x_adaption) + global_feature_distribute_list = list() + for i in range(x_shrink.shape[0]): + x_shrink_i = x_shrink[i].view(self.reduced_channels, -1) + da_i = da[i].view(da.shape[1], -1) + global_feature_distribute_list.append( + torch.mm(x_shrink_i, da_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2)) + global_feature_distribute = torch.cat(global_feature_distribute_list) + + return global_feature_distribute + + +def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=False, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + # from ..data.dataloader import datasets + model = PSANet(4, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + # if pretrained: + # from .model_store import get_model_file + # device = torch.device(kwargs['local_rank']) + # model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + # map_location=device)) + return model + + +def get_psanet_resnet50_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet50', **kwargs) + + +def get_psanet_resnet101_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet101', **kwargs) + + +def get_psanet_resnet152_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet152', **kwargs) + + +def get_psanet_resnet50_citys(**kwargs): + return get_psanet('citys', 'resnet50', **kwargs) + + +def get_psanet_resnet101_citys(**kwargs): + return get_psanet('citys', 'resnet101', **kwargs) + + +def get_psanet_resnet152_citys(**kwargs): + return get_psanet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_psanet_resnet50_voc() + img = torch.randn(1, 3, 480, 480) + output = model(img) diff --git a/segutils/core/models/pspnet.py b/segutils/core/models/pspnet.py new file mode 100644 index 0000000..6960e57 --- /dev/null +++ b/segutils/core/models/pspnet.py @@ -0,0 +1,185 @@ +"""Pyramid Scene Parsing Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['PSPNet', 'get_psp', 'get_psp_resnet50_voc', 'get_psp_resnet50_ade', 'get_psp_resnet101_voc', + 'get_psp_resnet101_ade', 'get_psp_resnet101_citys', 'get_psp_resnet101_coco'] + + +class PSPNet(SegBaseModel): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. 
+ + Reference: + Zhao, Hengshuang, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia. + "Pyramid scene parsing network." *CVPR*, 2017 + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(PSPNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _PSPHead(nclass, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +def _PSP1x1Conv(in_channels, out_channels, norm_layer, norm_kwargs): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + +class _PyramidPooling(nn.Module): + def __init__(self, in_channels, **kwargs): + super(_PyramidPooling, self).__init__() + out_channels = int(in_channels / 4) + self.avgpool1 = nn.AdaptiveAvgPool2d(1) + self.avgpool2 = nn.AdaptiveAvgPool2d(2) + self.avgpool3 = nn.AdaptiveAvgPool2d(3) + self.avgpool4 = nn.AdaptiveAvgPool2d(6) + self.conv1 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv2 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv3 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv4 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + + def forward(self, x): + size = x.size()[2:] + feat1 = F.interpolate(self.conv1(self.avgpool1(x)), size, mode='bilinear', align_corners=True) + feat2 = F.interpolate(self.conv2(self.avgpool2(x)), size, mode='bilinear', align_corners=True) + feat3 = F.interpolate(self.conv3(self.avgpool3(x)), size, mode='bilinear', align_corners=True) + feat4 = F.interpolate(self.conv4(self.avgpool4(x)), size, mode='bilinear', align_corners=True) + return torch.cat([x, feat1, feat2, feat3, feat4], dim=1) + + +class _PSPHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_PSPHead, self).__init__() + self.psp = _PyramidPooling(2048, norm_layer=norm_layer, norm_kwargs=norm_kwargs) + self.block = nn.Sequential( + nn.Conv2d(4096, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, x): + x = self.psp(x) + return self.block(x) + + +def get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + dataset : str, default pascal_voc + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
+ Examples + -------- + >>> model = get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False) + >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('psp_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_psp_resnet50_voc(**kwargs): + return get_psp('pascal_voc', 'resnet50', **kwargs) + + +def get_psp_resnet50_ade(**kwargs): + return get_psp('ade20k', 'resnet50', **kwargs) + + +def get_psp_resnet101_voc(**kwargs): + return get_psp('pascal_voc', 'resnet101', **kwargs) + + +def get_psp_resnet101_ade(**kwargs): + return get_psp('ade20k', 'resnet101', **kwargs) + + +def get_psp_resnet101_citys(**kwargs): + return get_psp('citys', 'resnet101', **kwargs) + + +def get_psp_resnet101_coco(**kwargs): + return get_psp('coco', 'resnet101', **kwargs) + + +if __name__ == '__main__': + # model = get_psp_resnet50_voc() + # img = torch.randn(4, 3, 480, 480) + # output = model(img) + input = torch.rand(2, 3, 512, 512) + model = PSPNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/segutils/core/models/segbase.py b/segutils/core/models/segbase.py new file mode 100644 index 0000000..dd06266 --- /dev/null +++ b/segutils/core/models/segbase.py @@ -0,0 +1,60 @@ +"""Base Model for Semantic Segmentation""" +import torch.nn as nn + +from ..nn import JPU +from .base_models.resnetv1b import resnet50_v1s, resnet101_v1s, resnet152_v1s + +__all__ = ['SegBaseModel'] + + +class SegBaseModel(nn.Module): + r"""Base Model for Semantic Segmentation + + Parameters + ---------- + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). 
+ """ + + def __init__(self, nclass, aux, backbone='resnet50', jpu=False, pretrained_base=True, **kwargs): + super(SegBaseModel, self).__init__() + dilated = False if jpu else True + self.aux = aux + self.nclass = nclass + if backbone == 'resnet50': + self.pretrained = resnet50_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + elif backbone == 'resnet101': + self.pretrained = resnet101_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + elif backbone == 'resnet152': + self.pretrained = resnet152_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + + self.jpu = JPU([512, 1024, 2048], width=512, **kwargs) if jpu else None + + def base_forward(self, x): + """forwarding pre-trained network""" + x = self.pretrained.conv1(x) + x = self.pretrained.bn1(x) + x = self.pretrained.relu(x) + x = self.pretrained.maxpool(x) + c1 = self.pretrained.layer1(x) + c2 = self.pretrained.layer2(c1) + c3 = self.pretrained.layer3(c2) + c4 = self.pretrained.layer4(c3) + + if self.jpu: + return self.jpu(c1, c2, c3, c4) + else: + return c1, c2, c3, c4 #返回的是layer1,2,3,4的输出 + + def evaluate(self, x): + """evaluating network with inputs and targets""" + return self.forward(x)[0] + + def demo(self, x): + pred = self.forward(x) + if self.aux: + pred = pred[0] + return pred diff --git a/segutils/core/nn.zip b/segutils/core/nn.zip new file mode 100644 index 0000000..eea3167 Binary files /dev/null and b/segutils/core/nn.zip differ diff --git a/segutils/core/nn/__init__.py b/segutils/core/nn/__init__.py new file mode 100644 index 0000000..218bee9 --- /dev/null +++ b/segutils/core/nn/__init__.py @@ -0,0 +1,7 @@ +"""Seg NN Modules""" +#from .sync_bn.syncbn import * +#from .syncbn import * +from .ca_block import * +from .psa_block import * +from .jpu import * +from .basic import * \ No newline at end of file diff --git a/segutils/core/nn/__pycache__/__init__.cpython-36.pyc b/segutils/core/nn/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..1a7ec4d Binary files /dev/null and b/segutils/core/nn/__pycache__/__init__.cpython-36.pyc differ diff --git a/segutils/core/nn/__pycache__/__init__.cpython-38.pyc b/segutils/core/nn/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..742e10a Binary files /dev/null and b/segutils/core/nn/__pycache__/__init__.cpython-38.pyc differ diff --git a/segutils/core/nn/__pycache__/basic.cpython-36.pyc b/segutils/core/nn/__pycache__/basic.cpython-36.pyc new file mode 100644 index 0000000..16fd916 Binary files /dev/null and b/segutils/core/nn/__pycache__/basic.cpython-36.pyc differ diff --git a/segutils/core/nn/__pycache__/basic.cpython-38.pyc b/segutils/core/nn/__pycache__/basic.cpython-38.pyc new file mode 100644 index 0000000..e109f42 Binary files /dev/null and b/segutils/core/nn/__pycache__/basic.cpython-38.pyc differ diff --git a/segutils/core/nn/__pycache__/ca_block.cpython-36.pyc b/segutils/core/nn/__pycache__/ca_block.cpython-36.pyc new file mode 100644 index 0000000..73d0d24 Binary files /dev/null and b/segutils/core/nn/__pycache__/ca_block.cpython-36.pyc differ diff --git a/segutils/core/nn/__pycache__/ca_block.cpython-38.pyc b/segutils/core/nn/__pycache__/ca_block.cpython-38.pyc new file mode 100644 index 0000000..7fda675 Binary files /dev/null and b/segutils/core/nn/__pycache__/ca_block.cpython-38.pyc differ diff --git a/segutils/core/nn/__pycache__/jpu.cpython-36.pyc b/segutils/core/nn/__pycache__/jpu.cpython-36.pyc new file mode 100644 index 
0000000..c2795d4 Binary files /dev/null and b/segutils/core/nn/__pycache__/jpu.cpython-36.pyc differ
diff --git a/segutils/core/nn/__pycache__/jpu.cpython-38.pyc b/segutils/core/nn/__pycache__/jpu.cpython-38.pyc
new file mode 100644
index 0000000..cfc7e47
Binary files /dev/null and b/segutils/core/nn/__pycache__/jpu.cpython-38.pyc differ
diff --git a/segutils/core/nn/__pycache__/psa_block.cpython-36.pyc b/segutils/core/nn/__pycache__/psa_block.cpython-36.pyc
new file mode 100644
index 0000000..27c7aa8
Binary files /dev/null and b/segutils/core/nn/__pycache__/psa_block.cpython-36.pyc differ
diff --git a/segutils/core/nn/__pycache__/psa_block.cpython-38.pyc b/segutils/core/nn/__pycache__/psa_block.cpython-38.pyc
new file mode 100644
index 0000000..907baa4
Binary files /dev/null and b/segutils/core/nn/__pycache__/psa_block.cpython-38.pyc differ
diff --git a/segutils/core/nn/basic.py b/segutils/core/nn/basic.py
new file mode 100644
index 0000000..3b5a186
--- /dev/null
+++ b/segutils/core/nn/basic.py
@@ -0,0 +1,134 @@
+"""Basic Module for Semantic Segmentation"""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+__all__ = ['_ConvBNPReLU', '_ConvBN', '_BNPReLU', '_ConvBNReLU', '_DepthwiseConv', 'InvertedResidual']
+
+
+class _ConvBNReLU(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
+                 dilation=1, groups=1, relu6=False, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(_ConvBNReLU, self).__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
+        self.bn = norm_layer(out_channels)
+        self.relu = nn.ReLU6(False) if relu6 else nn.ReLU(False)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        x = self.relu(x)
+        return x
+
+
+class _ConvBNPReLU(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
+                 dilation=1, groups=1, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(_ConvBNPReLU, self).__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
+        self.bn = norm_layer(out_channels)
+        self.prelu = nn.PReLU(out_channels)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        x = self.prelu(x)
+        return x
+
+
+class _ConvBN(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
+                 dilation=1, groups=1, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(_ConvBN, self).__init__()
+        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
+        self.bn = norm_layer(out_channels)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        return x
+
+
+class _BNPReLU(nn.Module):
+    def __init__(self, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(_BNPReLU, self).__init__()
+        self.bn = norm_layer(out_channels)
+        self.prelu = nn.PReLU(out_channels)
+
+    def forward(self, x):
+        x = self.bn(x)
+        x = self.prelu(x)
+        return x
+
+
+# -----------------------------------------------------------------
+# For PSPNet
+# -----------------------------------------------------------------
+class _PSPModule(nn.Module):
+    def __init__(self, in_channels, sizes=(1, 2, 3, 6), **kwargs):
+        super(_PSPModule, self).__init__()
+        out_channels = int(in_channels / 4)
+        self.avgpools = nn.ModuleList()
+        self.convs = nn.ModuleList()
+        for size in sizes:
+            self.avgpools.append(nn.AdaptiveAvgPool2d(size))
+            self.convs.append(_ConvBNReLU(in_channels, out_channels, 1, **kwargs))
+
+    def forward(self, x):
+        size = x.size()[2:]
+        feats = [x]
+        # pyramid pooling: pool to each size, 1x1-conv, upsample back, concat with input
+        for avgpool, conv in zip(self.avgpools, self.convs):
+            feats.append(F.interpolate(conv(avgpool(x)), size, mode='bilinear', align_corners=True))
+        return torch.cat(feats, dim=1)
+
+
+# -----------------------------------------------------------------
+# For MobileNet
+# -----------------------------------------------------------------
+class _DepthwiseConv(nn.Module):
+    """conv_dw in MobileNet"""
+
+    def __init__(self, in_channels, out_channels, stride, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(_DepthwiseConv, self).__init__()
+        self.conv = nn.Sequential(
+            _ConvBNReLU(in_channels, in_channels, 3, stride, 1, groups=in_channels, norm_layer=norm_layer),
+            _ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer))
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+# -----------------------------------------------------------------
+# For MobileNetV2
+# -----------------------------------------------------------------
+class InvertedResidual(nn.Module):
+    def __init__(self, in_channels, out_channels, stride, expand_ratio, norm_layer=nn.BatchNorm2d, **kwargs):
+        super(InvertedResidual, self).__init__()
+        assert stride in [1, 2]
+        self.use_res_connect = stride == 1 and in_channels == out_channels
+
+        layers = list()
+        inter_channels = int(round(in_channels * expand_ratio))
+        if expand_ratio != 1:
+            # pw
+            layers.append(_ConvBNReLU(in_channels, inter_channels, 1, relu6=True, norm_layer=norm_layer))
+        layers.extend([
+            # dw
+            _ConvBNReLU(inter_channels, inter_channels, 3, stride, 1,
+                        groups=inter_channels, relu6=True, norm_layer=norm_layer),
+            # pw-linear
+            nn.Conv2d(inter_channels, out_channels, 1, bias=False),
+            norm_layer(out_channels)])
+        self.conv = nn.Sequential(*layers)
+
+    def forward(self, x):
+        if self.use_res_connect:
+            return x + self.conv(x)
+        else:
+            return self.conv(x)
+
+
+if __name__ == '__main__':
+    x = torch.randn(1, 32, 64, 64)
+    model = InvertedResidual(32, 64, 2, 1)
+    out = model(x)
diff --git a/segutils/core/nn/ca_block.py b/segutils/core/nn/ca_block.py
new file mode 100644
index 0000000..954c293
--- /dev/null
+++ b/segutils/core/nn/ca_block.py
@@ -0,0 +1,72 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from torch.autograd.function import once_differentiable
+#from core.nn import _C
+
+__all__ = ['CrissCrossAttention', 'ca_weight', 'ca_map']
+
+
+class _CAWeight(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, t, f):
+        weight = _C.ca_forward(t, f)
+
+        ctx.save_for_backward(t, f)
+
+        return weight
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, dw):
+        t, f = ctx.saved_tensors
+
+        dt, df = _C.ca_backward(dw, t, f)
+        return dt, df
+
+
+class _CAMap(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, weight, g):
+        out = _C.ca_map_forward(weight, g)
+
+        ctx.save_for_backward(weight, g)
+
+        return out
+
+    @staticmethod
+    @once_differentiable
+    def backward(ctx, dout):
+        weight, g = ctx.saved_tensors
+
+        dw, dg = _C.ca_map_backward(dout, weight, g)
+
+        return dw, dg
+
+
+ca_weight = _CAWeight.apply
+ca_map = _CAMap.apply
+
+
+class CrissCrossAttention(nn.Module):
+    """Criss-Cross Attention Module"""
+
+    def __init__(self, in_channels):
+        super(CrissCrossAttention, self).__init__()
+        self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
+        self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
+        self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
+        self.gamma = nn.Parameter(torch.zeros(1))
+
+    def forward(self, x):
+        # ca_weight/ca_map dispatch to the compiled _C extension (import commented
+        # out above); without building the csrc extension this raises NameError.
+        # ca_weight returns per-position affinities of shape (N, H+W-1, H, W).
+        proj_query =
self.query_conv(x) + proj_key = self.key_conv(x) + proj_value = self.value_conv(x) + + energy = ca_weight(proj_query, proj_key) + attention = F.softmax(energy, 1) + out = ca_map(attention, proj_value) + out = self.gamma * out + x + + return out diff --git a/segutils/core/nn/csrc/ca.h b/segutils/core/nn/csrc/ca.h new file mode 100644 index 0000000..1a93b36 --- /dev/null +++ b/segutils/core/nn/csrc/ca.h @@ -0,0 +1,58 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor ca_forward(const at::Tensor& t, + const at::Tensor& f) { + if (t.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_forward_cuda(t, f); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_forward_cpu(t, f); +} + +std::tuple ca_backward(const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f) { + if (dw.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_backward_cuda(dw, t, f); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_backward_cpu(dw, t, f); +} + +at::Tensor ca_map_forward(const at::Tensor& weight, + const at::Tensor& g) { + if (weight.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_map_forward_cuda(weight, g); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_map_forward_cpu(weight, g); +} + +std::tuple ca_map_backward(const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g) { + if (dout.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_map_backward_cuda(dout, weight, g); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_map_backward_cpu(dout, weight, g); +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/ca_cpu.cpp b/segutils/core/nn/csrc/cpu/ca_cpu.cpp new file mode 100644 index 0000000..6029c51 --- /dev/null +++ b/segutils/core/nn/csrc/cpu/ca_cpu.cpp @@ -0,0 +1,24 @@ +#include "cpu/vision.h" + + +at::Tensor ca_forward_cpu( + const torch::Tensor& t, + const torch::Tensor& f) { + AT_ERROR("Not implemented on the CPU");} + +std::tuple ca_backward_cpu( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f) { + AT_ERROR("Not implemented on the CPU");} + +at::Tensor ca_map_forward_cpu( + const at::Tensor& weight, + const at::Tensor& g) { + AT_ERROR("Not implemented on the CPU");} + +std::tuple ca_map_backward_cpu( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g) { + AT_ERROR("Not implemented on the CPU");} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/psa_cpu.cpp b/segutils/core/nn/csrc/cpu/psa_cpu.cpp new file mode 100644 index 0000000..9e0e765 --- /dev/null +++ b/segutils/core/nn/csrc/cpu/psa_cpu.cpp @@ -0,0 +1,13 @@ +#include "cpu/vision.h" + + +at::Tensor psa_forward_cpu( + const torch::Tensor& hc, + const int forward_type) { + AT_ERROR("Not implemented on the CPU");} + +at::Tensor psa_backward_cpu( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type) { + AT_ERROR("Not implemented on the CPU");} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/syncbn_cpu.cpp b/segutils/core/nn/csrc/cpu/syncbn_cpu.cpp new file mode 100644 index 0000000..70b5db4 --- /dev/null +++ b/segutils/core/nn/csrc/cpu/syncbn_cpu.cpp @@ -0,0 +1,45 @@ +#include +#include +#include + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); 
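+    // Editor's note: broadcast_to reshapes the per-channel statistics to
+    // (1, C, 1, ...) so they broadcast over the batch and spatial dims; the CPU
+    // forward below appears to treat exs_ as a precomputed standard deviation
+    // rather than E[x^2].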
+ + return v.view(broadcast_size); + } +} + +at::Tensor batchnorm_forward_cpu( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output = (input_ - broadcast_to(ex_, input_)) / broadcast_to(exs_, input_); + output = output * broadcast_to(gamma_, input_) + broadcast_to(beta_, input_); + return output; +} + +// Not implementing CPU backward for now +std::vector batchnorm_backward_cpu( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + at::Tensor gradinput = at::zeros_like(input_); + at::Tensor gradgamma = at::zeros_like(gamma_); + at::Tensor gradbeta = at::zeros_like(beta_); + at::Tensor gradMean = at::zeros_like(ex_); + at::Tensor gradStd = at::zeros_like(exs_); + return {gradinput, gradMean, gradStd, gradgamma, gradbeta}; +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cpu/vision.h b/segutils/core/nn/csrc/cpu/vision.h new file mode 100644 index 0000000..8a824fe --- /dev/null +++ b/segutils/core/nn/csrc/cpu/vision.h @@ -0,0 +1,47 @@ +#pragma once +#include + + +at::Tensor ca_forward_cpu( + const at::Tensor& t, + const at::Tensor& f); + +std::tuple ca_backward_cpu( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f); + +at::Tensor ca_map_forward_cpu( + const at::Tensor& weight, + const at::Tensor& g); + +std::tuple ca_map_backward_cpu( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g); + +at::Tensor psa_forward_cpu( + const at::Tensor& hc, + const int forward_type); + +at::Tensor psa_backward_cpu( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type); + +at::Tensor batchnorm_forward_cpu( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector batchnorm_backward_cpu( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); \ No newline at end of file diff --git a/segutils/core/nn/csrc/cuda/ca_cuda.cu b/segutils/core/nn/csrc/cuda/ca_cuda.cu new file mode 100644 index 0000000..ba459fa --- /dev/null +++ b/segutils/core/nn/csrc/cuda/ca_cuda.cu @@ -0,0 +1,324 @@ +#include +#include + +#include +#include +#include + +template +__global__ void ca_forward_kernel(const T *t, const T *f, T *weight, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int z = blockIdx.z; + + if (x < width && y < height && z < height+width-1) { + for (int batch = 0; batch < num; ++batch) { + for (int plane = 0; plane < chn; ++plane) { + T _t = t[(batch * chn + plane) * sp + y * width + x]; + + if (z < width) { + int i = z; + T _f = f[(batch * chn + plane) * sp + y * width + i]; + weight[(batch * len + i) * sp + y*width + x] += _t*_f; + } + else { + int i = z - width; + int j = i +__global__ void ca_backward_kernel_t(const T *dw, const T *t, const T *f, T *dt, + int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + 
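+        // dt at (x, y) accumulates gradient from its criss-cross neighbourhood:
+        // the W entries of row y first, then the H - 1 remaining entries of
+        // column x (the shared cell is stored once, hence len = H + W - 1).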
for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dw = dw[(batch * len + i) * sp + y*width + x]; + T _f = f[(batch * chn + plane) * sp + y*width + i]; + dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i +__global__ void ca_backward_kernel_f(const T *dw, const T *t, const T *f, T *df, + int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dw = dw[(batch * len + x) * sp + y*width + i]; + T _t = t[(batch * chn + plane) * sp + y*width + i]; + df[(batch * chn + plane) * sp + y*width + x] += _dw * _t; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i>y ? y : y-1; + + T _dw = dw[(batch * len + width + j) * sp + i*width + x]; + T _t = t[(batch * chn + plane) * sp + i*width + x]; + df[(batch * chn + plane) * sp + y*width + x] += _dw * _t; + } + } + } +} + +template +__global__ void ca_map_forward_kernel(const T *weight, const T *g, T *out, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _g = g[(batch * chn + plane) * sp + y*width + i]; + T _w = weight[(batch * len + i) * sp + y*width + x]; + out[(batch * chn + plane) * sp + y*width + x] += _g * _w; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + + int j = i +__global__ void ca_map_backward_kernel_w(const T *dout, const T *weight, const T *g, T *dw, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int z = blockIdx.z; + + if (x < width && y < height && z < height+width-1) { + for (int batch = 0; batch < num; ++batch) { + for (int plane = 0; plane < chn; ++plane) { + T _dout = dout[(batch * chn + plane) * sp + y*width + x]; + + if (z < width) { + int i = z; + T _g = g[(batch * chn + plane) * sp + y*width + i]; + dw[(batch * len + i) * sp + y*width + x] += _dout * _g; + } + else { + int i = z - width; + int j = i +__global__ void ca_map_backward_kernel_g(const T *dout, const T *weight, const T *g, T *dg, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dout = dout[(batch * chn + plane) * sp + y*width + i]; + T _w = weight[(batch * len + x) * sp + y*width + i]; + dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i>y ? 
y : y-1; + + T _dout = dout[(batch * chn + plane) * sp + i*width + x]; + T _w = weight[(batch * len + width + j) * sp + i*width + x]; + dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w; + } + } + } +} + +/* + * Implementations + */ +at::Tensor ca_forward_cuda(const at::Tensor& t, const at::Tensor& f) { + AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = t.size(0); + auto c = t.size(1); + auto h = t.size(2); + auto w = t.size(3); + + at::Tensor weight = at::zeros({n, h + w - 1, h, w}, t.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = h + w; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_forward", [&] { + ca_forward_kernel<<>>( + t.contiguous().data(), + f.contiguous().data(), + weight.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return weight; +} + +std::tuple ca_backward_cuda(const at::Tensor& dw, const at::Tensor& t, const at::Tensor& f) { + AT_ASSERTM(dw.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = t.size(0); + auto c = t.size(1); + auto h = t.size(2); + auto w = t.size(3); + + at::Tensor dt = at::zeros_like(t); + at::Tensor df = at::zeros_like(f); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = c; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_backward_kernel_t", [&] { + ca_backward_kernel_t<<>> ( + dw.contiguous().data(), + t.contiguous().data(), + f.contiguous().data(), + dt.contiguous().data(), + n, c, h, w); + }); + + AT_DISPATCH_FLOATING_TYPES(f.type(), "ca_backward_kernel_f", [&] { + ca_backward_kernel_f<<>> ( + dw.contiguous().data(), + t.contiguous().data(), + f.contiguous().data(), + df.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(dt, df); +} + +at::Tensor ca_map_forward_cuda(const at::Tensor& weight, const at::Tensor& g) { + AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = g.size(0); + auto c = g.size(1); + auto h = g.size(2); + auto w = g.size(3); + + at::Tensor out = at::zeros_like(g); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = c; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_forward", [&] { + ca_map_forward_kernel<<>>( + weight.contiguous().data(), + g.contiguous().data(), + out.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return out; +} + +std::tuple ca_map_backward_cuda(const at::Tensor& dout, const at::Tensor& weight, const at::Tensor& g) { + AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = dout.size(0); + auto c = dout.size(1); + auto h = dout.size(2); + auto w = dout.size(3); + + at::Tensor dw = 
at::zeros_like(weight); + at::Tensor dg = at::zeros_like(g); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = h + w; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(weight.type(), "ca_map_backward_kernel_w", [&] { + ca_map_backward_kernel_w<scalar_t><<<blocks, threads, 0, stream>>>( + dout.contiguous().data<scalar_t>(), + weight.contiguous().data<scalar_t>(), + g.contiguous().data<scalar_t>(), + dw.contiguous().data<scalar_t>(), + n, c, h, w); + }); + + AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_backward_kernel_g", [&] { + ca_map_backward_kernel_g<scalar_t><<<blocks, threads, 0, stream>>>( + dout.contiguous().data<scalar_t>(), + weight.contiguous().data<scalar_t>(), + g.contiguous().data<scalar_t>(), + dg.contiguous().data<scalar_t>(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(dw, dg); +} \ No newline at end of file
diff --git a/segutils/core/nn/csrc/cuda/helper.h b/segutils/core/nn/csrc/cuda/helper.h new file mode 100644 index 0000000..cc5ea88 --- /dev/null +++ b/segutils/core/nn/csrc/cuda/helper.h @@ -0,0 +1,334 @@ +#include +#include +#include + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template <typename In, typename Out> +struct ScalarConvert { + static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. +__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template <typename T> +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +// Sum across all threads within a warp +template <typename T> +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template <typename DType, typename Acctype> +struct Float2 { + Acctype v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) : v1(ScalarConvert<DType, Acctype>::to(v1)), v2(ScalarConvert<DType, Acctype>::to(v2)) {} + __device__ Float2(DType v) : v1(ScalarConvert<DType, Acctype>::to(v)), v2(ScalarConvert<DType, Acctype>::to(v)) {} + __device__ Float2(int v) : v1(ScalarConvert<int, Acctype>::to(v)), v2(ScalarConvert<int, Acctype>::to(v)) {} + __device__ Float2& operator+=(const Float2& a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template <typename DType, typename Acctype> +static __device__ __forceinline__ Float2<DType, Acctype> warpSum(Float2<DType, Acctype> value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +template <typename T, typename Op> +__device__ T reduceD( + Op op, int b, int i, int k, int D) { + T sum = 0; + for (int x = threadIdx.x; x < D; x += blockDim.x) { + sum += op(b,i,k,x); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template <typename T, typename Op> +__device__ T reduceN( + Op op, int b, int k, int d, int N) { + T sum = 0; + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(b,x,k,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template <typename T, typename Op> +__device__ T reduceK( + Op op, int b, int i, int d, int K) { + T sum = 0; + for (int x = threadIdx.x; x < K; x += blockDim.x) { + sum += op(b,i,x,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template <typename T, typename Op> +__device__ T reduceBN( + Op op, + int k, int d, int B, int N) { + T sum = 0; + for (int batch = 0; batch < B; ++batch) { + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(batch,x,k,d); + } + } + // sum over NumThreads within a warp + sum = warpSum(sum); + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template <typename DType, int Dim> +struct DeviceTensor { + public: + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? size[i] : 0; + } + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i < Dim); + return size_[i]; + } + + inline __device__ __host__ int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + inline __device__ __host__ DeviceTensor<DType, Dim-1> select(const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor<DType, Dim-1> tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ DeviceTensor<DType, Dim-1> operator[](const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor<DType, Dim-1> tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ size_t InnerSize() const { + assert(Dim >= 3); + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + inline __device__ __host__ size_t ChannelCount() const { + assert(Dim >= 3); + return size_[1]; + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[Dim]; +}; + +template <typename DType> +struct DeviceTensor<DType, 1> { + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + size_[0] = size ? size[0] : 0; + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i == 0); + return size_[0]; + } + + inline __device__ __host__ int numElements() const { + return size_[0]; + } + + inline __device__ __host__ DType &operator[](const size_t x) const { + return *(dptr_ + x); + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[1]; +}; + +template <typename DType, int Dim> +static DeviceTensor<DType, Dim> devicetensor(const at::Tensor &blob) { + DType *data = blob.data<DType>(); + DeviceTensor<DType, Dim> tensor(data, nullptr); + for (int i = 0; i < Dim; ++i) { + tensor.size_[i] = blob.size(i); + } + return tensor; +} \ No newline at end of file
diff --git a/segutils/core/nn/csrc/cuda/psa_cuda.cu b/segutils/core/nn/csrc/cuda/psa_cuda.cu new file mode 100644 index 0000000..c47c98a --- /dev/null +++ b/segutils/core/nn/csrc/cuda/psa_cuda.cu @@ -0,0 +1,214 @@ +#include +#include + +#include +#include +#include + +#define PSA_TYPE_COLLECT 1 +#define PSA_TYPE_DISTRIBUTE 2 + +const int CUDA_NUM_THREADS = 512; + +inline int GET_BLOCKS(const int N) { + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template <typename T> +__global__ void psa_collect_forward_kernel(const T *hc, T *out, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend;
hidx++) { + for (int widx = wstart; widx < wend; widx++) { + out[(n * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)) * height * width + h * width + w] = + hc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w]; + } + } + } +} + +template <typename T> +__global__ void psa_distribute_forward_kernel(const T *hc, T *out, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + out[(n * height * width + h * width + w) * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)] = + hc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w]; + } + } + } +} + +template <typename T> +__global__ void psa_collect_backward_kernel(const T *dout, T *dhc, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + dhc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w] = + dout[(n * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)) * height * width + h * width + w]; + } + } + } +} + +template <typename T> +__global__ void psa_distribute_backward_kernel(const T *dout, T *dhc, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + //
(hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + dhc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w] = + dout[(n * height * width + h * width + w) * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)]; + } + } + } +} + +at::Tensor psa_forward_cuda(const at::Tensor& hc, const int forward_type) { + AT_ASSERTM(hc.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = hc.size(0); + auto c = hc.size(1); + auto h = hc.size(2); + auto w = hc.size(3); + + at::Tensor out = at::zeros({n, h * w, h * w}, hc.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + int nthreads = n * h * w; + + switch (forward_type) { + case PSA_TYPE_COLLECT: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_forward", [&] { + psa_collect_forward_kernel<scalar_t><<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( + hc.contiguous().data<scalar_t>(), + out.contiguous().data<scalar_t>(), + n, h, w); + }); + break; + case PSA_TYPE_DISTRIBUTE: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_forward", [&] { + psa_distribute_forward_kernel<scalar_t><<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( + hc.contiguous().data<scalar_t>(), + out.contiguous().data<scalar_t>(), + n, h, w); + }); + break; + } + THCudaCheck(cudaGetLastError()); + return out; +} + +at::Tensor psa_backward_cuda(const at::Tensor& dout, const at::Tensor& hc, const int forward_type) { + AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(hc.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = hc.size(0); + auto c = hc.size(1); + auto h = hc.size(2); + auto w = hc.size(3); + + at::Tensor dhc = at::zeros_like(hc); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + int nthreads = n * h * w; + + switch (forward_type) { + case PSA_TYPE_COLLECT: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_backward", [&] { + psa_collect_backward_kernel<scalar_t><<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( + dout.contiguous().data<scalar_t>(), + dhc.contiguous().data<scalar_t>(), + n, h, w); + }); + break; + case PSA_TYPE_DISTRIBUTE: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_backward", [&] { + psa_distribute_backward_kernel<scalar_t><<<GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( + dout.contiguous().data<scalar_t>(), + dhc.contiguous().data<scalar_t>(), + n, h, w); + }); + break; + } + THCudaCheck(cudaGetLastError()); + return dhc; +} \ No newline at end of file
diff --git a/segutils/core/nn/csrc/cuda/syncbn_cuda.cu b/segutils/core/nn/csrc/cuda/syncbn_cuda.cu new file mode 100644 index 0000000..dcaed67 --- /dev/null +++ b/segutils/core/nn/csrc/cuda/syncbn_cuda.cu @@ -0,0 +1,488 @@ +#include +// #include +#include +#include +#include + +#include +#include + +#include "helper.h" + +namespace { + +template <typename DType, typename Acctype, typename DeviceTensor3> +struct GradOp { + __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) + : beta(m), output(i), gradOutput(g) {} + __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { + DType g = gradOutput[batch][plane][n]; + DType c = ScalarConvert<Acctype, DType>::to(output[batch][plane][n] - beta); + return Float2<DType, Acctype>(g, g * c); + } + const Acctype beta; + const DeviceTensor3 output; + const DeviceTensor3 gradOutput; +}; + +template <typename DType, typename Acctype, typename DeviceTensor> +struct SumOp { + __device__ SumOp(DeviceTensor i) : input(i){} + __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { + DType g = input[batch][plane][n]; + return Float2<DType, Acctype>(g, g * g); + } + DType mean; + DeviceTensor input; +}; + +// Sum across (batch, x/y/z) applying Op() pointwise +template <typename T, typename Op, typename DeviceTensor3> +__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { + T sum = (T)0; + for (int batch = 0; batch < tensor.getSize(0); ++batch) { + for (int x
= threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__global__ void batchnorm_forward_kernel ( + DeviceTensor output, + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + output[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void inp_batchnorm_forward_kernel ( + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + input[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void expectation_forward_kernel ( + DeviceTensor input, + DeviceTensor ex, + DeviceTensor exs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + SumOp g(input); + Float2 res = reduce, + SumOp, DeviceTensor>(g, input, c); + DType xsum = res.v1; + DType xsquare = res.v2; + if (threadIdx.x == 0) { + ex[c] = xsum * norm; + exs[c] = xsquare * norm; + } +} + +template +__global__ void batchnorm_backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor input, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(mean[c], input, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; + gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP * invstd; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void inp_batchnorm_backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor output, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor 
mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(beta[c], output, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; + gradExs[c] = - 0.5 * invstd * invstd * dotP; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP / gamma[c]; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void expectation_backward_kernel ( + DeviceTensor gradInput, + DeviceTensor input, + DeviceTensor gradEx, + DeviceTensor gradExs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * + input[batch][c][x] * norm; + } + } +} + +template +__global__ void inp_expectation_backward_kernel ( + DeviceTensor gradInput, + DeviceTensor output, + DeviceTensor gradEx, + DeviceTensor gradExs, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * + ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; + } + } +} + +} // namespace + +at::Tensor batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output_ = at::zeros_like(input_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "batchnorm_forward_cuda", ([&] { + /* Device tensors */ + DeviceTensor output = devicetensor(output_); + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + batchnorm_forward_kernel<<>>( + output, input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return output_; +} + +at::Tensor inp_batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "inp_batchnorm_forward_cuda", ([&] { + /* 
Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + inp_batchnorm_forward_kernel<<>>( + input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return input_; +} + +std::vector batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(input_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "batchnorm_backward_cuda", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + batchnorm_backward_kernel + <<>>( + gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector inp_batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(output_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "inp_batchnorm_backward_cuda", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor output = devicetensor(output_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + inp_batchnorm_backward_kernel + <<>>( + gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector expectation_forward_cuda( + const 
at::Tensor input_) { + /* outputs */ + auto ex_ = torch::zeros({input_.size(1)}, input_.options()); + auto exs_ = torch::zeros({input_.size(1)}, input_.options()); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "expectation_forward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor exs = devicetensor(exs_); + /* kernel function */ + expectation_forward_kernel + <<>>(input, ex, exs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {ex_, exs_}; +} + +at::Tensor expectation_backward_cuda( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + /* outputs */ + at::Tensor gradInput_ = at::zeros_like(input_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "expectation_backward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + /* kernel function */ + expectation_backward_kernel + <<>>(gradInput, input, gradEx, gradExs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} + +at::Tensor inp_expectation_backward_cuda( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs */ + //auto gradInput_ = at::zeros_like(output_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "inp_expectation_backward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(output_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + inp_expectation_backward_kernel + <<>>(gradInput, input, gradEx, gradExs, + ex, std, gamma, beta, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/cuda/vision.h b/segutils/core/nn/csrc/cuda/vision.h new file mode 100644 index 0000000..6696840 --- /dev/null +++ b/segutils/core/nn/csrc/cuda/vision.h @@ -0,0 +1,84 @@ +#pragma once +#include +#include + + +at::Tensor ca_forward_cuda( + const at::Tensor& t, + const at::Tensor& f); + +std::tuple ca_backward_cuda( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f); + +at::Tensor ca_map_forward_cuda( + const at::Tensor& weight, + const at::Tensor& g); + +std::tuple ca_map_backward_cuda( + const at::Tensor& 
dout, + const at::Tensor& weight, + const at::Tensor& g); + +at::Tensor psa_forward_cuda( + const at::Tensor& hc, + const int forward_type); + +at::Tensor psa_backward_cuda( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type); + +at::Tensor batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +at::Tensor inp_batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector inp_batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector expectation_forward_cuda( + const at::Tensor input_); + +at::Tensor expectation_backward_cuda( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_); + +at::Tensor inp_expectation_backward_cuda( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); \ No newline at end of file diff --git a/segutils/core/nn/csrc/psa.h b/segutils/core/nn/csrc/psa.h new file mode 100644 index 0000000..1702581 --- /dev/null +++ b/segutils/core/nn/csrc/psa.h @@ -0,0 +1,33 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor psa_forward(const at::Tensor& hc, + const int forward_type) { + if (hc.type().is_cuda()) { + #ifdef WITH_CUDA + return psa_forward_cuda(hc, forward_type); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return psa_forward_cpu(hc, forward_type); +} + +at::Tensor psa_backward(const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type) { + if (hc.type().is_cuda()) { + #ifdef WITH_CUDA + return psa_backward_cuda(dout, hc, forward_type); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return psa_backward_cpu(dout, hc, forward_type); +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/syncbn.h b/segutils/core/nn/csrc/syncbn.h new file mode 100644 index 0000000..fbcf695 --- /dev/null +++ b/segutils/core/nn/csrc/syncbn.h @@ -0,0 +1,118 @@ +#pragma once + +#include +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor batchnorm_forward(const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return batchnorm_forward_cuda(input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return batchnorm_forward_cpu(input_, ex_, exs_, gamma_, beta_, eps); +} + +at::Tensor inp_batchnorm_forward(const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_batchnorm_forward_cuda(input_, ex_, exs_, gamma_, beta_, eps); + 
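// The in-place variant normalizes input_'s storage directly and returns the
// same tensor, so no separate output is allocated; the in-place kernels have
// no CPU fallback (the non-CUDA path raises "Not implemented on the CPU").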
#else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector batchnorm_backward(const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (gradoutput_.type().is_cuda()) { + #ifdef WITH_CUDA + return batchnorm_backward_cuda(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return batchnorm_backward_cpu(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); +} + +std::vector inp_batchnorm_backward(const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (gradoutput_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_batchnorm_backward_cuda(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector expectation_forward(const at::Tensor input_) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return expectation_forward_cuda(input_); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor expectation_backward(const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return expectation_backward_cuda(input_, gradEx_, gradExs_); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor inp_expectation_backward(const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (output_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_expectation_backward_cuda(gradInput_, output_, gradEx_, gradExs_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/segutils/core/nn/csrc/vision.cpp b/segutils/core/nn/csrc/vision.cpp new file mode 100644 index 0000000..c369176 --- /dev/null +++ b/segutils/core/nn/csrc/vision.cpp @@ -0,0 +1,19 @@ +#include "ca.h" +#include "psa.h" +#include "syncbn.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ca_forward", &ca_forward, "ca_forward"); + m.def("ca_backward", &ca_backward, "ca_backward"); + m.def("ca_map_forward", &ca_map_forward, "ca_map_forward"); + m.def("ca_map_backward", &ca_map_backward, "ca_map_backward"); + m.def("psa_forward", &psa_forward, "psa_forward"); + m.def("psa_backward", &psa_backward, "psa_backward"); + m.def("batchnorm_forward", &batchnorm_forward, "batchnorm_forward"); + m.def("inp_batchnorm_forward", &inp_batchnorm_forward, "inp_batchnorm_forward"); + m.def("batchnorm_backward", &batchnorm_backward, "batchnorm_backward"); + m.def("inp_batchnorm_backward", &inp_batchnorm_backward, "inp_batchnorm_backward"); + m.def("expectation_forward", &expectation_forward, "expectation_forward"); + m.def("expectation_backward", &expectation_backward, "expectation_backward"); + m.def("inp_expectation_backward", &inp_expectation_backward, "inp_expectation_backward"); +} \ No newline at end of file diff --git a/segutils/core/nn/jpu.py 
b/segutils/core/nn/jpu.py new file mode 100644 index 0000000..db23bab --- /dev/null +++ b/segutils/core/nn/jpu.py @@ -0,0 +1,68 @@ +"""Joint Pyramid Upsampling""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['JPU'] + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, + dilation=1, bias=False, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.conv = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(inplanes) + self.pointwise = nn.Conv2d(inplanes, planes, 1, bias=bias) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.pointwise(x) + return x + + +# copy from: https://github.com/wuhuikai/FastFCN/blob/master/encoding/nn/customize.py +class JPU(nn.Module): + def __init__(self, in_channels, width=512, norm_layer=nn.BatchNorm2d, **kwargs): + super(JPU, self).__init__() + + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.conv4 = nn.Sequential( + nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.conv3 = nn.Sequential( + nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + + self.dilation1 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=1, dilation=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation2 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=2, dilation=2, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation3 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=4, dilation=4, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation4 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=8, dilation=8, bias=False), + norm_layer(width), + nn.ReLU(True)) + + def forward(self, *inputs): + feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])] + size = feats[-1].size()[2:] + feats[-2] = F.interpolate(feats[-2], size, mode='bilinear', align_corners=True) + feats[-3] = F.interpolate(feats[-3], size, mode='bilinear', align_corners=True) + feat = torch.cat(feats, dim=1) + feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], + dim=1) + + return inputs[0], inputs[1], inputs[2], feat diff --git a/segutils/core/nn/psa_block.py b/segutils/core/nn/psa_block.py new file mode 100644 index 0000000..c8ff11b --- /dev/null +++ b/segutils/core/nn/psa_block.py @@ -0,0 +1,71 @@ +import torch +import torch.nn as nn + +from torch.autograd.function import once_differentiable +#from core.nn import _C + +__all__ = ['CollectAttention', 'DistributeAttention', 'psa_collect', 'psa_distribute'] + + +class _PSACollect(torch.autograd.Function): + @staticmethod + def forward(ctx, hc): + out = _C.psa_forward(hc, 1) + + ctx.save_for_backward(hc) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + hc = ctx.saved_tensors + + dhc = _C.psa_backward(dout, hc[0], 1) + + return dhc + + +class _PSADistribute(torch.autograd.Function): + @staticmethod + def forward(ctx, hc): + out = _C.psa_forward(hc, 2) + + ctx.save_for_backward(hc) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + hc = ctx.saved_tensors + + dhc = _C.psa_backward(dout, hc[0], 2) + + return dhc + + +psa_collect = _PSACollect.apply 
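# Illustrative sketch (not part of the original module): a slow pure-PyTorch
# reference for the collect op, handy for sanity-checking the CUDA kernel. It
# assumes the layout used by psa_collect_forward_kernel: hc is
# (N, (2H-1)*(2W-1), H, W), one weight per relative offset, and the result is
# (N, H*W, H*W); psa_collect_reference is a hypothetical name. Swapping the
# two output indices in the innermost assignment gives the distribute variant.
def psa_collect_reference(hc):
    n, c, h, w = hc.shape
    out_h, out_w = 2 * h - 1, 2 * w - 1
    assert c == out_h * out_w
    half_h, half_w = h - 1, w - 1  # centre of the (2H-1) x (2W-1) offset grid
    out = hc.new_zeros(n, h * w, h * w)
    for i in range(h):
        for j in range(w):
            # same valid window as the kernel's [hstart, hend) x [wstart, wend)
            for hi in range(max(0, half_h - i), min(out_h, h + half_h - i)):
                for wi in range(max(0, half_w - j), min(out_w, w + half_w - j)):
                    out[:, (hi + i - half_h) * w + (wi + j - half_w), i * w + j] = hc[:, hi * out_w + wi, i, j]
    return out
# With the _C extension built and imported, torch.allclose(
#     psa_collect(hc.cuda()).cpu(), psa_collect_reference(hc))
# should hold for, e.g., a random hc of shape (2, 15 * 15, 8, 8).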
+psa_distribute = _PSADistribute.apply + + +class CollectAttention(nn.Module): + """Collect Attention Generation Module""" + + def __init__(self): + super(CollectAttention, self).__init__() + + def forward(self, x): + out = psa_collect(x) + return out + + +class DistributeAttention(nn.Module): + """Distribute Attention Generation Module""" + + def __init__(self): + super(DistributeAttention, self).__init__() + + def forward(self, x): + out = psa_distribute(x) + return out diff --git a/segutils/core/nn/setup.py b/segutils/core/nn/setup.py new file mode 100644 index 0000000..ec800c4 --- /dev/null +++ b/segutils/core/nn/setup.py @@ -0,0 +1,56 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# !/usr/bin/env python +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/90c226cf10e098263d1df28bda054a5f22513b4f/setup.py + +import os +import glob +import torch + +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME + +requirements = ["torch"] + + +def get_extension(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, "csrc") + + main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) + source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) + + sources = main_file + source_cpu + extension = CppExtension + + define_macros = [] + + if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1": + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + ) + ] + + return ext_modules + + +setup( + name="semantic_segmentation", + version="0.1", + author="tramac", + description="semantic segmentation in pytorch", + ext_modules=get_extension(), + cmdclass={"build_ext": BuildExtension} +) \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/__init__.py b/segutils/core/nn/sync_bn/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/__pycache__/__init__.cpython-36.pyc b/segutils/core/nn/sync_bn/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..fd85555 Binary files /dev/null and b/segutils/core/nn/sync_bn/__pycache__/__init__.cpython-36.pyc differ diff --git a/segutils/core/nn/sync_bn/__pycache__/functions.cpython-36.pyc b/segutils/core/nn/sync_bn/__pycache__/functions.cpython-36.pyc new file mode 100644 index 0000000..063b818 Binary files /dev/null and b/segutils/core/nn/sync_bn/__pycache__/functions.cpython-36.pyc differ diff --git a/segutils/core/nn/sync_bn/__pycache__/syncbn.cpython-36.pyc b/segutils/core/nn/sync_bn/__pycache__/syncbn.cpython-36.pyc new file mode 100644 index 0000000..f75150f Binary files /dev/null and b/segutils/core/nn/sync_bn/__pycache__/syncbn.cpython-36.pyc differ diff --git a/segutils/core/nn/sync_bn/functions.py b/segutils/core/nn/sync_bn/functions.py new file mode 100644 index 0000000..b0102e6 --- /dev/null +++ b/segutils/core/nn/sync_bn/functions.py @@ -0,0 +1,285 @@ +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +## Created by: Hang Zhang +## Email: zhanghang0704@gmail.com +## Copyright (c) 2018 +## +## This source code is licensed under the 
MIT-style license found in the +## LICENSE file in the root directory of this source tree +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +"""Synchronized Cross-GPU Batch Normalization functions""" +import torch.cuda.comm as comm + +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from core.nn.sync_bn import lib + +__all__ = ['syncbatchnorm', 'inp_syncbatchnorm'] + + +class syncbatchnorm_(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-05, + activation="none", slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + assert activation == 'none' + + # contiguous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + if x.is_cuda: + _ex, _exs = lib.gpu.expectation_forward(x) + else: + raise NotImplementedError + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsqueeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + + # BN forward + if x.is_cuda: + y = lib.gpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + else: + y = lib.cpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return y +
+ @staticmethod + @once_differentiable + def backward(ctx, dz): + x, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # BN backward + if dz.is_cuda: + dx, _dex, _dexs, dgamma, dbeta = lib.gpu.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplementedError + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + if x.is_cuda: + dx_ = lib.gpu.expectation_backward(x, _dex, _dexs) + else: + raise NotImplementedError + dx = dx + dx_ + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] +
+ +def _act_forward(ctx, x): + if ctx.activation.lower() == "leaky_relu": + if x.is_cuda: + lib.gpu.leaky_relu_forward(x, ctx.slope) + else: + raise NotImplementedError + else: + assert ctx.activation == 'none' + + +def _act_backward(ctx, x, dx): + if ctx.activation.lower() == "leaky_relu": + if x.is_cuda: + lib.gpu.leaky_relu_backward(x, dx, ctx.slope) + else: + raise NotImplementedError + else: + assert ctx.activation == 'none' +
+ +class inp_syncbatchnorm_(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-5, + activation='none', slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + + # contiguous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + if x.is_cuda: + _ex, _exs = lib.gpu.expectation_forward(x) + else: + raise NotImplementedError + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsqueeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + ctx.mark_dirty(x) + + # BN forward + activation + if x.is_cuda: + lib.gpu.batchnorm_inp_forward(x, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplementedError + + _act_forward(ctx, x) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return x +
+ @staticmethod + @once_differentiable + def backward(ctx, dz): + z, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + # BN backward + if dz.is_cuda: + dx, _dex, _dexs, dgamma, dbeta = lib.gpu.batchnorm_inp_backward(dz, z, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplementedError + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() +
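# Worker/master handshake, mirroring the forward pass: each worker pushes its
# local (_dex, _dexs) partial sums into master_queue; the master averages them
# across GPUs and broadcasts the result back through the per-worker queues, so
# every replica applies identical gradient statistics.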
ctx.worker_queue.task_done() + + if z.is_cuda: + lib.gpu.expectation_inp_backward(dx, z, _dex, _dexs, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +syncbatchnorm = syncbatchnorm_.apply +inp_syncbatchnorm = inp_syncbatchnorm_.apply diff --git a/segutils/core/nn/sync_bn/lib/__init__.py b/segutils/core/nn/sync_bn/lib/__init__.py new file mode 100644 index 0000000..98c3374 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/__init__.py @@ -0,0 +1,20 @@ +import os +import torch +from torch.utils.cpp_extension import load + +cwd = os.path.dirname(os.path.realpath(__file__)) +cpu_path = os.path.join(cwd, 'cpu') +gpu_path = os.path.join(cwd, 'gpu') + +cpu = load('sync_cpu', [ + os.path.join(cpu_path, 'operator.cpp'), + os.path.join(cpu_path, 'syncbn_cpu.cpp'), +], build_directory=cpu_path, verbose=False) + +if torch.cuda.is_available(): + gpu = load('sync_gpu', [ + os.path.join(gpu_path, 'operator.cpp'), + os.path.join(gpu_path, 'activation_kernel.cu'), + os.path.join(gpu_path, 'syncbn_kernel.cu'), + ], extra_cuda_cflags=["--expt-extended-lambda"], + build_directory=gpu_path, verbose=False) diff --git a/segutils/core/nn/sync_bn/lib/__pycache__/__init__.cpython-36.pyc b/segutils/core/nn/sync_bn/lib/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..0110085 Binary files /dev/null and b/segutils/core/nn/sync_bn/lib/__pycache__/__init__.cpython-36.pyc differ diff --git a/segutils/core/nn/sync_bn/lib/cpu/.ninja_deps b/segutils/core/nn/sync_bn/lib/cpu/.ninja_deps new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/cpu/.ninja_log b/segutils/core/nn/sync_bn/lib/cpu/.ninja_log new file mode 100644 index 0000000..d4c4d9d --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/.ninja_log @@ -0,0 +1,7 @@ +# ninja log v5 +0 6679 1555417150 syncbn_cpu.o b884354b4810778d +0 7702 1555417151 operator.o df6e270344a1d164 +7703 8115 1555417151 sync_cpu.so d148b4e40b0af67e +0 5172 1557113015 syncbn_cpu.o 9052547bb175072 +0 6447 1557113016 operator.o 209836e0b0c1e97e +6447 6613 1557113016 sync_cpu.so d148b4e40b0af67e diff --git a/segutils/core/nn/sync_bn/lib/cpu/build.ninja b/segutils/core/nn/sync_bn/lib/cpu/build.ninja new file mode 100644 index 0000000..e432f66 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/build.ninja @@ -0,0 +1,21 @@ +ninja_required_version = 1.3 +cxx = c++ + +cflags = -DTORCH_EXTENSION_NAME=sync_cpu -DTORCH_API_INCLUDE_EXTENSION_H -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/torch/csrc/api/include -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/TH -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/THC -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/include/python3.6m -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++11 +ldflags = -shared + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out + depfile = $out.d + deps = gcc + +rule link + command = $cxx $in $ldflags -o 
$out + +build operator.o: compile /home/tramac/PycharmProjects/awesome-semantic-segmentation-pytorch/core/nn/sync_bn/lib/cpu/operator.cpp +build syncbn_cpu.o: compile /home/tramac/PycharmProjects/awesome-semantic-segmentation-pytorch/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp + +build sync_cpu.so: link operator.o syncbn_cpu.o + +default sync_cpu.so + diff --git a/segutils/core/nn/sync_bn/lib/cpu/operator.cpp b/segutils/core/nn/sync_bn/lib/cpu/operator.cpp new file mode 100644 index 0000000..5981ffc --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/operator.cpp @@ -0,0 +1,8 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("batchnorm_forward", &BatchNorm_Forward_CPU, "BatchNorm forward (CPU)"); + m.def("batchnorm_backward", &BatchNorm_Backward_CPU, "BatchNorm backward (CPU)"); + m.def("sumsquare_forward", &Sum_Square_Forward_CPU, "SumSqu forward (CPU)"); + m.def("sumsquare_backward", &Sum_Square_Backward_CPU, "SumSqu backward (CPU)"); +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/cpu/operator.h b/segutils/core/nn/sync_bn/lib/cpu/operator.h new file mode 100644 index 0000000..215fd53 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/operator.h @@ -0,0 +1,26 @@ +#include +#include + +at::Tensor BatchNorm_Forward_CPU( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_); + +std::vector BatchNorm_Backward_CPU( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + bool train); + +std::vector Sum_Square_Forward_CPU( + const at::Tensor input_); + +at::Tensor Sum_Square_Backward_CPU( + const at::Tensor input_, + const at::Tensor gradSum_, + const at::Tensor gradSquare_); \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/cpu/operator.o b/segutils/core/nn/sync_bn/lib/cpu/operator.o new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/cpu/setup.py b/segutils/core/nn/sync_bn/lib/cpu/setup.py new file mode 100644 index 0000000..b0ecd6c --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CppExtension + +setup( + name='syncbn_cpu', + ext_modules=[ + CppExtension('syncbn_cpu', [ + 'operator.cpp', + 'syncbn_cpu.cpp', + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) diff --git a/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp b/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp new file mode 100644 index 0000000..6b6bb73 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp @@ -0,0 +1,61 @@ +#include +#include +#include + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +at::Tensor BatchNorm_Forward_CPU( + const at::Tensor input, + const at::Tensor mean, + const at::Tensor std, + const at::Tensor gamma, + const at::Tensor beta) { + auto output = (input - broadcast_to(mean, input)) / broadcast_to(std, input); + output = output * broadcast_to(gamma, input) + broadcast_to(beta, input); + return output; +} + +// Not implementing CPU backward for now +std::vector BatchNorm_Backward_CPU( + const at::Tensor gradoutput, + const at::Tensor input, + const at::Tensor mean, + 
const at::Tensor std, + const at::Tensor gamma, + const at::Tensor beta, + bool train) { + /* outputs*/ + at::Tensor gradinput = at::zeros_like(input); + at::Tensor gradgamma = at::zeros_like(gamma); + at::Tensor gradbeta = at::zeros_like(beta); + at::Tensor gradMean = at::zeros_like(mean); + at::Tensor gradStd = at::zeros_like(std); + return {gradinput, gradMean, gradStd, gradgamma, gradbeta}; +} + +std::vector Sum_Square_Forward_CPU( + const at::Tensor input) { + /* outputs */ + at::Tensor sum = torch::zeros({input.size(1)}, input.options()); + at::Tensor square = torch::zeros({input.size(1)}, input.options()); + return {sum, square}; +} + +at::Tensor Sum_Square_Backward_CPU( + const at::Tensor input, + const at::Tensor gradSum, + const at::Tensor gradSquare) { + /* outputs */ + at::Tensor gradInput = at::zeros_like(input); + return gradInput; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.o b/segutils/core/nn/sync_bn/lib/cpu/syncbn_cpu.o new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/gpu/__init__.py b/segutils/core/nn/sync_bn/lib/gpu/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/segutils/core/nn/sync_bn/lib/gpu/activation_kernel.cu b/segutils/core/nn/sync_bn/lib/gpu/activation_kernel.cu new file mode 100644 index 0000000..e696667 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/activation_kernel.cu @@ -0,0 +1,46 @@ +#include +// #include +#include +#include +#include + +#include + +#include +#include + + +namespace { + +template +inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + thrust::transform_if(th_dz, th_dz + count, th_z, th_dz, + [slope] __device__ (const T& dz) { return dz * slope; }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(th_z, th_z + count, th_z, + [slope] __device__ (const T& z) { return z / slope; }, + [] __device__ (const T& z) { return z < 0; }); +} + +} + +void LeakyRelu_Forward_CUDA(at::Tensor z, float slope) { + at::leaky_relu_(z, slope); +} + +void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope) { + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "LeakyRelu_Backward_CUDA", ([&] { + leaky_relu_backward_impl(z.data(), dz.data(), slope, count); + })); + /* + // unstable after scaling + at::leaky_relu_(z, 1.0 / slope); + at::leaky_relu_backward(dz, z, slope); + */ +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/common.h b/segutils/core/nn/sync_bn/lib/gpu/common.h new file mode 100644 index 0000000..aa38296 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/common.h @@ -0,0 +1,224 @@ +#include +#include + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template +struct ScalarConvert { + static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. 
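// For example, getMSB(32) == 5: warpSum below relies on this so that a full
// 32-lane warp is reduced in log2(32) = 5 XOR-shuffle (butterfly) rounds.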
+__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +// Sum across all threads within a warp +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +struct Float2 { + Acctype v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) : v1(ScalarConvert::to(v1)), v2(ScalarConvert::to(v2)) {} + __device__ Float2(DType v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2(int v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2& operator+=(const Float2& a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ Float2 warpSum(Float2 value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +template +__device__ T reduceD( + Op op, int b, int i, int k, int D) { + T sum = 0; + for (int x = threadIdx.x; x < D; x += blockDim.x) { + sum += op(b,i,k,x); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceN( + Op op, int b, int k, int d, int N) { + T sum = 0; + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(b,x,k,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceK( + Op op, int b, int i, int d, int K) { + T sum = 0; + for (int x = threadIdx.x; x < K; x += blockDim.x) { + sum += op(b,i,x,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % 
WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceBN( + Op op, + int k, int d, int B, int N) { + T sum = 0; + for (int batch = 0; batch < B; ++batch) { + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(batch,x,k,d); + } + } + // sum over NumThreads within a warp + sum = warpSum(sum); + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/device_tensor.h b/segutils/core/nn/sync_bn/lib/gpu/device_tensor.h new file mode 100644 index 0000000..c67dfae --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/device_tensor.h @@ -0,0 +1,110 @@ +#include + +template +struct DeviceTensor { + public: + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? size[i] : 0; + } + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i < Dim); + return size_[i]; + } + + inline __device__ __host__ int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + inline __device__ __host__ DeviceTensor select(const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ DeviceTensor operator[](const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ size_t InnerSize() const { + assert(Dim >= 3); + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + inline __device__ __host__ size_t ChannelCount() const { + assert(Dim >= 3); + return size_[1]; + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[Dim]; +}; + +template +struct DeviceTensor { + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + size_[0] = size ? 
size[0] : 0;
+  }
+
+  inline __device__ __host__ unsigned getSize(const int i) const {
+    assert(i == 0);
+    return size_[0];
+  }
+
+  inline __device__ __host__ int numElements() const {
+    return size_[0];
+  }
+
+  inline __device__ __host__ DType &operator[](const size_t x) const {
+    return *(dptr_ + x);
+  }
+
+  inline __device__ __host__ DType* data_ptr() const {
+    return dptr_;
+  }
+
+  DType *dptr_;
+  int size_[1];
+};
+
+template <typename DType, int Dim>
+static DeviceTensor<DType, Dim> devicetensor(const at::Tensor &blob) {
+  DType *data = blob.data<DType>();
+  DeviceTensor<DType, Dim> tensor(data, nullptr);
+  for (int i = 0; i < Dim; ++i) {
+    tensor.size_[i] = blob.size(i);
+  }
+  return tensor;
+}
\ No newline at end of file
diff --git a/segutils/core/nn/sync_bn/lib/gpu/operator.cpp b/segutils/core/nn/sync_bn/lib/gpu/operator.cpp
new file mode 100644
index 0000000..48e28fe
--- /dev/null
+++ b/segutils/core/nn/sync_bn/lib/gpu/operator.cpp
@@ -0,0 +1,13 @@
+#include "operator.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("batchnorm_forward", &BatchNorm_Forward_CUDA, "BatchNorm forward (CUDA)");
+  m.def("batchnorm_inp_forward", &BatchNorm_Forward_Inp_CUDA, "Inplace BatchNorm forward (CUDA)");
+  m.def("batchnorm_backward", &BatchNorm_Backward_CUDA, "BatchNorm backward (CUDA)");
+  m.def("batchnorm_inp_backward", &BatchNorm_Inp_Backward_CUDA, "Inplace BatchNorm backward (CUDA)");
+  m.def("expectation_forward", &Expectation_Forward_CUDA, "Expectation forward (CUDA)");
+  m.def("expectation_backward", &Expectation_Backward_CUDA, "Expectation backward (CUDA)");
+  m.def("expectation_inp_backward", &Expectation_Inp_Backward_CUDA, "Inplace Expectation backward (CUDA)");
+  m.def("leaky_relu_forward", &LeakyRelu_Forward_CUDA, "Leaky ReLU forward (CUDA)");
+  m.def("leaky_relu_backward", &LeakyRelu_Backward_CUDA, "Leaky ReLU backward (CUDA)");
+}
\ No newline at end of file
diff --git a/segutils/core/nn/sync_bn/lib/gpu/operator.h b/segutils/core/nn/sync_bn/lib/gpu/operator.h
new file mode 100644
index 0000000..246570d
--- /dev/null
+++ b/segutils/core/nn/sync_bn/lib/gpu/operator.h
@@ -0,0 +1,59 @@
+#include <torch/extension.h>
+#include <vector>
+
+at::Tensor BatchNorm_Forward_CUDA(
+    const at::Tensor input_,
+    const at::Tensor mean_,
+    const at::Tensor std_,
+    const at::Tensor gamma_,
+    const at::Tensor beta_,
+    float eps);
+
+at::Tensor BatchNorm_Forward_Inp_CUDA(
+    const at::Tensor input_,
+    const at::Tensor ex_,
+    const at::Tensor exs_,
+    const at::Tensor gamma_,
+    const at::Tensor beta_,
+    float eps);
+
+std::vector<at::Tensor> BatchNorm_Backward_CUDA(
+    const at::Tensor gradoutput_,
+    const at::Tensor input_,
+    const at::Tensor ex_,
+    const at::Tensor exs_,
+    const at::Tensor gamma_,
+    const at::Tensor beta_,
+    float eps);
+
+std::vector<at::Tensor> BatchNorm_Inp_Backward_CUDA(
+    const at::Tensor gradoutput_,
+    const at::Tensor output_,
+    const at::Tensor ex_,
+    const at::Tensor exs_,
+    const at::Tensor gamma_,
+    const at::Tensor beta_,
+    float eps);
+
+std::vector<at::Tensor> Expectation_Forward_CUDA(
+    const at::Tensor input_);
+
+at::Tensor Expectation_Backward_CUDA(
+    const at::Tensor input_,
+    const at::Tensor gradEx_,
+    const at::Tensor gradExs_);
+
+at::Tensor Expectation_Inp_Backward_CUDA(
+    const at::Tensor gradInput_,
+    const at::Tensor output_,
+    const at::Tensor gradEx_,
+    const at::Tensor gradExs_,
+    const at::Tensor ex_,
+    const at::Tensor exs_,
+    const at::Tensor gamma_,
+    const at::Tensor beta_,
+    float eps);
+
+void LeakyRelu_Forward_CUDA(at::Tensor z, float slope);
+
+void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope);
\ No newline at end of file
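The pybind11 module above is the extension's entire Python-facing surface. A hedged sketch of driving it directly once the extension is built (the setup.py that follows names the module sync_gpu; the (batch, channel, element) layout is an assumption read off how the kernels index their inputs):

import torch
import sync_gpu  # module name taken from CUDAExtension(...) in the setup.py below

x = torch.randn(4, 16, 1024, device='cuda')  # (B, C, N): N elements per channel
ex, exs = sync_gpu.expectation_forward(x)    # per-channel E[x] and E[x^2]
gamma = torch.ones(16, device='cuda')
beta = torch.zeros(16, device='cuda')
y = sync_gpu.batchnorm_forward(x, ex, exs, gamma, beta, 1e-5)  # eps is folded into std inside the op

diff --git 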
a/segutils/core/nn/sync_bn/lib/gpu/setup.py b/segutils/core/nn/sync_bn/lib/gpu/setup.py new file mode 100644 index 0000000..14c01f6 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='syncbn_gpu', + ext_modules=[ + CUDAExtension('sync_gpu', [ + 'operator.cpp', + 'activation_kernel.cu', + 'syncbn_kernel.cu', + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu b/segutils/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu new file mode 100644 index 0000000..2a7e840 --- /dev/null +++ b/segutils/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu @@ -0,0 +1,489 @@ +#include +// #include +#include +#include +#include + +#include "common.h" +#include "device_tensor.h" + +namespace { + +template +struct GradOp { + __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) + : beta(m), output(i), gradOutput(g) {} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = gradOutput[batch][plane][n]; + DType c = ScalarConvert::to(output[batch][plane][n] - beta); + return Float2(g, g * c); + } + const Acctype beta; + const DeviceTensor3 output; + const DeviceTensor3 gradOutput; +}; + +template +struct SumOp { + __device__ SumOp(DeviceTensor i) : input(i){} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = input[batch][plane][n]; + return Float2(g, g * g); + } + DType mean; + DeviceTensor input; +}; + +// Sum across (batch, x/y/z) applying Op() pointwise +template +__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { + T sum = (T)0; + for (int batch = 0; batch < tensor.getSize(0); ++batch) { + for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__global__ void BatchNorm_Forward_kernel ( + DeviceTensor output, + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + output[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void BatchNorm_Forward_Inp_kernel ( + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + input[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void BatchNorm_Backward_Inp_kernel ( + 
DeviceTensor gradoutput, + DeviceTensor output, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(beta[c], output, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; + gradExs[c] = - 0.5 * invstd * invstd * dotP; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP / gamma[c]; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void BatchNorm_Backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor input, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(mean[c], input, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; + gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP * invstd; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + + +template +__global__ void Expectation_Forward_kernel ( + DeviceTensor input, + DeviceTensor ex, + DeviceTensor exs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + SumOp g(input); + Float2 res = reduce, + SumOp, DeviceTensor>(g, input, c); + DType xsum = res.v1; + DType xsquare = res.v2; + if (threadIdx.x == 0) { + ex[c] = xsum * norm; + exs[c] = xsquare * norm; + } +} + +template +__global__ void Expectation_Backward_kernel ( + DeviceTensor gradInput, + DeviceTensor input, + DeviceTensor gradEx, + DeviceTensor gradExs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * + input[batch][c][x] * norm; + } + } +} + +template +__global__ void Expectation_Backward_Inp_kernel ( + DeviceTensor gradInput, + DeviceTensor output, + DeviceTensor gradEx, + DeviceTensor 
gradExs, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * + ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; + } + } +} + +} // namespace + +at::Tensor BatchNorm_Forward_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output_ = at::zeros_like(input_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor output = devicetensor(output_); + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + BatchNorm_Forward_kernel<<>>( + output, input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return output_; +} + +at::Tensor BatchNorm_Forward_Inp_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + BatchNorm_Forward_Inp_kernel<<>>( + input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return input_; +} + + +std::vector BatchNorm_Inp_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(output_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor output = devicetensor(output_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + 
BatchNorm_Backward_Inp_kernel + <<>>( + gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + + +std::vector BatchNorm_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(input_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + BatchNorm_Backward_kernel + <<>>( + gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector Expectation_Forward_CUDA( + const at::Tensor input_) { + /* outputs */ + auto ex_ = torch::zeros({input_.size(1)}, input_.options()); + auto exs_ = torch::zeros({input_.size(1)}, input_.options()); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor exs = devicetensor(exs_); + /* kernel function */ + Expectation_Forward_kernel + <<>>(input, ex, exs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {ex_, exs_}; +} + +at::Tensor Expectation_Backward_CUDA( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + /* outputs */ + at::Tensor gradInput_ = at::zeros_like(input_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + /* kernel function */ + Expectation_Backward_kernel + <<>>(gradInput, input, gradEx, gradExs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} + +at::Tensor 
Expectation_Inp_Backward_CUDA( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs */ + //auto gradInput_ = at::zeros_like(output_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "SumSquare_Backward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(output_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + Expectation_Backward_Inp_kernel + <<>>(gradInput, input, gradEx, gradExs, + ex, std, gamma, beta, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} \ No newline at end of file diff --git a/segutils/core/nn/sync_bn/syncbn.py b/segutils/core/nn/sync_bn/syncbn.py new file mode 100644 index 0000000..f1247af --- /dev/null +++ b/segutils/core/nn/sync_bn/syncbn.py @@ -0,0 +1,124 @@ +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +## Created by: Hang Zhang +## ECE Department, Rutgers University +## Email: zhang.hang@rutgers.edu +## Copyright (c) 2017 +## +## This source code is licensed under the MIT-style license found in the +## LICENSE file in the root directory of this source tree +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +"""Synchronized Cross-GPU Batch Normalization Module""" +import warnings +import torch + +from torch.nn.modules.batchnorm import _BatchNorm +from queue import Queue +from .functions import * + +__all__ = ['SyncBatchNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d'] + + +# Adopt from https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/syncbn.py +class SyncBatchNorm(_BatchNorm): + """Cross-GPU Synchronized Batch normalization (SyncBN) + + Parameters: + num_features: num_features from an expected input of + size batch_size x num_features x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + sync: a boolean value that when set to ``True``, synchronize across + different gpus. Default: ``True`` + activation : str + Name of the activation functions, one of: `leaky_relu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + Reference: + .. [1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." *ICML 2015* + .. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." 
*CVPR 2018*
+    Examples:
+        >>> m = SyncBatchNorm(100)
+        >>> net = torch.nn.DataParallel(m)
+        >>> output = net(input)
+    """
+
+    def __init__(self, num_features, eps=1e-5, momentum=0.1, sync=True, activation='none', slope=0.01, inplace=True):
+        super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=True)
+        self.activation = activation
+        self.inplace = False if activation == 'none' else inplace
+        self.slope = slope
+        self.devices = list(range(torch.cuda.device_count()))
+        self.sync = sync if len(self.devices) > 1 else False
+        # Initialize queues
+        self.worker_ids = self.devices[1:]
+        self.master_queue = Queue(len(self.worker_ids))
+        self.worker_queues = [Queue(1) for _ in self.worker_ids]
+
+    def forward(self, x):
+        # resize the input to (B, C, -1)
+        input_shape = x.size()
+        x = x.view(input_shape[0], self.num_features, -1)
+        if x.get_device() == self.devices[0]:
+            # Master mode
+            extra = {
+                "is_master": True,
+                "master_queue": self.master_queue,
+                "worker_queues": self.worker_queues,
+                "worker_ids": self.worker_ids
+            }
+        else:
+            # Worker mode
+            extra = {
+                "is_master": False,
+                "master_queue": self.master_queue,
+                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
+            }
+        if self.inplace:
+            return inp_syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var,
+                                     extra, self.sync, self.training, self.momentum, self.eps,
+                                     self.activation, self.slope).view(input_shape)
+        else:
+            return syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var,
+                                 extra, self.sync, self.training, self.momentum, self.eps,
+                                 self.activation, self.slope).view(input_shape)
+
+    def extra_repr(self):
+        if self.activation == 'none':
+            return 'sync={}'.format(self.sync)
+        else:
+            return 'sync={}, act={}, slope={}, inplace={}'.format(
+                self.sync, self.activation, self.slope, self.inplace)
+
+
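Before the deprecation shims, a hedged sketch of what this variant adds over the plain version: with activation='leaky_relu' and inplace=True, forward dispatches to inp_syncbatchnorm from .functions (not part of this diff), so the fused behavior sketched here is an assumption:

import torch
from core.nn.sync_bn.syncbn import SyncBatchNorm  # import path assumed from this file's location

bn = SyncBatchNorm(64, activation='leaky_relu', slope=0.01, inplace=True).cuda()
x = torch.randn(8, 64, 32, 32, device='cuda')
y = bn(x)  # normalize, scale/shift, then leaky_relu, handled in one fused call

+class BatchNorm1d(SyncBatchNorm):
+    """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
+                      .format('BatchNorm1d', SyncBatchNorm.__name__), DeprecationWarning)
+        super(BatchNorm1d, self).__init__(*args, **kwargs)
+
+
+class BatchNorm2d(SyncBatchNorm):
+    """BatchNorm2d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
+                      .format('BatchNorm2d', SyncBatchNorm.__name__), DeprecationWarning)
+        super(BatchNorm2d, self).__init__(*args, **kwargs)
+
+
+class BatchNorm3d(SyncBatchNorm):
+    """BatchNorm3d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."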
+ .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm3d, self).__init__(*args, **kwargs) diff --git a/segutils/core/nn/syncbn.py b/segutils/core/nn/syncbn.py new file mode 100644 index 0000000..c52ec1a --- /dev/null +++ b/segutils/core/nn/syncbn.py @@ -0,0 +1,223 @@ +# Adopt from https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/syncbn.py +"""Synchronized Cross-GPU Batch Normalization Module""" +import warnings +import torch +import torch.cuda.comm as comm + +from queue import Queue +from torch.autograd import Function +from torch.nn.modules.batchnorm import _BatchNorm +from torch.autograd.function import once_differentiable +from core.nn import _C + +__all__ = ['SyncBatchNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d'] + + +class _SyncBatchNorm(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-05, + activation="none", slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + assert activation == 'none' + + # continous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + _ex, _exs = _C.expectation_forward(x) + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsqueeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + + # BN forward + y = _C.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return y + + @staticmethod + @once_differentiable + def backward(ctx, dz): + x, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # BN backward + dx, _dex, _dexs, dgamma, dbeta = _C.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps) + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + dx_ = _C.expectation_backward(x, _dex, _dexs) + dx = dx + dx_ + + return dx, dgamma, dbeta, None, None, None, None, None, 
None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +syncbatchnorm = _SyncBatchNorm.apply + + +class SyncBatchNorm(_BatchNorm): + """Cross-GPU Synchronized Batch normalization (SyncBN) + + Parameters: + num_features: num_features from an expected input of + size batch_size x num_features x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + sync: a boolean value that when set to ``True``, synchronize across + different gpus. Default: ``True`` + activation : str + Name of the activation functions, one of: `leaky_relu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + Reference: + .. [1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." *ICML 2015* + .. [2] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, and Amit Agrawal. "Context Encoding for Semantic Segmentation." *CVPR 2018* + Examples: + >>> m = SyncBatchNorm(100) + >>> net = torch.nn.DataParallel(m) + >>> output = net(input) + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, sync=True, activation='none', slope=0.01): + super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=True) + self.activation = activation + self.slope = slope + self.devices = list(range(torch.cuda.device_count())) + self.sync = sync if len(self.devices) > 1 else False + # Initialize queues + self.worker_ids = self.devices[1:] + self.master_queue = Queue(len(self.worker_ids)) + self.worker_queues = [Queue(1) for _ in self.worker_ids] + + def forward(self, x): + # resize the input to (B, C, -1) + input_shape = x.size() + x = x.view(input_shape[0], self.num_features, -1) + if x.get_device() == self.devices[0]: + # Master mode + extra = { + "is_master": True, + "master_queue": self.master_queue, + "worker_queues": self.worker_queues, + "worker_ids": self.worker_ids + } + else: + # Worker mode + extra = { + "is_master": False, + "master_queue": self.master_queue, + "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())] + } + + return syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var, + extra, self.sync, self.training, self.momentum, self.eps, + self.activation, self.slope).view(input_shape) + + def extra_repr(self): + if self.activation == 'none': + return 'sync={}'.format(self.sync) + else: + return 'sync={}, act={}, slope={}'.format( + self.sync, self.activation, self.slope) + + +class BatchNorm1d(SyncBatchNorm): + """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`.""" + + def __init__(self, *args, **kwargs): + warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}." 
+                      .format('BatchNorm1d', SyncBatchNorm.__name__), DeprecationWarning)
+        super(BatchNorm1d, self).__init__(*args, **kwargs)
+
+
+class BatchNorm2d(SyncBatchNorm):
+    """BatchNorm2d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
+                      .format('BatchNorm2d', SyncBatchNorm.__name__), DeprecationWarning)
+        super(BatchNorm2d, self).__init__(*args, **kwargs)
+
+
+class BatchNorm3d(SyncBatchNorm):
+    """BatchNorm3d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
+                      .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning)
+        super(BatchNorm3d, self).__init__(*args, **kwargs)
diff --git a/segutils/core/utils/__init__.py b/segutils/core/utils/__init__.py
new file mode 100644
index 0000000..067a8d0
--- /dev/null
+++ b/segutils/core/utils/__init__.py
@@ -0,0 +1,5 @@
+"""Utility functions."""
+from __future__ import absolute_import
+
+from .download import download, check_sha1
+from .filesystem import makedirs, try_import_pycocotools
diff --git a/segutils/core/utils/__pycache__/__init__.cpython-36.pyc b/segutils/core/utils/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..94e930b
Binary files /dev/null and b/segutils/core/utils/__pycache__/__init__.cpython-36.pyc differ
diff --git a/segutils/core/utils/__pycache__/__init__.cpython-38.pyc b/segutils/core/utils/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000..861de68
Binary files /dev/null and b/segutils/core/utils/__pycache__/__init__.cpython-38.pyc differ
diff --git a/segutils/core/utils/__pycache__/distributed.cpython-36.pyc b/segutils/core/utils/__pycache__/distributed.cpython-36.pyc
new file mode 100644
index 0000000..d8b7f1d
Binary files /dev/null and b/segutils/core/utils/__pycache__/distributed.cpython-36.pyc differ
diff --git a/segutils/core/utils/__pycache__/download.cpython-36.pyc b/segutils/core/utils/__pycache__/download.cpython-36.pyc
new file mode 100644
index 0000000..26e9a4a
Binary files /dev/null and b/segutils/core/utils/__pycache__/download.cpython-36.pyc differ
diff --git a/segutils/core/utils/__pycache__/download.cpython-38.pyc b/segutils/core/utils/__pycache__/download.cpython-38.pyc
new file mode 100644
index 0000000..042d4c1
Binary files /dev/null and b/segutils/core/utils/__pycache__/download.cpython-38.pyc differ
diff --git a/segutils/core/utils/__pycache__/filesystem.cpython-36.pyc b/segutils/core/utils/__pycache__/filesystem.cpython-36.pyc
new file mode 100644
index 0000000..72efcec
Binary files /dev/null and b/segutils/core/utils/__pycache__/filesystem.cpython-36.pyc differ
diff --git a/segutils/core/utils/__pycache__/filesystem.cpython-38.pyc b/segutils/core/utils/__pycache__/filesystem.cpython-38.pyc
new file mode 100644
index 0000000..3906424
Binary files /dev/null and b/segutils/core/utils/__pycache__/filesystem.cpython-38.pyc differ
diff --git a/segutils/core/utils/__pycache__/loss.cpython-36.pyc b/segutils/core/utils/__pycache__/loss.cpython-36.pyc
new file mode 100644
index 0000000..75fab07
Binary files /dev/null and b/segutils/core/utils/__pycache__/loss.cpython-36.pyc differ
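A hedged sketch of the migration the deprecation shims above push toward: construct SyncBatchNorm directly instead of the deprecated aliases (import path assumed from this package layout):

import torch.nn as nn
from core.nn.syncbn import SyncBatchNorm  # import path assumed

# BatchNorm2d(256) from this module still works, but it only emits a
# DeprecationWarning and forwards its arguments; new code should do this:
block = nn.Sequential(
    nn.Conv2d(256, 256, 3, padding=1, bias=False),
    SyncBatchNorm(256),
    nn.ReLU(True),
)

diff --git a/segutils/core/utils/distributed.py b/segutils/core/utils/distributed.py
new file mode 100644
index 0000000..257cdf9
--- /dev/null
+++ b/segutils/core/utils/distributed.py
@@ -0,0 +1,258 @@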
+""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. +""" +import math +import pickle +import torch +import torch.utils.data as data +import torch.distributed as dist + +from torch.utils.data.sampler import Sampler, BatchSampler + +__all__ = ['get_world_size', 'get_rank', 'synchronize', 'is_main_process', + 'all_gather', 'make_data_sampler', 'make_batch_data_sampler', + 'reduce_dict', 'reduce_loss_dict'] + + +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/comm.py +def get_world_size(): + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.IntTensor([tensor.numel()]).to("cuda") + size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) + if local_size != max_size: + padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that process with rank + 0 has the averaged results. Returns a dict with the same fields as + input_dict, after reduction. 
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +def reduce_loss_dict(loss_dict): + """ + Reduce the loss dictionary from all processes so that process with rank + 0 has the averaged results. Returns a dict with the same fields as + loss_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return loss_dict + with torch.no_grad(): + loss_names = [] + all_losses = [] + for k in sorted(loss_dict.keys()): + loss_names.append(k) + all_losses.append(loss_dict[k]) + all_losses = torch.stack(all_losses, dim=0) + dist.reduce(all_losses, dst=0) + if dist.get_rank() == 0: + # only main process gets accumulated, so only divide by + # world_size in this case + all_losses /= world_size + reduced_losses = {k: v for k, v in zip(loss_names, all_losses)} + return reduced_losses + + +def make_data_sampler(dataset, shuffle, distributed): + if distributed: + return DistributedSampler(dataset, shuffle=shuffle) + if shuffle: + sampler = data.sampler.RandomSampler(dataset) + else: + sampler = data.sampler.SequentialSampler(dataset) + return sampler + + +def make_batch_data_sampler(sampler, images_per_batch, num_iters=None, start_iter=0): + batch_sampler = data.sampler.BatchSampler(sampler, images_per_batch, drop_last=True) + if num_iters is not None: + batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iters, start_iter) + return batch_sampler + + +# Code is copy-pasted from https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/data/samplers/distributed.py +class DistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + .. note:: + Dataset is assumed to be of constant size. + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. 
+ """ + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + if self.shuffle: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset: offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class IterationBasedBatchSampler(BatchSampler): + """ + Wraps a BatchSampler, resampling from it until + a specified number of iterations have been sampled + """ + + def __init__(self, batch_sampler, num_iterations, start_iter=0): + self.batch_sampler = batch_sampler + self.num_iterations = num_iterations + self.start_iter = start_iter + + def __iter__(self): + iteration = self.start_iter + while iteration <= self.num_iterations: + # if the underlying sampler has a set_epoch method, like + # DistributedSampler, used for making each process see + # a different split of the dataset, then set it + if hasattr(self.batch_sampler.sampler, "set_epoch"): + self.batch_sampler.sampler.set_epoch(iteration) + for batch in self.batch_sampler: + iteration += 1 + if iteration > self.num_iterations: + break + yield batch + + def __len__(self): + return self.num_iterations + + +if __name__ == '__main__': + pass diff --git a/segutils/core/utils/download.py b/segutils/core/utils/download.py new file mode 100644 index 0000000..fec8bb4 --- /dev/null +++ b/segutils/core/utils/download.py @@ -0,0 +1,88 @@ +"""Download files with progress bar.""" +import os +import hashlib +import requests +from tqdm import tqdm + +def check_sha1(filename, sha1_hash): + """Check whether the sha1 hash of the file content matches the expected hash. + Parameters + ---------- + filename : str + Path to the file. + sha1_hash : str + Expected sha1 hash in hexadecimal digits. + Returns + ------- + bool + Whether the file content matches the expected hash. + """ + sha1 = hashlib.sha1() + with open(filename, 'rb') as f: + while True: + data = f.read(1048576) + if not data: + break + sha1.update(data) + + sha1_file = sha1.hexdigest() + l = min(len(sha1_file), len(sha1_hash)) + return sha1.hexdigest()[0:l] == sha1_hash[0:l] + +def download(url, path=None, overwrite=False, sha1_hash=None): + """Download an given URL + Parameters + ---------- + url : str + URL to download + path : str, optional + Destination path to store downloaded file. By default stores to the + current directory with same name as in url. + overwrite : bool, optional + Whether to overwrite destination file if already exists. 
+ sha1_hash : str, optional + Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified + but doesn't match. + Returns + ------- + str + The file path of the downloaded file. + """ + if path is None: + fname = url.split('/')[-1] + else: + path = os.path.expanduser(path) + if os.path.isdir(path): + fname = os.path.join(path, url.split('/')[-1]) + else: + fname = path + + if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)): + dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) + if not os.path.exists(dirname): + os.makedirs(dirname) + + print('Downloading %s from %s...'%(fname, url)) + r = requests.get(url, stream=True) + if r.status_code != 200: + raise RuntimeError("Failed downloading url %s"%url) + total_length = r.headers.get('content-length') + with open(fname, 'wb') as f: + if total_length is None: # no content length header + for chunk in r.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + else: + total_length = int(total_length) + for chunk in tqdm(r.iter_content(chunk_size=1024), + total=int(total_length / 1024. + 0.5), + unit='KB', unit_scale=False, dynamic_ncols=True): + f.write(chunk) + + if sha1_hash and not check_sha1(fname, sha1_hash): + raise UserWarning('File {} is downloaded but the content hash does not match. ' \ + 'The repo may be outdated or download may be incomplete. ' \ + 'If the "repo_url" is overridden, consider switching to ' \ + 'the default repo.'.format(fname)) + + return fname \ No newline at end of file diff --git a/segutils/core/utils/filesystem.py b/segutils/core/utils/filesystem.py new file mode 100644 index 0000000..ab2510d --- /dev/null +++ b/segutils/core/utils/filesystem.py @@ -0,0 +1,123 @@ +"""Filesystem utility functions.""" +from __future__ import absolute_import +import os +import errno + + +def makedirs(path): + """Create directory recursively if not exists. + Similar to `makedir -p`, you can skip checking existence before this function. + Parameters + ---------- + path : str + Path of the desired dir + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def try_import(package, message=None): + """Try import specified package, with custom message support. + Parameters + ---------- + package : str + The name of the targeting package. + message : str, default is None + If not None, this function will raise customized error message when import error is found. + Returns + ------- + module if found, raise ImportError otherwise + """ + try: + return __import__(package) + except ImportError as e: + if not message: + raise e + raise ImportError(message) + + +def try_import_cv2(): + """Try import cv2 at runtime. + Returns + ------- + cv2 module if found. Raise ImportError otherwise + """ + msg = "cv2 is required, you can install by package manager, e.g. 'apt-get', \ + or `pip install opencv-python --user` (note that this is unofficial PYPI package)." + return try_import('cv2', msg) + + +def import_try_install(package, extern_url=None): + """Try import the specified package. + If the package not installed, try use pip to install and import if success. + Parameters + ---------- + package : str + The name of the package trying to import. + extern_url : str or None, optional + The external url if package is not hosted on PyPI. + For example, you can install a package using: + "pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx". 
+        In this case, you can pass the url to the extern_url.
+    Returns
+    -------
+
+        The imported python module.
+    """
+    try:
+        return __import__(package)
+    except ImportError:
+        try:
+            from pip import main as pipmain
+        except ImportError:
+            from pip._internal import main as pipmain
+
+        # trying to install package
+        url = package if extern_url is None else extern_url
+        pipmain(['install', '--user', url])  # will raise SystemExit Error if fails
+
+        # trying to load again
+        try:
+            return __import__(package)
+        except ImportError:
+            import sys
+            import site
+            user_site = site.getusersitepackages()
+            if user_site not in sys.path:
+                sys.path.append(user_site)
+            return __import__(package)
+    return __import__(package)
+
+
+"""Import helper for pycocotools"""
+
+
+# NOTE: for developers
+# please do not import any pycocotools in __init__ because we are trying to lazy
+# import pycocotools to avoid installing it for other users who may not use it.
+# only import when you actually use it
+
+
+def try_import_pycocotools():
+    """Tricks to optionally install and import pycocotools"""
+    # first we can try import pycocotools
+    try:
+        import pycocotools as _
+    except ImportError:
+        import os
+        # we need to install pycocotools, which is a bit tricky
+        # pycocotools sdist requires Cython, numpy(already met)
+        import_try_install('cython')
+        # pypi pycocotools is not compatible with windows
+        win_url = 'git+https://github.com/zhreshold/cocoapi.git#subdirectory=PythonAPI'
+        try:
+            if os.name == 'nt':
+                import_try_install('pycocotools', win_url)
+            else:
+                import_try_install('pycocotools')
+        except ImportError:
+            faq = 'cocoapi FAQ'
+            raise ImportError('Cannot import or install pycocotools, please refer to %s.' % faq)
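The lazy-install helpers above are easiest to see end to end. A hedged sketch (the import path and the annotation file are assumptions):

from core.utils.filesystem import try_import_pycocotools  # import path assumed from this layout

# Installs pycocotools via pip on first use if it is missing; afterwards the
# normal import is safe. The annotation path is a placeholder.
try_import_pycocotools()
from pycocotools.coco import COCO
coco = COCO('annotations/instances_val2017.json')

diff --git a/segutils/core/utils/logger.py b/segutils/core/utils/logger.py
new file mode 100644
index 0000000..a2de227
--- /dev/null
+++ b/segutils/core/utils/logger.py
@@ -0,0 +1,30 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.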
+import logging +import os +import sys + +__all__ = ['setup_logger'] + + +# reference from: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/logger.py +def setup_logger(name, save_dir, distributed_rank, filename="log.txt", mode='w'): + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + # don't log results for the non-master process + if distributed_rank > 0: + return logger + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") + ch.setFormatter(formatter) + logger.addHandler(ch) + + if save_dir: + if not os.path.exists(save_dir): + os.makedirs(save_dir) + fh = logging.FileHandler(os.path.join(save_dir, filename), mode=mode) # 'a+' for add, 'w' for overwrite + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logger.addHandler(fh) + + return logger diff --git a/segutils/core/utils/loss.py b/segutils/core/utils/loss.py new file mode 100644 index 0000000..aab5314 --- /dev/null +++ b/segutils/core/utils/loss.py @@ -0,0 +1,196 @@ +"""Custom losses.""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torch.autograd import Variable + +__all__ = ['MixSoftmaxCrossEntropyLoss', 'MixSoftmaxCrossEntropyOHEMLoss', + 'EncNetLoss', 'ICNetLoss', 'get_segmentation_loss'] + + +# TODO: optim function +class MixSoftmaxCrossEntropyLoss(nn.CrossEntropyLoss): + def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs): + super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_index=ignore_index) + self.aux = aux + self.aux_weight = aux_weight + + def _aux_forward(self, *inputs, **kwargs): + *preds, target = tuple(inputs) + + loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[0], target) + for i in range(1, len(preds)): + aux_loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[i], target) + loss += self.aux_weight * aux_loss + return loss + + def forward(self, *inputs, **kwargs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + if self.aux: + return dict(loss=self._aux_forward(*inputs)) + else: + return dict(loss=super(MixSoftmaxCrossEntropyLoss, self).forward(*inputs)) + + +# reference: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/loss.py +class EncNetLoss(nn.CrossEntropyLoss): + """2D Cross Entropy Loss with SE Loss""" + + def __init__(self, se_loss=True, se_weight=0.2, nclass=19, aux=False, + aux_weight=0.4, weight=None, ignore_index=-1, **kwargs): + super(EncNetLoss, self).__init__(weight, None, ignore_index) + self.se_loss = se_loss + self.aux = aux + self.nclass = nclass + self.se_weight = se_weight + self.aux_weight = aux_weight + self.bceloss = nn.BCELoss(weight) + + def forward(self, *inputs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + if not self.se_loss and not self.aux: + return super(EncNetLoss, self).forward(*inputs) + elif not self.se_loss: + pred1, pred2, target = tuple(inputs) + loss1 = super(EncNetLoss, self).forward(pred1, target) + loss2 = super(EncNetLoss, self).forward(pred2, target) + return dict(loss=loss1 + self.aux_weight * loss2) + elif not self.aux: + print (inputs) + pred, se_pred, target = tuple(inputs) + se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred) + loss1 = super(EncNetLoss, self).forward(pred, target) + loss2 = self.bceloss(torch.sigmoid(se_pred), se_target) + return dict(loss=loss1 + self.se_weight * loss2) + else: + pred1, 
se_pred, pred2, target = tuple(inputs)
+            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1)
+            loss1 = super(EncNetLoss, self).forward(pred1, target)
+            loss2 = super(EncNetLoss, self).forward(pred2, target)
+            loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
+            return dict(loss=loss1 + self.aux_weight * loss2 + self.se_weight * loss3)
+
+    @staticmethod
+    def _get_batch_label_vector(target, nclass):
+        # target is a 3D Variable BxHxW, output is 2D BxnClass
+        batch = target.size(0)
+        tvect = Variable(torch.zeros(batch, nclass))
+        for i in range(batch):
+            hist = torch.histc(target[i].cpu().data.float(),
+                               bins=nclass, min=0,
+                               max=nclass - 1)
+            vect = hist > 0
+            tvect[i] = vect
+        return tvect
+
+
+# TODO: optim function
+class ICNetLoss(nn.CrossEntropyLoss):
+    """Cross Entropy Loss for ICNet"""
+
+    def __init__(self, nclass, aux_weight=0.4, ignore_index=-1, **kwargs):
+        super(ICNetLoss, self).__init__(ignore_index=ignore_index)
+        self.nclass = nclass
+        self.aux_weight = aux_weight
+
+    def forward(self, *inputs):
+        preds, target = tuple(inputs)
+        inputs = tuple(list(preds) + [target])
+
+        pred, pred_sub4, pred_sub8, pred_sub16, target = tuple(inputs)
+        # [batch, W, H] -> [batch, 1, W, H]
+        target = target.unsqueeze(1).float()
+        target_sub4 = F.interpolate(target, pred_sub4.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
+        target_sub8 = F.interpolate(target, pred_sub8.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
+        target_sub16 = F.interpolate(target, pred_sub16.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
+        loss1 = super(ICNetLoss, self).forward(pred_sub4, target_sub4)
+        loss2 = super(ICNetLoss, self).forward(pred_sub8, target_sub8)
+        loss3 = super(ICNetLoss, self).forward(pred_sub16, target_sub16)
+        return dict(loss=loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight)
+
+
+class OhemCrossEntropy2d(nn.Module):
+    def __init__(self, ignore_index=-1, thresh=0.7, min_kept=100000, use_weight=True, **kwargs):
+        super(OhemCrossEntropy2d, self).__init__()
+        self.ignore_index = ignore_index
+        self.thresh = float(thresh)
+        self.min_kept = int(min_kept)
+        if use_weight:
+            weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754,
+                                        1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
+                                        1.0865, 1.1529, 1.0507])
+            self.criterion = torch.nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index)
+        else:
+            self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
+
+    def forward(self, pred, target):
+        n, c, h, w = pred.size()
+        target = target.view(-1)
+        valid_mask = target.ne(self.ignore_index)
+        target = target * valid_mask.long()
+        num_valid = valid_mask.sum()
+
+        prob = F.softmax(pred, dim=1)
+        prob = prob.transpose(0, 1).reshape(c, -1)
+
+        if self.min_kept > num_valid:
+            print("Labels: {}".format(num_valid))
+        elif num_valid > 0:
+            # use ~valid_mask (rather than 1 - valid_mask) so the fill also
+            # works with the bool tensors returned by ne() in current PyTorch
+            prob = prob.masked_fill_(~valid_mask, 1)
+            mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
+            threshold = self.thresh
+            if self.min_kept > 0:
+                index = mask_prob.argsort()
+                threshold_index = index[min(len(index), self.min_kept) - 1]
+                if mask_prob[threshold_index] > self.thresh:
+                    threshold = mask_prob[threshold_index]
+                kept_mask = mask_prob.le(threshold)
+                valid_mask = valid_mask * kept_mask
+                target = target * kept_mask.long()
+
+        target = target.masked_fill_(~valid_mask, self.ignore_index)
+        target = target.view(n, h, w)
+
+        return self.criterion(pred, target)
+
+
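The OHEM criterion above is self-contained enough for a quick hedged sketch (import path assumed; shapes are illustrative, and 19 classes matches the hard-coded 19-entry weight vector):

import torch
from core.utils.loss import OhemCrossEntropy2d  # import path assumed from this layout

# Pixels whose predicted probability for their label already exceeds `thresh`
# are dropped (set to ignore_index); the threshold is raised if needed so at
# least `min_kept` pixels survive.
criterion = OhemCrossEntropy2d(ignore_index=-1, thresh=0.7, min_kept=4096)
logits = torch.randn(2, 19, 64, 64, requires_grad=True)  # (N, C, H, W)
target = torch.randint(0, 19, (2, 64, 64))               # (N, H, W)
loss = criterion(logits, target)
loss.backward()

+class 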
MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d): + def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs): + super(MixSoftmaxCrossEntropyOHEMLoss, self).__init__(ignore_index=ignore_index) + self.aux = aux + self.aux_weight = aux_weight + self.bceloss = nn.BCELoss(weight) + + def _aux_forward(self, *inputs, **kwargs): + *preds, target = tuple(inputs) + + loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[0], target) + for i in range(1, len(preds)): + aux_loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[i], target) + loss += self.aux_weight * aux_loss + return loss + + def forward(self, *inputs): + preds, target = tuple(inputs) + inputs = tuple(list(preds) + [target]) + if self.aux: + return dict(loss=self._aux_forward(*inputs)) + else: + return dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(*inputs)) + + +def get_segmentation_loss(model, use_ohem=False, **kwargs): + if use_ohem: + return MixSoftmaxCrossEntropyOHEMLoss(**kwargs) + + model = model.lower() + if model == 'encnet': + return EncNetLoss(**kwargs) + elif model == 'icnet': + return ICNetLoss(nclass=4, **kwargs) + else: + return MixSoftmaxCrossEntropyLoss(**kwargs) diff --git a/segutils/core/utils/lr_scheduler.py b/segutils/core/utils/lr_scheduler.py new file mode 100644 index 0000000..32b3795 --- /dev/null +++ b/segutils/core/utils/lr_scheduler.py @@ -0,0 +1,179 @@ +"""Popular Learning Rate Schedulers""" +from __future__ import division +import math +import torch + +from bisect import bisect_right + +__all__ = ['LRScheduler', 'WarmupMultiStepLR', 'WarmupPolyLR'] + + +class LRScheduler(object): + r"""Learning Rate Scheduler + + Parameters + ---------- + mode : str + Modes for learning rate scheduler. + Currently it supports 'constant', 'step', 'linear', 'poly' and 'cosine'. + base_lr : float + Base learning rate, i.e. the starting learning rate. + target_lr : float + Target learning rate, i.e. the ending learning rate. + With constant mode target_lr is ignored. + niters : int + Number of iterations to be scheduled. + nepochs : int + Number of epochs to be scheduled. + iters_per_epoch : int + Number of iterations in each epoch. + offset : int + Number of iterations before this scheduler. + power : float + Power parameter of poly scheduler. + step_iter : list + A list of iterations to decay the learning rate. + step_epoch : list + A list of epochs to decay the learning rate. + step_factor : float + Learning rate decay factor. 
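+    warmup_epochs : int
+        Number of epochs over which the learning rate is linearly warmed up.
+
+    Example::
+        >>> # a minimal sketch, assuming an optimizer already exists;
+        >>> # poly decay over 50 epochs of 100 iterations each
+        >>> scheduler = LRScheduler('poly', base_lr=0.01, nepochs=50, iters_per_epoch=100)
+        >>> for it in range(50 * 100):
+        ...     scheduler(optimizer, it)  # sets the lr on every param group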
+ """ + + def __init__(self, mode, base_lr=0.01, target_lr=0, niters=0, nepochs=0, iters_per_epoch=0, + offset=0, power=0.9, step_iter=None, step_epoch=None, step_factor=0.1, warmup_epochs=0): + super(LRScheduler, self).__init__() + assert (mode in ['constant', 'step', 'linear', 'poly', 'cosine']) + + if mode == 'step': + assert (step_iter is not None or step_epoch is not None) + self.niters = niters + self.step = step_iter + epoch_iters = nepochs * iters_per_epoch + if epoch_iters > 0: + self.niters = epoch_iters + if step_epoch is not None: + self.step = [s * iters_per_epoch for s in step_epoch] + + self.step_factor = step_factor + self.base_lr = base_lr + self.target_lr = base_lr if mode == 'constant' else target_lr + self.offset = offset + self.power = power + self.warmup_iters = warmup_epochs * iters_per_epoch + self.mode = mode + + def __call__(self, optimizer, num_update): + self.update(num_update) + assert self.learning_rate >= 0 + self._adjust_learning_rate(optimizer, self.learning_rate) + + def update(self, num_update): + N = self.niters - 1 + T = num_update - self.offset + T = min(max(0, T), N) + + if self.mode == 'constant': + factor = 0 + elif self.mode == 'linear': + factor = 1 - T / N + elif self.mode == 'poly': + factor = pow(1 - T / N, self.power) + elif self.mode == 'cosine': + factor = (1 + math.cos(math.pi * T / N)) / 2 + elif self.mode == 'step': + if self.step is not None: + count = sum([1 for s in self.step if s <= T]) + factor = pow(self.step_factor, count) + else: + factor = 1 + else: + raise NotImplementedError + + # warm up lr schedule + if self.warmup_iters > 0 and T < self.warmup_iters: + factor = factor * 1.0 * T / self.warmup_iters + + if self.mode == 'step': + self.learning_rate = self.base_lr * factor + else: + self.learning_rate = self.target_lr + (self.base_lr - self.target_lr) * factor + + def _adjust_learning_rate(self, optimizer, lr): + optimizer.param_groups[0]['lr'] = lr + # enlarge the lr at the head + for i in range(1, len(optimizer.param_groups)): + optimizer.param_groups[i]['lr'] = lr * 10 + + +# separating MultiStepLR with WarmupLR +# but the current LRScheduler design doesn't allow it +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/solver/lr_scheduler.py +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3, + warmup_iters=500, warmup_method="linear", last_epoch=-1): + super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. 
Got {}", milestones) + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted got {}".format(warmup_method)) + + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + + def get_lr(self): + warmup_factor = 1 + if self.last_epoch < self.warmup_iters: + if self.warmup_method == 'constant': + warmup_factor = self.warmup_factor + elif self.warmup_factor == 'linear': + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + return [base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs] + + +class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, target_lr=0, max_iters=0, power=0.9, warmup_factor=1.0 / 3, + warmup_iters=500, warmup_method='linear', last_epoch=-1): + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted " + "got {}".format(warmup_method)) + + self.target_lr = target_lr + self.max_iters = max_iters + self.power = power + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + + super(WarmupPolyLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + N = self.max_iters - self.warmup_iters + T = self.last_epoch - self.warmup_iters + if self.last_epoch < self.warmup_iters: + if self.warmup_method == 'constant': + warmup_factor = self.warmup_factor + elif self.warmup_method == 'linear': + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + else: + raise ValueError("Unknown warmup type.") + return [self.target_lr + (base_lr - self.target_lr) * warmup_factor for base_lr in self.base_lrs] + factor = pow(1 - T / N, self.power) + return [self.target_lr + (base_lr - self.target_lr) * factor for base_lr in self.base_lrs] + + +if __name__ == '__main__': + import torch + import torch.nn as nn + + model = nn.Conv2d(16, 16, 3, 1, 1) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + lr_scheduler = WarmupPolyLR(optimizer, niters=1000) diff --git a/segutils/core/utils/parallel.py b/segutils/core/utils/parallel.py new file mode 100644 index 0000000..cb9e896 --- /dev/null +++ b/segutils/core/utils/parallel.py @@ -0,0 +1,162 @@ +"""Utils for Semantic Segmentation""" +import threading +import torch +import torch.cuda.comm as comm +from torch.nn.parallel.data_parallel import DataParallel +from torch.nn.parallel._functions import Broadcast +from torch.autograd import Function + +__all__ = ['DataParallelModel', 'DataParallelCriterion'] + + +class Reduce(Function): + @staticmethod + def forward(ctx, *inputs): + ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))] + inputs = sorted(inputs, key=lambda i: i.get_device()) + return comm.reduce_add(inputs) + + @staticmethod + def backward(ctx, gradOutputs): + return Broadcast.apply(ctx.target_gpus, gradOutputs) + + +class DataParallelModel(DataParallel): + """Data parallelism + + Hide the difference of single/multiple GPUs to the user. + In the forward pass, the module is replicated on each device, + and each replica handles a portion of the input. During the backwards + pass, gradients from each replica are summed into the original module. + + The batch size should be larger than the number of GPUs used. 
+ + Parameters + ---------- + module : object + Network to be parallelized. + sync : bool + enable synchronization (default: False). + Inputs: + - **inputs**: list of input + Outputs: + - **outputs**: list of output + Example:: + >>> net = DataParallelModel(model, device_ids=[0, 1, 2]) + >>> output = net(input_var) # input_var can be on any device, including CPU + """ + + def gather(self, outputs, output_device): + return outputs + + def replicate(self, module, device_ids): + modules = super(DataParallelModel, self).replicate(module, device_ids) + return modules + + +# Reference: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/parallel.py +class DataParallelCriterion(DataParallel): + """ + Calculate loss in multiple-GPUs, which balance the memory usage for + Semantic Segmentation. + + The targets are splitted across the specified devices by chunking in + the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`. + + Example:: + >>> net = DataParallelModel(model, device_ids=[0, 1, 2]) + >>> criterion = DataParallelCriterion(criterion, device_ids=[0, 1, 2]) + >>> y = net(x) + >>> loss = criterion(y, target) + """ + + def forward(self, inputs, *targets, **kwargs): + # the inputs should be the outputs of DataParallelModel + if not self.device_ids: + return self.module(inputs, *targets, **kwargs) + targets, kwargs = self.scatter(targets, kwargs, self.device_ids) + if len(self.device_ids) == 1: + return self.module(inputs, *targets[0], **kwargs[0]) + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = criterion_parallel_apply(replicas, inputs, targets, kwargs) + return Reduce.apply(*outputs) / len(outputs) + + +def get_a_var(obj): + if isinstance(obj, torch.Tensor): + return obj + + if isinstance(obj, list) or isinstance(obj, tuple): + for result in map(get_a_var, obj): + if isinstance(result, torch.Tensor): + return result + + if isinstance(obj, dict): + for result in map(get_a_var, obj.items()): + if isinstance(result, torch.Tensor): + return result + return None + + +def criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None): + r"""Applies each `module` in :attr:`modules` in parallel on arguments + contained in :attr:`inputs` (positional), attr:'targets' (positional) and :attr:`kwargs_tup` (keyword) + on each of :attr:`devices`. + + Args: + modules (Module): modules to be parallelized + inputs (tensor): inputs to the modules + targets (tensor): targets to the modules + devices (list of int or torch.device): CUDA devices + :attr:`modules`, :attr:`inputs`, :attr:'targets' :attr:`kwargs_tup` (if given), and + :attr:`devices` (if given) should all have same length. Moreover, each + element of :attr:`inputs` can either be a single object as the only argument + to a module, or a collection of positional arguments. 
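+
+    Example::
+        >>> # a minimal sketch; replicas, inputs and targets are assumed to
+        >>> # come from DataParallelCriterion.forward on two devices
+        >>> outputs = criterion_parallel_apply(replicas, inputs, targets)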
+ """ + assert len(modules) == len(inputs) + assert len(targets) == len(inputs) + if kwargs_tup is not None: + assert len(modules) == len(kwargs_tup) + else: + kwargs_tup = ({},) * len(modules) + if devices is not None: + assert len(modules) == len(devices) + else: + devices = [None] * len(modules) + lock = threading.Lock() + results = {} + grad_enabled = torch.is_grad_enabled() + + def _worker(i, module, input, target, kwargs, device=None): + torch.set_grad_enabled(grad_enabled) + if device is None: + device = get_a_var(input).get_device() + try: + with torch.cuda.device(device): + output = module(*(list(input) + target), **kwargs) + with lock: + results[i] = output + except Exception as e: + with lock: + results[i] = e + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, + args=(i, module, input, target, kwargs, device)) + for i, (module, input, target, kwargs, device) in + enumerate(zip(modules, inputs, targets, kwargs_tup, devices))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, Exception): + raise output + outputs.append(output) + return outputs diff --git a/segutils/core/utils/score.py b/segutils/core/utils/score.py new file mode 100644 index 0000000..a037e65 --- /dev/null +++ b/segutils/core/utils/score.py @@ -0,0 +1,161 @@ +"""Evaluation Metrics for Semantic Segmentation""" +import torch +import numpy as np + +__all__ = ['SegmentationMetric', 'batch_pix_accuracy', 'batch_intersection_union', + 'pixelAccuracy', 'intersectionAndUnion', 'hist_info', 'compute_score'] + + +class SegmentationMetric(object): + """Computes pixAcc and mIoU metric scores + """ + + def __init__(self, nclass): + super(SegmentationMetric, self).__init__() + self.nclass = nclass + self.reset() + + def update(self, preds, labels): + """Updates the internal evaluation result. + + Parameters + ---------- + labels : 'NumpyArray' or list of `NumpyArray` + The labels of the data. + preds : 'NumpyArray' or list of `NumpyArray` + Predicted values. + """ + + def evaluate_worker(self, pred, label): + correct, labeled = batch_pix_accuracy(pred, label) + inter, union = batch_intersection_union(pred, label, self.nclass) + + self.total_correct += correct + self.total_label += labeled + if self.total_inter.device != inter.device: + self.total_inter = self.total_inter.to(inter.device) + self.total_union = self.total_union.to(union.device) + self.total_inter += inter + self.total_union += union + + if isinstance(preds, torch.Tensor): + evaluate_worker(self, preds, labels) + elif isinstance(preds, (list, tuple)): + for (pred, label) in zip(preds, labels): + evaluate_worker(self, pred, label) + + def get(self): + """Gets the current evaluation result. 
+ + Returns + ------- + metrics : tuple of float + pixAcc and mIoU + """ + pixAcc = 1.0 * self.total_correct / (2.220446049250313e-16 + self.total_label) # remove np.spacing(1) + IoU = 1.0 * self.total_inter / (2.220446049250313e-16 + self.total_union) + mIoU = IoU.mean().item() + return pixAcc, mIoU + + def reset(self): + """Resets the internal evaluation result to initial state.""" + self.total_inter = torch.zeros(self.nclass) + self.total_union = torch.zeros(self.nclass) + self.total_correct = 0 + self.total_label = 0 + + +# pytorch version +def batch_pix_accuracy(output, target): + """PixAcc""" + # inputs are numpy array, output 4D, target 3D + predict = torch.argmax(output.long(), 1) + 1 + target = target.long() + 1 + + pixel_labeled = torch.sum(target > 0).item() + pixel_correct = torch.sum((predict == target) * (target > 0)).item() + assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled" + return pixel_correct, pixel_labeled + + +def batch_intersection_union(output, target, nclass): + """mIoU""" + # inputs are numpy array, output 4D, target 3D + mini = 1 + maxi = nclass + nbins = nclass + predict = torch.argmax(output, 1) + 1 + target = target.float() + 1 + + predict = predict.float() * (target > 0).float() + intersection = predict * (predict == target).float() + # areas of intersection and union + # element 0 in intersection occur the main difference from np.bincount. set boundary to -1 is necessary. + area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi) + area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi) + area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi) + area_union = area_pred + area_lab - area_inter + assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area" + return area_inter.float(), area_union.float() + + +def pixelAccuracy(imPred, imLab): + """ + This function takes the prediction and label of a single image, returns pixel-wise accuracy + To compute over many images do: + for i = range(Nimages): + (pixel_accuracy[i], pixel_correct[i], pixel_labeled[i]) = \ + pixelAccuracy(imPred[i], imLab[i]) + mean_pixel_accuracy = 1.0 * np.sum(pixel_correct) / (np.spacing(1) + np.sum(pixel_labeled)) + """ + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. + pixel_labeled = np.sum(imLab >= 0) + pixel_correct = np.sum((imPred == imLab) * (imLab >= 0)) + pixel_accuracy = 1.0 * pixel_correct / pixel_labeled + return (pixel_accuracy, pixel_correct, pixel_labeled) + + +def intersectionAndUnion(imPred, imLab, numClass): + """ + This function takes the prediction and label of a single image, + returns intersection and union areas for each class + To compute over many images do: + for i in range(Nimages): + (area_intersection[:,i], area_union[:,i]) = intersectionAndUnion(imPred[i], imLab[i]) + IoU = 1.0 * np.sum(area_intersection, axis=1) / np.sum(np.spacing(1)+area_union, axis=1) + """ + # Remove classes from unlabeled pixels in gt image. + # We should not penalize detections in unlabeled portions of the image. 
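+    # Masking with (imLab >= 0) zeroes out predictions on unlabeled pixels;
+    # because the histograms below use range=(1, numClass), those zeroed
+    # pixels fall outside every bin and are excluded from both areas.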
+ imPred = imPred * (imLab >= 0) + + # Compute area intersection: + intersection = imPred * (imPred == imLab) + (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass)) + + # Compute area union: + (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass)) + (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass)) + area_union = area_pred + area_lab - area_intersection + return (area_intersection, area_union) + + +def hist_info(pred, label, num_cls): + assert pred.shape == label.shape + k = (label >= 0) & (label < num_cls) + labeled = np.sum(k) + correct = np.sum((pred[k] == label[k])) + + return np.bincount(num_cls * label[k].astype(int) + pred[k], minlength=num_cls ** 2).reshape(num_cls, + num_cls), labeled, correct + + +def compute_score(hist, correct, labeled): + iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) + mean_IU = np.nanmean(iu) + mean_IU_no_back = np.nanmean(iu[1:]) + freq = hist.sum(1) / hist.sum() + freq_IU = (iu[freq > 0] * freq[freq > 0]).sum() + mean_pixel_acc = correct / labeled + + return iu, mean_IU, mean_IU_no_back, mean_pixel_acc diff --git a/segutils/core/utils/visualize.py b/segutils/core/utils/visualize.py new file mode 100644 index 0000000..c63d6c9 --- /dev/null +++ b/segutils/core/utils/visualize.py @@ -0,0 +1,158 @@ +import os +import numpy as np +from PIL import Image + +__all__ = ['get_color_pallete', 'print_iou', 'set_img_color', + 'show_prediction', 'show_colorful_images', 'save_colorful_images'] + + +def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False): + n = iu.size + lines = [] + for i in range(n): + if class_names is None: + cls = 'Class %d:' % (i + 1) + else: + cls = '%d %s' % (i + 1, class_names[i]) + # lines.append('%-8s: %.3f%%' % (cls, iu[i] * 100)) + mean_IU = np.nanmean(iu) + mean_IU_no_back = np.nanmean(iu[1:]) + if show_no_back: + lines.append('mean_IU: %.3f%% || mean_IU_no_back: %.3f%% || mean_pixel_acc: %.3f%%' % ( + mean_IU * 100, mean_IU_no_back * 100, mean_pixel_acc * 100)) + else: + lines.append('mean_IU: %.3f%% || mean_pixel_acc: %.3f%%' % (mean_IU * 100, mean_pixel_acc * 100)) + lines.append('=================================================') + line = "\n".join(lines) + + print(line) + + +def set_img_color(img, label, colors, background=0, show255=False): + for i in range(len(colors)): + if i != background: + img[np.where(label == i)] = colors[i] + if show255: + img[np.where(label == 255)] = 255 + + return img + + +def show_prediction(img, pred, colors, background=0): + im = np.array(img, np.uint8) + set_img_color(im, pred, colors, background) + out = np.array(im) + + return out + + +def show_colorful_images(prediction, palettes): + im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()]) + im.show() + + +def save_colorful_images(prediction, filename, output_dir, palettes): + ''' + :param prediction: [B, H, W, C] + ''' + im = Image.fromarray(palettes[prediction.astype('uint8').squeeze()]) + fn = os.path.join(output_dir, filename) + out_dir = os.path.split(fn)[0] + if not os.path.exists(out_dir): + os.mkdir(out_dir) + im.save(fn) + + +def get_color_pallete(npimg, dataset='pascal_voc'): + """Visualize image. + + Parameters + ---------- + npimg : numpy.ndarray + Single channel image with shape `H, W, 1`. + dataset : str, default: 'pascal_voc' + The dataset that model pretrained on. 
('pascal_voc', 'ade20k') + Returns + ------- + out_img : PIL.Image + Image with color pallete + """ + # recovery boundary + if dataset in ('pascal_voc', 'pascal_aug'): + npimg[npimg == -1] = 255 + # put colormap + if dataset == 'ade20k': + npimg = npimg + 1 + out_img = Image.fromarray(npimg.astype('uint8')) + out_img.putpalette(adepallete) + return out_img + elif dataset == 'citys': + out_img = Image.fromarray(npimg.astype('uint8')) + out_img.putpalette(cityspallete) + return out_img + out_img = Image.fromarray(npimg.astype('uint8')) + out_img.putpalette(vocpallete) + return out_img + + +def _getvocpallete(num_cls): + n = num_cls + pallete = [0] * (n * 3) + for j in range(0, n): + lab = j + pallete[j * 3 + 0] = 0 + pallete[j * 3 + 1] = 0 + pallete[j * 3 + 2] = 0 + i = 0 + while (lab > 0): + pallete[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) + pallete[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) + pallete[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) + i = i + 1 + lab >>= 3 + return pallete + + +vocpallete = _getvocpallete(256) + +adepallete = [ + 0, 0, 0, 120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50, 4, 200, 3, 120, 120, 80, 140, 140, 140, 204, + 5, 255, 230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7, 150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82, + 143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3, 0, 102, 200, 61, 230, 250, 255, 6, 51, 11, 102, 255, 255, + 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220, 255, 9, 92, 112, 9, 255, 8, 255, 214, 7, 255, 224, 255, 184, 6, + 10, 255, 71, 255, 41, 10, 7, 255, 255, 224, 255, 8, 102, 8, 255, 255, 61, 6, 255, 194, 7, 255, 122, 8, 0, 255, + 20, 255, 8, 41, 255, 5, 153, 6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255, 140, 140, 140, 250, 10, 15, + 20, 255, 0, 31, 255, 0, 255, 31, 0, 255, 224, 0, 153, 255, 0, 0, 0, 255, 255, 71, 0, 0, 235, 255, 0, 173, 255, + 31, 0, 255, 11, 200, 200, 255, 82, 0, 0, 255, 245, 0, 61, 255, 0, 255, 112, 0, 255, 133, 255, 0, 0, 255, 163, + 0, 255, 102, 0, 194, 255, 0, 0, 143, 255, 51, 255, 0, 0, 82, 255, 0, 255, 41, 0, 255, 173, 10, 0, 255, 173, 255, + 0, 0, 255, 153, 255, 92, 0, 255, 0, 255, 255, 0, 245, 255, 0, 102, 255, 173, 0, 255, 0, 20, 255, 184, 184, 0, + 31, 255, 0, 255, 61, 0, 71, 255, 255, 0, 204, 0, 255, 194, 0, 255, 82, 0, 10, 255, 0, 112, 255, 51, 0, 255, 0, + 194, 255, 0, 122, 255, 0, 255, 163, 255, 153, 0, 0, 255, 10, 255, 112, 0, 143, 255, 0, 82, 0, 255, 163, 255, + 0, 255, 235, 0, 8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255, 255, 0, 31, 0, 184, 255, 0, 214, 255, 255, + 0, 112, 92, 255, 0, 0, 224, 255, 112, 224, 255, 70, 184, 160, 163, 0, 255, 153, 0, 255, 71, 255, 0, 255, 0, + 163, 255, 204, 0, 255, 0, 143, 0, 255, 235, 133, 255, 0, 255, 0, 235, 245, 0, 255, 255, 0, 122, 255, 245, 0, + 10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255, 255, 255, 0, 0, 153, 255, 0, 41, 255, 0, 255, 204, 41, 0, + 255, 41, 255, 0, 173, 0, 255, 0, 245, 255, 71, 0, 255, 122, 0, 255, 0, 255, 184, 0, 92, 255, 184, 255, 0, 0, + 133, 255, 255, 214, 0, 25, 194, 194, 102, 255, 0, 92, 0, 255] + +cityspallete = [ + 128, 64, 128, + 244, 35, 232, + 70, 70, 70, + 102, 102, 156, + 190, 153, 153, + 153, 153, 153, + 250, 170, 30, + 220, 220, 0, + 107, 142, 35, + 152, 251, 152, + 0, 130, 180, + 220, 20, 60, + 255, 0, 0, + 0, 0, 142, + 0, 0, 70, + 0, 60, 100, + 0, 80, 100, + 0, 0, 230, + 119, 11, 32, +] diff --git a/segutils/segMultiOutModel.py b/segutils/segMultiOutModel.py new file mode 100644 index 0000000..e38838d --- /dev/null +++ b/segutils/segMultiOutModel.py @@ -0,0 +1,377 @@ +import torch +from 
core.models.bisenet import BiSeNet,BiSeNet_MultiOutput +from torchvision import transforms +import cv2,os,glob +import numpy as np +from core.models.dinknet import DinkNet34 +import matplotlib.pyplot as plt + +import matplotlib.pyplot as plt +import time +class SegModel(object): + def __init__(self, nclass=2,model = None,weights=None,modelsize=512,device='cuda:3',multiOutput=False): + #self.args = args + self.model = model + #self.model = DinkNet34(nclass) + checkpoint = torch.load(weights) + self.modelsize = modelsize + self.model.load_state_dict(checkpoint['model']) + self.device = device + self.multiOutput = multiOutput + self.model= self.model.to(self.device) + '''self.composed_transforms = transforms.Compose([ + + transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)), + transforms.ToTensor()]) ''' + self.mean = (0.335, 0.358, 0.332) + self.std = (0.141, 0.138, 0.143) + #mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143) + def eval(self,image,outsize=None,smooth_kernel=0): + imageH,imageW,imageC = image.shape + time0 = time.time() + image = self.preprocess_image(image) + time1 = time.time() + self.model.eval() + image = image.to(self.device) + with torch.no_grad(): + output = self.model(image,test_flag=True,smooth_kernel = 0) + + time2 = time.time() + + if self.multiOutput: + pred = [outputx.data.cpu().numpy()[0] for outputx in output] + else: + pred = output.data.cpu().numpy() + pred = pred[0] + + time3 = time.time() + + if self.multiOutput: + pred = [ cv2.blur(predx,(smooth_kernel,smooth_kernel) ) for predx in pred] + pred = [cv2.resize(predx.astype(np.uint8),(imageW,imageH)) for predx in pred[0:2]] + else: + pred = cv2.blur(pred,(smooth_kernel,smooth_kernel) ) + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH),interpolation = cv2.INTER_NEAREST) + time4 = time.time() + print('##line52:pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) )) + + return pred + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + + time0 = time.time() + image = cv2.resize(image,(self.modelsize,self.modelsize)) + + time1 = time.time() + image = image.astype(np.float32) + image /= 255.0 + + time2 = time.time() + #image = image * 3.2 - 1.6 + image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + image[:,:,2] -=self.mean[2] + + time3 = time.time() + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + + + time4 = time.time() + image = np.transpose(image, ( 2, 0, 1)) + time5 = time.time() + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + print('###line84: in preprocess: resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) ) + + return image + + + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + +def test(): + #os.environ["CUDA_VISIBLE_DEVICES"] = str('4') + ''' + image_url = '../../data/landcover/corp512/test/images/N-33-139-C-d-2-4_169.jpg' + nclass = 5 + weights = 'runs/landcover/DinkNet34_save/experiment_wj_loss-10-10-1/checkpoint.pth' + ''' + + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + weights = 'runs/THriver/BiSeNet/train/experiment_0/checkpoint.pth' + #weights = 'runs/segmentation/BiSeNet_test/experiment_10/checkpoint.pth' + + model = BiSeNet(nclass) + segmodel = 
SegModel(model=model,nclass=nclass,weights=weights,device='cuda:4') + for i in range(10): + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + #print('###line84:',image_array0.shape) + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + #image_in = segmodel.preprocess_image(image_array) + pred = segmodel.eval(image_array,outsize=None) + time0=time.time() + binary = pred.copy() + time1=time.time() + contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time2=time.time() + print(pred.shape,' time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +label_dic={'landcover':[[0, 0, 0], [255, 0, 0], [0,255,0], [0,0,255], [255,255,0]], + 'deepRoad':[[0,0,0],[255,0,0]], + 'water':[[0,0,0],[255,255,255]], + 'water_building':[[0,0,0],[0,0,255],[255,0,0]], + 'floater':[[0,0,0], [0,255,0],[255,255,0],[255,0,255],[0,128, 255], [255,0,0], [0,255,255] ] + + + + + } + +def index2color(label_mask,label_colours): + r = label_mask.copy() + g = label_mask.copy() + b = label_mask.copy() + label_cnt = len(label_colours) + for ll in range(0, label_cnt): + r[label_mask == ll] = label_colours[ll][0] + g[label_mask == ll] = label_colours[ll][1] + b[label_mask == ll] = label_colours[ll][2] + rgb = np.stack((b, g,r), axis=-1) + return rgb.astype(np.uint8) +def get_largest_contours(contours): + areas = [cv2.contourArea(x) for x in contours] + max_area = max(areas) + max_id = areas.index(max_area) + + return max_id +def result_merge_sep(image,mask_colors): + #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }] + for mask_color in mask_colors: + mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color'] + ishow = 2 + #plt.figure(1);plt.imshow(mask_map); + for index,color in zip(indexes,colors): + mask_binaray = (mask_map == index).astype(np.uint8) + contours, hierarchy = cv2.findContours(mask_binaray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; + contours = np.array(contours,dtype=object)[d] + cv2.drawContours(image,contours,-1,color[::-1],3) + #plt.figure(ishow);plt.imshow(mask_binaray);ishow+=1 + #plt.show() + return image +def result_merge(image,mask_colors): + #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }] + for mask_color in mask_colors: + mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color'] + mask_binary = (mask_map>0).astype(np.uint8) + contours, hierarchy = cv2.findContours(mask_binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + + if len(contours)>0: + d=hierarchy[0,:,3]<0 ; contours = np.array(contours)[d] + cv2.drawContours(image,contours,-1,colors[0][::-1],3) + + coors = np.array([(np.mean(contours_x ,axis=0)+0.5).astype(np.int32)[0] for contours_x in contours]) + #print(mask_map.shape,coors.shape) + typess = mask_map[ coors[:,1],coors[:,0]] + #for jj,iclass in enumerate(typess): + #print(iclass,colors) + # cv2.drawContours(image,contours,-1, colors[iclass][::-1],3) + + + + return image + +def test_floater(): + from core.models.dinknet import DinkNet34_MultiOutput + #create_model('DinkNet34_MultiOutput',[2,5]) + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = [2,7] + outresult=True + weights = 'runs/thFloater/BiSeNet_MultiOutput/train/experiment_4/checkpoint.pth' + model = BiSeNet_MultiOutput(nclass) + outdir='temp' + image_dir = '/host/workspace/WJ/data/thFloater/val/images/' + image_url_list=glob.glob('%s/*'%(image_dir)) + segmodel = 
SegModel(model=model,nclass=nclass,weights=weights,device='cuda:9',multiOutput=True)
+
+
+    for i,image_url in enumerate(image_url_list[0:10]) :
+        image_array0 = cv2.imread(image_url)
+        image_array0 = cv2.cvtColor(image_array0, cv2.COLOR_BGR2RGB)  # cv2 loads images in BGR order by default
+        imageH,imageW,_ = image_array0.shape
+        #image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        pred = segmodel.eval(image_array0,outsize=None)
+
+
+        time0=time.time()
+        if isinstance(pred,list):
+            binary = [predx.copy() for predx in pred]
+        time1=time.time()
+
+        mask_colors=[ { 'mask':pred[0] ,'index':range(1,2),'color':label_dic['water'][0:] },
+                      { 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] } ]
+        result_draw = result_merge(image_array0,mask_colors)
+
+
+        time2=time.time()
+
+
+        if outresult:
+            basename=os.path.splitext( os.path.basename(image_url))[0]
+            outname=os.path.join(outdir,basename+'_draw.png')
+            cv2.imwrite(outname,result_draw[:,:,:])
+
+
+
+
+        print('##line151: time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+def test_water_buildings():
+    from core.models.bisenet import BiSeNet
+    #image_url = 'temp_pics/DJI_0645.JPG'
+    nclass = 3
+    outresult=True
+    weights = 'runs/thWaterBuilding/BiSeNet/train/experiment_2/checkpoint.pth'
+    model = BiSeNet(nclass)
+    outdir='temp'
+    image_dir = '/home/thsw/WJ/data/river_buildings/'
+    #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images'
+    image_url_list=glob.glob('%s/*'%(image_dir))
+    segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:0',multiOutput=False)
+
+
+    for i,image_url in enumerate(image_url_list[0:]) :
+        #image_url = '/home/thsw/WJ/data/THWaterBuilding/val/images/0anWqgmO9rGe1n8P.png'
+        image_array0 = cv2.imread(image_url)
+        imageH,imageW,_ = image_array0.shape
+        image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        pred = segmodel.eval(image_array,outsize=None)
+
+        time0=time.time()
+        if isinstance(pred,list):
+            binary = [predx.copy() for predx in pred]
+            #print(binary[0].shape)
+        time1=time.time()
+
+
+        mask_colors=[ { 'mask':pred ,'index':range(1,3),'color':label_dic['water_building'][1:] },
+                      #{ 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] }
+                    ]
+        result_draw = result_merge_sep(image_array0,mask_colors)
+
+
+        time2=time.time()
+        if outresult:
+            basename=os.path.splitext( os.path.basename(image_url))[0]
+            outname=os.path.join(outdir,basename+'_draw.png')
+            cv2.imwrite(outname,result_draw[:,:,:])
+
+        print('##line294: time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+
+def get_illegal_index(contours,hierarchy,water_dilate,overlap_threshold):
+    out_index=[]
+    if len(contours)>0:
+        d=hierarchy[0,:,3]<0 ;
+        contours = np.array(contours,dtype=object)[d]
+        imageH,imageW = water_dilate.shape
+        for ii,cont in enumerate(contours):
+            build_area=np.zeros((imageH,imageW ))
+            cv2.fillPoly(build_area,[cont[:,0,:]],1)
+            area1=np.sum(build_area);area2=np.sum(build_area*water_dilate)
+            if (area2/area1) >overlap_threshold:
+                out_index.append(ii)
+
+
+    return out_index
+
+
+def test_water_building_seperately():
+    from core.models.dinknet import DinkNet34_MultiOutput
+    #create_model('DinkNet34_MultiOutput',[2,5])
+
+    image_url = 'temp_pics/DJI_0645.JPG'
+    nclass = [2,2]
+    outresult=True
+    weights = 'runs/thWaterBuilding_seperate/BiSeNet_MultiOutput/train/experiment_0/checkpoint.pth'
+    model = BiSeNet_MultiOutput(nclass)
+    outdir='temp'
+    image_dir = '/home/thsw/WJ/data/river_buildings/'
+    #image_dir =
'/home/thsw/WJ/data/THWaterBuilding/val/images'
+    image_url_list=glob.glob('%s/*'%(image_dir))
+    segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:1',multiOutput=True)
+
+    print('###line307 image cnt:',len(image_url_list))
+    for i,image_url in enumerate(image_url_list[0:1]) :
+        image_url = '/home/thsw/WJ/data/river_buildings/DJI_20210904092044_0001_S_output896.jpg'
+        image_array0 = cv2.imread(image_url)
+        imageH,imageW,_ = image_array0.shape
+        image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        pred = segmodel.eval(image_array,outsize=None,smooth_kernel=20)
+
+        ## draw the water region
+        contours, hierarchy = cv2.findContours(pred[0],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        max_id = get_largest_contours(contours);
+        water = pred[0].copy(); water[:,:] = 0
+        cv2.fillPoly(water, [contours[max_id][:,0,:]], 1)
+        cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
+
+
+
+
+        ## draw the blue-line region obtained by dilating the water mask
+        kernel = np.ones((100,100),np.uint8)
+        water_dilate = cv2.dilate(water,kernel,iterations = 1)
+        contours, hierarchy = cv2.findContours(water_dilate,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        #print('####line310:',contours)
+        cv2.drawContours(image_array0,contours,-1,(255,0,0),3)
+
+
+        ### check each building for overlap with the blue-line region; if the overlap
+        ### exceeds 0.1 of the building's own area, flag it as an illegal building
+        contours, hierarchy = cv2.findContours(pred[1],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        outIndex=get_illegal_index(contours,hierarchy,water_dilate,0.1)
+
+        for ii in outIndex:
+            cv2.drawContours(image_array0,contours,ii,(0,0,255),3)
+
+
+        plt.imshow(image_array0);plt.show()
+        ##
+
+        time0=time.time()
+
+        time1=time.time()
+
+
+        mask_colors=[ { 'mask':pred[0],'index':[1],'color':label_dic['water_building'][1:2]},
+                      { 'mask':pred[1],'index':[1],'color':label_dic['water_building'][2:3]}
+                    ]
+        result_draw = result_merge_sep(image_array0,mask_colors)
+        time2=time.time()
+
+        if outresult:
+            basename=os.path.splitext( os.path.basename(image_url))[0]
+            outname=os.path.join(outdir,basename+'_draw.png')
+            cv2.imwrite(outname,result_draw[:,:,:])
+
+        print('##line151: time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+
+if __name__=='__main__':
+    #test()
+    #test_floater()
+    #test_water_buildings()
+    test_water_building_seperately()
+
+
+
+
+
+
diff --git a/segutils/segWaterBuilding.py b/segutils/segWaterBuilding.py
new file mode 100644
index 0000000..f087227
--- /dev/null
+++ b/segutils/segWaterBuilding.py
@@ -0,0 +1,387 @@
+import torch
+from core.models.bisenet import BiSeNet,BiSeNet_MultiOutput
+from torchvision import transforms
+import cv2,os,glob
+import numpy as np
+from core.models.dinknet import DinkNet34
+import matplotlib.pyplot as plt
+import time
+class SegModel(object):
+    def __init__(self, nclass=2,model = None,weights=None,modelsize=512,device='cuda:3',multiOutput=False):
+        #self.args = args
+        self.model = model
+        #self.model = DinkNet34(nclass)
+        checkpoint = torch.load(weights)
+        self.modelsize = modelsize
+        self.model.load_state_dict(checkpoint['model'])
+        self.device = device
+        self.multiOutput = multiOutput
+        self.model= self.model.to(self.device)
+        '''self.composed_transforms = transforms.Compose([
+
+            transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)),
+            transforms.ToTensor()]) '''
+        self.mean = (0.335, 0.358, 0.332)
+        self.std = (0.141, 0.138, 0.143)
+        #mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)
+    def eval(self,image,outsize=None,smooth_kernel=0):
+        imageH,imageW,imageC = image.shape
+        time0 = time.time()
+        image = self.preprocess_image(image)
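+        # preprocess_image resizes to (modelsize, modelsize), scales to [0, 1],
+        # normalizes each channel with the hard-coded mean/std, and returns a
+        # 1x3xHxW float tensor ready for the network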
+ time1 = time.time() + self.model.eval() + image = image.to(self.device) + with torch.no_grad(): + output = self.model(image,test_flag=True,smooth_kernel = 0) + + time2 = time.time() + + if self.multiOutput: + pred = [outputx.data.cpu().numpy()[0] for outputx in output] + else: + pred = output.data.cpu().numpy() + pred = pred[0] + + time3 = time.time() + + if self.multiOutput: + pred = [ cv2.blur(predx,(smooth_kernel,smooth_kernel) ) for predx in pred] + pred = [cv2.resize(predx.astype(np.uint8),(imageW,imageH)) for predx in pred[0:2]] + else: + pred = cv2.blur(pred,(smooth_kernel,smooth_kernel) ) + pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH),interpolation = cv2.INTER_NEAREST) + time4 = time.time() + outStr= '##line52:pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ) + #print('##line52:pre-precess:%.1f ,infer:%.1f ,post-precess:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) )) + + return pred + def get_ms(self,t1,t0): + return (t1-t0)*1000.0 + def preprocess_image(self,image): + + time0 = time.time() + image = cv2.resize(image,(self.modelsize,self.modelsize)) + + time1 = time.time() + image = image.astype(np.float32) + image /= 255.0 + + time2 = time.time() + #image = image * 3.2 - 1.6 + image[:,:,0] -=self.mean[0] + image[:,:,1] -=self.mean[1] + image[:,:,2] -=self.mean[2] + + time3 = time.time() + image[:,:,0] /= self.std[0] + image[:,:,1] /= self.std[1] + image[:,:,2] /= self.std[2] + + + time4 = time.time() + image = np.transpose(image, ( 2, 0, 1)) + time5 = time.time() + image = torch.from_numpy(image).float() + image = image.unsqueeze(0) + outStr='###line84: in preprocess: resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) + #print('###line84: in preprocess: resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) ) + + return image + + + +def get_ms(t1,t0): + return (t1-t0)*1000.0 + +def test(): + #os.environ["CUDA_VISIBLE_DEVICES"] = str('4') + ''' + image_url = '../../data/landcover/corp512/test/images/N-33-139-C-d-2-4_169.jpg' + nclass = 5 + weights = 'runs/landcover/DinkNet34_save/experiment_wj_loss-10-10-1/checkpoint.pth' + ''' + + + image_url = 'temp_pics/DJI_0645.JPG' + nclass = 2 + #weights = '../weights/segmentation/BiSeNet/checkpoint.pth' + weights = 'runs/THriver/BiSeNet/train/experiment_0/checkpoint.pth' + #weights = 'runs/segmentation/BiSeNet_test/experiment_10/checkpoint.pth' + + model = BiSeNet(nclass) + segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:4') + for i in range(10): + image_array0 = cv2.imread(image_url) + imageH,imageW,_ = image_array0.shape + #print('###line84:',image_array0.shape) + image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR) + #image_in = segmodel.preprocess_image(image_array) + pred = segmodel.eval(image_array,outsize=None) + time0=time.time() + binary = pred.copy() + time1=time.time() + contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) + time2=time.time() + print(pred.shape,' time copy:%.1f finccontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) )) + +label_dic={'landcover':[[0, 0, 0], [255, 0, 0], [0,255,0], 
[0,0,255], [255,255,0]],
+           'deepRoad':[[0,0,0],[255,0,0]],
+           'water':[[0,0,0],[255,255,255]],
+           'water_building':[[0,0,0],[0,0,255],[255,0,0]],
+           'floater':[[0,0,0], [0,255,0],[255,255,0],[255,0,255],[0,128, 255], [255,0,0], [0,255,255] ]
+
+
+
+
+           }
+
+def index2color(label_mask,label_colours):
+    r = label_mask.copy()
+    g = label_mask.copy()
+    b = label_mask.copy()
+    label_cnt = len(label_colours)
+    for ll in range(0, label_cnt):
+        r[label_mask == ll] = label_colours[ll][0]
+        g[label_mask == ll] = label_colours[ll][1]
+        b[label_mask == ll] = label_colours[ll][2]
+    rgb = np.stack((b, g,r), axis=-1)
+    return rgb.astype(np.uint8)
+def get_largest_contours(contours):
+    areas = [cv2.contourArea(x) for x in contours]
+    max_area = max(areas)
+    max_id = areas.index(max_area)
+
+    return max_id
+def result_merge_sep(image,mask_colors):
+    #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }]
+    for mask_color in mask_colors:
+        mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color']
+        ishow = 2
+        #plt.figure(1);plt.imshow(mask_map);
+        for index,color in zip(indexes,colors):
+            mask_binaray = (mask_map == index).astype(np.uint8)
+            contours, hierarchy = cv2.findContours(mask_binaray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+            if len(contours)>0:
+                d=hierarchy[0,:,3]<0 ;
+                contours = np.array(contours,dtype=object)[d]
+                cv2.drawContours(image,contours,-1,color[::-1],3)
+            #plt.figure(ishow);plt.imshow(mask_binaray);ishow+=1
+    #plt.show()
+    return image
+def result_merge(image,mask_colors):
+    #mask_colors=[{ 'mask':mask_map,'index':[1],'color':[255,255,255] }]
+    for mask_color in mask_colors:
+        mask_map,indexes,colors = mask_color['mask'], mask_color['index'], mask_color['color']
+        mask_binary = (mask_map>0).astype(np.uint8)
+        contours, hierarchy = cv2.findContours(mask_binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+
+        if len(contours)>0:
+            d=hierarchy[0,:,3]<0 ; contours = np.array(contours)[d]
+            cv2.drawContours(image,contours,-1,colors[0][::-1],3)
+
+            coors = np.array([(np.mean(contours_x ,axis=0)+0.5).astype(np.int32)[0] for contours_x in contours])
+            #print(mask_map.shape,coors.shape)
+            typess = mask_map[ coors[:,1],coors[:,0]]
+            #for jj,iclass in enumerate(typess):
+                #print(iclass,colors)
+            #    cv2.drawContours(image,contours,-1, colors[iclass][::-1],3)
+
+
+
+    return image
+
+def test_floater():
+    from core.models.dinknet import DinkNet34_MultiOutput
+    #create_model('DinkNet34_MultiOutput',[2,5])
+
+    image_url = 'temp_pics/DJI_0645.JPG'
+    nclass = [2,7]
+    outresult=True
+    weights = 'runs/thFloater/BiSeNet_MultiOutput/train/experiment_4/checkpoint.pth'
+    model = BiSeNet_MultiOutput(nclass)
+    outdir='temp'
+    image_dir = '/host/workspace/WJ/data/thFloater/val/images/'
+    image_url_list=glob.glob('%s/*'%(image_dir))
+    segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:9',multiOutput=True)
+
+
+    for i,image_url in enumerate(image_url_list[0:10]) :
+        image_array0 = cv2.imread(image_url)
+        image_array0 = cv2.cvtColor(image_array0, cv2.COLOR_BGR2RGB)  # cv2 loads images in BGR order by default
+        imageH,imageW,_ = image_array0.shape
+        #image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        pred = segmodel.eval(image_array0,outsize=None)
+
+
+        time0=time.time()
+        if isinstance(pred,list):
+            binary = [predx.copy() for predx in pred]
+        time1=time.time()
+
+        mask_colors=[ { 'mask':pred[0] ,'index':range(1,2),'color':label_dic['water'][0:] },
+                      { 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] } ]
+        result_draw = result_merge(image_array0,mask_colors)
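+        # result_merge binarizes each mask (mask_map > 0), keeps only the
+        # top-level contours (hierarchy parent < 0) and draws them onto
+        # image_array0 with the first palette colour (reversed to BGR)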
+
+
+        time2=time.time()
+
+
+        if outresult:
+            basename=os.path.splitext( os.path.basename(image_url))[0]
+            outname=os.path.join(outdir,basename+'_draw.png')
+            cv2.imwrite(outname,result_draw[:,:,:])
+
+
+
+
+        print('##line151: time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+def test_water_buildings():
+    from core.models.bisenet import BiSeNet
+    #image_url = 'temp_pics/DJI_0645.JPG'
+    nclass = 3
+    outresult=True
+    weights = 'runs/thWaterBuilding/BiSeNet/train/experiment_2/checkpoint.pth'
+    model = BiSeNet(nclass)
+    outdir='temp'
+    image_dir = '/home/thsw/WJ/data/river_buildings/'
+    #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images'
+    image_url_list=glob.glob('%s/*'%(image_dir))
+    segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:0',multiOutput=False)
+
+
+    for i,image_url in enumerate(image_url_list[0:]) :
+        #image_url = '/home/thsw/WJ/data/THWaterBuilding/val/images/0anWqgmO9rGe1n8P.png'
+        image_array0 = cv2.imread(image_url)
+        imageH,imageW,_ = image_array0.shape
+        image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        pred = segmodel.eval(image_array,outsize=None)
+
+        time0=time.time()
+        if isinstance(pred,list):
+            binary = [predx.copy() for predx in pred]
+            #print(binary[0].shape)
+        time1=time.time()
+
+
+        mask_colors=[ { 'mask':pred ,'index':range(1,3),'color':label_dic['water_building'][1:] },
+                      #{ 'mask':pred[1] ,'index':[1,2,3,4,5,6],'color':label_dic['floater'][0:] }
+                    ]
+        result_draw = result_merge_sep(image_array0,mask_colors)
+
+
+        time2=time.time()
+        if outresult:
+            basename=os.path.splitext( os.path.basename(image_url))[0]
+            outname=os.path.join(outdir,basename+'_draw.png')
+            cv2.imwrite(outname,result_draw[:,:,:])
+
+        print('##line294: time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+
+def get_illegal_index(contours,hierarchy,water_dilate,overlap_threshold):
+    out_index=[]
+    if len(contours)>0:
+        d=hierarchy[0,:,3]<0 ;
+        contours = np.array(contours,dtype=object)[d]
+        imageH,imageW = water_dilate.shape
+        for ii,cont in enumerate(contours):
+            cont = cont.astype(np.int32)
+            build_area=np.zeros((imageH,imageW ))
+            try:
+                cv2.fillPoly(build_area,[cont[:,0,:]],1)
+                area1=np.sum(build_area);area2=np.sum(build_area*water_dilate)
+                if (area2/area1) >overlap_threshold:
+                    out_index.append(ii)
+            except Exception as e:
+                print('###fillPoly error:%s '%(e))
+                print(cont.shape,type(cont),cont.dtype)
+
+
+    return out_index
+
+def illBuildings(pred,image_array0):
+    ## draw the water region
+    contours, hierarchy = cv2.findContours(pred[0],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    water = pred[0].copy(); water[:,:] = 0
+    if len(contours)==0:
+        return image_array0,water
+    max_id = get_largest_contours(contours);
+
+    cv2.fillPoly(water, [contours[max_id][:,0,:]], 1)
+    cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
+
+    ## draw the blue-line region obtained by dilating the water mask
+    kernel = np.ones((100,100),np.uint8)
+    water_dilate = cv2.dilate(water,kernel,iterations = 1)
+    contours, hierarchy = cv2.findContours(water_dilate,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    #print('####line310:',contours)
+    cv2.drawContours(image_array0,contours,-1,(255,0,0),3)
+
+    ## identify illegal buildings and draw them
+    ### check each building for overlap with the blue-line region; if the overlap
+    ### exceeds 0.1 of the building's own area, flag it as an illegal building
+    contours, hierarchy = cv2.findContours(pred[1],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    outIndex=get_illegal_index(contours,hierarchy,water_dilate,0.1)
+
+    for ii in outIndex:
+        cv2.drawContours(image_array0,contours,ii,(0,0,255),3)
+    return image_array0,water
+
+def test_water_building_seperately():
+    from core.models.dinknet
import DinkNet34_MultiOutput
+    #create_model('DinkNet34_MultiOutput',[2,5])
+
+    image_url = 'temp_pics/DJI_0645.JPG'
+    nclass = [2,2]
+    outresult=True
+    weights = 'runs/thWaterBuilding_seperate/BiSeNet_MultiOutput/train/experiment_0/checkpoint.pth'
+    model = BiSeNet_MultiOutput(nclass)
+    outdir='temp'
+    image_dir = '/home/thsw/WJ/data/river_buildings/'
+    #image_dir = '/home/thsw/WJ/data/THWaterBuilding/val/images'
+    image_url_list=glob.glob('%s/*'%(image_dir))
+    segmodel = SegModel(model=model,nclass=nclass,weights=weights,device='cuda:1',multiOutput=True)
+
+    print('###line307 image cnt:',len(image_url_list))
+    for i,image_url in enumerate(image_url_list[0:1]) :
+        image_url = '/home/thsw/WJ/data/river_buildings/DJI_20210904092044_0001_S_output896.jpg'
+        image_array0 = cv2.imread(image_url)
+        imageH,imageW,_ = image_array0.shape
+        image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        pred = segmodel.eval(image_array,outsize=None,smooth_kernel=20)
+
+        image_array0,water = illBuildings(pred,image_array0)
+
+
+        plt.imshow(image_array0);plt.show()
+        ##
+
+        time0=time.time()
+
+        time1=time.time()
+
+
+        mask_colors=[ { 'mask':pred[0],'index':[1],'color':label_dic['water_building'][1:2]},
+                      { 'mask':pred[1],'index':[1],'color':label_dic['water_building'][2:3]}
+                    ]
+        result_draw = result_merge_sep(image_array0,mask_colors)
+        time2=time.time()
+
+        if outresult:
+            basename=os.path.splitext( os.path.basename(image_url))[0]
+            outname=os.path.join(outdir,basename+'_draw.png')
+            cv2.imwrite(outname,result_draw[:,:,:])
+
+        print('##line151: time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+
+if __name__=='__main__':
+    #test()
+    #test_floater()
+    #test_water_buildings()
+    test_water_building_seperately()
+
+
+
+
+
+
diff --git a/segutils/seg_detect.py b/segutils/seg_detect.py
new file mode 100644
index 0000000..84611b7
--- /dev/null
+++ b/segutils/seg_detect.py
@@ -0,0 +1,132 @@
+import torch
+from core.models.bisenet import BiSeNet
+from torchvision import transforms
+import cv2,os
+import numpy as np
+from core.models.dinknet import DinkNet34
+import matplotlib.pyplot as plt
+import time
+class SegModel(object):
+    def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:3'):
+        #self.args = args
+        self.model = BiSeNet(nclass)
+        #self.model = DinkNet34(nclass)
+        checkpoint = torch.load(weights)
+        self.modelsize = modelsize
+        self.model.load_state_dict(checkpoint['model'])
+        self.device = device
+        self.model= self.model.to(self.device)
+        '''self.composed_transforms = transforms.Compose([
+
+            transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)),
+            transforms.ToTensor()]) '''
+        self.mean = (0.335, 0.358, 0.332)
+        self.std = (0.141, 0.138, 0.143)
+    def eval(self,image,outsize=None):
+        imageH,imageW,imageC = image.shape
+        time0 = time.time()
+        image = self.preprocess_image(image)
+        time1 = time.time()
+        self.model.eval()
+        image = image.to(self.device)
+        with torch.no_grad():
+            output = self.model(image,outsize=outsize)
+
+        time2 = time.time()
+        pred = output.data.cpu().numpy()
+        pred = np.argmax(pred, axis=1)[0]  # per-pixel argmax over the class dimension
+        time3 = time.time()
+        pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH))
+        time4 = time.time()
+        print('pre-process:%.1f ,infer:%.1f ,post-process:%.1f ,post-resize:%.1f '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ))
+        return pred
+    def get_ms(self,t1,t0):
+        return (t1-t0)*1000.0
+    def preprocess_image(self,image):
+
+        time0
= time.time()
+        image = cv2.resize(image,(self.modelsize,self.modelsize))
+
+        time1 = time.time()
+        image = image.astype(np.float32)
+        image /= 255.0
+
+        time2 = time.time()
+        #image -= self.mean
+        image[:,:,0] -=self.mean[0]
+        image[:,:,1] -=self.mean[1]
+        image[:,:,2] -=self.mean[2]
+
+        time3 = time.time()
+        #image /= self.std
+
+        image[:,:,0] /= self.std[0]
+        image[:,:,1] /= self.std[1]
+        image[:,:,2] /= self.std[2]
+
+
+        time4 = time.time()
+        image = np.transpose(image, ( 2, 0, 1))
+        time5 = time.time()
+        image = torch.from_numpy(image).float()
+        image = image.unsqueeze(0)
+        print('resize:%.1f norm:%.1f mean:%.1f std:%.1f trans:%.f '%(self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3) ,self.get_ms(time5,time4) ) )
+
+        return image
+
+
+
+def get_ms(t1,t0):
+    return (t1-t0)*1000.0
+
+if __name__=='__main__':
+
+
+
+    #os.environ["CUDA_VISIBLE_DEVICES"] = str('4')
+    '''
+    image_url = '../../data/landcover/corp512/test/images/N-33-139-C-d-2-4_169.jpg'
+    nclass = 5
+    weights = 'runs/landcover/DinkNet34_save/experiment_wj_loss-10-10-1/checkpoint.pth'
+    '''
+
+
+    image_url = 'temp_pics/DJI_0645.JPG'
+    nclass = 2
+    #weights = '../weights/segmentation/BiSeNet/checkpoint.pth'
+    weights = 'runs/THriver/BiSeNet/train/experiment_0/checkpoint.pth'
+    #weights = 'runs/segmentation/BiSeNet_test/experiment_10/checkpoint.pth'
+
+
+
+    segmodel = SegModel(nclass=nclass,weights=weights,device='cuda:4')
+    for i in range(10):
+        image_array0 = cv2.imread(image_url)
+        imageH,imageW,_ = image_array0.shape
+        #print('###line84:',image_array0.shape)
+        image_array = cv2.cvtColor( image_array0,cv2.COLOR_RGB2BGR)
+        #image_in = segmodel.preprocess_image(image_array)
+        pred = segmodel.eval(image_array,outsize=None)
+        time0=time.time()
+        binary = pred.copy()
+        time1=time.time()
+        contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        time2=time.time()
+        print(pred.shape,' time copy:%.1f findcontour:%.1f '%(get_ms(time1,time0),get_ms(time2,time1) ))
+
+
+    ## measure how findContours runtime scales with image size
+    binary0 = binary.copy()
+    for ii,ss in enumerate([22,256,512,1024,2048]):
+        time0=time.time()
+        image = cv2.resize(binary0,(ss,ss))
+        time1=time.time()
+        if ii ==0:
+            contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        else:
+            # run findContours on the resized copy
+            contours, hierarchy = cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        time2=time.time()
+        print('size:%d resize:%.1f ,findtime:%.1f '%(ss, get_ms(time1,time0),get_ms(time2,time1)))
\ No newline at end of file
diff --git a/segutils/segmodel.py b/segutils/segmodel.py
new file mode 100644
index 0000000..a992abb
--- /dev/null
+++ b/segutils/segmodel.py
@@ -0,0 +1,140 @@
+import torch
+import sys,os
+sys.path.extend(['segutils'])
+from core.models.bisenet import BiSeNet
+from torchvision import transforms
+import cv2,glob
+import numpy as np
+from core.models.dinknet import DinkNet34
+import matplotlib.pyplot as plt
+import time
+class SegModel(object):
+    def __init__(self, nclass=2,weights=None,modelsize=512,device='cuda:0'):
+        #self.args = args
+        self.model = BiSeNet(nclass)
+        #self.model = DinkNet34(nclass)
+        checkpoint = torch.load(weights)
+        self.modelsize = modelsize
+        self.model.load_state_dict(checkpoint['model'])
+        self.device = device
+        self.model= self.model.to(self.device)
+        '''self.composed_transforms = transforms.Compose([
+
+            transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)),
+            transforms.ToTensor()]) '''
+        self.mean = (0.335, 0.358, 0.332)
+        self.std =
(0.141, 0.138, 0.143)
+    def eval(self,image):
+        time0 = time.time()
+        imageH,imageW,imageC = image.shape
+        image = self.preprocess_image(image)
+        time1 = time.time()
+        self.model.eval()
+        image = image.to(self.device)
+        with torch.no_grad():
+            output = self.model(image)
+
+        time2 = time.time()
+        pred = output.data.cpu().numpy()
+        pred = np.argmax(pred, axis=1)[0]  # per-pixel argmax over the class dimension
+        time3 = time.time()
+        pred = cv2.resize(pred.astype(np.uint8),(imageW,imageH))
+        time4 = time.time()
+        outstr= 'pre-process:%.1f ,infer:%.1f ,post-process:%.1f ,post-resize:%.1f, total:%.1f \n '%( self.get_ms(time1,time0),self.get_ms(time2,time1),self.get_ms(time3,time2),self.get_ms(time4,time3),self.get_ms(time4,time0) )
+
+        #print(outstr)
+        return pred,outstr
+    def get_ms(self,t1,t0):
+        return (t1-t0)*1000.0
+    def preprocess_image(self,image):
+
+        time0 = time.time()
+        image = cv2.resize(image,(self.modelsize,self.modelsize))
+        image = image.astype(np.float32)
+        image /= 255.0
+
+        image[:,:,0] -=self.mean[0]
+        image[:,:,1] -=self.mean[1]
+        image[:,:,2] -=self.mean[2]
+
+        image[:,:,0] /= self.std[0]
+        image[:,:,1] /= self.std[1]
+        image[:,:,2] /= self.std[2]
+        image = cv2.cvtColor( image,cv2.COLOR_RGB2BGR)
+        #image -= self.mean
+        #image /= self.std
+        image = np.transpose(image, ( 2, 0, 1))
+
+        image = torch.from_numpy(image).float()
+        image = image.unsqueeze(0)
+
+
+        return image
+
+def get_ms(t1,t0):
+    return (t1-t0)*1000.0
+
+
+def get_largest_contours(contours):
+    areas = [cv2.contourArea(x) for x in contours]
+    max_area = max(areas)
+    max_id = areas.index(max_area)
+
+    return max_id
+
+if __name__=='__main__':
+    image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG'
+    nclass = 2
+    weights = '../weights/segmentation/BiSeNet/checkpoint.pth'
+
+    segmodel = SegModel(nclass=nclass,weights=weights)
+
+    image_urls=glob.glob('/home/thsw2/WJ/data/THexit/val/images/*')
+    out_dir ='../runs/detect/exp2-seg';os.makedirs(out_dir,exist_ok=True)
+    for image_url in image_urls[0:1]:
+        image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG'
+        image_array0 = cv2.imread(image_url)
+        pred,outstr = segmodel.eval(image_array0 )
+
+        #plt.figure(1);plt.imshow(pred);
+        #plt.show()
+        binary0 = pred.copy()
+
+
+        time0 = time.time()
+        contours, hierarchy = cv2.findContours(binary0,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+        max_id = -1
+        if len(contours)>0:
+            max_id = get_largest_contours(contours)
+            binary0[:,:] = 0
+            #print(contours[0].shape,contours[1].shape,contours[0])
+            cv2.fillPoly(binary0, [contours[max_id][:,0,:]], 1)
+
+        time1 = time.time()
+
+        #num_labels,_,Areastats,centroids = cv2.connectedComponentsWithStats(binary0,connectivity=4)
+        time2 = time.time()
+
+        cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
+        time3 = time.time()
+        out_url='%s/%s'%(out_dir,os.path.basename(image_url))
+        ret = cv2.imwrite(out_url,image_array0)
+        time4 = time.time()
+
+        print('image:%s findcontours:%.1f ms , connect:%.1f ms ,draw:%.1f save:%.1f'%(os.path.basename(image_url),get_ms(time1,time0),get_ms(time2,time1), get_ms(time3,time2),get_ms(time4,time3), ) )
+        plt.figure(0);plt.imshow(pred)
+        plt.figure(1);plt.imshow(image_array0)
+        plt.figure(2);plt.imshow(binary0)
+        plt.show()
+
+        #print(out_url,ret)
+
+
+
+
+
+
+
+
diff --git a/utils/SendLog/platformQueryOfftask.json b/utils/SendLog/platformQueryOfftask.json new
file mode 100644 index 0000000..70aadd1 --- /dev/null +++ b/utils/SendLog/platformQueryOfftask.json @@ -0,0 +1 @@ +{"code": 0, "msg": "操作成功", "data": []} \ No newline at end of file diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..f00fc87 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/__pycache__/autoanchor.cpython-38.pyc b/utils/__pycache__/autoanchor.cpython-38.pyc new file mode 100644 index 0000000..3079d9b Binary files /dev/null and b/utils/__pycache__/autoanchor.cpython-38.pyc differ diff --git a/utils/__pycache__/datasets.cpython-38.pyc b/utils/__pycache__/datasets.cpython-38.pyc new file mode 100644 index 0000000..4b24677 Binary files /dev/null and b/utils/__pycache__/datasets.cpython-38.pyc differ diff --git a/utils/__pycache__/general.cpython-38.pyc b/utils/__pycache__/general.cpython-38.pyc new file mode 100644 index 0000000..697d749 Binary files /dev/null and b/utils/__pycache__/general.cpython-38.pyc differ diff --git a/utils/__pycache__/get_offline_url.cpython-38.pyc b/utils/__pycache__/get_offline_url.cpython-38.pyc new file mode 100644 index 0000000..844c185 Binary files /dev/null and b/utils/__pycache__/get_offline_url.cpython-38.pyc differ diff --git a/utils/__pycache__/google_utils.cpython-38.pyc b/utils/__pycache__/google_utils.cpython-38.pyc new file mode 100644 index 0000000..a303e7f Binary files /dev/null and b/utils/__pycache__/google_utils.cpython-38.pyc differ diff --git a/utils/__pycache__/loss.cpython-38.pyc b/utils/__pycache__/loss.cpython-38.pyc new file mode 100644 index 0000000..ac552e4 Binary files /dev/null and b/utils/__pycache__/loss.cpython-38.pyc differ diff --git a/utils/__pycache__/metrics.cpython-38.pyc b/utils/__pycache__/metrics.cpython-38.pyc new file mode 100644 index 0000000..eb247a3 Binary files /dev/null and b/utils/__pycache__/metrics.cpython-38.pyc differ diff --git a/utils/__pycache__/plots.cpython-38.pyc b/utils/__pycache__/plots.cpython-38.pyc new file mode 100644 index 0000000..ac17153 Binary files /dev/null and b/utils/__pycache__/plots.cpython-38.pyc differ diff --git a/utils/__pycache__/torch_utils.cpython-38.pyc b/utils/__pycache__/torch_utils.cpython-38.pyc new file mode 100644 index 0000000..060c4d7 Binary files /dev/null and b/utils/__pycache__/torch_utils.cpython-38.pyc differ diff --git a/utils/activations.py b/utils/activations.py new file mode 100644 index 0000000..aa3ddf0 --- /dev/null +++ b/utils/activations.py @@ -0,0 +1,72 @@ +# Activation functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for torchscript and CoreML + return x * F.hardtanh(x + 3, 0., 6.) / 6. 
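# for torchscript, CoreML and ONNX

These classes exist so exported graphs avoid activation ops that some backends lack; swapping them into a model before export is all that is needed. A minimal, illustrative sketch (not part of this file) that replaces every nn.SiLU in a model with the export-friendly SiLU defined above:

import torch.nn as nn

def replace_silu(module):
    # Recursively swap nn.SiLU children for the export-friendly SiLU() defined above.
    for name, child in module.named_children():
        if isinstance(child, nn.SiLU):
            setattr(module, name, SiLU())
        else:
            replace_silu(child)

# hypothetical usage: replace_silu(model) before calling torch.onnx.export(model, ...)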
+
+
+class MemoryEfficientSwish(nn.Module):
+    class F(torch.autograd.Function):
+        @staticmethod
+        def forward(ctx, x):
+            ctx.save_for_backward(x)
+            return x * torch.sigmoid(x)
+
+        @staticmethod
+        def backward(ctx, grad_output):
+            x = ctx.saved_tensors[0]
+            sx = torch.sigmoid(x)
+            return grad_output * (sx * (1 + x * (1 - sx)))
+
+    def forward(self, x):
+        return self.F.apply(x)
+
+
+# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
+class Mish(nn.Module):
+    @staticmethod
+    def forward(x):
+        return x * F.softplus(x).tanh()
+
+
+class MemoryEfficientMish(nn.Module):
+    class F(torch.autograd.Function):
+        @staticmethod
+        def forward(ctx, x):
+            ctx.save_for_backward(x)
+            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))
+
+        @staticmethod
+        def backward(ctx, grad_output):
+            x = ctx.saved_tensors[0]
+            sx = torch.sigmoid(x)
+            fx = F.softplus(x).tanh()
+            return grad_output * (fx + x * sx * (1 - fx * fx))
+
+    def forward(self, x):
+        return self.F.apply(x)
+
+
+# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
+class FReLU(nn.Module):
+    def __init__(self, c1, k=3):  # ch_in, kernel
+        super().__init__()
+        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
+        self.bn = nn.BatchNorm2d(c1)
+
+    def forward(self, x):
+        return torch.max(x, self.bn(self.conv(x)))
diff --git a/utils/autoanchor.py b/utils/autoanchor.py new file mode 100644 index 0000000..5777746 --- /dev/null +++ b/utils/autoanchor.py @@ -0,0 +1,160 @@
+# Auto-anchor utils
+
+import numpy as np
+import torch
+import yaml
+from scipy.cluster.vq import kmeans
+from tqdm import tqdm
+
+from utils.general import colorstr
+
+
+def check_anchor_order(m):
+    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
+    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
+    da = a[-1] - a[0]  # delta a
+    ds = m.stride[-1] - m.stride[0]  # delta s
+    if da.sign() != ds.sign():  # same order
+        print('Reversing anchor order')
+        m.anchors[:] = m.anchors.flip(0)
+        m.anchor_grid[:] = m.anchor_grid.flip(0)
+
+
+def check_anchors(dataset, model, thr=4.0, imgsz=640):
+    # Check anchor fit to data, recompute if necessary
+    prefix = colorstr('autoanchor: ')
+    print(f'\n{prefix}Analyzing anchors... ', end='')
+    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
+    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
+    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
+    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh
+
+    def metric(k):  # compute metric
+        r = wh[:, None] / k[None]
+        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
+        best = x.max(1)[0]  # best_x
+        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
+        bpr = (best > 1. / thr).float().mean()  # best possible recall
+        return bpr, aat
+
+    anchors = m.anchor_grid.clone().cpu().view(-1, 2)  # current anchors
+    bpr, aat = metric(anchors)
+    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
+    if bpr < 0.98:  # threshold to recompute
+        print('.
Attempting to improve anchors, please wait...') + na = m.anchor_grid.numel() // 2 # number of anchors + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + print(f'{prefix}ERROR: {e}') + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + check_anchor_order(m) + print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') + print('') # newline + + +def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + path: path to dataset *.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + thr = 1. / thr + prefix = colorstr('autoanchor: ') + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') + print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' + f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + for i, x in enumerate(k): + print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + return k + + if isinstance(path, str): # *.yaml file + with open(path) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + else: + dataset = path # dataset + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + print(f'{prefix}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') + k *= s + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + npr = np.random + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k) + + return print_results(k) diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 --- /dev/null +++ b/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/utils/aws/resume.py b/utils/aws/resume.py new file mode 100644 index 0000000..faad8d2 --- /dev/null +++ b/utils/aws/resume.py @@ -0,0 +1,37 @@ +# Resume all interrupted trainings in yolov5/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, 
Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh new file mode 100644 index 0000000..890606b --- /dev/null +++ b/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/utils/datasets.py b/utils/datasets.py new file mode 100644 index 0000000..ed44569 --- /dev/null +++ b/utils/datasets.py @@ -0,0 +1,1074 @@ +# Dataset utils and dataloaders + +import glob +import logging +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image, ExifTags +from torch.utils.data import Dataset +from tqdm import tqdm + +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ + resample_segments, clean_str +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes +vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +logger = logging.getLogger(__name__) + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(files): + # Returns a single hash value of a list of files + return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + 
s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except: + pass + + return s + + +def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, + rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + # Make sure only the first process in DDP process the dataset first, and the following others can use the cache + with torch_distributed_zero_first(rank): + dataset = LoadImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augment images + hyp=hyp, # augmentation hyperparameters + rect=rect, # rectangular training + cache_images=cache, + single_cls=opt.single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, img_size=640, stride=32): + p = str(Path(path).absolute()) # os-agnostic absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640, stride=32): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources, 'r') as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs = [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): # index, source + assert i==0 + # Start thread to read frames from video stream + print(f'{i + 1}/{n}: {s}... 
', end='')
+            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
+                check_requirements(('pafy', 'youtube_dl'))
+                import pafy
+                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
+            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
+            cap = cv2.VideoCapture(s)
+            assert cap.isOpened(), f'Failed to open {s}'
+            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
+            self.cap = cap
+            _, self.imgs[i] = cap.read()  # guarantee first frame
+
+            print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
+
+        print('')  # newline
+
+        # check for common shapes
+        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
+        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
+        if not self.rect:
+            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
+
+    def update(self, index, cap):
+        frames=2
+        # Read the next stream frame synchronously (this fork calls update() from __next__;
+        # the upstream version ran this loop in a daemon thread)
+        n = 0
+        iframe=0
+        while cap.isOpened():
+            n += 1
+            _, self.imgs[index] = cap.read()
+            iframe +=1
+            '''cap.grab()
+            if n == frames:  # read every 4th frame
+                success, im = cap.retrieve()
+                self.imgs[index] = im if success else self.imgs[index] * 0
+                n = 0'''
+            #print('###sleep:%.1f ms ,index:%d ,n:%d, iframe:%d'%(1/self.fps*1000,index,n,iframe) )
+            #time.sleep(1 / self.fps)  # wait time
+            return self.imgs  # returns after a single read
+    def __iter__(self):
+        self.count = -1
+        return self
+
+    def __next__(self):
+        self.count += 1
+        #img0 = self.imgs.copy()
+
+        img0 = self.update(0,self.cap).copy()
+        if not isinstance(img0[0],np.ndarray):
+            #print('####video stream :%s error or video ends#####',self.sources)
+            return False, None, None, None
+        #if cv2.waitKey(1) == ord('q'):  # q to quit
+        #    cv2.destroyAllWindows()
+        #    raise StopIteration
+
+        # Letterbox
+        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
+
+        # Stack
+        img = np.stack(img, 0)
+
+        # Convert
+        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
+        img = np.ascontiguousarray(img)
+
+        return self.sources, img, img0, None
+
+    def __len__(self):
+        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
+
+
+def img2label_paths(img_paths):
+    # Define label paths as a function of image paths,
+    # e.g. .../images/train/0001.jpg -> .../labels/train/0001.txt
+    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
+    return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
+
+
+class LoadImagesAndLabels(Dataset):  # for training/testing
+    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
+                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
+        self.img_size = img_size
+        self.augment = augment
+        self.hyp = hyp
+        self.image_weights = image_weights
+        self.rect = False if image_weights else rect
+        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
+        self.mosaic_border = [-img_size // 2, -img_size // 2]
+        self.stride = stride
+        self.path = path
+
+        try:
+            f = []  # image files
+            for p in path if isinstance(path, list) else [path]:
+                p = Path(p)  # os-agnostic
+                if p.is_dir():  # dir
+                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
+                    # f = list(p.rglob('**/*.*'))  # pathlib
+                elif p.is_file():  # file
+                    with open(p, 'r') as t:
+                        t = t.read().strip().splitlines()
+                        parent = str(p.parent) + os.sep
+                        f +=
[x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise Exception(f'{prefix}{p} does not exist') + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache, exists = torch.load(cache_path), True # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + cache, exists = self.cache_labels(cache_path, prefix), False # re-cache + else: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x 
= {}  # dict
+        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
+        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
+        for i, (im_file, lb_file) in enumerate(pbar):
+            try:
+                # verify images
+                im = Image.open(im_file)
+                im.verify()  # PIL verify
+                shape = exif_size(im)  # image size
+                segments = []  # instance segments
+                assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
+                assert im.format.lower() in img_formats, f'invalid image format {im.format}'
+
+                # verify labels
+                if os.path.isfile(lb_file):
+                    nf += 1  # label found
+                    with open(lb_file, 'r') as f:
+                        l = [x.split() for x in f.read().strip().splitlines()]
+                        if any([len(x) > 8 for x in l]):  # is segment
+                            classes = np.array([x[0] for x in l], dtype=np.float32)
+                            segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
+                            l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
+                        l = np.array(l, dtype=np.float32)
+                    if len(l):
+                        assert l.shape[1] == 5, 'labels require 5 columns each'
+                        assert (l >= 0).all(), 'negative labels'
+                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
+                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
+                    else:
+                        ne += 1  # label empty
+                        l = np.zeros((0, 5), dtype=np.float32)
+                else:
+                    nm += 1  # label missing
+                    l = np.zeros((0, 5), dtype=np.float32)
+                x[im_file] = [l, shape, segments]
+            except Exception as e:
+                nc += 1
+                print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
+
+            pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
+                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
+        pbar.close()
+
+        if nf == 0:
+            print(f'{prefix}WARNING: No labels found in {path}.
See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = load_mosaic(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = 
F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def load_mosaic(self, index): + # loads images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, 
x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + +def load_mosaic9(self, index): + # loads images in a 9-mosaic + + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + 
border=self.mosaic_border) # border to remove + + return img9, labels9 + + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined 
rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + def bbox_ioa(box1, box2): + # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') + # Convert detection dataset into classification dataset, with one directory per class + + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.datasets import *; autosplit('../coco128') + Arguments + path: Path to images directory + weights: Train, val, test weights (list) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir 
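    # Illustrative note (not in the original commit): with the default weights=(0.9, 0.1, 0.0),
    # roughly 90% of images are listed in autosplit_train.txt, 10% in autosplit_val.txt, and
    # autosplit_test.txt stays empty, e.g.:
    #   autosplit('../coco128', weights=(0.8, 0.2, 0.0))  # hypothetical 80/20 train/val split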
+ files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + n = len(files) # number of files + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path / txt[i], 'a') as f: + f.write(str(img) + '\n') # add image to txt file diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md new file mode 100644 index 0000000..0cdc51b --- /dev/null +++ b/utils/flask_rest_api/README.md @@ -0,0 +1,51 @@ +# Flask REST API +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the `yolov5s` model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +## Requirements + +[Flask](https://palletsprojects.com/p/flask/) is required. Install with: +```shell +$ pip install Flask +``` + +## Run + +After Flask installation run: + +```shell +$ python3 restapi.py --port 5000 +``` + +Then use [curl](https://curl.se/) to perform a request: + +```shell +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'` +``` + +The model inference results are returned: + +```shell +[{'class': 0, + 'confidence': 0.8197850585, + 'name': 'person', + 'xmax': 1159.1403808594, + 'xmin': 750.912902832, + 'ymax': 711.2583007812, + 'ymin': 44.0350036621}, + {'class': 0, + 'confidence': 0.5667674541, + 'name': 'person', + 'xmax': 1065.5523681641, + 'xmin': 116.0448303223, + 'ymax': 713.8904418945, + 'ymin': 198.4603881836}, + {'class': 27, + 'confidence': 0.5661227107, + 'name': 'tie', + 'xmax': 516.7975463867, + 'xmin': 416.6880187988, + 'ymax': 717.0524902344, + 'ymin': 429.2020568848}] +``` + +An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py new file mode 100644 index 0000000..ff21f30 --- /dev/null +++ b/utils/flask_rest_api/example_request.py @@ -0,0 +1,13 @@ +"""Perform test request""" +import pprint + +import requests + +DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" +TEST_IMAGE = "zidane.jpg" + +image_data = open(TEST_IMAGE, "rb").read() + +response = requests.post(DETECTION_URL, files={"image": image_data}).json() + +pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py new file mode 100644 index 0000000..9d88f61 --- /dev/null +++ b/utils/flask_rest_api/restapi.py @@ -0,0 +1,38 @@ +""" +Run a rest API exposing the yolov5s object detection model +""" +import argparse +import io + +import torch +from PIL import Image +from flask import Flask, request + +app = Flask(__name__) + +DETECTION_URL = "/v1/object-detection/yolov5s" + + +@app.route(DETECTION_URL, methods=["POST"]) +def predict(): + if not request.method == "POST": + return + + if request.files.get("image"): + image_file = request.files["image"] + image_bytes = 
image_file.read() + + img = Image.open(io.BytesIO(image_bytes)) + + results = model(img, size=640) + data = results.pandas().xyxy[0].to_json(orient="records") + return data + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model") + parser.add_argument("--port", default=5000, type=int, help="port number") + args = parser.parse_args() + + model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True).autoshape() # force_reload to recache + app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py new file mode 100644 index 0000000..ac3a698 --- /dev/null +++ b/utils/general.py @@ -0,0 +1,604 @@ +# YOLOv5 general utils + +import glob +import logging +import math +import os +import platform +import random +import re +import subprocess +import time +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import torch +import torchvision +import yaml + +from utils.google_utils import gsutil_getsize +from utils.metrics import fitness +from utils.torch_utils import init_torch_seeds + +# Settings +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads + + +def set_logging(rank=-1): + logging.basicConfig( + format="%(message)s", + level=logging.INFO if rank in [-1, 0] else logging.WARN) + + +def init_seeds(seed=0): + # Initialize random number generator (RNG) seeds + random.seed(seed) + np.random.seed(seed) + init_torch_seeds(seed) + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + return True + except OSError: + return False + + +def check_git_status(): + # Recommend 'git pull' if code is out of date + print(colorstr('github: '), end='') + try: + assert Path('.git').exists(), 'skipping check (not a git repository)' + assert not isdocker(), 'skipping check (Docker image)' + assert check_online(), 'skipping check (offline)' + + cmd = 'git fetch && git config --get remote.origin.url' + url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url + branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." 
+ else: + s = f'up to date with {url} ✅' + print(emojis(s)) # emoji-safe + except Exception as e: + print(e) + + +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) + import pkg_resources as pkg + prefix = colorstr('red', 'bold', 'requirements:') + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + n += 1 + print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") + print(subprocess.check_output(f"pip install {e.req}", shell=True).decode()) + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_file(file): + # Search for file if not found + if Path(file).is_file() or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' 
% s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. 
+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit 
detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # applies a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=True, sep=''): + # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. 
+    path = Path(path)  # os-agnostic
+    if (path.exists() and exist_ok) or (not path.exists()):
+        return str(path)
+    else:
+        dirs = glob.glob(f"{path}{sep}*")  # similar paths
+        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
+        i = [int(m.groups()[0]) for m in matches if m]  # indices
+        n = max(i) + 1 if i else 2  # increment number
+        return f"{path}{sep}{n}"  # update path
diff --git a/utils/get_offline_url.py b/utils/get_offline_url.py
new file mode 100644
index 0000000..934e367
--- /dev/null
+++ b/utils/get_offline_url.py
@@ -0,0 +1,106 @@
+from PIL import Image
+import numpy as np
+import cv2
+import base64
+import io,os
+import requests
+import time,json
+import string,random
+import glob
+## for CeKanYuan
+def get_offlineUrls(taskUrl,offlineFile,jsonfile='SendLog/platformQueryOfftask.json'):
+    with open(offlineFile,'r') as fp:
+        lines = fp.readlines()
+    doneCodes = [line.strip().split(' ')[2] for line in lines]
+    try:
+        res = requests.get(taskUrl,timeout=10).json()
+        offlines = res['data']  ## offlines[0]['code'], offlines[0]['videoUrl']
+        with open(jsonfile,'w') as fp:
+            json.dump(res,fp, ensure_ascii=False)
+    except Exception as ee:
+        timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        print('###line43 %s read taskUrl:%s error:%s '%(timestr,taskUrl,ee))
+        offlines = []
+    outOfflines = []
+    for off in offlines:
+        off['port'] = 1935
+        off.update({'name':'off-'+off.pop("code")})
+        if off['name'] in doneCodes:
+            continue
+        off.update({'url': off.pop("videoUrl")})
+        outOfflines.append(off)
+        #off['url']=off['videoUrl']
+
+    return outOfflines
+def platurlToJsonfile(taskUrl,jsonfile='SendLog/platformQuery.json'):
+    try:
+        res = requests.get(taskUrl,timeout=10).json()
+        offlines = res['data']  ## offlines[0]['code'], offlines[0]['videoUrl']
+        with open(jsonfile,'w') as fp:
+            json.dump(res,fp, ensure_ascii=False)
+    except Exception as ee:
+        timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        print('###line43 %s read taskUrl:%s error:%s '%(timestr,taskUrl,ee))
+
+def get_websource_fromTxt(txtfile):
+    with open(txtfile,'r') as fp:
+        lines = fp.readlines()
+    sources = []
+    for line in lines:
+        sous = {}
+        try:
+            sps = line.strip().split(' ')
+            sous['url'] = sps[0]
+            sous['port'] = sps[1]
+
+            #webs.append(sps[0])
+            if 'rtmp' in sps[0]:
+                name = sps[0].split('/')[4]
+            else:
+                name = sps[0][-3:]
+            sous['name'] = 'live-'+name.replace('_','')
+            sous['port'] = sps[1]
+            sources.append(sous)
+        except Exception as ee:
+            print('#### format error: %s, line: %s, in file: %s ####'%(ee,line,txtfile))
+    assert len(sources) > 0
+    return sources
+
+def update_websource_offAndLive(platform_query_url,sourceFile,offlineFile,jsonfile='SendLog/platformQuery.json'):
+
+    #platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo'
+    txtSource = get_websource_fromTxt(sourceFile)
+    try:
+        res = requests.get(platform_query_url,timeout=10).json()
+        questionUrl = res['data']['questionUrl']  ### push URL for problem images when processing live streams
+        offlineUrl = res['data']['offlineUrl']  ### push URL for problem images when processing offline HTTP videos
+        taskUrl = res['data']['taskUrl']  ### URL where the offline HTTP videos are stored
+        with open(jsonfile,'w') as fp:
+            json.dump(res,fp, ensure_ascii=False)
+    except Exception as ee:
+        timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        print('######line83: %s: file:geturlPlatform: error %s ,url:%s #####'%(timestr,ee,platform_query_url))
+        taskUrl = 'http://121.40.249.52:9050/api/analyse/getAiInspectionList'
+    if taskUrl:
+        offlines = get_offlineUrls(taskUrl,offlineFile)
+        txtSource.extend(offlines)
+    #[{'url': 
'rtmp://demoplay.yunhengzhizao.cn/live/THSA_HD5M', 'port': '1935', 'name': 'live-THSAHD5M'}] + outlist=[] + for sourss in txtSource : + source_url = sourss['url'] + vid = cv2.VideoCapture(source_url) + if vid.isOpened(): + outlist.append( sourss ) + return outlist + #print('##line65:',txtSource) + + + +if __name__=='__main__': + platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo' + sourceFile='../config/source.txt' + offlineFile='../mintors/offlines/doneCodes.txt' + jsonfile='../SendLog/platformQuery.json' + txtSource=update_websource_offAndLive(platform_query_url,sourceFile,offlineFile,jsonfile=jsonfile) + print(txtSource) diff --git a/utils/google_app_engine/Dockerfile b/utils/google_app_engine/Dockerfile new file mode 100644 index 0000000..0155618 --- /dev/null +++ b/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. gunicorn must be declared as +# a dependency in requirements.txt. +CMD gunicorn -b :$PORT main:app diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 0000000..5fcc305 --- /dev/null +++ b/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==18.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..ac29d10 --- /dev/null +++ b/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 \ No newline at end of file diff --git a/utils/google_utils.py b/utils/google_utils.py new file mode 100644 index 0000000..0a7ca3b --- /dev/null +++ b/utils/google_utils.py @@ -0,0 +1,122 @@ +# Google utils: https://cloud.google.com/storage/docs/reference/libraries + +import os +import platform +import subprocess +import time +from pathlib import Path + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def attempt_download(file, repo='ultralytics/yolov5'): + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '').lower()) + + if not file.exists(): + try: + response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] + tag = response['tag_name'] # i.e. 
'v1.0' + except: # fallback plan + assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] + tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] + + name = file.name + if name in assets: + msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' + redundant = False # second download option + try: # GitHub + url = f'https://github.com/{repo}/releases/download/{tag}/{name}' + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert file.exists() and file.stat().st_size > 1E6 # check + except Exception as e: # GCP + print(f'Download error: {e}') + assert redundant, 'No secondary mirror' + url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' + print(f'Downloading {url} to {file}...') + os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) + finally: + if not file.exists() or file.stat().st_size < 1E6: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {msg}') + print('') + return + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... 
', end='') + os.system(f'unzip -q {file}') # unzip + file.unlink() # remove zip to free space + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000..9e78df1 --- /dev/null +++ b/utils/loss.py @@ -0,0 +1,216 @@ +# Loss functions + +import torch +import torch.nn as nn + +from utils.general import bbox_iou +from utils.torch_utils import is_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super(BCEBlurWithLogitsLoss, self).__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(FocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(QFocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLoss, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=targets.device) # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..666b8c7 --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,223 @@ +# Model validation metrics + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from . 
import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) + mpre = np.concatenate(([1.], precision, [0.])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of 
https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = general.box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # background FN + + def matrix(self): + return self.matrix + + def plot(self, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size + labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + except Exception as e: + pass + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + + +def plot_mc_curve(px, 
py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
+    # Metric-confidence curve
+    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
+
+    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
+        for i, y in enumerate(py):
+            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
+    else:
+        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)
+
+    y = py.mean(0)
+    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
+    ax.set_xlabel(xlabel)
+    ax.set_ylabel(ylabel)
+    ax.set_xlim(0, 1)
+    ax.set_ylim(0, 1)
+    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+    fig.savefig(Path(save_dir), dpi=250)
diff --git a/utils/platech.ttf b/utils/platech.ttf
new file mode 100644
index 0000000..d66a970
Binary files /dev/null and b/utils/platech.ttf differ
diff --git a/utils/plots.py b/utils/plots.py
new file mode 100644
index 0000000..f16dac2
--- /dev/null
+++ b/utils/plots.py
@@ -0,0 +1,575 @@
+# Plotting utils
+
+import glob
+import math
+import os
+import random
+from copy import copy
+from pathlib import Path
+
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import seaborn as sns
+import torch
+import yaml
+from PIL import Image, ImageDraw, ImageFont
+from scipy.signal import butter, filtfilt, savgol_filter
+
+from utils.general import xywh2xyxy, xyxy2xywh
+from utils.metrics import fitness
+
+# Settings
+matplotlib.rc('font', **{'size': 11})
+#matplotlib.use('Agg')  # for writing to files only
+
+def smooth_outline(contours,p1,p2):
+    # smooth a contour's x/y coordinates with a Savitzky-Golay filter (window p1, polyorder p2)
+    arcontours = np.array(contours)
+    coors_x = arcontours[0,:,0,0]
+    coors_y = arcontours[0,:,0,1]
+    coors_x_smooth = savgol_filter(coors_x,p1,p2)
+    coors_y_smooth = savgol_filter(coors_y,p1,p2)
+    arcontours[0,:,0,0] = coors_x_smooth
+    arcontours[0,:,0,1] = coors_y_smooth
+    return arcontours
+def smooth_outline_auto(contours):
+    # choose an odd filter window from the contour length, then smooth
+    cnt = len(contours[0])
+    p1 = int(cnt/12)*2+1
+    p2 = 3
+    if p1 <= p2:
+        return contours
+    return smooth_outline(contours,p1,p2)
+def get_webSource(txtfile):
+    # parse 'url port' lines from a source txt into parallel lists
+    with open(txtfile,'r') as fp:
+        lines = fp.readlines()
+    webs,ports,streamNames = [],[],[]
+    for line in lines:
+        sps = line.strip().split(' ')
+        webs.append(sps[0])
+        ports.append(sps[1])
+        streamNames.append(sps[0].split('/')[-1])
+    assert len(webs) > 0
+    return webs,ports,streamNames
+
+
+def get_label_array(color=None, label=None, outfontsize=None, fontpath="conf/platech.ttf"):
+    # Render a colored label patch with PIL (handles non-ASCII labels via the bundled font)
+    fontsize = 48
+    font = ImageFont.truetype(fontpath, fontsize, encoding='utf-8')
+
+    txt_width, txt_height = font.getsize(label)
+    im = np.zeros((txt_height, txt_width, 3), dtype=np.uint8)
+    im = Image.fromarray(im)
+    draw = ImageDraw.Draw(im)
+    draw.rectangle([0, 0, txt_width, txt_height], fill=tuple(color))
+    draw.text((0, -3), label, fill=(255, 255, 255), font=font)
+    im_array = np.asarray(im)
+
+    if outfontsize:
+        scaley = outfontsize / txt_height
+        im_array = cv2.resize(im_array, (0, 0), fx=scaley, fy=scaley)
+    return im_array
+def get_label_arrays(labelnames, colors, outfontsize=40, fontpath="conf/platech.ttf"):
+    label_arraylist = []
+    if len(labelnames) > len(colors):
+        print('##### labelnames cnt > colors cnt #####')
+    for ii, labelname in enumerate(labelnames):
+        color = colors[ii % 20]
+        label_arraylist.append(get_label_array(color=color, label=labelname, outfontsize=outfontsize, fontpath=fontpath))
+
+    return label_arraylist
+def color_list():
+    # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
+    def hex2rgb(h):
+        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
+
+    return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()]  # or BASE_ (8), CSS4_ (148), XKCD_ (949)
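+
+# Minimal usage sketch for the label helpers above (hypothetical names list; `box` is an xyxy tensor,
+# `cls`/`conf` come from a detection row; draw_painting_joint is defined further below):
+#   label_arrays = get_label_arrays(['河道垃圾', 'person'], color_list(), outfontsize=40, fontpath="conf/platech.ttf")
+#   img = draw_painting_joint(box, img, label_arrays[int(cls)], score=float(conf), color=color_list()[int(cls)])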
+def hist2d(x, y, n=100):
+    # 2d histogram used in labels.png and evolve.png
+    xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
+    hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
+    xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
+    yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
+    return np.log(hist[xidx, yidx])
+
+
+def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
+    # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
+    def butter_lowpass(cutoff, fs, order):
+        nyq = 0.5 * fs
+        normal_cutoff = cutoff / nyq
+        return butter(order, normal_cutoff, btype='low', analog=False)
+
+    b, a = butter_lowpass(cutoff, fs, order=order)
+    return filtfilt(b, a, data)  # forward-backward filter
+
+
+'''image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+pil_image = Image.fromarray(image)
+draw = ImageDraw.Draw(pil_image)
+font = ImageFont.truetype('./font/platech.ttf', 40, encoding='utf-8')
+for info in infos:
+    detect = info['bndbox']
+    text = ','.join(list(info['attributes'].values()))
+    temp = -50
+    if info['name'] == 'vehicle':
+        temp = 20
+    draw.text((detect[0], detect[1] + temp), text, (0, 255, 255), font=font)
+    if 'scores' in info:
+        draw.text((detect[0], detect[3]), info['scores'], (0, 255, 0), font=font)
+    if 'pscore' in info:
+        draw.text((detect[2], detect[3]), str(round(info['pscore'],3)), (0, 255, 0), font=font)
+image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
+for info in infos:
+    detect = info['bndbox']
+    cv2.rectangle(image, (detect[0], detect[1]), (detect[2], detect[3]), (0, 255, 0), 1, cv2.LINE_AA)
+return image'''
+
+'''def plot_one_box_PIL(x, im, color=None, label=None, line_thickness=3):
+    # Plots one bounding box on image 'im' using OpenCV
+    assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
+    tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1  # line/font thickness
+    color = color or [random.randint(0, 255) for _ in range(3)]
+    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+
+    cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+
+    if label:
+        tf = max(tl - 1, 1)  # font thickness
+        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+        cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA)  # filled
+
+        im = Image.fromarray(im)
+        draw = ImageDraw.Draw(im)
+        font = ImageFont.truetype('./font/platech.ttf', t_size, encoding='utf-8')
+        draw.text((c1[0], c1[1] - 2), label, (0, 255, 0), font=font)
+
+    #cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+    return np.array(im)'''
+
+def plot_one_box(x, im, color=None, label=None, line_thickness=3):
+    # Plots one bounding box on image 'im' using OpenCV
+    assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
+ tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + +def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None): + # Plots one bounding box on image 'im' using PIL + print('##line149:',box) + im = Image.fromarray(im) + draw = ImageDraw.Draw(im) + line_thickness = line_thickness or max(int(min(im.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + + if label: + fontsize = max(round(max(im.size) / 40), 12) + font = ImageFont.truetype("platech.ttf", fontsize,encoding='utf-8') + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + im_array = np.asarray(im) + + return np.asarray(im) + +def draw_painting_joint(box,img,label_array,score=0.5,color=None,line_thickness=None): + box = [int(xx.cpu()) for xx in box] + lh, lw, lc = label_array.shape + imh, imw, imc = img.shape + x0 , y1 = box[0:2] + x1 , y0 = x0 + lw , y1 - lh + y0 = max(0,y0) ; y1 = y0 + lh + x1 = min(imw, x1); x0 = x1 - lw + img[y0:y1,x0:x1,:] = label_array + tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + + c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + label = ' %.2f'%(score) + tf = max(tl - 1, 1) # font thickness + fontScale = tl * 0.33 + t_size = cv2.getTextSize(label, 0, fontScale=fontScale , thickness=tf)[0] + c2 = c1[0]+ lw + t_size[0], c1[1] - lh + cv2.rectangle(img, (int(box[0])+lw,int(box[1])) , c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(img, label, (c1[0]+lw, c1[1] - (lh-t_size[1])//2 ), 0, fontScale, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + return img + +def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() + # Compares the two methods for width-height anchor multiplication + # https://github.com/ultralytics/yolov3/issues/168 + x = np.arange(-4.0, 4.0, .1) + ya = np.exp(x) + yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 + + fig = plt.figure(figsize=(6, 3), tight_layout=True) + plt.plot(x, ya, '.-', label='YOLOv3') + plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') + plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') + plt.xlim(left=-4, right=4) + plt.ylim(bottom=0, top=6) + plt.xlabel('input') + plt.ylabel('output') + plt.grid() + plt.legend() + fig.savefig('comparison.png', dpi=200) + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): + # Plot image grid with labels + + if 
isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + # un-normalise + if np.max(images[0]) <= 1: + images *= 255 + + tl = 3 # line thickness + tf = max(tl - 1, 1) # font thickness + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Check if we should resize + scale_factor = max_size / max(h, w) + if scale_factor < 1: + h = math.ceil(scale_factor * h) + w = math.ceil(scale_factor * w) + + colors = color_list() # list of colors + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, img in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + + block_x = int(w * (i // ns)) + block_y = int(h * (i % ns)) + + img = img.transpose(1, 2, 0) + if scale_factor < 1: + img = cv2.resize(img, (w, h)) + + mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + if len(targets) > 0: + image_targets = targets[targets[:, 0] == i] + boxes = xywh2xyxy(image_targets[:, 2:6]).T + classes = image_targets[:, 1].astype('int') + labels = image_targets.shape[1] == 6 # labels if no conf column + conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale_factor < 1: # absolute coords need scale if image scales + boxes *= scale_factor + boxes[[0, 2]] += block_x + boxes[[1, 3]] += block_y + for j, box in enumerate(boxes.T): + cls = int(classes[j]) + color = colors[cls % len(colors)] + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) + plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + + # Draw image filename labels + if paths: + label = Path(paths[i]).name[:40] # trim to 40 char + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, + lineType=cv2.LINE_AA) + + # Image border + cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) + + if fname: + r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size + mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) + # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save + Image.fromarray(mosaic).save(fname) # PIL save + return mosaic + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_test_txt(): # from utils.plots import *; plot_test() + # Plot test.txt histograms + x = np.loadtxt('test.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() + # Plot study.txt generated by test.py + fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) + # ax = ax.ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(Path(path).glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] + # for i in range(7): + # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + # ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(30, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + plt.savefig(str(Path(path).name) + '.png', dpi=300) + + +def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): + # plot dataset labels + print('Plotting labels... 
') + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + colors = color_list() + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + # loggers + for k, v in loggers.items() or {}: + if k == 'wandb' and v: + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) + + +def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() + # Plot hyperparameter evolution results in evolve.txt + with open(yaml_file) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) + x = np.loadtxt('evolve.txt', ndmin=2) + f = fitness(x) + # weights = (f - f.min()) ** 2 # for weighted results + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, (k, v) in enumerate(hyp.items()): + y = x[:, i + 7] + # mu = (y * weights).sum() / weights.sum() # best weighted result + mu = y[f.argmax()] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + plt.savefig('evolve.png', dpi=200) + print('\nPlot saved as evolve.png') + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() + # Plot training 'results*.txt', overlaying train and val losses + s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends + t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles + for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) + ax = ax.ravel() + for i in range(5): + for j in [i, i + 5]: + y = results[j, x] + ax[i].plot(x, y, marker='.', label=s[j]) + # y_smooth = butter_lowpass_filtfilt(y) + # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) + + ax[i].set_title(t[i]) + ax[i].legend() + ax[i].set_ylabel(f) if i == 0 else None # add filename + fig.savefig(f.replace('.txt', '.png'), dpi=200) + + +def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): + # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', + 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] + if bucket: + # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] + files = ['results%g.txt' % x for x in id] + c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) + os.system(c) + else: + files = list(Path(save_dir).glob('results*.txt')) + assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + for i in range(10): + y = results[i, x] + if i in [0, 1, 2, 5, 6, 7]: + y[y == 0] = np.nan # don't show zero loss values + # y /= y[0] # normalize + label = labels[fi] if len(labels) else f.stem + ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) + ax[i].set_title(s[i]) + # if i in [5, 6, 7]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + fig.savefig(Path(save_dir) / 'results.png', dpi=200) diff --git a/utils/torch_utils.py b/utils/torch_utils.py new file mode 100644 index 0000000..9991e5e --- /dev/null +++ b/utils/torch_utils.py @@ -0,0 +1,303 @@ +# YOLOv5 PyTorch utils + +import datetime +import logging +import math +import os +import platform +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +try: + import thop # for FLOPS computation +except ImportError: + thop = None +logger = logging.getLogger(__name__) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + torch.distributed.barrier() + yield + if local_rank == 0: + torch.distributed.barrier() + + +def init_torch_seeds(seed=0): + # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html + torch.manual_seed(seed) + if seed == 0: # slower, more reproducible + cudnn.benchmark, cudnn.deterministic = False, True + else: # faster, less reproducible + cudnn.benchmark, cudnn.deterministic = True, False + + +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory + # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + s = f'git -C {path} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] + except subprocess.CalledProcessError as e: + return '' # not a git repository + + +def select_device(device='', batch_size=None): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + cpu = device.lower() == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable + assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + + cuda = not cpu and torch.cuda.is_available() + if cuda: + n = torch.cuda.device_count() + if n > 1 and batch_size: # check that batch_size is compatible with device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * len(s) + for i, d in enumerate(device.split(',') if device else range(n)): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + else: + s += 'CPU\n' + + logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_synchronized(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(x, ops, n=100, device=None): + # profile a pytorch module or list of modules. Example usage: + # x = torch.randn(16, 3, 640, 640) # input + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + + device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + x = x.to(device) + x.requires_grad = True + print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') + print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type + dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + except: + flops = 0 + + for _ in range(n): + t[0] = time_synchronized() + y = m(x) + t[1] = time_synchronized() + try: + _ = y.sum().backward() + t[2] = time_synchronized() + except: # no backward method + t[2] = float('nan') + dtf += (t[1] - t[0]) * 1000 / n # ms per op forward + dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + + +def is_parallel(model): + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0., 0. + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPS + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + except (ImportError, Exception): + fs = '' + + logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def load_classifier(name='resnet101', n=2): + # Loads a pretrained model reshaped to n-class output + model = torchvision.models.__dict__[name](pretrained=True) + + # ResNet model properties + # input_size = [3, 224, 224] + # input_space = 'RGB' + # input_range = [0, 1] + # mean = [0.485, 0.456, 0.406] + # std = [0.229, 0.224, 0.225] + + # Reshape output to n classes + filters = model.fc.weight.shape[1] + model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) + model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) + model.fc.out_features = n + return model + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. 
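+
+    A minimal usage sketch (hypothetical training-loop names such as loader,
+    optimizer and evaluate; only ModelEMA, update() and .ema come from this class):
+
+        ema = ModelEMA(model)          # create the EMA copy after model init / GPU placement
+        for batch in loader:           # ordinary training loop
+            loss = compute_loss(model, batch)
+            loss.backward(); optimizer.step()
+            ema.update(model)          # refresh the smoothed weights after each optimizer step
+        evaluate(ema.ema)              # validate with the EMA weights, not the raw model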
+ """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1. - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/utils/wandb_logging/__init__.py b/utils/wandb_logging/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/wandb_logging/__pycache__/__init__.cpython-38.pyc b/utils/wandb_logging/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..71491cf Binary files /dev/null and b/utils/wandb_logging/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/wandb_logging/__pycache__/wandb_utils.cpython-38.pyc b/utils/wandb_logging/__pycache__/wandb_utils.cpython-38.pyc new file mode 100644 index 0000000..9d3e648 Binary files /dev/null and b/utils/wandb_logging/__pycache__/wandb_utils.cpython-38.pyc differ diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py new file mode 100644 index 0000000..d7a521f --- /dev/null +++ b/utils/wandb_logging/log_dataset.py @@ -0,0 +1,24 @@ +import argparse + +import yaml + +from wandb_utils import WandbLogger + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + with open(opt.data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py new file mode 100644 index 0000000..d8f50ae --- /dev/null +++ b/utils/wandb_logging/wandb_utils.py @@ -0,0 +1,306 @@ +import json +import sys +from pathlib import Path + +import torch +import yaml +from tqdm import tqdm + +sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +from utils.datasets import LoadImagesAndLabels +from utils.datasets import img2label_paths +from utils.general import colorstr, xywh2xyxy, check_dataset + +try: + import wandb + from wandb import init, finish +except ImportError: + wandb = None + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated 
data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return run_id, project, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if opt.global_rank not in [-1, 0]: # For resuming DDP runs + run_id, project, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + return None + + +def process_wandb_config_ddp_mode(opt): + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_id, project, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if not opt.resume: + wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict + # Info useful for resuming from artifacts + self.wandb_run.config.opt = vars(opt) + self.wandb_run.config.data_dict = wandb_data_dict + self.data_dict = self.setup_training(opt, data_dict) + if self.job_type == 'Dataset Creation': + self.data_dict = self.check_and_upload_dataset(opt) + else: + prefix = colorstr('wandb: ') + print(f"{prefix}Install Weights & Biases for 
YOLOv5 logging with 'pip install wandb' (recommended)") + + def check_and_upload_dataset(self, opt): + assert wandb, 'Install wandb to upload dataset' + check_dataset(self.data_dict) + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + print("Created dataset config file ", config_path) + with open(config_path) as f: + wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + return wandb_data_dict + + def setup_training(self, opt, data_dict): + self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( + self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ + config.opt['hyp'] + data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + self.val_table = self.val_artifact.get("val") + self.map_val_table_path() + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + return data_dict + + def download_dataset_artifact(self, path, alias): + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, opt): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' 
% ( + total_epochs) + return modeldir, model_artifact + return None, None + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score + }) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + wandb.log_artifact(model_artifact, + aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + print("Saving model artifact on epoch ", epoch + 1) + + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + with open(data_file) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train']), names, name='train') if data.get('train') else None + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['val']), names, name='val') if data.get('val') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path + data.pop('download', None) + with open(path, 'w') as f: + yaml.dump(data, f) + + if self.job_type == 'Training': # builds correct artifact pipeline graph + self.wandb_run.use_artifact(self.val_artifact) + self.wandb_run.use_artifact(self.train_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + self.val_table_map = {} + print("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_map[data[3]] = data[0] + + def create_dataset_table(self, dataset, class_to_id, name='dataset'): + # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging + artifact = wandb.Artifact(name=name, type="dataset") + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.img_files) if not img_files else img_files + for img_file in img_files: + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), + name='data/labels/' + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): + height, width = shapes[0] + labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) + box_data, img_classes = [], {} + for cls, *xyxy in labels[:, 
1:].tolist(): + cls = int(cls) + box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls]), + "scores": {"acc": 1}, + "domain": "pixel"}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + Path(paths).name) + artifact.add(table, name) + return artifact + + def log_training_progress(self, predn, path, names): + if self.val_table and self.result_table: + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) + total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) + + def log(self, log_dict): + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self, best_result=False): + if self.wandb_run: + wandb.log(self.log_dict) + self.log_dict = {} + if self.result_artifact: + train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") + self.result_artifact.add(train_results, 'result') + wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + + def finish_run(self): + if self.wandb_run: + if self.log_dict: + wandb.log(self.log_dict) + wandb.run.finish() diff --git a/utilsK/GPUtils.py b/utilsK/GPUtils.py new file mode 100644 index 0000000..bcbafff --- /dev/null +++ b/utilsK/GPUtils.py @@ -0,0 +1,501 @@ +#@@ -1,43 +1,43 @@ +# GPUtil - GPU utilization +# +# A Python module for programmically getting the GPU utilization from NVIDA GPUs using nvidia-smi +# +# Author: Anders Krogh Mortensen (anderskm) +# Date: 16 January 2017 +# Web: https://github.com/anderskm/gputil +# +# LICENSE +# +# MIT License +# +# Copyright (c) 2017 anderskm +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import os
+import math
+import random
+import time
+import sys
+import platform
+import subprocess
+import numpy as np
+
+
+__version__ = '1.4.0'
+class GPU:
+    def __init__(self, ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu):
+        self.id = ID
+        self.uuid = uuid
+        self.load = load
+        self.memoryUtil = float(memoryUsed)/float(memoryTotal)
+        self.memoryTotal = memoryTotal
+        self.memoryUsed = memoryUsed
+        self.memoryFree = memoryFree
+        self.driver = driver
+        self.name = gpu_name
+        self.serial = serial
+        self.display_mode = display_mode
+        self.display_active = display_active
+        self.temperature = temp_gpu
+
+    def __str__(self):
+        return str(self.__dict__)
+
+
+class GPUProcess:
+    def __init__(self, pid, processName, gpuId, gpuUuid, gpuName, usedMemory,
+                 uid, uname):
+        self.pid = pid
+        self.processName = processName
+        self.gpuId = gpuId
+        self.gpuUuid = gpuUuid
+        self.gpuName = gpuName
+        self.usedMemory = usedMemory
+        self.uid = uid
+        self.uname = uname
+
+    def __str__(self):
+        return str(self.__dict__)
+
+def safeFloatCast(strNumber):
+    # Cast an nvidia-smi field to float; unparsable fields become NaN
+    try:
+        number = float(strNumber)
+    except ValueError:
+        number = float('nan')
+    return number
+
+def getNvidiaSmiCmd():
+    # Locate the nvidia-smi executable for the current platform
+    if platform.system() == "Windows":
+        # If the platform is Windows and nvidia-smi
+        # could not be found from the environment path,
+        # fall back to the default install location
+        nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive']
+    else:
+        nvidia_smi = "nvidia-smi"
+    return nvidia_smi
+
+
+def getGPUs():
+    # Get ID, processing and memory utilization for all GPUs
+    nvidia_smi = getNvidiaSmiCmd()
+    try:
+        p = subprocess.run([
+            nvidia_smi,
+            "--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu",
+            "--format=csv,noheader,nounits"
+        ], stdout=subprocess.PIPE, encoding='utf8')
+        output = p.stdout
+    except Exception:
+        return []
+    # Parse output: one CSV line per device, split on line break
+    lines = output.split(os.linesep)
+    numDevices = len(lines)-1
+    GPUs = []
+    for g in range(numDevices):
+        vals = lines[g].split(', ')
+        deviceIds = int(vals[0])
+        uuid = vals[1]
+        gpuUtil = safeFloatCast(vals[2]) / 100
+        memTotal = safeFloatCast(vals[3])
+        memUsed = safeFloatCast(vals[4])
+        memFree = safeFloatCast(vals[5])
+        driver = vals[6]
+        gpu_name = vals[7]
+        serial = vals[8]
+        display_active = vals[9]
+        display_mode = vals[10]
+        temp_gpu = safeFloatCast(vals[11])
+        GPUs.append(GPU(deviceIds, uuid, gpuUtil, memTotal, memUsed, memFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu))
+    return GPUs  # (deviceIds, gpuUtil, memUtil)
+
+
+def getGPUProcesses():
+    """Get all gpu compute processes."""
+
+    global gpuUuidToIdMap
+    gpuUuidToIdMap = {}
+    try:
+        gpus = getGPUs()
+        for gpu in gpus:
+            gpuUuidToIdMap[gpu.uuid] = gpu.id
+        del gpus
+    except Exception:
+        pass
+
+    nvidia_smi = getNvidiaSmiCmd()
+    try:
+        p = subprocess.run([
+            nvidia_smi,
+            "--query-compute-apps=pid,process_name,gpu_uuid,gpu_name,used_memory",
+            "--format=csv,noheader,nounits"
+        ], stdout=subprocess.PIPE, encoding='utf8')
+        output = p.stdout
+    except Exception:
+        return []
+    # Parse output: one CSV line per compute process
+    lines = output.split(os.linesep)
+    numProcesses = len(lines) - 1
+    processes = []
+    for g in range(numProcesses):
+        vals = lines[g].split(', ')
+        pid = int(vals[0])
+        processName = vals[1]
+        gpuUuid = vals[2]
+        gpuName = vals[3]
+        usedMemory = safeFloatCast(vals[4])
+        gpuId = gpuUuidToIdMap.get(gpuUuid)  # .get() so an unknown uuid maps to -1 instead of raising KeyError
+        if gpuId is None:
+            gpuId = -1
+
+        # get uid and uname owner of the pid
+        try:
+            p = subprocess.run(['ps', f'-p{pid}', '-oruid=,ruser='],
+                               stdout=subprocess.PIPE, encoding='utf8')
+            uid, uname = p.stdout.split()
+            uid = int(uid)
+        except Exception:
+            uid, uname = -1, ''
+
+        processes.append(GPUProcess(pid, processName, gpuId, gpuUuid,
+                                    gpuName, usedMemory, uid, uname))
+    return processes
+
+
+def getAvailable(order = 'first', limit=1, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]):
+    # order = first | last | random | load | memory
+    # first --> select the GPU with the lowest ID (DEFAULT)
+    # last --> select the GPU with the highest ID
+    # random --> select a random available GPU
+    # load --> select the GPU with the lowest load
+    # memory --> select the GPU with the most memory available
+    # limit = 1 (DEFAULT), 2, ..., Inf
+    # Limit sets the upper limit for the number of GPUs to return. E.g. if limit = 2, but only one is available, only one is returned.
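+    # maxLoad / maxMemory = 0.5 (DEFAULT)
+    # A GPU counts as available only if its load and its memory utilization are both below these fractions (see getAvailability below)
+    # memoryFree = 0 (DEFAULT)
+    # Minimum free memory (in MB, as reported by nvidia-smi) a GPU must have to count as available
+    # includeNan = False (DEFAULT)
+    # If True, GPUs whose load or memory utilization reads as NaN are still considered available
+    # excludeID / excludeUUID = [] (DEFAULT)
+    # GPUs whose id / uuid appears in these lists are never returned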
+ # Get device IDs, load and memory usage + GPUs = getGPUs() + # Determine, which GPUs are available + GPUavailability = getAvailability(GPUs, maxLoad=maxLoad, maxMemory=maxMemory, memoryFree=memoryFree, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID) + availAbleGPUindex = [idx for idx in range(0,len(GPUavailability)) if (GPUavailability[idx] == 1)] + # Discard unavailable GPUs + GPUs = [GPUs[g] for g in availAbleGPUindex] + # Sort available GPUs according to the order argument + if (order == 'first'): + GPUs.sort(key=lambda x: float('inf') if math.isnan(x.id) else x.id, reverse=False) + elif (order == 'last'): + GPUs.sort(key=lambda x: float('-inf') if math.isnan(x.id) else x.id, reverse=True) + elif (order == 'random'): + GPUs = [GPUs[g] for g in random.sample(range(0,len(GPUs)),len(GPUs))] + elif (order == 'load'): + GPUs.sort(key=lambda x: float('inf') if math.isnan(x.load) else x.load, reverse=False) + elif (order == 'memory'): + GPUs.sort(key=lambda x: float('inf') if math.isnan(x.memoryUtil) else x.memoryUtil, reverse=False) + # Extract the number of desired GPUs, but limited to the total number of available GPUs + GPUs = GPUs[0:min(limit, len(GPUs))] + # Extract the device IDs from the GPUs and return them + deviceIds = [gpu.id for gpu in GPUs] + return deviceIds +#def getAvailability(GPUs, maxLoad = 0.5, maxMemory = 0.5, includeNan = False): +# # Determine, which GPUs are available +# GPUavailability = np.zeros(len(GPUs)) +# for i in range(len(GPUs)): +# if (GPUs[i].load < maxLoad or (includeNan and np.isnan(GPUs[i].load))) and (GPUs[i].memoryUtil < maxMemory or (includeNan and np.isnan(GPUs[i].memoryUtil))): +# GPUavailability[i] = 1 +def getAvailability(GPUs, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]): + # Determine, which GPUs are available + GPUavailability = [1 if (gpu.memoryFree>=memoryFree) and (gpu.load < maxLoad or (includeNan and math.isnan(gpu.load))) and (gpu.memoryUtil < maxMemory or (includeNan and math.isnan(gpu.memoryUtil))) and ((gpu.id not in excludeID) and (gpu.uuid not in excludeUUID)) else 0 for gpu in GPUs] + return GPUavailability +def getFirstAvailable(order = 'first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900, verbose=False, includeNan=False, excludeID=[], excludeUUID=[]): + #GPUs = getGPUs() + #firstAvailableGPU = np.NaN + #for i in range(len(GPUs)): + # if (GPUs[i].load < maxLoad) & (GPUs[i].memory < maxMemory): + # firstAvailableGPU = GPUs[i].id + # break + #return firstAvailableGPU + for i in range(attempts): + if (verbose): + print('Attempting (' + str(i+1) + '/' + str(attempts) + ') to locate available GPU.') + # Get first available GPU + available = getAvailable(order=order, limit=1, maxLoad=maxLoad, maxMemory=maxMemory, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID) + # If an available GPU was found, break for loop. + if (available): + if (verbose): + print('GPU ' + str(available) + ' located!') + break + # If this is not the last attempt, sleep for 'interval' seconds + if (i != attempts-1): + time.sleep(interval) + # Check if an GPU was found, or if the attempts simply ran out. 
Throw error, if no GPU was found + if (not(available)): + raise RuntimeError('Could not find an available GPU after ' + str(attempts) + ' attempts with ' + str(interval) + ' seconds interval.') + # Return found GPU + return available +def showUtilization(all=False, attrList=None, useOldCode=False): + GPUs = getGPUs() + if (all): + if (useOldCode): + print(' ID | Name | Serial | UUID || GPU util. | Memory util. || Memory total | Memory used | Memory free || Display mode | Display active |') + print('------------------------------------------------------------------------------------------------------------------------------') + for gpu in GPUs: + print(' {0:2d} | {1:s} | {2:s} | {3:s} || {4:3.0f}% | {5:3.0f}% || {6:.0f}MB | {7:.0f}MB | {8:.0f}MB || {9:s} | {10:s}'.format(gpu.id,gpu.name,gpu.serial,gpu.uuid,gpu.load*100,gpu.memoryUtil*100,gpu.memoryTotal,gpu.memoryUsed,gpu.memoryFree,gpu.display_mode,gpu.display_active)) + else: + attrList = [[{'attr':'id','name':'ID'}, + {'attr':'name','name':'Name'}, + {'attr':'serial','name':'Serial'}, + {'attr':'uuid','name':'UUID'}], + [{'attr':'temperature','name':'GPU temp.','suffix':'C','transform': lambda x: x,'precision':0}, + {'attr':'load','name':'GPU util.','suffix':'%','transform': lambda x: x*100,'precision':0}, + {'attr':'memoryUtil','name':'Memory util.','suffix':'%','transform': lambda x: x*100,'precision':0}], + [{'attr':'memoryTotal','name':'Memory total','suffix':'MB','precision':0}, + {'attr':'memoryUsed','name':'Memory used','suffix':'MB','precision':0}, + {'attr':'memoryFree','name':'Memory free','suffix':'MB','precision':0}], + [{'attr':'display_mode','name':'Display mode'}, + {'attr':'display_active','name':'Display active'}]] + + else: + if (useOldCode): + print(' ID GPU MEM') + print('--------------') + for gpu in GPUs: + print(' {0:2d} {1:3.0f}% {2:3.0f}%'.format(gpu.id, gpu.load*100, gpu.memoryUtil*100)) + else: + attrList = [[{'attr':'id','name':'ID'}, + {'attr':'load','name':'GPU','suffix':'%','transform': lambda x: x*100,'precision':0}, + {'attr':'memoryUtil','name':'MEM','suffix':'%','transform': lambda x: x*100,'precision':0}], + ] + + if (not useOldCode): + if (attrList is not None): + headerString = '' + GPUstrings = ['']*len(GPUs) + for attrGroup in attrList: + #print(attrGroup) + for attrDict in attrGroup: + headerString = headerString + '| ' + attrDict['name'] + ' ' + headerWidth = len(attrDict['name']) + minWidth = len(attrDict['name']) + + attrPrecision = '.' 
+ str(attrDict['precision']) if ('precision' in attrDict.keys()) else '' + attrSuffix = str(attrDict['suffix']) if ('suffix' in attrDict.keys()) else '' + attrTransform = attrDict['transform'] if ('transform' in attrDict.keys()) else lambda x : x + for gpu in GPUs: + attr = getattr(gpu,attrDict['attr']) + + attr = attrTransform(attr) + + if (isinstance(attr,float)): + attrStr = ('{0:' + attrPrecision + 'f}').format(attr) + elif (isinstance(attr,int)): + attrStr = ('{0:d}').format(attr) + elif (isinstance(attr,str)): + attrStr = attr; + elif (sys.version_info[0] == 2): + if (isinstance(attr,unicode)): + attrStr = attr.encode('ascii','ignore') + else: + raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'') + + attrStr += attrSuffix + + minWidth = max(minWidth,len(attrStr)) + + headerString += ' '*max(0,minWidth-headerWidth) + + minWidthStr = str(minWidth - len(attrSuffix)) + + for gpuIdx,gpu in enumerate(GPUs): + attr = getattr(gpu,attrDict['attr']) + + attr = attrTransform(attr) + + if (isinstance(attr,float)): + attrStr = ('{0:'+ minWidthStr + attrPrecision + 'f}').format(attr) + elif (isinstance(attr,int)): + attrStr = ('{0:' + minWidthStr + 'd}').format(attr) + elif (isinstance(attr,str)): + attrStr = ('{0:' + minWidthStr + 's}').format(attr); + elif (sys.version_info[0] == 2): + if (isinstance(attr,unicode)): + attrStr = ('{0:' + minWidthStr + 's}').format(attr.encode('ascii','ignore')) + else: + raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'') + + attrStr += attrSuffix + + GPUstrings[gpuIdx] += '| ' + attrStr + ' ' + + headerString = headerString + '|' + for gpuIdx,gpu in enumerate(GPUs): + GPUstrings[gpuIdx] += '|' + + headerSpacingString = '-' * len(headerString) + print(headerString) + print(headerSpacingString) + for GPUstring in GPUstrings: + print(GPUstring) + + +# Generate gpu uuid to id map +gpuUuidToIdMap = {} +try: + gpus = getGPUs() + for gpu in gpus: + gpuUuidToIdMap[gpu.uuid] = gpu.id + del gpus +except: + pass +def getGPUInfos(): + ###返回gpus:list,一个GPU为一个元素-对象 + ###########:有属性,'id','load','memoryFree', + ###########:'memoryTotal','memoryUsed','memoryUtil','name','serial''temperature','uuid',process + ###其中process:每一个计算进程是一个元素--对象 + ############:有属性,'gpuId','gpuName','gpuUuid', + ############:'gpuid','pid','processName','uid', 'uname','usedMemory' + gpus = getGPUs() + gpuUuidToIdMap={} + for gpu in gpus: + gpuUuidToIdMap[gpu.uuid] = gpu.id + gpu.process=[] + indexx = [x.id for x in gpus ] + + process = getGPUProcesses() + for pre in process: + pre.gpuid = gpuUuidToIdMap[pre.gpuUuid] + gpuId = indexx.index(pre.gpuid ) + gpus[gpuId].process.append(pre ) + return gpus + +def get_available_gpu(gpuStatus): + ##判断是否有空闲的显卡,如果有返回id,没有返回None + cuda=None + for gpus in gpuStatus: + if len(gpus.process) == 0: + cuda = gpus.id + return str(cuda) + return cuda +def get_whether_gpuProcess(): + ##判断是否有空闲的显卡,如果有返回id,没有返回None + gpuStatus=getGPUInfos() + gpuProcess=True + for gpus in gpuStatus: + if len(gpus.process) != 0: + gpuProcess = False + return gpuProcess + +def get_offlineProcess_gpu(gpuStatus,pidInfos): + gpu_onLine = [] + for gpu in gpuStatus: + for gpuProcess in gpu.process: + pid = gpuProcess.pid + if pid in pidInfos.keys(): + pidType = pidInfos[pid]['type'] + if pidType == 'onLine': + gpu_onLine.append(gpu) + gpu_offLine = set(gpuStatus) - set(gpu_onLine) + return list(gpu_offLine) +def arrange_offlineProcess(gpuStatus,pidInfos,modelMemory=1500): + 
cudaArrange=[]
+    gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos)
+    for gpu in gpu_offLine:
+        leftMemory = gpu.memoryTotal*0.9 - gpu.memoryUsed
+        modelCnt = int(leftMemory // modelMemory)
+
+        cudaArrange.extend( [gpu.id] * modelCnt )
+    return cudaArrange
+def get_potential_gpu(gpuStatus,pidInfos):
+    ### Every GPU is busy; one card has to be freed up for an "online" task.
+    ### step 1: find the cards that carry no "online" processes
+
+    gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos)
+    if len(gpu_offLine) == 0 :
+        return False
+
+    ### step 2: among those, pick the card with the fewest offline processes
+    offLineCnt = [ len(gpu.process) for gpu in gpu_offLine ]
+    minCntIndex = offLineCnt.index( min(offLineCnt) )
+
+    pids = [x.pid for x in gpu_offLine[minCntIndex].process]
+    return {'cuda':gpu_offLine[minCntIndex].id, 'pids':pids }
+if __name__=='__main__':
+    gpus = getGPUs()
+    for gpu in gpus:
+        gpuUuidToIdMap[gpu.uuid] = gpu.id
+        print(gpu)
+    print(gpuUuidToIdMap)
+    pres = getGPUProcesses()
+    print('###line404:',pres)
+    for pre in pres:
+        print('#'*20)
+        for ken in ['gpuName','gpuUuid','pid','processName','uid','uname','usedMemory' ]:
+            print(ken,' ',pre.__getattribute__(ken ))
+        print(' ')
+
+
diff --git a/utilsK/__pycache__/GPUtils.cpython-38.pyc b/utilsK/__pycache__/GPUtils.cpython-38.pyc
new file mode 100644
index 0000000..2c63946
Binary files /dev/null and b/utilsK/__pycache__/GPUtils.cpython-38.pyc differ
diff --git a/utilsK/__pycache__/maskUtils.cpython-38.pyc b/utilsK/__pycache__/maskUtils.cpython-38.pyc
new file mode 100644
index 0000000..f88b60a
Binary files /dev/null and b/utilsK/__pycache__/maskUtils.cpython-38.pyc differ
diff --git a/utilsK/__pycache__/masterUtils.cpython-38.pyc b/utilsK/__pycache__/masterUtils.cpython-38.pyc
new file mode 100644
index 0000000..59098a6
Binary files /dev/null and b/utilsK/__pycache__/masterUtils.cpython-38.pyc differ
diff --git a/utilsK/__pycache__/modelEval.cpython-38.pyc b/utilsK/__pycache__/modelEval.cpython-38.pyc
new file mode 100644
index 0000000..720bfcb
Binary files /dev/null and b/utilsK/__pycache__/modelEval.cpython-38.pyc differ
diff --git a/utilsK/__pycache__/queRiver.cpython-38.pyc b/utilsK/__pycache__/queRiver.cpython-38.pyc
new file mode 100644
index 0000000..0e36f87
Binary files /dev/null and b/utilsK/__pycache__/queRiver.cpython-38.pyc differ
diff --git a/utilsK/__pycache__/sendUtils.cpython-38.pyc b/utilsK/__pycache__/sendUtils.cpython-38.pyc
new file mode 100644
index 0000000..1724baa
Binary files /dev/null and b/utilsK/__pycache__/sendUtils.cpython-38.pyc differ
diff --git a/utilsK/masterUtils.py b/utilsK/masterUtils.py
new file mode 100644
index 0000000..236c64d
--- /dev/null
+++ b/utilsK/masterUtils.py
@@ -0,0 +1,303 @@
+from kafka import KafkaProducer, KafkaConsumer,TopicPartition
+from kafka.errors import kafka_errors
+import os,cv2,sys,json,time
+import numpy as np
+import requests
+def query_channel_status(channelIndex):
+    channel_query_api='https://streaming.t-aaron.com/livechannel/getLiveStatus/%s'%(channelIndex)
+    # e.g. https://streaming.t-aaron.com/livechannel/getLiveStatus/LC001
+    try:
+        res = requests.get(channel_query_api,timeout=10).json()
+        if res['data']['status']==2:  # 1 idle, 2 in use, 3 disabled, 4 pending shutdown
+            taskEnd=False
+        else:
+            taskEnd=True
+        infos='channel_query_api connected'
+    except Exception as e:
+        taskEnd=True
+        infos='channel_query_api not connected:%s'%(e)
+    return infos, taskEnd
+
+def query_request_status(request_url):
+    channel_request_api=request_url
+
+    try:
+        res = requests.get(channel_request_api,timeout=10).json()
+        if res['data']['status']==5:  # 5 running, 10 waiting to stop analysis, 15 finished
+            taskEnd=False
+        else:
+            taskEnd=True
+        infos='channel_request_api connected'
+    except Exception as e:
+        taskEnd=True
+        infos='channel_request_api not connected:%s'%(e)
+    return infos, taskEnd
+
+def get_needed_objectsIndex(object_config):
+    needed_objectsIndex=[]
+
+    for model in object_config:
+        try:
+            needed_objectsIndex.append(int(model['id']))
+        except Exception:
+            pass  # skip entries whose id is not an integer
+    allowedList_str=[str(x) for x in needed_objectsIndex]
+    allowedList_string=','.join(allowedList_str)
+
+    return needed_objectsIndex , allowedList_string
+
+
+def get_infos(taskId, msgId,msg_h,key_str='waiting stream or video, send heartbeat'):
+    outStrList={}
+    outStrList['success']= '%s, taskId:%s msgId:%s send:%s'%(key_str,taskId, msgId,msg_h)
+    outStrList['failure']='kafka ERROR, %s'%(key_str)
+    outStrList['Refailure']='kafka Re-send ERROR ,%s'%(key_str)
+    return outStrList
+def writeTxtEndFlag(outImaDir,streamName,imageTxtFile,endFlag='结束'):
+    # write a dummy "end" image (and optional txt marker) so downstream senders know the task
+    # has finished; the default endFlag '结束' ("end") is embedded in the file name as the marker
+    EndUrl='%s/%s_frame-9999-9999_type-%s_9999999999999999_s-%s_AI.jpg'%(outImaDir,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),endFlag,streamName)
+    EndUrl = EndUrl.replace(' ','-').replace(':','-')
+    img_end=np.zeros((100,100),dtype=np.uint8);cv2.imwrite(EndUrl,img_end)
+    if imageTxtFile:
+        EndUrl_txt = EndUrl.replace('.jpg','.txt')
+        fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
+
+    EndUrl='%s/%s_frame-9999-9999_type-%s_9999999999999999_s-%s_OR.jpg'%(outImaDir,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),endFlag,streamName)
+    EndUrl = EndUrl.replace(' ','-').replace(':','-')
+    ret = cv2.imwrite(EndUrl,img_end)
+    if imageTxtFile:
+        EndUrl_txt = EndUrl.replace('.jpg','.txt')
+        fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
+def get_current_time():
+    """Return the current time as a string.
+
+    Implemented with time.localtime() + time.strftime(); returns
+    'YYYY-mm-dd HH:MM:SS.mmm' with millisecond precision.
+    """
+    ct = time.time()
+    local_time = time.localtime(ct)
+    data_head = time.strftime("%Y-%m-%d %H:%M:%S", local_time)
+    data_secs = (ct - int(ct)) * 1000
+    time_stamp = "%s.%03d" % (data_head, data_secs)
+    return time_stamp
+
+
+
+def send_kafka(producer,par,msg,outStrList,fp_log,logger,line='000',thread='detector',printFlag=False ):
+    future = producer.send(par['topic'], msg)
+    try:
+        record_metadata = future.get()
+        outstr=outStrList['success']
+        writeELK_log(outstr,fp_log,level='INFO',thread=thread,line=line,logger=logger,printFlag=printFlag)
+
+    except Exception as e:
+        outstr='%s , warning: %s'%( outStrList['failure'],str(e))
+        writeELK_log(outstr,fp_log,level='WARNING',thread=thread,line=line,logger=logger,printFlag=printFlag)
+        try:
+            # recreate the producer and retry once
+            producer.close()
+            producer = KafkaProducer(bootstrap_servers=par['server'], value_serializer=lambda v: v.encode('utf-8'))
+            future = producer.send(par['topic'], msg).get()
+        except Exception as e:
+            outstr='%s, error: %s'%( outStrList['Refailure'],str(e))
+            writeELK_log(outstr,fp_log,level='ERROR',thread=thread,line=line,logger=logger,printFlag=printFlag)
+
+def check_time_interval(time0_beg,time_interval):
+    time_2 = time.time()
+    if time_2 - time0_beg>time_interval:
+        return time_2,True
+    else:
+        return time0_beg,False
+def addTime(strs):
+    timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
+
+    outstr='\n %s %s '%(timestr,strs)
+    return outstr
+
+
+def 
get_file(): + print("文件名 :",__file__,sys._getframe().f_lineno) + print("函数名: ", sys._getframe().f_code.co_name) + print("模块名: ", sys._getframe().f_back.f_code.co_name) + +def writeELK_log(msg,fp,level='INFO',thread='detector',logger='kafka_yolov5',line=9999,newLine=False,printFlag=True): + #timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + timestr=get_current_time() + outstr='%s [%s][%s][%d][%s]- %s'%(timestr,level,thread,line,logger,msg) + + if newLine: + outstr = '\n'+outstr + + fp.write(outstr+'\n') + fp.flush() + if printFlag: + print(outstr) + return outstr + + +def wrtiteLog(fp,strs,newLine=False): + timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + if newLine: + outstr='\n %s %s '%(timestr,strs) + else: + outstr='%s %s '%(timestr,strs) + fp.write(outstr+'\n') + fp.flush() + return outstr + +def create_logFile(logdir='logdir',name=None): + if name: + logname =logdir+'/'+ name + else: + logname =logdir+'/'+ time.strftime("%Y-%m-%d.txt", time.localtime()) + if os.path.exists(logname): + fp_log = open(logname,'a+') + else: + fp_log = open(logname,'w') + return fp_log +def get_boradcast_address(outResource): + #rtmp://live.push.t-aaron.com/live/THSB,阿里云,1945 + #rtmp://demopush.yunhengzhizao.cn/live/THSB,腾讯云,1935 + if '1945' in outResource: + return 'rtmp://live.play.t-aaron.com/live/THSB' + else: + return 'rtmp://demoplay.yunhengzhizao.cn/live/THSB_HD5M' +def save_message(kafka_dir,msg): + outtxt=os.path.join(kafka_dir,msg['request_id']+'.json') + assert os.path.exists(kafka_dir) + with open(outtxt,'w') as fp: + json.dump(msg,fp,ensure_ascii=False) + + + +def get_push_address(outResource): + #rtmp://live.push.t-aaron.com/live/THSB,阿里云,1945 + #rtmp://demopush.yunhengzhizao.cn/live/THSB,腾讯云,1935 + #终端推流地址:rtmp://live.push.t-aaron.com/live/THSAa + #终端拉流地址:rtmp://live.play.t-aaron.com/live/THSAa_hd + #AI推流地址:rtmp://live.push.t-aaron.com/live/THSBa + #AI拉流地址:rtmp://live.play.t-aaron.com/live/THSBa_hd + + if 't-aaron' in outResource: + if 'THSBa' in outResource: port=1975 + elif 'THSBb' in outResource: port=1991 + elif 'THSBc' in outResource: port=1992 + elif 'THSBd' in outResource: port=1993 + elif 'THSBe' in outResource: port=1994 + elif 'THSBf' in outResource: port=1995 + elif 'THSBg' in outResource: port=1996 + elif 'THSBh' in outResource: port=1997 + else: port=1945 + else: port=1935 + return 'rtmp://127.0.0.1:%d/live/test'%(port) + return outResource +def getAllRecord_poll(consumer): + msgs = consumer.poll(5000) + keys=msgs.keys() + out = [ msgs[x] for x in keys] + out = [y for x in out for y in x] + + + for key in keys: + out.extend(msgs[key]) + return out +def getAllRecords(consumer,topics): + leftCnt = 0 + for topic in topics[0:2]: + leftCnt+=get_left_cnt(consumer,topic) + out = [] + if leftCnt == 0: + return [] + for ii,msg in enumerate(consumer): + consumer.commit() + out.append(msg) + if ii== (leftCnt-1): + break###断流或者到终点 + return out + +def get_left_cnt(consumer,topic): + partitions = [TopicPartition(topic, p) for p in consumer.partitions_for_topic(topic)] + + # total + toff = consumer.end_offsets(partitions) + toff = [(key.partition, toff[key]) for key in toff.keys()] + toff.sort() + + # current + coff = [(x.partition, consumer.committed(x)) for x in partitions] + coff.sort() + + # cal sum and left + toff_sum = sum([x[1] for x in toff]) + cur_sum = sum([x[1] for x in coff if x[1] is not None]) + left_sum = toff_sum - cur_sum + + return left_sum +def view_bar(num, total,time1,prefix='prefix'): + rate = num / total + time_n=time.time() + rate_num = 
int(rate * 30) + rate_nums = np.round(rate * 100) + r = '\r %s %d / %d [%s%s] %.2f s'%(prefix,num,total, ">" * rate_num, " " * (30 - rate_num), time_n-time1 ) + sys.stdout.write(r) + sys.stdout.flush() +def get_total_cnt(inSource): + cap=cv2.VideoCapture(inSource) + assert cap.isOpened() + cnt=cap.get(7) + fps = cap.get(cv2.CAP_PROP_FPS) + cap.release() + return cnt,fps +def check_stream(inSource,producer,par,msg,outStrList ,fp_log,logger,line='000',thread='detector',timeMs=120,): + cnt =(timeMs-1)//10 + 1 + Stream_ok=False + + for icap in range(cnt): + cap=cv2.VideoCapture(inSource) + + if cap.isOpened() and get_fps_rtmp(inSource,video=False)[0] : + Stream_ok=True ;cap.release();break; + #Stream_ok,_= get_fps_rtmp(inSource,video=False) + #if Stream_ok:cap.release();break; + else: + Stream_ok=False + timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) + outstr='Waiting stream %d s'%(10*icap) + writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=line,logger=logger) + time.sleep(10) + if icap%3==0: + send_kafka(producer,par,msg,outStrList,fp_log,logger=logger,line=line,thread=thread ) + + + return Stream_ok + + + + +def get_fps_rtmp(inSource,video=False): + cap=cv2.VideoCapture(inSource) + if not cap.isOpened(): + print('#####error url:',inSource) + return False,[0,0,0,0] + + fps = cap.get(cv2.CAP_PROP_FPS) + width = cap.get(cv2.CAP_PROP_FRAME_WIDTH ) + height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + cnt = 0 + if video: cnt=cap.get(7) + + if width*height==0 or fps>30: + return False,[0,0,0,0] + cap.release() + try: + outx = [fps,width,height,cnt] + outx = [int(x+0.5) for x in outx] + + return True,outx + except: + return False, [0,0,0,0] + + diff --git a/utilsK/modelEval.py b/utilsK/modelEval.py new file mode 100644 index 0000000..7fb7c1f --- /dev/null +++ b/utilsK/modelEval.py @@ -0,0 +1,110 @@ +import sys +sys.path.extend(['/home/thsw2/WJ/src/yolov5']) +import utils,json,time,torch +import numpy as np +from segutils.segmodel import SegModel,get_largest_contours +from models.experimental import attempt_load +from utils.torch_utils import select_device, load_classifier, time_synchronized +import subprocess as sp +import cv2 +from utils.datasets import LoadStreams, LoadImages +from queRiver import get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str +def get_total_cnt(inSource): + cap=cv2.VideoCapture(inSource) + cnt=cap.get(7) + cap.release() + return cnt +def onlineModelProcess(parIn ): + streamName = parIn['streamName'] + childCallback=parIn['callback'] + try: + + inSource,outSource=parIn['inSource'],parIn['outSource'] + weights='../yolov5/weights/1230_last.pt' + device = select_device('0') + half = device.type != 'cpu' # half precision only supported on CUDA + model = attempt_load(weights, map_location=device) # load FP32 model + if half: model.half() + seg_nclass = 2 + weights = '../yolov5/weights/segmentation/BiSeNet/checkpoint.pth' + segmodel = SegModel(nclass=seg_nclass,weights=weights,device=device) + jsonfile='../yolov5/config/queRiver.json' + with open(jsonfile,'r') as fp: + parAll = json.load(fp) + + + resource=parAll['prep_process']['source'] + if outSource: + command=['ffmpeg','-y','-f', 'rawvideo','-vcodec','rawvideo','-pix_fmt', 'bgr24', + '-s', "{}x{}".format(parAll["push_process"]['OutVideoW'],parAll["push_process"]['OutVideoH']),# 图片分辨率 + '-r', str(30),# 视频帧率 + '-i', '-','-c:v', 'libx264','-pix_fmt', 'yuv420p', + '-f', 'flv',outSource + ] + txtname='mintors/%s.txt'%( time.strftime("%Y-%m-%d", time.localtime()) ) + fp_out = open( txtname,'a+' ) 
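+        # The ffmpeg push pipe built above, shown in isolation as a hedged sketch
+        # (not repo code: assumes ffmpeg is on PATH; the URL, resolution and the
+        # `frames` iterable are placeholders):
+        #
+        #   import subprocess as sp
+        #   cmd = ['ffmpeg','-y','-f','rawvideo','-vcodec','rawvideo','-pix_fmt','bgr24',
+        #          '-s','1920x1080','-r','30','-i','-',
+        #          '-c:v','libx264','-pix_fmt','yuv420p','-f','flv','rtmp://example.com/live/demo']
+        #   pipe = sp.Popen(cmd, stdin=sp.PIPE)
+        #   for frame in frames:                    # frame: np.uint8 array, shape (1080,1920,3), BGR
+        #       pipe.stdin.write(frame.tobytes())   # raw bytes must match -pix_fmt bgr24 and -s
+        #   pipe.stdin.close(); pipe.wait()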
+        outstr='%s stream:%s starts \n'%(time_str(),parAll['push_process']['rtmpUrl'])
+        fp_out.write(outstr);fp_out.flush()
+
+        # pipe setup: annotated frames are pushed to ffmpeg through stdin
+        if outSource:
+            ppipe = sp.Popen(command, stdin=sp.PIPE)
+
+        ## post-processing parameters
+        par=parAll['post_process']
+        conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
+        labelnames=par['labelnames']
+        rainbows=par['rainbows']
+        fpsample = par['fpsample']
+        names=get_labelnames(labelnames)
+        label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
+
+        dataset = LoadStreams(inSource, img_size=640, stride=32)
+        totalcnt = 0  # the frame count is only known for local video files
+        if inSource.endswith('.MP4') or inSource.endswith('.mp4'):
+            totalcnt=get_total_cnt(inSource)
+        childCallback.send('####model load success####')
+        iframe = 0;post_results=[];time_beg=time.time()
+        print('###line71 modelEval.py',totalcnt,len(dataset), inSource)
+        for path, img, im0s, vid_cap in dataset:
+            if not path: childCallback.send('####strem ends####'); break  ### stream dropped or end of file
+            if not outSource:  ### when not pushing a stream, draw a progress bar instead
+                view_bar(iframe,totalcnt,time_beg)
+            time0=time.time()
+            iframe += 1
+            time1=time.time()
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if half else img.float()  # uint8 to fp16/32
+            img /= 255.0  # 0 - 255 to 0.0 - 1.0
+            time2 = time.time()
+            pred = model(img,augment=False)[0]
+            time3 = time.time()
+            seg_pred,segstr = segmodel.eval(im0s[0])
+            time4 = time.time()
+            datas = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
+            p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
+            ## every fpsample frames, save a problem image if the batch flagged any
+            if (iframe % fpsample == 0) and (len(post_results)>0):
+                parImage=save_problem_images(post_results,iframe,names,streamName=streamName)
+                post_results=[]
+            if len(p_result[2])>0:  ## the frame contains detections
+                post_results.append(p_result)
+            image_array = p_result[1]
+            if outSource:
+                ppipe.stdin.write(image_array.tobytes())  # tostring() is deprecated in numpy; tobytes() yields the same bytes
+    except Exception as e:
+        childCallback.send(e)  # hand the exception to the parent through the pipe
diff --git a/utilsK/queRiver.py b/utilsK/queRiver.py
new file mode 100644
index 0000000..3167a89
--- /dev/null
+++ b/utilsK/queRiver.py
@@ -0,0 +1,307 @@
+from kafka import KafkaProducer, KafkaConsumer
+from kafka.errors import kafka_errors
+import traceback
+import json, base64,os
+import numpy as np
+from multiprocessing import Process,Queue
+import time,cv2,string,random
+import subprocess as sp
+import matplotlib.pyplot as plt
+from utils.datasets import LoadStreams, LoadImages
+from models.experimental import attempt_load
+from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
+import torch,sys
+from segutils.segWaterBuilding import SegModel,get_largest_contours,illBuildings
+from segutils.core.models.bisenet import BiSeNet_MultiOutput
+from utils.plots import plot_one_box,plot_one_box_PIL,draw_painting_joint,get_label_arrays,get_websource
+from collections import Counter
+def bsJpgCode(image_ori):
+    jpgCode = cv2.imencode('.jpg',image_ori)[-1]  ### np.ndarray of JPEG bytes, shape (n,1)
+    bsCode = str(base64.b64encode(jpgCode))[2:-1]  ### base64 string
+    return bsCode
+def bsJpgDecode(bsCode):
+    bsDecode = base64.b64decode(bsCode)  ### bytes
+    npString = np.frombuffer(bsDecode,np.uint8)  ### np.ndarray, shape (n,)
+    jpgDecode = cv2.imdecode(npString,cv2.IMREAD_COLOR)  ### np.ndarray, e.g. (3000,4000,3)
+    return jpgDecode
+def get_ms(time0,time1):
+    str_time ='%.2f ms'%((time1-time0)*1000)
+    return str_time
+rainbows=[
+    (0,0,255),(0,255,0),(255,0,0),(255,0,255),(255,255,0),(255,127,0),(255,0,127),
+    (127,255,0),(0,255,127),(0,127,255),(127,0,255),(255,127,255),(255,255,127),
+    (127,255,255),(0,255,255),(255,127,255),(127,255,255),
+    (0,127,0),(0,0,127),(0,255,255)
+    ]
+
+def get_labelnames(labelnames):
+    with open(labelnames,'r') as fp:
+        namesjson=json.load(fp)
+    names = namesjson['labelnames']
+    return names
+
+def check_stream(stream):
+    cap = cv2.VideoCapture(stream)
+    flag = cap.isOpened()
+    cap.release()  # the capture used to leak when the stream was open
+    return flag
+#####
+def drawWater(pred,image_array0):  #### pred: segmentation output of the water-only task
+    ## outline the water region
+    contours, hierarchy = cv2.findContours(pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+    water = pred.copy(); water[:,:] = 0
+    if len(contours)==0:
+        return image_array0,water
+    max_id = get_largest_contours(contours)
+    cv2.fillPoly(water, [contours[max_id][:,0,:]], 1)
+    cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
+    return image_array0,water
+
+def post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,object_config=[0,1,2,3,4]):
+    ## input: data from the dataset generator, the model's raw pred, and NMS parameters
+    ## pipeline: NMS ---> coordinate rescaling ---> drawing
+    ## output: original image, AI-annotated image, detections
+    time0=time.time()
+    path, img, im0s, vid_cap,pred,seg_pred = datas[0:6]
+    segmodel=True
+    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
+    time1=time.time()
+    i=0;det=pred[0]  ### one image per call
+    p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
+    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+    det_xywh=[]
+    if len(seg_pred)==2:
+        im0,water = illBuildings(seg_pred,im0)
+    else:
+        im0,water = drawWater(seg_pred,im0)
+    time2=time.time()
+    if len(det)>0:
+        # Rescale boxes from img_size to im0 size
+        det[:, :4] = scale_coords(img.shape[2:], det[:, :4],im0.shape).round()
+        # use the seg model's river mask to keep only boxes that lie on the water
+        if segmodel:
+            det_c = det.clone().cpu().numpy()
+            area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])])/((x[2]-x[0])*(x[3]-x[1])) for x in det_c])
+            det = det[area_factors>0.1]
+        # draw the surviving boxes
+        for *xyxy, conf, cls in reversed(det):
+            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+            cls_c = cls.cpu().numpy()
+            if int(cls_c) not in object_config:  ### skip classes the task did not ask for
+                continue
+            conf_c = conf.cpu().numpy()
+            line = [float(cls_c), *xywh, float(conf_c)]  # label format
+            det_xywh.append(line)
+            label = f'{names[int(cls)]} {conf:.2f}'
+            im0 = draw_painting_joint(xyxy,im0,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],line_thickness=None)
+    time3=time.time()
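+    # The mask-overlap filter above, as a standalone hedged sketch (`water` is the
+    # binary HxW river mask and `det` an Nx6 array of [x1,y1,x2,y2,conf,cls] boxes,
+    # matching this function's variables):
+    #
+    #   def filter_boxes_by_mask(det, water, min_overlap=0.1):
+    #       keep = []
+    #       for x1, y1, x2, y2, conf, cls in det:
+    #           area = max((x2 - x1) * (y2 - y1), 1e-6)                  # box area in pixels
+    #           inside = water[int(y1):int(y2), int(x1):int(x2)].sum()   # mask pixels inside the box
+    #           keep.append(inside / area > min_overlap)                 # keep boxes mostly on water
+    #       return det[np.array(keep)]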
+    strout='nms:%s illBuilding:%s detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2),get_ms(time2,time3))
+    return [im0s[0],im0,det_xywh,iframe],strout
+
+def preprocess(par):
+    print('#####process:',par['name'])
+    ## reads the video and produces both the original frames and the resized detector input (numpy)
+    while True:
+        cap = cv2.VideoCapture(par['source'])
+        opened = cap.isOpened(); cap.release()  # probe only; LoadStreams reopens the source itself
+        iframe = 0
+        if opened:
+            print('#### read %s success!'%(par['source']))
+            try:
+                dataset = LoadStreams(par['source'], img_size=640, stride=32)
+                for path, img, im0s, vid_cap in dataset:
+                    datas=[path, img, im0s, vid_cap,iframe]
+                    par['queOut'].put(datas)
+                    iframe += 1
+            except Exception as e:
+                print('###read error:%s'%(par['source']))
+                time.sleep(10)
+                iframe = 0
+        else:
+            print('###read error:%s'%(par['source']))
+            time.sleep(10)
+            iframe = 0
+
+def gpu_process(par):
+    print('#####process:',par['name'])
+    half=True
+    ## GPU work: detection model
+    weights = par['weights']
+    device = par['device']
+    print('###line127:',par['device'])
+    model = attempt_load(par['weights'], map_location=par['device'])  # load FP32 model
+    if half:
+        model.half()
+    ## GPU work: segmentation model
+    seg_nclass = par['seg_nclass']
+    seg_weights = par['seg_weights']
+    # NOTE: the checkpoint from the config is overridden by the hard-coded
+    # multi-output water/building weights below
+    nclass = [2,2]
+    Segmodel = BiSeNet_MultiOutput(nclass)
+    weights='weights/segmentation/WaterBuilding.pth'
+    segmodel = SegModel(model=Segmodel,nclass=nclass,weights=weights,device='cuda:0',multiOutput=True)
+    while True:
+        if not par['queIn'].empty():
+            time0=time.time()
+            datas = par['queIn'].get()
+            path, img, im0s, vid_cap,iframe = datas[0:5]
+            time1=time.time()
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if half else img.float()  # uint8 to fp16/32
+            img /= 255.0  # 0 - 255 to 0.0 - 1.0
+            time2 = time.time()
+            pred = model(img,augment=False)[0]
+            time3 = time.time()
+            seg_pred = segmodel.eval(im0s[0],outsize=None,smooth_kernel=20)
+            time4 = time.time()
+            fpStr= 'process:%s ,iframe:%d,getdata:%s,copygpu:%s,dettime:%s,segtime:%s , time:%s, queLen:%d '%(par['name'],iframe,get_ms(time0,time1),get_ms(time1,time2),get_ms(time2,time3),get_ms(time3,time4),get_ms(time0,time4),par['queIn'].qsize())
+            datasOut = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
+            par['queOut'].put(datasOut)
+            if par['debug']:
+                print('#####process:',par['name'],' line107')
+        else:
+            time.sleep(1/300)
+def get_cls(array):
+    # majority vote: return the most frequent class id
+    dcs = Counter(array)
+    keys = list(dcs.keys())
+    values = list(dcs.values())
+    max_index = values.index(max(values))
+    cls = int(keys[max_index])
+    return cls
+def save_problem_images(post_results,iimage_cnt,names,streamName='live-THSAHD5M',outImaDir='problems/images_tmp',imageTxtFile=False):
+    ## each detection: [cls, x,y,w,h, conf]
+    dets_list = [x[2] for x in post_results]
+    mean_scores=[np.array(x)[:,5].mean() for x in dets_list]  ### mean conf per frame
+    best_index = mean_scores.index(max(mean_scores))  ## index of the clearest problem frame in this batch
+    best_frame = post_results[best_index][3]  ## absolute frame number
+    img_send = post_results[best_index][1]  ## AI-annotated image
+    img_bak = post_results[best_index][0]  ## original image
+    cls_max = get_cls(x[0] for x in dets_list[best_index])
+    time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+    uid=''.join(random.sample(string.ascii_letters + string.digits, 16))
+    #ori_name = '2022-01-20-15-57-36_frame-368-720_type-漂浮物_qVh4zI08ZlwJN9on_s-live-THSAHD5M_OR.jpg'
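+    # How the preprocess/gpu_process/post_process stages above are meant to be
+    # chained: a hedged wiring sketch (queue sizes and parameter values are
+    # illustrative only, not values from this repo):
+    #
+    #   from multiprocessing import Process, Queue
+    #   q_frames, q_preds = Queue(maxsize=32), Queue(maxsize=32)
+    #   Process(target=preprocess, args=({'name':'prep','source':'demo.mp4','queOut':q_frames},)).start()
+    #   Process(target=gpu_process, args=({'name':'gpu','weights':'best.pt','device':'cuda:0',
+    #                                      'seg_nclass':2,'seg_weights':'ckpt.pth','debug':False,
+    #                                      'queIn':q_frames,'queOut':q_preds},)).start()
+    #   # post_process then consumes q_preds through its own 'queIn' in the same way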
+    #2022-01-13-15-07-57_frame-9999-9999_type-结束_9999999999999999_s-off-XJRW20220110115904_AI.jpg
+    outnameAI= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_AI.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName)
+    outnameOR= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_OR.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName)
+    # the *_AI file holds the annotated frame and *_OR the original; the two
+    # variable names were crossed before, so imgORname/imgARname below pointed
+    # at the wrong files
+    cv2.imwrite(outnameAI,img_send)
+    cv2.imwrite(outnameOR,img_bak)
+    if imageTxtFile:
+        for outname in (outnameAI, outnameOR):
+            fp=open(outname.replace('.jpg','.txt'),'w');fp.write(outname+'\n');fp.close()
+
+    parOut = {}
+    parOut['imgOR'] = img_bak   # original frame (both slots used to hold the annotated one)
+    parOut['imgAR'] = img_send  # annotated frame
+    parOut['uid']=uid
+    parOut['imgORname']=os.path.basename(outnameOR);parOut['imgARname']=os.path.basename(outnameAI)
+    parOut['time_str'] = time_str;parOut['type'] = names[cls_max]
+    return parOut
+
+def post_process(par):
+    print('#####process:',par['name'])
+    ### post-process parameters
+    conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
+    labelnames=par['labelnames']
+    rainbows=par['rainbows']
+    fpsample = par['fpsample']
+    names=get_labelnames(labelnames)
+    label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
+    iimage_cnt = 0
+    post_results=[]
+    while True:
+        if not par['queIn'].empty():
+            time0=time.time()
+            datas = par['queIn'].get()
+            iframe = datas[6]
+            if par['debug']:
+                print('#####process:',par['name'],' line129')
+            p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
+            par['queOut'].put(p_result)
+            ## every fpsample frames, save an image if the batch flagged a problem
+            if (iframe % fpsample == 0) and (len(post_results)>0):
+                save_problem_images(post_results,iframe,names)
+                post_results=[]
+            if len(p_result[2])>0:  ## the frame contains detections
+                post_results.append(p_result)
+            time1=time.time()
+            outstr='process:%s ,iframe:%d,%s , time:%s, queLen:%d '%(par['name'],iframe,timeOut,get_ms(time0,time1),par['queIn'].qsize())
+        else:
+            time.sleep(1/300)
+
+def save_logfile(name,txt):
+    # open in append mode; 'r+' used to write over the head of the file
+    if os.path.exists(name):
+        fp=open(name,'a+')
+    else:
+        fp=open(name,'w')
+    fp.write('%s %s \n'%(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),txt))
+    fp.close()
+def time_str():
+    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
+if __name__=='__main__':
+    jsonfile='config/queRiver.json'
+    #work_stream(jsonfile)  # work_stream is not defined in this module; kept as a reminder of the intended entry point
+    #par={'name':'preprocess'}
+    #preprocess(par)
diff --git a/utilsK/sendUtils.py b/utilsK/sendUtils.py
new file mode 100644
index 0000000..d42a84e
--- /dev/null
+++ b/utilsK/sendUtils.py
@@ -0,0 +1,207 @@
+from aliyunsdkvod.request.v20170321 import GetPlayInfoRequest
+import json
+import traceback
+from aliyunsdkcore.client import AcsClient
+from PIL import Image
+import numpy as np
+import cv2
+import base64
+import io,os,copy
+import requests
+import time
+import string,random
+import glob,sys
+from multiprocessing import Process,Queue
+import oss2
+from kafka import KafkaProducer, KafkaConsumer
+from voduploadsdk.UploadVideoRequest import UploadVideoRequest
+from voduploadsdk.AliyunVodUtils import *
+from voduploadsdk.AliyunVodUploader import AliyunVodUploader
+from datetime import datetime, date, timedelta
+def get_today():
+    return date.today().strftime("%Y-%m-%d")
+def get_yesterday(beforeday=-1):
+    return (date.today() + timedelta(days=beforeday)).strftime("%Y-%m-%d")
+
+def get_videoUurl(videoBakDir,filename):
+    ### look back over the last seven days of dated sub-directories
+    potentialUrls=[os.path.join(videoBakDir,get_yesterday(beforeday=-x),filename) for x in range(7)]
+    existsList=[os.path.exists(x) for x in potentialUrls]
+    for i,flag in enumerate(existsList):
+        if flag: return potentialUrls[i]
+    return potentialUrls[0]
+
+def getNamedic(jsonfile):
+    with open(jsonfile) as fp:
+        dataDic=json.load(fp)
+    #"labelnames":["排口","排污口","水生植被","漂浮物","其它"],
+    #"labelIndexs":["SL014","SL011","SL013","SL001","SL001"]
+    assert 'labelnames' in dataDic.keys(), 'labelnames is not a key in %s'%(jsonfile)
+    assert 'labelIndexs' in dataDic.keys(), 'labelIndexs is not a key in %s'%(jsonfile)
+    assert len(dataDic['labelnames'])==len(dataDic['labelIndexs'])
+    nameDic={}
+    for key,value in zip(dataDic['labelnames'],dataDic['labelIndexs']):
+        nameDic[key]=value
+    return nameDic
+
+def get_play_info(clt, videoId):
+    request = GetPlayInfoRequest.GetPlayInfoRequest()
+    request.set_accept_format('JSON')
+    request.set_VideoId(videoId)
+    request.set_AuthTimeout(3600*5)
+    response = json.loads(clt.do_action_with_exception(request))
+    return response
+
+def create_status_msg(msg_dict_off,taskInfos,sts='waiting'):
+    msg= copy.deepcopy(msg_dict_off)
+    msg=update_json(taskInfos,msg,offkeys=["request_id"])
+    msg['status']=sts
+    msg = json.dumps(msg, ensure_ascii=False)
+    return msg
+# fill in the AccessKey information
+def init_vod_client(accessKeyId, accessKeySecret):
+    regionId = 'cn-shanghai'  # VOD service region
+    connectTimeout = 3        # connection timeout in seconds
+    return AcsClient(accessKeyId, accessKeySecret, regionId, auto_retry=True, max_retry_time=3, timeout=connectTimeout)
+def update_json(jsonOri,jsonNew,offkeys=["request_id"]):
+    #{'biz_id': 'hehuzhang', 'mod_id': 'ai', 'request_id': 'bblvgyntTsZCamqjuLArkiSYIbKXEeWx', 'offering_id': 'http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4', 'offering_type': 'mp4', 'results_base_dir': 'XJRW20220317153547', 'inSource': 'http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4', 'outSource': 'NO'}
+    for key in offkeys:
+        jsonNew[key] = jsonOri[key]
+    return jsonNew
+def get_time(filename):
+    #2021-10-09-11-44-51_frame-598-720_type-水生植被.jpg
+    sps=filename.strip().split('_')[0]
+    tsps=sps.split('-')
+    return '%s-%s-%s %s:%s:%s'%(tsps[0],tsps[1],tsps[2],tsps[3],tsps[4],tsps[5])
+def get_ms(time0,time1):
+    str_time ='%.2f ms'%((time1-time0)*1000)
+    return str_time
+
+def get_urls(platform_query_url,fp_log):
+    try:
+        if os.path.exists(platform_query_url):
+            with open('SendLog/platformQuery.json','r') as fp:
+                res = json.load(fp)
+        else:
+            res = requests.get(platform_query_url,timeout=10).json()
+        questionUrl = res['data']['questionUrl']  ### push address for problem images from live streams
+        offlineUrl = res['data']['offlineUrl']    ### push address for problem images from offline http videos
+    except Exception as ee:
+        timestr=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        print('###### %s: file:send_transfer: error %s ,url:%s #####'%(timestr,ee,platform_query_url))
+        outstr = '\n %s ###### get url platform error : update error:%s , url:%s'%(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),ee,platform_query_url)
+        fp_log.write(outstr);fp_log.flush()
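+        # On any failure the hard-coded defaults below are used. A hedged usage
+        # sketch of this helper (the log path is a placeholder; get_urls prefers a
+        # local JSON cache when the given path exists, otherwise it GETs the URL):
+        #
+        #   fp_log = open('SendLog/urls.txt', 'a+')
+        #   questionUrl, offlineUrl = get_urls('SendLog/platformQuery.json', fp_log)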
questionUrl="http://47.96.182.154:9040/api/taskFile/submitUAVKHQuestion" + offlineUrl ="http://47.96.182.154:9040/api/taskFile/submitUAVKHQuestion" + return questionUrl,offlineUrl +def parse_filename(filename_base): + #etc:2022-01-13-16-04-17_frame-823-1440_type-水生植被_hgYFEulc0dPIrG1S_s-off-XJRW20220113154959_AI.jpg + uid =filename_base.split('.')[0].split('_')[3].strip() + sourceType=filename_base.split('_')[4].split('-')[1] + sourceId=filename_base.split('_')[4].split('-')[2] + typename=filename_base.split('.')[0].split('_')[2].split('-')[1].strip() + return uid,sourceType,sourceId,typename +def b64encode_function(filename, filename_OR): + if os.path.exists(filename): + image_ori=cv2.imread(filename) + image_ori_OR=cv2.imread(filename_OR) + else: + image_ori = filename.copy() + image_ori_OR = image_ori_OR.copy() + image_pngcode = cv2.imencode('.jpg',image_ori)[-1] + image_pngcode_OR = cv2.imencode('.jpg',image_ori_OR)[-1] + image_code = str(base64.b64encode(image_pngcode))[2:-1] + image_code_OR = str(base64.b64encode(image_pngcode_OR))[2:-1] + return image_code, image_code_OR +def JsonSend(parIn): + + fp_log = parIn['fp_log'] + try: + response=requests.post(parIn['api'],json=parIn['input_'],timeout=10).json() + t3 = time.time() + print('\n file:%s encodetime:%.5f request time:%.5f,send to %s ,return code:%s, size:%.2f M \n'%(parIn['filename_base'],parIn['t2']-parIn['t1'],t3-parIn['t2'],api,response['code'],parIn['sizeImage'])) + outstr = '%s file:%s encodetime:%.5f request time:%.5f,send to %s ,return code:%s,size:%.2f M ,%s\n'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),parIn['filename_base'],parIn['t2']-parIn['t1'],t3-parIn['t2'],parIn['api'],response['code'],parIn['sizeImage'],parIn['dic_str']) + fp_log.write(outstr);fp_log.flush() + + except Exception as ee: + print('\n ######file:%s: upload error:%s,size:%.2f M'%(parIn['filename_base'],ee, parIn['sizeImage'])) + outstr = '\n%s ###### file:%s: upload error:%s , size:%.2f M'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ,parIn['filename_base'],ee,parIn['sizeImage']) + fp_log.write(outstr);fp_log.flush() + + +def dic2str(dic): + st='' + for key in dic.keys(): + st='%s %s:%s,'%(st,key,dic[key]) + return st +def createJsonInput(filename,offlineUrl,questionUrl): + flag = True + filename_base = os.path.basename(filename) + filename_OR=filename.replace('_AI.','_OR.') + if not os.path.exists(filename_OR ): + return False + + uid,sourceType, sourceId,typename = parse_filename(filename_base) + if (typename not in name_dic.keys()) or (typename == '排口'): + return False + api = questionUrl if sourceType=='live' else offlineUrl + + time_str = get_time(filename_base) + input_ ={ + 'imgName':os.path.basename(filename), + 'imgNameOriginal':os.path.basename(filename_OR), + 'time':time_str, + 'fid':uid, ###随机16位字符 + 'type':name_dic[typename],###这次先采用 ["排口","污口","水生植被","漂浮物","其它"] + 'typeId':nameID_dic[typename] + + } + if sourceType!='live': + input_['code']=sourceId;###只有离线视频才需要code, + + dic_str = dic2str(input_) + t1 = time.time() + + image_code, image_code_OR = b64encode_function(filename, filename_OR) + input_['imgData']=image_code + input_['imgDataOriginal']=image_code_OR + + sizeImage = (len(image_code) + len(image_code_OR) )/1000000.0 + + parOut={};parOut['flag']=True;parOut['input_']=input_; + parOut['sizeImage']=sizeImage;parOut['dic_str']=dic_str; + parOut['filename']=filename;parOut['filename_OR']=filename_OR; + parOut['api']=api ; parOut['t1']=t1 ; parOut['filename_base']= filename_base + return parOut + +def 
getLogFileFp(streamName): + logname ='SendLog/'+ time.strftime("%Y-%m-%d", time.localtime())+'_%s.txt'%(streamName) + if os.path.exists(logname): + fp_log = open(logname,'a+') + else: + fp_log = open(logname,'w') + return + +def lodaMsgInfos(jsonDir,msgId): + jsonUrl = os.path.join(jsonDir,msgId+'.json') + with open(jsonUrl,'r') as fp: + data=json.load(fp) + return data + +def parse_filename_for_oss(name): + splts=name.split('_') + typename=splts[2].split('-')[1].strip() + msgId=splts[4].split('-')[3] + onLineType=splts[4].split('-')[1] + return typename,msgId,onLineType +def percentage(consumed_bytes, total_bytes): + if total_bytes: + rate = int(100 * (float(consumed_bytes) / float(total_bytes))) + print('\r{0}% '.format(rate), end='') + sys.stdout.flush() + \ No newline at end of file diff --git a/voduploadsdk/AliyunVodUploader.py b/voduploadsdk/AliyunVodUploader.py new file mode 100644 index 0000000..c0356c9 --- /dev/null +++ b/voduploadsdk/AliyunVodUploader.py @@ -0,0 +1,670 @@ +# -*- coding: UTF-8 -*- +import json +import oss2 +import base64 +import requests +from oss2 import compat +import time + +from aliyunsdkcore import client +from aliyunsdkvod.request.v20170321 import CreateUploadVideoRequest +from aliyunsdkvod.request.v20170321 import RefreshUploadVideoRequest +from aliyunsdkvod.request.v20170321 import CreateUploadImageRequest +from aliyunsdkvod.request.v20170321 import CreateUploadAttachedMediaRequest +from voduploadsdk.AliyunVodUtils import * +from voduploadsdk.UploadVideoRequest import UploadVideoRequest + +VOD_MAX_TITLE_LENGTH = 128 +VOD_MAX_DESCRIPTION_LENGTH = 1024 + +class AliyunVodUploader: + + def __init__(self, accessKeyId, accessKeySecret, ecsRegionId=None): + """ + constructor for VodUpload + :param accessKeyId: string, access key id + :param accessKeySecret: string, access key secret + :param ecsRegion: string, 部署迁移脚本的ECS所在的Region,详细参考:https://help.aliyun.com/document_detail/40654.html,如:cn-beijing + :return + """ + self.__accessKeyId = accessKeyId + self.__accessKeySecret = accessKeySecret + self.__ecsRegion = ecsRegionId + self.__vodApiRegion = None + self.__connTimeout = 3 + self.__bucketClient = None + self.__maxRetryTimes = 3 + self.__vodClient = None + self.__EnableCrc = True + + # 分片上传参数 + self.__multipartThreshold = 10 * 1024 * 1024 # 分片上传的阈值,超过此值开启分片上传 + self.__multipartPartSize = 10 * 1024 * 1024 # 分片大小,单位byte + self.__multipartThreadsNum = 3 # 分片上传时并行上传的线程数,暂时为串行上传,不支持并行,后续会支持。 + + self.setApiRegion('cn-shanghai') + + + def setApiRegion(self, apiRegion): + """ + 设置VoD的接入地址,中国大陆为cn-shanghai,海外支持ap-southeast-1(新加坡)等区域,详情参考:https://help.aliyun.com/document_detail/98194.html + :param apiRegion: 接入地址的Region英文表示 + :return: + """ + self.__vodApiRegion = apiRegion + self.__vodClient = self.__initVodClient() + + + def setMultipartUpload(self, multipartThreshold=10*1024*1024, multipartPartSize=10*1024*1024, multipartThreadsNum=1): + if multipartThreshold > 0: + self.__multipartThreshold = multipartThreshold + if multipartPartSize > 0: + self.__multipartPartSize = multipartPartSize + if multipartThreadsNum > 0: + self.__multipartThreadsNum = multipartThreadsNum + + def setEnableCrc(self, isEnable=False): + self.__EnableCrc = True if isEnable else False + + @catch_error + def uploadLocalVideo(self, uploadVideoRequest, startUploadCallback=None): + """ + 上传本地视频或音频文件到点播,最大支持48.8TB的单个文件,暂不支持断点续传 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为本地文件的绝对路径 + :param startUploadCallback为获取到上传地址和凭证(uploadInfo)后开始进行文件上传时的回调,可用于记录上传日志等;uploadId为设置的上传ID,可用于关联导入视频。 
+ :return + """ + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + if startUploadCallback: + startUploadCallback(uploadVideoRequest.uploadId, uploadInfo) + headers = self.__getUploadHeaders(uploadVideoRequest) + self.__uploadOssObjectWithRetry(uploadVideoRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, headers) + return uploadInfo + + @catch_error + def uploadWebVideo(self, uploadVideoRequest, startUploadCallback=None): + """ + 上传网络视频或音频文件到点播,最大支持48.8TB的单个文件(需本地磁盘空间足够);会先下载到本地临时目录,再上传到点播存储 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为网络文件的URL地址 + :return + """ + # 下载文件 + uploadVideoRequest = self.__downloadWebMedia(uploadVideoRequest) + + # 上传到点播 + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + if startUploadCallback: + startUploadCallback(uploadVideoRequest.uploadId, uploadInfo) + headers = self.__getUploadHeaders(uploadVideoRequest) + self.__uploadOssObjectWithRetry(uploadVideoRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, headers) + + # 删除本地临时文件 + os.remove(uploadVideoRequest.filePath) + + return uploadInfo['VideoId'] + + @catch_error + def uploadLocalM3u8(self, uploadVideoRequest, sliceFilePaths=None): + """ + 上传本地m3u8视频或音频文件到点播,m3u8文件和分片文件默认在同一目录 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为本地m3u8索引文件的绝对路径, + 且m3u8文件的分片信息必须是相对地址,不能含有URL或本地绝对路径 + :param sliceFilePaths: list, 分片文件的本地路径列表,例如:['/opt/m3u8_video/sample_001.ts', '/opt/m3u8_video/sample_002.ts'] + sliceFilePaths为None时,会按照同一目录去解析分片地址;如不在同一目录等原因导致解析有误,可自行组装分片地址 + :return + """ + + if sliceFilePaths is None: + sliceFilePaths = self.parseLocalM3u8(uploadVideoRequest.filePath) + + if (not isinstance(sliceFilePaths, list)) or len(sliceFilePaths) <= 0: + raise AliyunVodException('InvalidM3u8SliceFile', 'M3u8 slice files invalid', 'sliceFilePaths invalid or m3u8 index file error') + + # 上传到点播的m3u8索引文件会重写,以此确保分片地址都为相对地址 + downloader = AliyunVodDownloader() + m3u8LocalDir = downloader.getSaveLocalDir() + '/' + AliyunVodUtils.getStringMd5(uploadVideoRequest.fileName) + downloader.setSaveLocalDir(m3u8LocalDir) + m3u8LocalPath = m3u8LocalDir + '/' + os.path.basename(uploadVideoRequest.fileName) + self.__rewriteM3u8File(uploadVideoRequest.filePath, m3u8LocalPath, True) + + # 获取上传凭证 + uploadVideoRequest.setFilePath(m3u8LocalPath) + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + uploadAddress = uploadInfo['UploadAddress'] + headers = self.__getUploadHeaders(uploadVideoRequest) + + # 依次上传分片文件 + for sliceFilePath in sliceFilePaths: + tempFilePath, sliceFileName = AliyunVodUtils.getFileBriefPath(sliceFilePath) + self.__uploadOssObjectWithRetry(sliceFilePath, uploadAddress['ObjectPrefix'] + sliceFileName, uploadInfo, headers) + + # 上传m3u8文件 + self.__uploadOssObjectWithRetry(m3u8LocalPath, uploadAddress['FileName'], uploadInfo, headers) + + # 删除重写到本地的m3u8文件 + if os.path.exists(m3u8LocalPath): + os.remove(m3u8LocalPath) + if not os.listdir(m3u8LocalDir): + os.rmdir(m3u8LocalDir) + + return uploadInfo['VideoId'] + + @catch_error + def uploadWebM3u8(self, uploadVideoRequest, sliceFileUrls=None): + """ + 上传网络m3u8视频或音频文件到点播,需本地磁盘空间足够,会先下载到本地临时目录,再上传到点播存储 + :param uploadVideoRequest: UploadVideoRequest类的实例,注意filePath为m3u8网络文件的URL地址 + :param sliceFileUrls: list, 分片文件的url,例如:['http://host/sample_001.ts', 'http://host/sample_002.ts'] + sliceFileUrls为None时,会按照同一前缀解析分片地址;如分片路径和m3u8索引文件前缀不同等原因导致解析有误,可自行组装分片地址 + :return + """ + if sliceFileUrls is None: + sliceFileUrls = self.parseWebM3u8(uploadVideoRequest.filePath) + + if (not 
isinstance(sliceFileUrls, list)) or len(sliceFileUrls) <= 0: + raise AliyunVodException('InvalidM3u8SliceFile', 'M3u8 slice urls invalid', + 'sliceFileUrls invalid or m3u8 index file error') + + # 下载m3u8文件和所有ts分片文件到本地;上传到点播的m3u8索引文件会重写,以此确保分片地址都为相对地址 + downloader = AliyunVodDownloader() + m3u8LocalDir = downloader.getSaveLocalDir() + '/' + AliyunVodUtils.getStringMd5(uploadVideoRequest.fileName) + downloader.setSaveLocalDir(m3u8LocalDir) + m3u8LocalPath = m3u8LocalDir + '/' + os.path.basename(uploadVideoRequest.fileName) + self.__rewriteM3u8File(uploadVideoRequest.filePath, m3u8LocalPath, False) + + sliceList = [] + for sliceFileUrl in sliceFileUrls: + tempFilePath, sliceFileName = AliyunVodUtils.getFileBriefPath(sliceFileUrl) + err, sliceLocalPath = downloader.downloadFile(sliceFileUrl, sliceFileName) + if sliceLocalPath is None: + raise AliyunVodException('FileDownloadError', 'Download M3u8 File Error', '') + sliceList.append((sliceLocalPath, sliceFileName)) + + # 获取上传凭证 + uploadVideoRequest.setFilePath(m3u8LocalPath) + uploadInfo = self.__createUploadVideo(uploadVideoRequest) + uploadAddress = uploadInfo['UploadAddress'] + headers = self.__getUploadHeaders(uploadVideoRequest) + + # 依次上传分片文件 + for sliceFile in sliceList: + self.__uploadOssObjectWithRetry(sliceFile[0], uploadAddress['ObjectPrefix'] + sliceFile[1], uploadInfo, headers) + + # 上传m3u8文件 + self.__uploadOssObjectWithRetry(m3u8LocalPath, uploadAddress['FileName'], uploadInfo, headers) + + # 删除下载到本地的m3u8文件和分片文件 + if os.path.exists(m3u8LocalPath): + os.remove(m3u8LocalPath) + for sliceFile in sliceList: + if os.path.exists(sliceFile[0]): + os.remove(sliceFile[0]) + if not os.listdir(m3u8LocalDir): + os.rmdir(m3u8LocalDir) + + return uploadInfo['VideoId'] + + + @catch_error + def uploadImage(self, uploadImageRequest, isLocalFile=True): + """ + 上传图片文件到点播,不支持断点续传;该接口可支持上传本地图片或网络图片 + :param uploadImageRequest: UploadImageRequest,注意filePath为本地文件的绝对路径或网络文件的URL地址 + :param isLocalFile: bool, 是否为本地文件。True:本地文件,False:网络文件 + :return + """ + # 网络图片需要先下载到本地 + if not isLocalFile: + uploadImageRequest = self.__downloadWebMedia(uploadImageRequest) + + # 上传到点播 + uploadInfo = self.__createUploadImage(uploadImageRequest) + self.__uploadOssObject(uploadImageRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, None) + + # 删除本地临时文件 + if not isLocalFile: + os.remove(uploadImageRequest.filePath) + + return uploadInfo['ImageId'], uploadInfo['ImageURL'] + + @catch_error + def uploadAttachedMedia(self, uploadAttachedRequest, isLocalFile=True): + """ + 上传辅助媒资文件(如水印、字幕文件)到点播,不支持断点续传;该接口可支持上传本地或网络文件 + :param uploadAttachedRequest: UploadAttachedMediaRequest,注意filePath为本地文件的绝对路径或网络文件的URL地址 + :param isLocalFile: bool, 是否为本地文件。True:本地文件,False:网络文件 + :return + """ + # 网络文件需要先下载到本地 + if not isLocalFile: + uploadAttachedRequest = self.__downloadWebMedia(uploadAttachedRequest) + + # 上传到点播 + uploadInfo = self.__createUploadAttachedMedia(uploadAttachedRequest) + self.__uploadOssObject(uploadAttachedRequest.filePath, uploadInfo['UploadAddress']['FileName'], uploadInfo, None) + + # 删除本地临时文件 + if not isLocalFile: + os.remove(uploadAttachedRequest.filePath) + + result = {'MediaId': uploadInfo['MediaId'], 'MediaURL': uploadInfo['MediaURL'], 'FileURL': uploadInfo['FileURL']} + return result + + @catch_error + def parseWebM3u8(self, m3u8FileUrl): + """ + 解析网络m3u8文件得到所有分片文件地址,原理是将m3u8地址前缀拼接ts分片名称作为后者的下载url,适用于url不带签名或分片与m3u8文件签名相同的情况 + 本函数解析时会默认分片文件和m3u8文件位于同一目录,如不是则请自行拼接分片文件的地址列表 + :param m3u8FileUrl: string, m3u8网络文件url,例如:http://host/sample.m3u8 + :return 
sliceFileUrls + """ + sliceFileUrls = [] + res = requests.get(m3u8FileUrl) + res.raise_for_status() + for line in res.iter_lines(): + if line.startswith('#'): + continue + sliceFileUrl = AliyunVodUtils.replaceFileName(m3u8FileUrl, line.strip()) + sliceFileUrls.append(sliceFileUrl) + + return sliceFileUrls + + @catch_error + def parseLocalM3u8(self, m3u8FilePath): + """ + 解析本地m3u8文件得到所有分片文件地址,原理是将m3u8地址前缀拼接ts分片名称作为后者的本地路径 + 本函数解析时会默认分片文件和m3u8文件位于同一目录,如不是则请自行拼接分片文件的地址列表 + :param m3u8FilePath: string, m3u8本地文件路径,例如:/opt/videos/sample.m3u8 + :return sliceFilePaths + """ + sliceFilePaths = [] + m3u8FilePath = AliyunVodUtils.toUnicode(m3u8FilePath) + for line in open(m3u8FilePath): + if line.startswith('#'): + continue + sliceFileName = line.strip() + sliceFilePath = AliyunVodUtils.replaceFileName(m3u8FilePath, sliceFileName) + sliceFilePaths.append(sliceFilePath) + + return sliceFilePaths + + + # 定义进度条回调函数;consumedBytes: 已经上传的数据量,totalBytes:总数据量 + def uploadProgressCallback(self, consumedBytes, totalBytes): + + if totalBytes: + rate = int(100 * (float(consumedBytes) / float(totalBytes))) + else: + rate = 0 + + print ("[%s]uploaded %s bytes, percent %s%s" % (AliyunVodUtils.getCurrentTimeStr(), consumedBytes, format(rate), '%')) + sys.stdout.flush() + + + def __initVodClient(self): + return client.AcsClient(self.__accessKeyId, self.__accessKeySecret, self.__vodApiRegion, + auto_retry=True, max_retry_time=self.__maxRetryTimes, timeout=self.__connTimeout) + + def __downloadWebMedia(self, request): + + # 下载媒体文件到本地临时目录 + downloader = AliyunVodDownloader() + localFileName = "%s.%s" % (AliyunVodUtils.getStringMd5(request.fileName), request.mediaExt) + fileUrl = request.filePath + err, localFilePath = downloader.downloadFile(fileUrl, localFileName) + if err < 0: + raise AliyunVodException('FileDownloadError', 'Download File Error', '') + + # 重新设置上传请求对象 + request.setFilePath(localFilePath) + return request + + def __rewriteM3u8File(self, srcM3u8File, dstM3u8File, isSrcLocal=True): + newM3u8Text = '' + if isSrcLocal: + for line in open(AliyunVodUtils.toUnicode(srcM3u8File)): + item = self.__processM3u8Line(line) + if item is not None: + newM3u8Text += item + "\n" + else: + res = requests.get(srcM3u8File) + res.raise_for_status() + for line in res.iter_lines(): + item = self.__processM3u8Line(line) + if item is not None: + newM3u8Text += item + "\n" + + AliyunVodUtils.mkDir(dstM3u8File) + with open(dstM3u8File, 'w') as f: + f.write(newM3u8Text) + + + def __processM3u8Line(self, line): + item = line.strip() + if len(item) <= 0: + return None + + if item.startswith('#'): + return item + + tempFilePath, fileName = AliyunVodUtils.getFileBriefPath(item) + return fileName + + + def __requestUploadInfo(self, request, mediaType): + request.set_accept_format('JSON') + result = json.loads(self.__vodClient.do_action_with_exception(request).decode('utf-8')) + result['OriUploadAddress'] = result['UploadAddress'] + result['OriUploadAuth'] = result['UploadAuth'] + + result['UploadAddress'] = json.loads(base64.b64decode(result['OriUploadAddress']).decode('utf-8')) + result['UploadAuth'] = json.loads(base64.b64decode(result['OriUploadAuth']).decode('utf-8')) + + result['MediaType'] = mediaType + if mediaType == 'video': + result['MediaId'] = result['VideoId'] + elif mediaType == 'image': + result['MediaId'] = result['ImageId'] + result['MediaURL'] = result['ImageURL'] + + return result + + + # 获取视频上传地址和凭证 + def __createUploadVideo(self, uploadVideoRequest): + request = CreateUploadVideoRequest.CreateUploadVideoRequest() + 
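+        # End-to-end usage of this uploader, as a hedged sketch (keys, path and
+        # title are placeholders; UploadVideoRequest is assumed to take
+        # (filePath, title) as in the official VOD SDK; uploadLocalVideo returns
+        # the uploadInfo dict built in this method):
+        #
+        #   from voduploadsdk.AliyunVodUploader import AliyunVodUploader
+        #   from voduploadsdk.UploadVideoRequest import UploadVideoRequest
+        #   uploader = AliyunVodUploader('<accessKeyId>', '<accessKeySecret>')
+        #   req = UploadVideoRequest('/path/to/video.mp4', 'demo title')
+        #   info = uploader.uploadLocalVideo(req)
+        #   print('VideoId:', info['VideoId'])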
+ title = AliyunVodUtils.subString(uploadVideoRequest.title, VOD_MAX_TITLE_LENGTH) + request.set_Title(title) + request.set_FileName(uploadVideoRequest.fileName) + + if uploadVideoRequest.description: + description = AliyunVodUtils.subString(uploadVideoRequest.description, VOD_MAX_DESCRIPTION_LENGTH) + request.set_Description(description) + if uploadVideoRequest.coverURL: + request.set_CoverURL(uploadVideoRequest.coverURL) + if uploadVideoRequest.tags: + request.set_Tags(uploadVideoRequest.tags) + if uploadVideoRequest.cateId: + request.set_CateId(uploadVideoRequest.cateId) + if uploadVideoRequest.templateGroupId: + request.set_TemplateGroupId(uploadVideoRequest.templateGroupId) + if uploadVideoRequest.storageLocation: + request.set_StorageLocation(uploadVideoRequest.storageLocation) + if uploadVideoRequest.userData: + request.set_UserData(uploadVideoRequest.userData) + if uploadVideoRequest.appId: + request.set_AppId(uploadVideoRequest.appId) + if uploadVideoRequest.workflowId: + request.set_WorkflowId(uploadVideoRequest.workflowId) + + result = self.__requestUploadInfo(request, 'video') + logger.info("CreateUploadVideo, FilePath: %s, VideoId: %s" % (uploadVideoRequest.filePath, result['VideoId'])) + return result + + # 刷新上传凭证 + def __refresh_upload_video(self, videoId): + request = RefreshUploadVideoRequest.RefreshUploadVideoRequest(); + request.set_VideoId(videoId) + + result = self.__requestUploadInfo(request, 'video') + logger.info("RefreshUploadVideo, VideoId %s" % (result['VideoId'])) + return result + + # 获取图片上传地址和凭证 + def __createUploadImage(self, uploadImageRequest): + request = CreateUploadImageRequest.CreateUploadImageRequest() + + request.set_ImageType(uploadImageRequest.imageType) + request.set_ImageExt(uploadImageRequest.imageExt) + if uploadImageRequest.title: + title = AliyunVodUtils.subString(uploadImageRequest.title, VOD_MAX_TITLE_LENGTH) + request.set_Title(title) + if uploadImageRequest.description: + description = AliyunVodUtils.subString(uploadImageRequest.description, VOD_MAX_DESCRIPTION_LENGTH) + request.set_Description(description) + if uploadImageRequest.tags: + request.set_Tags(uploadImageRequest.tags) + if uploadImageRequest.cateId: + request.set_CateId(uploadImageRequest.cateId) + if uploadImageRequest.storageLocation: + request.set_StorageLocation(uploadImageRequest.storageLocation) + if uploadImageRequest.userData: + request.set_UserData(uploadImageRequest.userData) + if uploadImageRequest.appId: + request.set_AppId(uploadImageRequest.appId) + if uploadImageRequest.workflowId: + request.set_WorkflowId(uploadImageRequest.workflowId) + + result = self.__requestUploadInfo(request, 'image') + logger.info("CreateUploadImage, FilePath: %s, ImageId: %s, ImageUrl: %s" % ( + uploadImageRequest.filePath, result['ImageId'], result['ImageURL'])) + return result + + def __createUploadAttachedMedia(self, uploadAttachedRequest): + request = CreateUploadAttachedMediaRequest.CreateUploadAttachedMediaRequest() + request.set_BusinessType(uploadAttachedRequest.businessType) + request.set_MediaExt(uploadAttachedRequest.mediaExt) + + if uploadAttachedRequest.title: + title = AliyunVodUtils.subString(uploadAttachedRequest.title, VOD_MAX_TITLE_LENGTH) + request.set_Title(title) + if uploadAttachedRequest.description: + description = AliyunVodUtils.subString(uploadAttachedRequest.description, VOD_MAX_DESCRIPTION_LENGTH) + request.set_Description(description) + if uploadAttachedRequest.tags: + request.set_Tags(uploadAttachedRequest.tags) + if uploadAttachedRequest.cateId: + 
request.set_CateId(uploadAttachedRequest.cateId) + if uploadAttachedRequest.storageLocation: + request.set_StorageLocation(uploadAttachedRequest.storageLocation) + if uploadAttachedRequest.userData: + request.set_UserData(uploadAttachedRequest.userData) + if uploadAttachedRequest.appId: + request.set_AppId(uploadAttachedRequest.appId) + if uploadAttachedRequest.workflowId: + request.set_WorkflowId(uploadAttachedRequest.workflowId) + + result = self.__requestUploadInfo(request, 'attached') + logger.info("CreateUploadImage, FilePath: %s, MediaId: %s, MediaURL: %s" % ( + uploadAttachedRequest.filePath, result['MediaId'], result['MediaURL'])) + return result + + + def __getUploadHeaders(self, uploadVideoRequest): + if uploadVideoRequest.isShowWatermark is None: + return None + else: + userData = "{\"Vod\":{\"UserData\":{\"IsShowWaterMark\": \"%s\"}}}" % (uploadVideoRequest.isShowWatermark) + return {'x-oss-notification': base64.b64encode(userData, 'utf-8')} + + # uploadType,可选:multipart, put, web + def __uploadOssObjectWithRetry(self, filePath, object, uploadInfo, headers=None): + retryTimes = 0 + while retryTimes < self.__maxRetryTimes: + try: + return self.__uploadOssObject(filePath, object, uploadInfo, headers) + except OssError as e: + # 上传凭证过期需要重新获取凭证 + if e.code == 'SecurityTokenExpired' or e.code == 'InvalidAccessKeyId': + uploadInfo = self.__refresh_upload_video(uploadInfo['MediaId']) + except Exception as e: + raise e + except: + raise AliyunVodException('UnkownError', repr(e), traceback.format_exc()) + finally: + retryTimes += 1 + + + def __uploadOssObject(self, filePath, object, uploadInfo, headers=None): + self.__createOssClient(uploadInfo['UploadAuth'], uploadInfo['UploadAddress']) + """ + p = os.path.dirname(os.path.realpath(__file__)) + store = os.path.dirname(p) + '/osstmp' + return oss2.resumable_upload(self.__bucketClient, object, filePath, + store=oss2.ResumableStore(root=store), headers=headers, + multipart_threshold=self.__multipartThreshold, part_size=self.__multipartPartSize, + num_threads=self.__multipartThreadsNum, progress_callback=self.uploadProgressCallback) + """ + uploader = _VodResumableUploader(self.__bucketClient, filePath, object, uploadInfo, headers, + self.uploadProgressCallback, self.__refreshUploadAuth) + uploader.setMultipartInfo(self.__multipartThreshold, self.__multipartPartSize, self.__multipartThreadsNum) + uploader.setClientId(self.__accessKeyId) + res = uploader.upload() + + uploadAddress = uploadInfo['UploadAddress'] + bucketHost = uploadAddress['Endpoint'].replace('://', '://' + uploadAddress['Bucket'] + ".") + logger.info("UploadFile %s Finish, MediaId: %s, FilePath: %s, Destination: %s/%s" % ( + uploadInfo['MediaType'], uploadInfo['MediaId'], filePath, bucketHost, object)) + return res + + # 使用上传凭证和地址信息初始化OSS客户端(注意需要先Base64解码并Json Decode再传入) + # 如果上传的ECS位于点播相同的存储区域(如上海),则可以指定internal为True,通过内网上传更快且免费 + def __createOssClient(self, uploadAuth, uploadAddress): + auth = oss2.StsAuth(uploadAuth['AccessKeyId'], uploadAuth['AccessKeySecret'], uploadAuth['SecurityToken']) + endpoint = AliyunVodUtils.convertOssInternal(uploadAddress['Endpoint'], self.__ecsRegion) + self.__bucketClient = oss2.Bucket(auth, endpoint, uploadAddress['Bucket'], + connect_timeout=self.__connTimeout, enable_crc=self.__EnableCrc) + return self.__bucketClient + + def __refreshUploadAuth(self, videoId): + uploadInfo = self.__refresh_upload_video(videoId) + uploadAuth = uploadInfo['UploadAuth'] + uploadAddress = uploadInfo['UploadAddress'] + return self.__createOssClient(uploadAuth, 
uploadAddress) + + +from oss2 import SizedFileAdapter, determine_part_size +from oss2.models import PartInfo +from aliyunsdkcore.utils import parameter_helper as helper +class _VodResumableUploader: + def __init__(self, bucket, filePath, object, uploadInfo, headers, progressCallback, refreshAuthCallback): + self.__bucket = bucket + self.__filePath = filePath + self.__object = object + self.__uploadInfo = uploadInfo + self.__totalSize = None + self.__headers = headers + self.__mtime = os.path.getmtime(filePath) + self.__progressCallback = progressCallback + self.__refreshAuthCallback = refreshAuthCallback + + self.__threshold = None + self.__partSize = None + self.__threadsNum = None + self.__uploadId = 0 + + self.__record = {} + self.__finishedSize = 0 + self.__finishedParts = [] + self.__filePartHash = None + self.__clientId = None + + def setMultipartInfo(self, threshold, partSize, threadsNum): + self.__threshold = threshold + self.__partSize = partSize + self.__threadsNum = threadsNum + + + def setClientId(self, clientId): + self.__clientId = clientId + + + def upload(self): + self.__totalSize = os.path.getsize(self.__filePath) + if self.__threshold and self.__totalSize <= self.__threshold: + return self.simpleUpload() + else: + return self.multipartUpload() + + + def simpleUpload(self): + with open(AliyunVodUtils.toUnicode(self.__filePath), 'rb') as f: + result = self.__bucket.put_object(self.__object, f, headers=self.__headers, progress_callback=None) + if self.__uploadInfo['MediaType'] == 'video': + self.__reportUploadProgress('put', 1, self.__totalSize) + + return result + + def multipartUpload(self): + psize = oss2.determine_part_size(self.__totalSize, preferred_size=self.__partSize) + + # 初始化分片 + self.__uploadId = self.__bucket.init_multipart_upload(self.__object).upload_id + + startTime = time.time() + expireSeconds = 2500 # 上传凭证有效期3000秒,提前刷新 + # 逐个上传分片 + with open(AliyunVodUtils.toUnicode(self.__filePath), 'rb') as fileObj: + partNumber = 1 + offset = 0 + + while offset < self.__totalSize: + uploadSize = min(psize, self.__totalSize - offset) + #logger.info("UploadPart, FilePath: %s, VideoId: %s, UploadId: %s, PartNumber: %s, PartSize: %s" % (self.__fileName, self.__videoId, self.__uploadId, partNumber, uploadSize)) + result = self.__bucket.upload_part(self.__object, self.__uploadId, partNumber, SizedFileAdapter(fileObj,uploadSize)) + #print(result.request_id) + self.__finishedParts.append(PartInfo(partNumber, result.etag)) + offset += uploadSize + partNumber += 1 + + # 上传进度回调 + self.__progressCallback(offset, self.__totalSize) + + if self.__uploadInfo['MediaType'] == 'video': + # 上报上传进度 + self.__reportUploadProgress('multipart', partNumber - 1, offset) + + # 检测上传凭证是否过期 + nowTime = time.time() + if nowTime - startTime >= expireSeconds: + self.__bucket = self.__refreshAuthCallback(self.__uploadInfo['MediaId']) + startTime = nowTime + + + # 完成分片上传 + self.__bucket.complete_multipart_upload(self.__object, self.__uploadId, self.__finishedParts, headers=self.__headers) + + return result + + + def __reportUploadProgress(self, uploadMethod, donePartsCount, doneBytes): + reportHost = 'vod.cn-shanghai.aliyuncs.com' + sdkVersion = '1.3.1' + reportKey = 'HBL9nnSwhtU2$STX' + + uploadPoint = {'upMethod': uploadMethod, 'partSize': self.__partSize, 'doneBytes': doneBytes} + timestamp = int(time.time()) + authInfo = AliyunVodUtils.getStringMd5("%s|%s|%s" % (self.__clientId, reportKey, timestamp)) + + fields = {'Action': 'ReportUploadProgress', 'Format': 'JSON', 'Version': '2017-03-21', + 
'Timestamp': helper.get_iso_8061_date(), 'SignatureNonce': helper.get_uuid(), + 'VideoId': self.__uploadInfo['MediaId'], 'Source': 'PythonSDK', 'ClientId': self.__clientId, + 'BusinessType': 'UploadVideo', 'TerminalType': 'PC', 'DeviceModel': 'Server', + 'AppVersion': sdkVersion, 'AuthTimestamp': timestamp, 'AuthInfo': authInfo, + + 'FileName': self.__filePath, 'FileHash': self.__getFilePartHash(self.__clientId, self.__filePath, self.__totalSize), + 'FileSize': self.__totalSize, 'FileCreateTime': timestamp, 'UploadRatio': 0, 'UploadId': self.__uploadId, + 'DonePartsCount': donePartsCount, 'PartSize': self.__partSize, 'UploadPoint': json.dumps(uploadPoint), + 'UploadAddress': self.__uploadInfo['OriUploadAddress'] + } + requests.post('http://' + reportHost, fields, timeout=1) + + + def __getFilePartHash(self, clientId, filePath, fileSize): + if self.__filePartHash: + return self.__filePartHash + + length = 1 * 1024 * 1024 + if fileSize < length: + length = fileSize + + try: + fp = open(AliyunVodUtils.toUnicode(filePath), 'rb') + strVal = fp.read(length) + self.__filePartHash = AliyunVodUtils.getStringMd5(strVal, False) + fp.close() + except: + self.__filePartHash = "%s|%s|%s" % (clientId, filePath, self.__mtime) + + return self.__filePartHash diff --git a/voduploadsdk/AliyunVodUtils.py b/voduploadsdk/AliyunVodUtils.py new file mode 100644 index 0000000..5a477b9 --- /dev/null +++ b/voduploadsdk/AliyunVodUtils.py @@ -0,0 +1,325 @@ +# -*- coding: UTF-8 -*- +import os,sys +import hashlib +import datetime +import functools +import logging +from oss2.exceptions import OssError +from aliyunsdkcore.acs_exception.exceptions import ServerException +from aliyunsdkcore.acs_exception.exceptions import ClientException +import traceback +import requests + +if sys.version_info[0] == 3: + import urllib.parse +else: + from urllib import unquote + + +VOD_PRINT_INFO_LOG_SWITCH = 1 + +class AliyunVodLog: + """ + VOD日志类,基于logging实现 + """ + @staticmethod + def printLogStr(msg, *args, **kwargs): + if VOD_PRINT_INFO_LOG_SWITCH: + print("[%s]%s" % (AliyunVodUtils.getCurrentTimeStr(), msg)) + + @staticmethod + def info(msg, *args, **kwargs): + logging.info(msg, *args, **kwargs) + AliyunVodLog.printLogStr(msg, *args, **kwargs) + + @staticmethod + def error(msg, *args, **kwargs): + logging.error(msg, *args, **kwargs) + AliyunVodLog.printLogStr(msg, *args, **kwargs) + + @staticmethod + def warning(msg, *args, **kwargs): + logging.warning(msg, *args, **kwargs) + AliyunVodLog.printLogStr(msg, *args, **kwargs) + +logger = AliyunVodLog + + +class AliyunVodUtils: + """ + VOD上传SDK的工具类,提供截取字符串、获取扩展名、获取文件名等静态函数 + """ + + # 截取字符串,在不超过最大字节数前提下确保中文字符不被截断出现乱码(先转换成unicode,再取子串,然后转换成utf-8) + @staticmethod + def subString(strVal, maxBytes, charSet='utf-8'): + i = maxBytes + if sys.version_info[0] == 3: + while len(strVal.encode(charSet)) > maxBytes: + if i < 0: + return '' + strVal = strVal[:i] + i -= 1 + else: + while len(strVal) > maxBytes: + if i < 0: + return '' + strVal = strVal.decode(charSet)[:i].encode(charSet) + i -= 1 + return strVal + + @staticmethod + def getFileExtension(fileName): + end = fileName.rfind('?') + if end <= 0: + end = len(fileName) + + i = fileName.rfind('.') + if i >= 0: + return fileName[i+1:end].lower() + else: + return None + + # urldecode + @staticmethod + def urlDecode(fileUrl): + if sys.version_info[0] == 3: + return urllib.parse.unquote(fileUrl) + else: + return unquote(fileUrl) + + # urlencode + @staticmethod + def urlEncode(fileUrl): + if sys.version_info[0] == 3: + return 
urllib.parse.urlencode(fileUrl) + else: + return urllib.urlencode(fileUrl) + + # 获取Url的摘要地址(去除?后的参数,如果有)以及文件名 + @staticmethod + def getFileBriefPath(fileUrl): + #fileUrl = AliyunVodUtils.urlDecode(fileUrl) + i = fileUrl.rfind('?') + if i > 0: + briefPath = fileUrl[:i] + else: + briefPath = fileUrl + + briefName = os.path.basename(briefPath) + return briefPath, AliyunVodUtils.urlDecode(briefName) + + @staticmethod + def getStringMd5(strVal, isEncode=True): + m = hashlib.md5() + m.update(strVal.encode('utf-8') if isEncode else strVal) + return m.hexdigest() + + @staticmethod + def getCurrentTimeStr(): + now = datetime.datetime.now() + return now.strftime("%Y-%m-%d %H:%M:%S") + + # 将oss地址转换为内网地址(如果脚本部署的ecs与oss bucket在同一区域) + @staticmethod + def convertOssInternal(ossUrl, ecsRegion=None, isVpc=False): + if (not ossUrl) or (not ecsRegion): + return ossUrl + + availableRegions = ['cn-qingdao', 'cn-beijing', 'cn-zhangjiakou', 'cn-huhehaote', 'cn-hangzhou', 'cn-shanghai', 'cn-shenzhen', + 'cn-hongkong', 'ap-southeast-1', 'ap-southeast-2', 'ap-southeast-3', + 'ap-northeast-1', 'us-west-1', 'us-east-1', 'eu-central-1', 'me-east-1'] + if ecsRegion not in availableRegions: + return ossUrl + + ossUrl = ossUrl.replace("https:", "http:") + if isVpc: + return ossUrl.replace("oss-%s.aliyuncs.com" % (ecsRegion), "vpc100-oss-%s.aliyuncs.com" % (ecsRegion)) + else: + return ossUrl.replace("oss-%s.aliyuncs.com" % (ecsRegion), "oss-%s-internal.aliyuncs.com" % (ecsRegion)) + + # 把输入转换为unicode + @staticmethod + def toUnicode(data): + if isinstance(data, bytes): + return data.decode('utf-8') + else: + return data + + # 替换路径中的文件名;考虑分隔符为"/" 或 "\"(windows) + @staticmethod + def replaceFileName(filePath, replace): + if len(filePath) <= 0 or len(replace) <= 0: + return filePath + + filePath = AliyunVodUtils.urlDecode(filePath) + separator = '/' + start = filePath.rfind(separator) + if start < 0: + separator = '\\' + start = filePath.rfind(separator) + if start < 0: + return None + + result = "%s%s%s" % (filePath[0:start], separator, replace) + return result + + # 创建文件中的目录 + @staticmethod + def mkDir(filePath): + if len(filePath) <= 0: + return -1 + + separator = '/' + i = filePath.rfind(separator) + if i < 0: + separator = '\\' + i = filePath.rfind(separator) + if i < 0: + return -2 + + dirs = filePath[:i] + if os.path.exists(dirs) and os.path.isdir(dirs): + return 0 + + os.makedirs(dirs) + return 1 + + + +class AliyunVodException(Exception): + """ + VOD上传SDK的异常类,做统一的异常处理,外部捕获此异常即可 + """ + + def __init__(self, type, code, msg, http_status=None, request_id=None): + Exception.__init__(self) + self.type = type or 'UnkownError' + self.code = code + self.message = msg + self.http_status = http_status or 'NULL' + self.request_id = request_id or 'NULL' + + def __str__(self): + return "Type: %s, Code: %s, Message: %s, HTTPStatus: %s, RequestId: %s" % ( + self.type, self.code, self.message, str(self.http_status), self.request_id) + +def catch_error(method): + """ + 装饰器,将内部异常转换成统一的异常类AliyunVodException + """ + + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + try: + return method(self, *args, **kwargs) + except ServerException as e: + # 可能原因:AK错误、账号无权限、参数错误等 + raise AliyunVodException('ServerException', e.get_error_code(), e.get_error_msg(), e.get_http_status(), e.get_request_id()) + logger.error("ServerException: %s", e) + except ClientException as e: + # 可能原因:本地网络故障(如不能连接外网)等 + raise AliyunVodException('ClientException', e.get_error_code(), e.get_error_msg()) + logger.error("ClientException: %s", e) + except 
+
+
+class AliyunVodDownloader:
+    """
+    Downloader for network files; when uploading a network file, it is first
+    downloaded into a local temporary directory and then uploaded to VOD.
+    """
+
+    def __init__(self, localDir=None):
+        if localDir:
+            self.__localDir = localDir
+        else:
+            p = os.path.dirname(os.path.realpath(__file__))
+            self.__localDir = os.path.dirname(p) + '/dlfiles'
+
+    def setSaveLocalDir(self, localDir):
+        self.__localDir = localDir
+
+    def getSaveLocalDir(self):
+        return self.__localDir
+
+    def downloadFile(self, fileUrl, localFileName, fileSize=None):
+        localPath = self.__localDir + '/' + localFileName
+        logger.info("Download %s To %s" % (fileUrl, localPath))
+        fileObj = None
+        try:
+            lsize = self.getFileSize(localPath)
+            if fileSize and lsize == fileSize:
+                logger.info('Download OK, File Exists')
+                return 0, localPath
+
+            AliyunVodUtils.mkDir(self.__localDir)
+
+            # Resume from the current local size; err == 0 means already complete.
+            err, webPage = self.__openWebFile(fileUrl, lsize)
+            if err == 0:
+                logger.info('Download OK, File Exists')
+                webPage.close()
+                return 0, localPath
+
+            fileObj = open(localPath, 'ab+')
+            for chunk in webPage.iter_content(chunk_size=8 * 1024):
+                if chunk:
+                    fileObj.write(chunk)
+        except Exception as e:
+            logger.error("Download fail: %s" % (e))
+            if fileObj:
+                fileObj.close()
+            return -1, None
+
+        fileObj.close()
+        webPage.close()
+        logger.info('Download OK')
+        return 1, localPath
+
+    def getFileSize(self, filePath):
+        try:
+            lsize = os.stat(filePath).st_size
+        except Exception:
+            lsize = 0
+
+        return lsize
+
+
+    def __openWebFile(self, fileUrl, offset):
+        webPage = None
+        try:
+            headers = {'Range': 'bytes=%d-' % offset}
+            webPage = requests.get(fileUrl, stream=True, headers=headers, timeout=120, verify=False)
+            status_code = webPage.status_code
+            err = -1
+            if status_code in [200, 206]:
+                err = 1
+            elif status_code == 416:
+                # 416 Range Not Satisfiable: the local copy already covers the file.
+                err = 0
+            else:
+                logger.error("Download offset %s fail, invalid url, status: %s" % (offset, status_code))
+        except Exception as e:
+            logger.error("Download offset %s fail: %s" % (offset, e))
+            err = -2
+        finally:
+            return err, webPage
+
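A minimal usage sketch of the utility class above (not part of the commit), assuming the voduploadsdk package and its dependencies (oss2, aliyunsdkcore, requests) are installed; the bucket name and URL are made up for illustration:

from voduploadsdk.AliyunVodUtils import AliyunVodUtils

# subString keeps whole characters, so the result may be shorter than maxBytes.
print(AliyunVodUtils.subString('漂浮物abc', 8))    # -> '漂浮' (6 bytes <= 8)

# getFileBriefPath strips the query string and url-decodes the file name.
url = 'http://demo-bucket.oss-cn-shanghai.aliyuncs.com/videos/demo%20clip.mp4?Expires=123'
briefPath, briefName = AliyunVodUtils.getFileBriefPath(url)
print(briefName)    # -> 'demo clip.mp4'

# On an ECS host in the same region as the bucket, switch to the internal endpoint.
print(AliyunVodUtils.convertOssInternal(briefPath, ecsRegion='cn-shanghai'))
# -> http://demo-bucket.oss-cn-shanghai-internal.aliyuncs.com/videos/demo%20clip.mp4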
diff --git a/voduploadsdk/ChangeLog.txt b/voduploadsdk/ChangeLog.txt
new file mode 100755
index 0000000..084c65b
--- /dev/null
+++ b/voduploadsdk/ChangeLog.txt
@@ -0,0 +1,20 @@
+2019-04-12 Version: 1.3.1
+1. An application ID can be set at upload time, isolating resources across a multi-application setup
+2. A workflow ID can be set at upload time to automate media processing
+
+2019-02-12 Version: 1.3.0
+1. The VOD service region (Shanghai by default) and storage region can be specified, easing overseas uploads
+2. Supports uploading auxiliary media assets (watermarks, subtitle files, etc.)
+3. Supports per-upload settings such as UserData and additional metadata
+4. Network files are now downloaded locally before being uploaded to VOD, enabling large-file uploads (up to 48.8 TB)
+5. Improved m3u8 uploads: a default interface parses the m3u8 segment list, and a custom segment list can also be supplied
+
+2018-07-05 Version: 1.2.1
+1. Supports setting the video storage region via UploadVideoRequest.setStorageLocation
+2. Fixed the upload credentials not being refreshed after expiry during large-file uploads
+
+2017-12-21 Version: 1.1.1
+1. Supports uploading a single local video, m3u8 video (with ts segments), or image file to VOD
+2. Supports uploading a single video, m3u8 video (with ts segments), or image file from the network (HTTP/HTTPS links, including OSS links) to VOD
+3. Supports both Python 2 and Python 3
+
diff --git a/voduploadsdk/UploadAttachedMediaRequest.py b/voduploadsdk/UploadAttachedMediaRequest.py
new file mode 100644
index 0000000..a09cfe9
--- /dev/null
+++ b/voduploadsdk/UploadAttachedMediaRequest.py
@@ -0,0 +1,87 @@
+# -*- coding: UTF-8 -*-
+"""
+ # Class UploadAttachedMediaRequest
+ #
+ # Aliyun VoD's Upload Attached Media (such as watermark, subtitle files) Request class, which wraps parameters to upload a media file into VoD.
+ # Users can pass parameters to AliyunVodUploader, including File Path, Title, etc., via an UploadAttachedMediaRequest instance.
+ # For more details, please check out the VoD API document: https://help.aliyun.com/document_detail/98467.html
+"""
+
+from voduploadsdk.AliyunVodUtils import *
+class UploadAttachedMediaRequest:
+    def __init__(self, filePath, businessType, title=None, fileExt=None):
+        """
+        constructor for UploadAttachedMediaRequest
+        :param filePath: string, absolute path of the file, or URL of a network file; must include the file extension
+        :return
+        """
+        self.businessType = businessType
+        self.filePath = None
+        self.fileName = None
+        self.mediaExt = None
+        self.title = None
+        self.setFilePath(filePath, title, fileExt)
+
+        self.fileSize = None
+        self.cateId = None
+        self.tags = None
+        self.description = None
+        self.userData = None
+        self.storageLocation = None
+        self.appId = None
+        self.workflowId = None
+
+
+    def setFilePath(self, filePath, title=None, fileExt=None):
+        if fileExt is None:
+            fileExt = AliyunVodUtils.getFileExtension(filePath)
+            if not fileExt:
+                raise AliyunVodException('ParameterError', 'InvalidParameter', 'filePath has no Extension')
+
+        fileExt = fileExt.lstrip('.')
+        self.mediaExt = fileExt
+        self.filePath = AliyunVodUtils.toUnicode(filePath)
+
+        briefPath, briefName = AliyunVodUtils.getFileBriefPath(self.filePath)
+        self.fileName = briefPath
+        if fileExt and (not self.fileName.endswith('.' + fileExt)):
+            self.fileName = self.fileName + '.' + fileExt
+
+        if title:
+            self.title = title
+        else:
+            if self.title is None:
+                self.title = briefName
+
+
+    def setBusinessType(self, businessType):
+        self.businessType = businessType
+
+    def setTitle(self, title):
+        self.title = title
+
+    def setFileSize(self, fileSize):
+        self.fileSize = fileSize
+
+    def setCateId(self, cateId):
+        self.cateId = cateId
+
+    def setTags(self, tags):
+        self.tags = tags
+
+    def setDescription(self, description):
+        self.description = description
+
+    def setStorageLocation(self, storageLocation):
+        self.storageLocation = storageLocation
+
+    def setUserData(self, userData):
+        self.userData = userData
+
+    def setAppId(self, appId):
+        self.appId = appId
+
+    def setWorkflowId(self, workflowId):
+        self.workflowId = workflowId
+
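A hedged sketch of how this request class is typically filled in (not part of the commit); the file path is hypothetical, and AliyunVodUploader, which consumes such requests, ships in this commit only as a compiled .pyc:

from voduploadsdk.UploadAttachedMediaRequest import UploadAttachedMediaRequest

# 'watermark' is one of the BusinessType values this request is meant for.
req = UploadAttachedMediaRequest('/data/assets/logo.png', 'watermark')
req.setTags('river,demo')
print(req.fileName, req.mediaExt, req.title)    # -> /data/assets/logo.png png logo.png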
diff --git a/voduploadsdk/UploadImageRequest.py b/voduploadsdk/UploadImageRequest.py
new file mode 100644
index 0000000..73f6baa
--- /dev/null
+++ b/voduploadsdk/UploadImageRequest.py
@@ -0,0 +1,84 @@
+# -*- coding: UTF-8 -*-
+"""
+ # Class UploadImageRequest
+ #
+ # Aliyun VoD's Upload Image Request class, which wraps parameters to upload an image into VoD.
+ # Users can pass parameters to AliyunVodUploader, including File Path, Title, etc., via an UploadImageRequest instance.
+ # For more details, please check out the VoD API document: https://help.aliyun.com/document_detail/55619.html
+"""
+
+from voduploadsdk.AliyunVodUtils import *
+class UploadImageRequest:
+    def __init__(self, filePath, title=None, fileExt=None):
+        """
+        constructor for UploadImageRequest
+        :param filePath: string, absolute path of the file, or URL of a network file; must include the file extension
+        :param title: string, image title
+        :return
+        """
+        self.filePath = None
+        self.fileName = None
+        self.imageExt = None
+        self.mediaExt = None
+        self.title = None
+        self.setFilePath(filePath, title, fileExt)
+
+        self.imageType = 'default'
+        self.cateId = None
+        self.tags = None
+        self.description = None
+        self.userData = None
+        self.storageLocation = None
+        self.appId = None
+        self.workflowId = None
+
+    def setFilePath(self, filePath, title=None, fileExt=None):
+        if fileExt is None:
+            fileExt = AliyunVodUtils.getFileExtension(filePath)
+            if not fileExt:
+                raise AliyunVodException('ParameterError', 'InvalidParameter', 'filePath has no Extension')
+
+        fileExt = fileExt.lstrip('.')
+        self.imageExt = fileExt
+        self.mediaExt = fileExt
+        self.filePath = AliyunVodUtils.toUnicode(filePath)
+
+        briefPath, briefName = AliyunVodUtils.getFileBriefPath(self.filePath)
+        self.fileName = briefPath
+
+        if fileExt and (not self.fileName.endswith('.' + fileExt)):
+            self.fileName = self.fileName + '.' + fileExt
+
+        if title:
+            self.title = title
+        else:
+            if self.title is None:
+                self.title = briefName
+
+
+    def setImageType(self, imageType):
+        self.imageType = imageType
+
+    def setTitle(self, title):
+        self.title = title
+
+    def setCateId(self, cateId):
+        self.cateId = cateId
+
+    def setTags(self, tags):
+        self.tags = tags
+
+    def setDescription(self, description):
+        self.description = description
+
+    def setStorageLocation(self, storageLocation):
+        self.storageLocation = storageLocation
+
+    def setUserData(self, userData):
+        self.userData = userData
+
+    def setAppId(self, appId):
+        self.appId = appId
+
+    def setWorkflowId(self, workflowId):
+        self.workflowId = workflowId
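One pitfall worth a sketch: for a network file, getFileExtension can return garbage (on the illustrative URL below it would yield 'com/cover%20a', because the last '.' sits in the host name), so fileExt should be passed explicitly:

from voduploadsdk.UploadImageRequest import UploadImageRequest

req = UploadImageRequest('https://example.com/cover%20a?sig=abc', title='cover', fileExt='jpg')
print(req.fileName)    # -> https://example.com/cover%20a.jpg ('.jpg' appended to the brief path)
print(req.title)       # -> cover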
diff --git a/voduploadsdk/UploadVideoRequest.py b/voduploadsdk/UploadVideoRequest.py
new file mode 100644
index 0000000..afe3b35
--- /dev/null
+++ b/voduploadsdk/UploadVideoRequest.py
@@ -0,0 +1,86 @@
+# -*- coding: UTF-8 -*-
+"""
+ # Class UploadVideoRequest
+ #
+ # Aliyun VoD's Upload Video Request class, which wraps parameters to upload a video into VoD.
+ # Users can pass parameters to AliyunVodUploader, including File Path, Title, etc., via an UploadVideoRequest instance.
+ # For more details, please check out the VoD API document: https://help.aliyun.com/document_detail/55407.html
+"""
+
+from voduploadsdk.AliyunVodUtils import *
+class UploadVideoRequest:
+    def __init__(self, filePath, title=None, fileExt=None):
+        """
+        constructor for UploadVideoRequest
+        :param filePath: string, absolute path of the file, or URL of a network file; must include the file extension
+        :param title: string, video title, at most 128 bytes; if omitted, the file name is used as the title
+        :return
+        """
+        self.filePath = None
+        self.fileName = None
+        self.mediaExt = None
+        self.title = None
+        self.setFilePath(filePath, title, fileExt)
+
+        self.cateId = None
+        self.tags = None
+        self.description = None
+        self.coverURL = None
+        self.templateGroupId = None
+        self.isShowWatermark = None
+        self.userData = None
+        self.storageLocation = None
+        self.uploadId = None
+        self.appId = None
+        self.workflowId = None
+
+    def setFilePath(self, filePath, title=None, fileExt=None):
+        if fileExt is None:
+            fileExt = AliyunVodUtils.getFileExtension(filePath)
+            if not fileExt:
+                raise AliyunVodException('ParameterError', 'InvalidParameter', 'filePath has no Extension')
+
+        fileExt = fileExt.lstrip('.')
+        self.mediaExt = fileExt
+        self.filePath = AliyunVodUtils.toUnicode(filePath)
+
+        briefPath, briefName = AliyunVodUtils.getFileBriefPath(self.filePath)
+        self.fileName = briefPath
+        if fileExt and (not self.fileName.endswith('.' + fileExt)):
+            self.fileName = self.fileName + '.' + fileExt
+
+        if title:
+            self.title = title
+        else:
+            if self.title is None:
+                self.title = briefName
+
+
+    def setCateId(self, cateId):
+        self.cateId = cateId
+
+    def setTags(self, tags):
+        self.tags = tags
+
+    def setDescription(self, description):
+        self.description = description
+
+    def setCoverURL(self, coverURL):
+        self.coverURL = coverURL
+
+    def setTemplateGroupId(self, templateGroupId):
+        self.templateGroupId = templateGroupId
+
+    # Disable the watermark for this upload only; applies when a global watermark
+    # is configured and the transcoding template has watermarking enabled.
+    def shutdownWatermark(self):
+        self.isShowWatermark = False
+
+    # Set the upload ID; can be used to associate an imported video.
+    def setUploadId(self, uploadId):
+        self.uploadId = uploadId
+
+    def setAppId(self, appId):
+        self.appId = appId
+
+    def setWorkflowId(self, workflowId):
+        self.workflowId = workflowId
diff --git a/voduploadsdk/__init__.py b/voduploadsdk/__init__.py
new file mode 100644
index 0000000..7bd766b
--- /dev/null
+++ b/voduploadsdk/__init__.py
@@ -0,0 +1,3 @@
+__version__ = '1.3.1'
+
+
diff --git a/voduploadsdk/__pycache__/AliyunVodUploader.cpython-38.pyc b/voduploadsdk/__pycache__/AliyunVodUploader.cpython-38.pyc
new file mode 100644
index 0000000..0022e16
Binary files /dev/null and b/voduploadsdk/__pycache__/AliyunVodUploader.cpython-38.pyc differ
diff --git a/voduploadsdk/__pycache__/AliyunVodUtils.cpython-38.pyc b/voduploadsdk/__pycache__/AliyunVodUtils.cpython-38.pyc
new file mode 100644
index 0000000..21a4b0a
Binary files /dev/null and b/voduploadsdk/__pycache__/AliyunVodUtils.cpython-38.pyc differ
diff --git a/voduploadsdk/__pycache__/UploadAttachedMediaRequest.cpython-38.pyc b/voduploadsdk/__pycache__/UploadAttachedMediaRequest.cpython-38.pyc
new file mode 100644
index 0000000..1ff2ce9
Binary files /dev/null and b/voduploadsdk/__pycache__/UploadAttachedMediaRequest.cpython-38.pyc differ
diff --git a/voduploadsdk/__pycache__/UploadImageRequest.cpython-38.pyc b/voduploadsdk/__pycache__/UploadImageRequest.cpython-38.pyc
new file mode 100644
index 0000000..47b84f9
Binary files /dev/null and b/voduploadsdk/__pycache__/UploadImageRequest.cpython-38.pyc differ
diff --git a/voduploadsdk/__pycache__/UploadVideoRequest.cpython-38.pyc
b/voduploadsdk/__pycache__/UploadVideoRequest.cpython-38.pyc new file mode 100644 index 0000000..5c7233a Binary files /dev/null and b/voduploadsdk/__pycache__/UploadVideoRequest.cpython-38.pyc differ diff --git a/voduploadsdk/__pycache__/__init__.cpython-38.pyc b/voduploadsdk/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..5de1595 Binary files /dev/null and b/voduploadsdk/__pycache__/__init__.cpython-38.pyc differ diff --git a/weights/BiSeNet/checkpoint.pth b/weights/BiSeNet/checkpoint.pth new file mode 100644 index 0000000..e7b49ec Binary files /dev/null and b/weights/BiSeNet/checkpoint.pth differ diff --git a/weights/yolov5/class5/best_5classes.pt b/weights/yolov5/class5/best_5classes.pt new file mode 100644 index 0000000..f4db424 Binary files /dev/null and b/weights/yolov5/class5/best_5classes.pt differ diff --git a/weights/yolov5/class5/labelnames.json b/weights/yolov5/class5/labelnames.json new file mode 100644 index 0000000..99d6440 --- /dev/null +++ b/weights/yolov5/class5/labelnames.json @@ -0,0 +1,4 @@ +{ + "labelnames":["排口","排污口","水生植被","漂浮物","其它"], + "labelIndexs":["SL014","SL011","SL013","SL001","SL001" ] +} diff --git a/weights/yolov5/class9/F1_curve.png b/weights/yolov5/class9/F1_curve.png new file mode 100644 index 0000000..cad3d9e Binary files /dev/null and b/weights/yolov5/class9/F1_curve.png differ diff --git a/weights/yolov5/class9/PR_curve.png b/weights/yolov5/class9/PR_curve.png new file mode 100644 index 0000000..4bd833c Binary files /dev/null and b/weights/yolov5/class9/PR_curve.png differ diff --git a/weights/yolov5/class9/P_curve.png b/weights/yolov5/class9/P_curve.png new file mode 100644 index 0000000..882c62a Binary files /dev/null and b/weights/yolov5/class9/P_curve.png differ diff --git a/weights/yolov5/class9/R_curve.png b/weights/yolov5/class9/R_curve.png new file mode 100644 index 0000000..5721b37 Binary files /dev/null and b/weights/yolov5/class9/R_curve.png differ diff --git a/weights/yolov5/class9/best.pt b/weights/yolov5/class9/best.pt new file mode 100644 index 0000000..9b24f6e Binary files /dev/null and b/weights/yolov5/class9/best.pt differ diff --git a/weights/yolov5/class9/confusion_matrix.png b/weights/yolov5/class9/confusion_matrix.png new file mode 100644 index 0000000..24393f7 Binary files /dev/null and b/weights/yolov5/class9/confusion_matrix.png differ diff --git a/weights/yolov5/class9/events.out.tfevents.1655953657.f899b1b7e8da.17376.0 b/weights/yolov5/class9/events.out.tfevents.1655953657.f899b1b7e8da.17376.0 new file mode 100644 index 0000000..f5ff0f0 Binary files /dev/null and b/weights/yolov5/class9/events.out.tfevents.1655953657.f899b1b7e8da.17376.0 differ diff --git a/weights/yolov5/class9/hyp.yaml b/weights/yolov5/class9/hyp.yaml new file mode 100644 index 0000000..bd4aa8c --- /dev/null +++ b/weights/yolov5/class9/hyp.yaml @@ -0,0 +1,28 @@ +lr0: 0.01 +lrf: 0.01 +momentum: 0.937 +weight_decay: 0.0005 +warmup_epochs: 3.0 +warmup_momentum: 0.8 +warmup_bias_lr: 0.1 +box: 0.05 +cls: 0.5 +cls_pw: 1.0 +obj: 1.0 +obj_pw: 1.0 +iou_t: 0.2 +anchor_t: 4.0 +fl_gamma: 0.0 +hsv_h: 0.015 +hsv_s: 0.7 +hsv_v: 0.4 +degrees: 0.35 +translate: 0.2 +scale: 0.6 +shear: 0.5 +perspective: 0.001 +flipud: 0.5 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.243 +copy_paste: 0.0 diff --git a/weights/yolov5/class9/labelnames.json b/weights/yolov5/class9/labelnames.json new file mode 100644 index 0000000..26b634b --- /dev/null +++ b/weights/yolov5/class9/labelnames.json @@ -0,0 +1,3 @@ +{ + "labelnames":["排口","水生植被", "其它" 
,"漂浮物","污口","菜地","违建","垃圾","河床"] +} diff --git a/weights/yolov5/class9/labels.jpg b/weights/yolov5/class9/labels.jpg new file mode 100644 index 0000000..c0e4fce Binary files /dev/null and b/weights/yolov5/class9/labels.jpg differ diff --git a/weights/yolov5/class9/labels_correlogram.jpg b/weights/yolov5/class9/labels_correlogram.jpg new file mode 100644 index 0000000..9dace54 Binary files /dev/null and b/weights/yolov5/class9/labels_correlogram.jpg differ diff --git a/weights/yolov5/class9/opt.yaml b/weights/yolov5/class9/opt.yaml new file mode 100644 index 0000000..1c4ef4d --- /dev/null +++ b/weights/yolov5/class9/opt.yaml @@ -0,0 +1,39 @@ +weights: yolov5s.pt +cfg: '' +data: data/River_detection_data.yaml +hyp: data/hyps/hyp.scratch-low.yaml +epochs: 200 +batch_size: 8 +imgsz: 640 +rect: false +resume: false +nosave: false +noval: false +noautoanchor: false +noplots: false +evolve: null +bucket: '' +cache: null +image_weights: false +device: '' +multi_scale: false +single_cls: false +optimizer: SGD +sync_bn: false +workers: 16 +project: runs/train +name: exp +exist_ok: false +quad: false +cos_lr: false +label_smoothing: 0.0 +patience: 100 +freeze: +- 0 +save_period: -1 +local_rank: -1 +entity: null +upload_dataset: false +bbox_interval: -1 +artifact_alias: latest +save_dir: runs/train/exp7 diff --git a/weights/yolov5/class9/results.csv b/weights/yolov5/class9/results.csv new file mode 100644 index 0000000..e48f369 --- /dev/null +++ b/weights/yolov5/class9/results.csv @@ -0,0 +1,201 @@ + epoch, train/box_loss, train/obj_loss, train/cls_loss, metrics/precision, metrics/recall, metrics/mAP_0.5,metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss, x/lr0, x/lr1, x/lr2 + 0, 0.1163, 0.078471, 0.060603, 0.0091424, 0.053632, 0.0032591, 0.00068739, 0.10289, 0.073506, 0.052194, 0.0033077, 0.0033077, 0.070231 + 1, 0.096567, 0.095328, 0.046852, 0.48139, 0.071425, 0.019772, 0.0038298, 0.089826, 0.087395, 0.038753, 0.0066082, 0.0066082, 0.040198 + 2, 0.092639, 0.096597, 0.03695, 0.60906, 0.11302, 0.044254, 0.0098758, 0.088638, 0.079321, 0.028477, 0.0098756, 0.0098756, 0.010132 + 3, 0.088354, 0.093358, 0.031132, 0.53835, 0.13594, 0.075434, 0.019504, 0.082118, 0.071749, 0.023135, 0.0098515, 0.0098515, 0.0098515 + 4, 0.084038, 0.095268, 0.026507, 0.59598, 0.16171, 0.12862, 0.037239, 0.075409, 0.070509, 0.017207, 0.0098515, 0.0098515, 0.0098515 + 5, 0.081941, 0.095587, 0.023229, 0.58377, 0.20923, 0.15806, 0.049477, 0.071079, 0.073671, 0.015088, 0.009802, 0.009802, 0.009802 + 6, 0.07997, 0.093637, 0.021312, 0.54366, 0.20309, 0.1499, 0.04285, 0.073025, 0.071019, 0.0136, 0.0097525, 0.0097525, 0.0097525 + 7, 0.078453, 0.097821, 0.02096, 0.4899, 0.20122, 0.13173, 0.033875, 0.077732, 0.068175, 0.012874, 0.009703, 0.009703, 0.009703 + 8, 0.077643, 0.094062, 0.020389, 0.61771, 0.183, 0.18765, 0.06058, 0.071304, 0.072565, 0.013283, 0.0096535, 0.0096535, 0.0096535 + 9, 0.077455, 0.092403, 0.018928, 0.68859, 0.17815, 0.19471, 0.057794, 0.070921, 0.075355, 0.012061, 0.009604, 0.009604, 0.009604 + 10, 0.076227, 0.095143, 0.018652, 0.54529, 0.22365, 0.2113, 0.07751, 0.068299, 0.070042, 0.011792, 0.0095545, 0.0095545, 0.0095545 + 11, 0.075621, 0.096919, 0.017805, 0.60475, 0.22383, 0.22727, 0.084465, 0.066867, 0.070019, 0.01066, 0.009505, 0.009505, 0.009505 + 12, 0.075278, 0.092505, 0.017624, 0.318, 0.38178, 0.2386, 0.088241, 0.06641, 0.069216, 0.010871, 0.0094555, 0.0094555, 0.0094555 + 13, 0.074596, 0.092825, 0.017083, 0.57635, 0.23396, 0.22908, 0.069304, 0.06832, 0.070965, 0.0096148, 0.009406, 0.009406, 
0.009406 + 14, 0.074336, 0.090165, 0.01704, 0.34469, 0.35945, 0.24647, 0.093281, 0.067523, 0.067457, 0.010291, 0.0093565, 0.0093565, 0.0093565 + 15, 0.074199, 0.091868, 0.016145, 0.53659, 0.24416, 0.2452, 0.089367, 0.066737, 0.06926, 0.0096651, 0.009307, 0.009307, 0.009307 + 16, 0.073709, 0.091386, 0.016228, 0.58383, 0.27631, 0.26264, 0.095983, 0.065483, 0.06832, 0.0089162, 0.0092575, 0.0092575, 0.0092575 + 17, 0.073127, 0.092894, 0.015744, 0.37985, 0.32613, 0.24739, 0.087963, 0.068443, 0.066683, 0.0084403, 0.009208, 0.009208, 0.009208 + 18, 0.072616, 0.088464, 0.015487, 0.56692, 0.28385, 0.27279, 0.10299, 0.064793, 0.06803, 0.0089056, 0.0091585, 0.0091585, 0.0091585 + 19, 0.073291, 0.092542, 0.015206, 0.56882, 0.26733, 0.25371, 0.094288, 0.065888, 0.067756, 0.0085961, 0.009109, 0.009109, 0.009109 + 20, 0.072293, 0.090466, 0.015108, 0.57946, 0.30154, 0.27734, 0.10642, 0.0646, 0.067511, 0.0085873, 0.0090595, 0.0090595, 0.0090595 + 21, 0.072206, 0.092318, 0.015516, 0.6099, 0.27121, 0.26615, 0.097604, 0.066132, 0.067926, 0.0087777, 0.00901, 0.00901, 0.00901 + 22, 0.0725, 0.089683, 0.015057, 0.57866, 0.29392, 0.27893, 0.10599, 0.064014, 0.068478, 0.0088765, 0.0089605, 0.0089605, 0.0089605 + 23, 0.07183, 0.087295, 0.014614, 0.52155, 0.2966, 0.27902, 0.10725, 0.066293, 0.067119, 0.0092347, 0.008911, 0.008911, 0.008911 + 24, 0.072084, 0.09317, 0.014796, 0.49656, 0.32987, 0.28109, 0.11037, 0.064766, 0.067627, 0.0081019, 0.0088615, 0.0088615, 0.0088615 + 25, 0.07202, 0.09287, 0.014637, 0.52159, 0.3276, 0.29186, 0.11875, 0.063771, 0.067623, 0.0075126, 0.008812, 0.008812, 0.008812 + 26, 0.071227, 0.09077, 0.01366, 0.51896, 0.29976, 0.27309, 0.099052, 0.06738, 0.068287, 0.0089357, 0.0087625, 0.0087625, 0.0087625 + 27, 0.071496, 0.08987, 0.014003, 0.54018, 0.3222, 0.29007, 0.11414, 0.064039, 0.068832, 0.0080796, 0.008713, 0.008713, 0.008713 + 28, 0.070916, 0.093107, 0.014703, 0.47723, 0.34298, 0.29723, 0.11451, 0.064219, 0.066814, 0.0076718, 0.0086635, 0.0086635, 0.0086635 + 29, 0.070427, 0.088971, 0.013238, 0.54622, 0.31494, 0.30256, 0.11241, 0.06479, 0.067063, 0.0074613, 0.008614, 0.008614, 0.008614 + 30, 0.071102, 0.08735, 0.013443, 0.56721, 0.30333, 0.29104, 0.098722, 0.067164, 0.066289, 0.0075761, 0.0085645, 0.0085645, 0.0085645 + 31, 0.070841, 0.086304, 0.013342, 0.60408, 0.30416, 0.31094, 0.11943, 0.063399, 0.067872, 0.0075819, 0.008515, 0.008515, 0.008515 + 32, 0.071053, 0.090118, 0.014227, 0.53406, 0.32637, 0.31198, 0.12228, 0.064635, 0.066901, 0.0076553, 0.0084655, 0.0084655, 0.0084655 + 33, 0.069379, 0.090244, 0.013159, 0.55659, 0.31976, 0.31874, 0.12192, 0.064553, 0.06729, 0.0073647, 0.008416, 0.008416, 0.008416 + 34, 0.069977, 0.088556, 0.013386, 0.51855, 0.35026, 0.32884, 0.13784, 0.063374, 0.065856, 0.0072325, 0.0083665, 0.0083665, 0.0083665 + 35, 0.069105, 0.08728, 0.012953, 0.54258, 0.30226, 0.30634, 0.11756, 0.064441, 0.066554, 0.0070503, 0.008317, 0.008317, 0.008317 + 36, 0.069297, 0.087483, 0.013251, 0.46931, 0.36652, 0.32269, 0.13031, 0.063113, 0.067067, 0.0072046, 0.0082675, 0.0082675, 0.0082675 + 37, 0.069017, 0.089875, 0.012903, 0.53854, 0.31763, 0.32451, 0.12181, 0.064756, 0.068596, 0.0070815, 0.008218, 0.008218, 0.008218 + 38, 0.070204, 0.087944, 0.013116, 0.55487, 0.31301, 0.31483, 0.1313, 0.062245, 0.067089, 0.007585, 0.0081685, 0.0081685, 0.0081685 + 39, 0.068852, 0.088236, 0.012347, 0.45483, 0.37616, 0.32405, 0.13517, 0.062677, 0.067016, 0.0069761, 0.008119, 0.008119, 0.008119 + 40, 0.068883, 0.087472, 0.012411, 0.3262, 0.4185, 0.32651, 0.12907, 0.064086, 0.066548, 
0.0071477, 0.0080695, 0.0080695, 0.0080695 + 41, 0.068694, 0.087602, 0.012271, 0.58739, 0.33346, 0.34273, 0.12701, 0.064972, 0.066784, 0.0071221, 0.00802, 0.00802, 0.00802 + 42, 0.067869, 0.08467, 0.012307, 0.50697, 0.35637, 0.33964, 0.13608, 0.06288, 0.067245, 0.0071069, 0.0079705, 0.0079705, 0.0079705 + 43, 0.068691, 0.085801, 0.012611, 0.53732, 0.3621, 0.34559, 0.1272, 0.064582, 0.067814, 0.0069667, 0.007921, 0.007921, 0.007921 + 44, 0.069254, 0.090675, 0.012805, 0.59737, 0.35245, 0.35435, 0.13788, 0.062864, 0.067236, 0.0066713, 0.0078715, 0.0078715, 0.0078715 + 45, 0.068376, 0.085859, 0.011942, 0.50395, 0.37409, 0.34755, 0.13974, 0.062393, 0.067137, 0.007546, 0.007822, 0.007822, 0.007822 + 46, 0.06861, 0.089459, 0.012456, 0.50414, 0.3914, 0.3425, 0.13007, 0.063929, 0.067608, 0.0067689, 0.0077725, 0.0077725, 0.0077725 + 47, 0.068593, 0.088558, 0.012249, 0.60456, 0.35743, 0.35891, 0.15363, 0.061952, 0.067407, 0.0071866, 0.007723, 0.007723, 0.007723 + 48, 0.06807, 0.08762, 0.011871, 0.51236, 0.37961, 0.36165, 0.15042, 0.061985, 0.066772, 0.0065586, 0.0076735, 0.0076735, 0.0076735 + 49, 0.069001, 0.089288, 0.012612, 0.50785, 0.41838, 0.37019, 0.15476, 0.061558, 0.066265, 0.006661, 0.007624, 0.007624, 0.007624 + 50, 0.067667, 0.086656, 0.011313, 0.46651, 0.46262, 0.37364, 0.1524, 0.061764, 0.067662, 0.0066976, 0.0075745, 0.0075745, 0.0075745 + 51, 0.067883, 0.087964, 0.011696, 0.55395, 0.3629, 0.36086, 0.13546, 0.063416, 0.066609, 0.0065205, 0.007525, 0.007525, 0.007525 + 52, 0.067801, 0.08903, 0.011977, 0.53392, 0.39244, 0.35415, 0.14072, 0.063178, 0.066399, 0.006225, 0.0074755, 0.0074755, 0.0074755 + 53, 0.067799, 0.0835, 0.011333, 0.5501, 0.38187, 0.35995, 0.13908, 0.063433, 0.066017, 0.006538, 0.007426, 0.007426, 0.007426 + 54, 0.067447, 0.088694, 0.011632, 0.54091, 0.38112, 0.36278, 0.14907, 0.061905, 0.066289, 0.0065171, 0.0073765, 0.0073765, 0.0073765 + 55, 0.067586, 0.085356, 0.011703, 0.52286, 0.40525, 0.36899, 0.14865, 0.06177, 0.066779, 0.0062129, 0.007327, 0.007327, 0.007327 + 56, 0.066878, 0.083479, 0.011399, 0.60013, 0.38319, 0.36966, 0.15262, 0.061542, 0.067507, 0.0064337, 0.0072775, 0.0072775, 0.0072775 + 57, 0.068031, 0.091226, 0.012066, 0.50613, 0.44063, 0.36909, 0.15244, 0.061868, 0.066166, 0.0065633, 0.007228, 0.007228, 0.007228 + 58, 0.067418, 0.085757, 0.011622, 0.56517, 0.40061, 0.36943, 0.14651, 0.062586, 0.066619, 0.0062601, 0.0071785, 0.0071785, 0.0071785 + 59, 0.066718, 0.085864, 0.011158, 0.55397, 0.38493, 0.36709, 0.14512, 0.063075, 0.066393, 0.0065429, 0.007129, 0.007129, 0.007129 + 60, 0.066731, 0.087387, 0.011068, 0.58739, 0.38874, 0.37592, 0.15718, 0.061648, 0.067504, 0.0069853, 0.0070795, 0.0070795, 0.0070795 + 61, 0.067061, 0.086715, 0.011811, 0.60299, 0.37194, 0.3742, 0.14995, 0.062568, 0.066529, 0.0066613, 0.00703, 0.00703, 0.00703 + 62, 0.066752, 0.084938, 0.010905, 0.61709, 0.35762, 0.36524, 0.14986, 0.062524, 0.067757, 0.0065433, 0.0069805, 0.0069805, 0.0069805 + 63, 0.066791, 0.084643, 0.010893, 0.62242, 0.41571, 0.39725, 0.16297, 0.061438, 0.066376, 0.0059869, 0.006931, 0.006931, 0.006931 + 64, 0.066845, 0.089024, 0.010864, 0.51756, 0.42181, 0.37814, 0.15135, 0.062043, 0.067269, 0.0060459, 0.0068815, 0.0068815, 0.0068815 + 65, 0.066513, 0.085811, 0.011389, 0.57159, 0.40589, 0.38348, 0.1593, 0.061298, 0.067347, 0.0068386, 0.006832, 0.006832, 0.006832 + 66, 0.066189, 0.084214, 0.010857, 0.52411, 0.43847, 0.37121, 0.15857, 0.061873, 0.066419, 0.00703, 0.0067825, 0.0067825, 0.0067825 + 67, 0.066244, 0.084572, 0.010595, 0.56425, 0.42456, 0.38402, 
0.16437, 0.061371, 0.066397, 0.0064646, 0.006733, 0.006733, 0.006733 + 68, 0.066373, 0.086031, 0.010447, 0.55259, 0.39495, 0.36706, 0.13834, 0.063205, 0.065358, 0.0063739, 0.0066835, 0.0066835, 0.0066835 + 69, 0.066427, 0.086668, 0.010629, 0.61355, 0.38296, 0.36921, 0.15198, 0.062032, 0.066947, 0.0064047, 0.006634, 0.006634, 0.006634 + 70, 0.066244, 0.082839, 0.01088, 0.54489, 0.40873, 0.36629, 0.14945, 0.062507, 0.066586, 0.0066613, 0.0065845, 0.0065845, 0.0065845 + 71, 0.065914, 0.083775, 0.010386, 0.57188, 0.40664, 0.38227, 0.15763, 0.061905, 0.065879, 0.0061664, 0.006535, 0.006535, 0.006535 + 72, 0.065846, 0.083656, 0.010348, 0.61598, 0.39283, 0.37813, 0.15818, 0.061937, 0.067622, 0.0063666, 0.0064855, 0.0064855, 0.0064855 + 73, 0.065892, 0.085602, 0.010373, 0.62465, 0.367, 0.37421, 0.15764, 0.062358, 0.066741, 0.0063928, 0.006436, 0.006436, 0.006436 + 74, 0.065946, 0.086742, 0.010123, 0.61873, 0.38325, 0.37564, 0.16515, 0.062037, 0.067166, 0.0060427, 0.0063865, 0.0063865, 0.0063865 + 75, 0.065979, 0.084694, 0.010405, 0.59271, 0.40393, 0.37981, 0.15395, 0.06157, 0.067572, 0.0063874, 0.006337, 0.006337, 0.006337 + 76, 0.065851, 0.08633, 0.010715, 0.601, 0.38391, 0.37534, 0.15587, 0.061249, 0.067263, 0.0060625, 0.0062875, 0.0062875, 0.0062875 + 77, 0.065805, 0.085466, 0.01038, 0.61726, 0.36675, 0.37618, 0.15247, 0.061583, 0.066389, 0.0056583, 0.006238, 0.006238, 0.006238 + 78, 0.064958, 0.085547, 0.010087, 0.60557, 0.37817, 0.37016, 0.15905, 0.061912, 0.067135, 0.005876, 0.0061885, 0.0061885, 0.0061885 + 79, 0.065643, 0.08418, 0.010639, 0.63468, 0.38455, 0.36693, 0.15109, 0.061682, 0.068052, 0.0059359, 0.006139, 0.006139, 0.006139 + 80, 0.065556, 0.085941, 0.01048, 0.57815, 0.40519, 0.37027, 0.14387, 0.062216, 0.066558, 0.0059213, 0.0060895, 0.0060895, 0.0060895 + 81, 0.065182, 0.083699, 0.010384, 0.59147, 0.40593, 0.38949, 0.16124, 0.061847, 0.066603, 0.0060445, 0.00604, 0.00604, 0.00604 + 82, 0.065421, 0.085555, 0.010329, 0.61879, 0.37087, 0.3649, 0.1519, 0.06202, 0.066681, 0.0061421, 0.0059905, 0.0059905, 0.0059905 + 83, 0.065366, 0.086068, 0.010347, 0.55812, 0.39094, 0.36624, 0.15682, 0.062595, 0.067239, 0.0059068, 0.005941, 0.005941, 0.005941 + 84, 0.065097, 0.084055, 0.010372, 0.53991, 0.42806, 0.38254, 0.15846, 0.06136, 0.066975, 0.0057967, 0.0058915, 0.0058915, 0.0058915 + 85, 0.064374, 0.086181, 0.010123, 0.59861, 0.40198, 0.39171, 0.16501, 0.062239, 0.068405, 0.006259, 0.005842, 0.005842, 0.005842 + 86, 0.065197, 0.085523, 0.010152, 0.64409, 0.37603, 0.38836, 0.16013, 0.061907, 0.066445, 0.0059465, 0.0057925, 0.0057925, 0.0057925 + 87, 0.063779, 0.084176, 0.0089023, 0.63461, 0.37991, 0.38777, 0.15515, 0.0622, 0.06603, 0.0055444, 0.005743, 0.005743, 0.005743 + 88, 0.064477, 0.087534, 0.010278, 0.42722, 0.40099, 0.36072, 0.15154, 0.062104, 0.067563, 0.006415, 0.0056935, 0.0056935, 0.0056935 + 89, 0.064742, 0.084291, 0.0098472, 0.61324, 0.357, 0.36387, 0.13968, 0.063151, 0.067409, 0.0057912, 0.005644, 0.005644, 0.005644 + 90, 0.064703, 0.081543, 0.0099267, 0.6087, 0.38481, 0.37916, 0.16156, 0.062021, 0.067536, 0.0059791, 0.0055945, 0.0055945, 0.0055945 + 91, 0.064966, 0.086366, 0.010074, 0.61269, 0.38964, 0.39016, 0.15864, 0.061301, 0.067608, 0.0058913, 0.005545, 0.005545, 0.005545 + 92, 0.063874, 0.084096, 0.010313, 0.65604, 0.38164, 0.39434, 0.16384, 0.061271, 0.067982, 0.0055352, 0.0054955, 0.0054955, 0.0054955 + 93, 0.064593, 0.084133, 0.010451, 0.62026, 0.38759, 0.39417, 0.16559, 0.061492, 0.067733, 0.0062295, 0.005446, 0.005446, 0.005446 + 94, 0.064062, 0.081924, 0.0093699, 
0.62588, 0.39245, 0.38855, 0.15933, 0.062091, 0.06688, 0.0056458, 0.0053965, 0.0053965, 0.0053965 + 95, 0.064347, 0.083627, 0.010303, 0.68128, 0.37355, 0.39119, 0.16429, 0.061663, 0.067386, 0.0060654, 0.005347, 0.005347, 0.005347 + 96, 0.064265, 0.083972, 0.0099317, 0.58813, 0.37959, 0.37562, 0.16018, 0.06199, 0.066333, 0.0057443, 0.0052975, 0.0052975, 0.0052975 + 97, 0.063302, 0.083542, 0.0094549, 0.59221, 0.40444, 0.38679, 0.16226, 0.061384, 0.066408, 0.0060679, 0.005248, 0.005248, 0.005248 + 98, 0.063991, 0.082154, 0.0095181, 0.56433, 0.4257, 0.38644, 0.16509, 0.061664, 0.067919, 0.0061783, 0.0051985, 0.0051985, 0.0051985 + 99, 0.064648, 0.086819, 0.010362, 0.62136, 0.38195, 0.39053, 0.15959, 0.061459, 0.066701, 0.006003, 0.005149, 0.005149, 0.005149 + 100, 0.063241, 0.081441, 0.0093159, 0.56769, 0.41884, 0.37789, 0.15233, 0.062306, 0.067186, 0.0058342, 0.0050995, 0.0050995, 0.0050995 + 101, 0.062968, 0.0836, 0.0089659, 0.64785, 0.37956, 0.38932, 0.16422, 0.061863, 0.067672, 0.0059326, 0.00505, 0.00505, 0.00505 + 102, 0.06284, 0.078934, 0.0090736, 0.57377, 0.40535, 0.38972, 0.16505, 0.061779, 0.067911, 0.0057767, 0.0050005, 0.0050005, 0.0050005 + 103, 0.064299, 0.082681, 0.010138, 0.61472, 0.40008, 0.38363, 0.1538, 0.061702, 0.066864, 0.0059251, 0.004951, 0.004951, 0.004951 + 104, 0.063765, 0.08581, 0.0096124, 0.62708, 0.38161, 0.38659, 0.16376, 0.061379, 0.067747, 0.0065927, 0.0049015, 0.0049015, 0.0049015 + 105, 0.064029, 0.086371, 0.0094885, 0.44298, 0.42325, 0.38235, 0.16022, 0.0611, 0.067532, 0.0059926, 0.004852, 0.004852, 0.004852 + 106, 0.063933, 0.083755, 0.0092936, 0.57128, 0.40904, 0.37533, 0.15955, 0.061493, 0.067812, 0.0060433, 0.0048025, 0.0048025, 0.0048025 + 107, 0.063925, 0.083809, 0.0096163, 0.55432, 0.41368, 0.37486, 0.16001, 0.061462, 0.068598, 0.005863, 0.004753, 0.004753, 0.004753 + 108, 0.062201, 0.083453, 0.0085994, 0.57548, 0.41776, 0.38787, 0.16373, 0.061403, 0.067542, 0.0060933, 0.0047035, 0.0047035, 0.0047035 + 109, 0.06275, 0.08118, 0.008878, 0.54169, 0.42289, 0.37186, 0.15456, 0.061683, 0.068245, 0.0057971, 0.004654, 0.004654, 0.004654 + 110, 0.063428, 0.082393, 0.0098573, 0.54333, 0.42902, 0.39734, 0.16654, 0.061067, 0.067816, 0.0057777, 0.0046045, 0.0046045, 0.0046045 + 111, 0.063, 0.080144, 0.0090875, 0.65566, 0.38288, 0.40283, 0.17192, 0.061325, 0.067714, 0.0060259, 0.004555, 0.004555, 0.004555 + 112, 0.06263, 0.083642, 0.0093641, 0.41437, 0.42205, 0.37776, 0.15887, 0.061718, 0.067645, 0.0063483, 0.0045055, 0.0045055, 0.0045055 + 113, 0.063118, 0.084509, 0.0096402, 0.62197, 0.38187, 0.38449, 0.16554, 0.060996, 0.067075, 0.0060038, 0.004456, 0.004456, 0.004456 + 114, 0.062873, 0.082505, 0.0088858, 0.60133, 0.3802, 0.38675, 0.1559, 0.060974, 0.068178, 0.0056237, 0.0044065, 0.0044065, 0.0044065 + 115, 0.062607, 0.082866, 0.008828, 0.57434, 0.3973, 0.37809, 0.17002, 0.061668, 0.068287, 0.0055489, 0.004357, 0.004357, 0.004357 + 116, 0.062916, 0.08195, 0.0095846, 0.59209, 0.39282, 0.37895, 0.15746, 0.061779, 0.067903, 0.0057769, 0.0043075, 0.0043075, 0.0043075 + 117, 0.062263, 0.080514, 0.0084992, 0.54274, 0.4159, 0.38896, 0.16493, 0.061529, 0.067438, 0.0059834, 0.004258, 0.004258, 0.004258 + 118, 0.063117, 0.080655, 0.0093576, 0.56906, 0.43282, 0.40171, 0.17407, 0.061155, 0.068624, 0.0057789, 0.0042085, 0.0042085, 0.0042085 + 119, 0.062702, 0.082241, 0.0088405, 0.52481, 0.42562, 0.38037, 0.15888, 0.061468, 0.068739, 0.0058799, 0.004159, 0.004159, 0.004159 + 120, 0.062472, 0.080464, 0.0090944, 0.57049, 0.39505, 0.37787, 0.16326, 0.061481, 0.068984, 0.0057145, 
0.0041095, 0.0041095, 0.0041095 + 121, 0.062513, 0.082724, 0.0090526, 0.58679, 0.41222, 0.38838, 0.16373, 0.061139, 0.06887, 0.0058018, 0.00406, 0.00406, 0.00406 + 122, 0.06212, 0.080603, 0.0080341, 0.59962, 0.4259, 0.40007, 0.17061, 0.0617, 0.068887, 0.0057023, 0.0040105, 0.0040105, 0.0040105 + 123, 0.062704, 0.08208, 0.0084618, 0.58263, 0.42756, 0.39383, 0.15642, 0.061847, 0.068772, 0.0057635, 0.003961, 0.003961, 0.003961 + 124, 0.062541, 0.080097, 0.0091065, 0.60489, 0.4021, 0.38306, 0.15652, 0.06178, 0.06825, 0.0057683, 0.0039115, 0.0039115, 0.0039115 + 125, 0.06197, 0.081429, 0.0083321, 0.64762, 0.39049, 0.38741, 0.15672, 0.062131, 0.067943, 0.0056695, 0.003862, 0.003862, 0.003862 + 126, 0.062301, 0.081651, 0.0087167, 0.61908, 0.40452, 0.3906, 0.16294, 0.06121, 0.068418, 0.0056092, 0.0038125, 0.0038125, 0.0038125 + 127, 0.062221, 0.079613, 0.0084921, 0.61394, 0.39371, 0.38955, 0.16974, 0.061417, 0.06805, 0.0058537, 0.003763, 0.003763, 0.003763 + 128, 0.062123, 0.081567, 0.0090106, 0.59164, 0.39826, 0.38816, 0.16825, 0.061512, 0.06776, 0.0058033, 0.0037135, 0.0037135, 0.0037135 + 129, 0.062216, 0.080156, 0.008595, 0.66416, 0.37233, 0.39473, 0.16427, 0.061477, 0.068075, 0.0054373, 0.003664, 0.003664, 0.003664 + 130, 0.062499, 0.082136, 0.0089943, 0.5806, 0.40678, 0.38811, 0.16412, 0.061353, 0.068901, 0.0055116, 0.0036145, 0.0036145, 0.0036145 + 131, 0.06183, 0.079962, 0.0087681, 0.56545, 0.40643, 0.37984, 0.1655, 0.061294, 0.068665, 0.0058172, 0.003565, 0.003565, 0.003565 + 132, 0.061493, 0.079948, 0.0080891, 0.54577, 0.40385, 0.37962, 0.16849, 0.060879, 0.068566, 0.0059687, 0.0035155, 0.0035155, 0.0035155 + 133, 0.06131, 0.078567, 0.0086257, 0.60001, 0.38724, 0.3776, 0.16521, 0.061301, 0.06911, 0.0061088, 0.003466, 0.003466, 0.003466 + 134, 0.061697, 0.081441, 0.0085654, 0.6225, 0.39107, 0.3943, 0.16714, 0.061478, 0.06788, 0.0060577, 0.0034165, 0.0034165, 0.0034165 + 135, 0.061623, 0.079985, 0.0085603, 0.59139, 0.39628, 0.3785, 0.16194, 0.060983, 0.068897, 0.0057355, 0.003367, 0.003367, 0.003367 + 136, 0.061479, 0.079573, 0.0086732, 0.59881, 0.3988, 0.38341, 0.16106, 0.061175, 0.068809, 0.0055626, 0.0033175, 0.0033175, 0.0033175 + 137, 0.061413, 0.079408, 0.0084376, 0.61248, 0.39101, 0.3826, 0.15207, 0.061572, 0.068304, 0.0058183, 0.003268, 0.003268, 0.003268 + 138, 0.061046, 0.082144, 0.0089468, 0.55422, 0.42804, 0.38352, 0.1605, 0.06132, 0.069155, 0.0055395, 0.0032185, 0.0032185, 0.0032185 + 139, 0.061471, 0.07995, 0.0081675, 0.64321, 0.3717, 0.38275, 0.16285, 0.061362, 0.068849, 0.0055299, 0.003169, 0.003169, 0.003169 + 140, 0.060823, 0.080007, 0.0086115, 0.61921, 0.38559, 0.39302, 0.16613, 0.061234, 0.068911, 0.0052536, 0.0031195, 0.0031195, 0.0031195 + 141, 0.062534, 0.080749, 0.0094048, 0.58546, 0.40447, 0.38576, 0.16285, 0.061139, 0.069, 0.0056188, 0.00307, 0.00307, 0.00307 + 142, 0.060494, 0.080903, 0.0081691, 0.58388, 0.39437, 0.37923, 0.16453, 0.06183, 0.068096, 0.005568, 0.0030205, 0.0030205, 0.0030205 + 143, 0.061919, 0.083564, 0.0094533, 0.57831, 0.41394, 0.38564, 0.16276, 0.061135, 0.068703, 0.0057063, 0.002971, 0.002971, 0.002971 + 144, 0.060503, 0.077934, 0.0082693, 0.64683, 0.38783, 0.38866, 0.16462, 0.06107, 0.06893, 0.0057078, 0.0029215, 0.0029215, 0.0029215 + 145, 0.060227, 0.079083, 0.0080657, 0.58952, 0.41561, 0.39865, 0.16874, 0.061168, 0.068317, 0.0057771, 0.002872, 0.002872, 0.002872 + 146, 0.060703, 0.0805, 0.008231, 0.6058, 0.39931, 0.39018, 0.16269, 0.061424, 0.069072, 0.0056074, 0.0028225, 0.0028225, 0.0028225 + 147, 0.060809, 0.079416, 0.0084209, 0.58098, 
0.42434, 0.39792, 0.17197, 0.060992, 0.069045, 0.0056534, 0.002773, 0.002773, 0.002773 + 148, 0.061181, 0.082232, 0.0086708, 0.6496, 0.37019, 0.38515, 0.16745, 0.061349, 0.070234, 0.0058444, 0.0027235, 0.0027235, 0.0027235 + 149, 0.060623, 0.083097, 0.0083962, 0.61744, 0.3864, 0.39006, 0.16122, 0.061336, 0.069071, 0.0055872, 0.002674, 0.002674, 0.002674 + 150, 0.061013, 0.078397, 0.0081109, 0.55319, 0.40884, 0.38888, 0.16894, 0.061426, 0.068424, 0.0055361, 0.0026245, 0.0026245, 0.0026245 + 151, 0.060906, 0.081067, 0.0087698, 0.58694, 0.40101, 0.38418, 0.16181, 0.061248, 0.069114, 0.0055448, 0.002575, 0.002575, 0.002575 + 152, 0.059971, 0.078016, 0.0082625, 0.54703, 0.42067, 0.38596, 0.15784, 0.060987, 0.068635, 0.0055849, 0.0025255, 0.0025255, 0.0025255 + 153, 0.060802, 0.080326, 0.0084251, 0.57626, 0.40981, 0.38869, 0.16595, 0.06094, 0.069052, 0.0055836, 0.002476, 0.002476, 0.002476 + 154, 0.060876, 0.080142, 0.0084623, 0.64034, 0.39032, 0.39224, 0.17052, 0.061258, 0.069313, 0.0057674, 0.0024265, 0.0024265, 0.0024265 + 155, 0.061197, 0.082908, 0.0082472, 0.62902, 0.40014, 0.39678, 0.17092, 0.060819, 0.069366, 0.0055072, 0.002377, 0.002377, 0.002377 + 156, 0.060326, 0.081083, 0.008418, 0.50159, 0.41416, 0.40481, 0.16998, 0.061234, 0.069954, 0.0053375, 0.0023275, 0.0023275, 0.0023275 + 157, 0.060145, 0.079134, 0.008171, 0.47626, 0.42082, 0.39914, 0.16988, 0.06098, 0.068871, 0.0056765, 0.002278, 0.002278, 0.002278 + 158, 0.059991, 0.078631, 0.0080613, 0.43714, 0.42794, 0.39301, 0.17251, 0.061006, 0.069104, 0.0055834, 0.0022285, 0.0022285, 0.0022285 + 159, 0.060168, 0.077415, 0.0081355, 0.4283, 0.43196, 0.39506, 0.1677, 0.06154, 0.069498, 0.0056595, 0.002179, 0.002179, 0.002179 + 160, 0.060013, 0.079332, 0.0078322, 0.48257, 0.40145, 0.39793, 0.16728, 0.06111, 0.069211, 0.0056615, 0.0021295, 0.0021295, 0.0021295 + 161, 0.059797, 0.076389, 0.0085948, 0.5621, 0.40947, 0.38788, 0.16676, 0.061514, 0.06914, 0.0058107, 0.00208, 0.00208, 0.00208 + 162, 0.05953, 0.079817, 0.0073943, 0.58375, 0.41093, 0.39391, 0.16412, 0.061514, 0.069279, 0.0055135, 0.0020305, 0.0020305, 0.0020305 + 163, 0.060089, 0.077, 0.0075225, 0.46733, 0.41864, 0.39178, 0.16438, 0.061474, 0.068694, 0.0057247, 0.001981, 0.001981, 0.001981 + 164, 0.060458, 0.082368, 0.0081539, 0.6308, 0.40624, 0.39519, 0.16866, 0.061197, 0.070284, 0.0057369, 0.0019315, 0.0019315, 0.0019315 + 165, 0.060204, 0.080686, 0.0078595, 0.58455, 0.40365, 0.39361, 0.16931, 0.061302, 0.070159, 0.0055198, 0.001882, 0.001882, 0.001882 + 166, 0.059445, 0.077755, 0.0076423, 0.57182, 0.44039, 0.40162, 0.16522, 0.061931, 0.069638, 0.005605, 0.0018325, 0.0018325, 0.0018325 + 167, 0.059385, 0.07858, 0.0077505, 0.5719, 0.4126, 0.38621, 0.16781, 0.061554, 0.069619, 0.0055225, 0.001783, 0.001783, 0.001783 + 168, 0.059453, 0.077021, 0.0081007, 0.6831, 0.38097, 0.39635, 0.16965, 0.061492, 0.069905, 0.0054181, 0.0017335, 0.0017335, 0.0017335 + 169, 0.059746, 0.07885, 0.0076966, 0.47484, 0.43266, 0.39458, 0.16799, 0.061473, 0.070302, 0.0055999, 0.001684, 0.001684, 0.001684 + 170, 0.060082, 0.080544, 0.0080393, 0.60734, 0.4093, 0.39553, 0.16867, 0.061064, 0.070145, 0.0056745, 0.0016345, 0.0016345, 0.0016345 + 171, 0.059243, 0.078759, 0.0079038, 0.59986, 0.41792, 0.3949, 0.17225, 0.061454, 0.070232, 0.0056883, 0.001585, 0.001585, 0.001585 + 172, 0.06003, 0.079498, 0.0080035, 0.58769, 0.41244, 0.40095, 0.17154, 0.061297, 0.069892, 0.0056825, 0.0015355, 0.0015355, 0.0015355 + 173, 0.05939, 0.077445, 0.0076199, 0.63194, 0.39694, 0.40471, 0.17308, 0.061558, 0.070094, 0.0056222, 
0.001486, 0.001486, 0.001486 + 174, 0.058707, 0.078033, 0.0074211, 0.5845, 0.42189, 0.40162, 0.16921, 0.061337, 0.070011, 0.0058099, 0.0014365, 0.0014365, 0.0014365 + 175, 0.05945, 0.077614, 0.0084547, 0.5733, 0.42687, 0.40296, 0.17239, 0.061514, 0.06996, 0.0057241, 0.001387, 0.001387, 0.001387 + 176, 0.059536, 0.078818, 0.0078729, 0.64568, 0.39487, 0.40622, 0.1751, 0.061238, 0.06992, 0.0056799, 0.0013375, 0.0013375, 0.0013375 + 177, 0.059717, 0.07678, 0.0082633, 0.46492, 0.42903, 0.40088, 0.17398, 0.061486, 0.06953, 0.0054343, 0.001288, 0.001288, 0.001288 + 178, 0.058306, 0.075138, 0.0075247, 0.45756, 0.4475, 0.40696, 0.17211, 0.061272, 0.069687, 0.0055336, 0.0012385, 0.0012385, 0.0012385 + 179, 0.05981, 0.078592, 0.007826, 0.61119, 0.40411, 0.4047, 0.17375, 0.061233, 0.070374, 0.0056963, 0.001189, 0.001189, 0.001189 + 180, 0.059516, 0.07941, 0.0078784, 0.44491, 0.429, 0.39605, 0.16979, 0.061378, 0.070209, 0.0056918, 0.0011395, 0.0011395, 0.0011395 + 181, 0.059005, 0.080269, 0.0074365, 0.61792, 0.40438, 0.40462, 0.17199, 0.061282, 0.069842, 0.0056687, 0.00109, 0.00109, 0.00109 + 182, 0.059388, 0.0773, 0.0079592, 0.63607, 0.39592, 0.39934, 0.17166, 0.061353, 0.06989, 0.0055814, 0.0010405, 0.0010405, 0.0010405 + 183, 0.058627, 0.077392, 0.0073762, 0.65207, 0.39113, 0.39734, 0.16954, 0.061314, 0.070393, 0.0056366, 0.000991, 0.000991, 0.000991 + 184, 0.059958, 0.081703, 0.0084177, 0.60146, 0.41408, 0.40499, 0.17261, 0.061612, 0.070475, 0.0056101, 0.0009415, 0.0009415, 0.0009415 + 185, 0.059215, 0.079674, 0.0077917, 0.58245, 0.40966, 0.39842, 0.16618, 0.061862, 0.070253, 0.005696, 0.000892, 0.000892, 0.000892 + 186, 0.058119, 0.07413, 0.0074922, 0.58256, 0.41551, 0.39886, 0.16668, 0.061806, 0.070799, 0.0057426, 0.0008425, 0.0008425, 0.0008425 + 187, 0.059019, 0.08088, 0.0071937, 0.43465, 0.43227, 0.39255, 0.16627, 0.061765, 0.070654, 0.0057164, 0.000793, 0.000793, 0.000793 + 188, 0.05869, 0.0781, 0.0074471, 0.6328, 0.39031, 0.39202, 0.16877, 0.061584, 0.070445, 0.0056742, 0.0007435, 0.0007435, 0.0007435 + 189, 0.058051, 0.076634, 0.0070859, 0.62861, 0.39466, 0.39986, 0.17171, 0.061221, 0.070759, 0.005697, 0.000694, 0.000694, 0.000694 + 190, 0.059161, 0.078122, 0.007997, 0.6439, 0.38945, 0.39997, 0.16842, 0.061556, 0.070606, 0.0056897, 0.0006445, 0.0006445, 0.0006445 + 191, 0.059396, 0.079488, 0.0075062, 0.65862, 0.38499, 0.39836, 0.16873, 0.061452, 0.070499, 0.005647, 0.000595, 0.000595, 0.000595 + 192, 0.059279, 0.077722, 0.0074302, 0.62596, 0.40425, 0.40382, 0.17101, 0.061447, 0.070135, 0.0056625, 0.0005455, 0.0005455, 0.0005455 + 193, 0.058946, 0.077397, 0.0073196, 0.58967, 0.42201, 0.40048, 0.17041, 0.061256, 0.070125, 0.005794, 0.000496, 0.000496, 0.000496 + 194, 0.058338, 0.077359, 0.0075816, 0.60158, 0.40707, 0.3958, 0.17155, 0.06142, 0.070538, 0.0056139, 0.0004465, 0.0004465, 0.0004465 + 195, 0.058652, 0.079973, 0.0076763, 0.61455, 0.39871, 0.39545, 0.17112, 0.061313, 0.070764, 0.0056792, 0.000397, 0.000397, 0.000397 + 196, 0.058626, 0.076117, 0.0075257, 0.58013, 0.41444, 0.39569, 0.16962, 0.06143, 0.070803, 0.00566, 0.0003475, 0.0003475, 0.0003475 + 197, 0.058519, 0.07889, 0.0073568, 0.60364, 0.39333, 0.3906, 0.16771, 0.061568, 0.070652, 0.005606, 0.000298, 0.000298, 0.000298 + 198, 0.058598, 0.079198, 0.0076465, 0.61812, 0.39651, 0.3927, 0.17121, 0.061296, 0.070684, 0.0056408, 0.0002485, 0.0002485, 0.0002485 + 199, 0.058884, 0.077597, 0.0077865, 0.59517, 0.40793, 0.39424, 0.16871, 0.061455, 0.070537, 0.0056641, 0.000199, 0.000199, 0.000199 diff --git 
a/weights/yolov5/class9/results.png b/weights/yolov5/class9/results.png new file mode 100644 index 0000000..2ebc8ce Binary files /dev/null and b/weights/yolov5/class9/results.png differ diff --git a/weights/yolov5/class9/train_batch0.jpg b/weights/yolov5/class9/train_batch0.jpg new file mode 100644 index 0000000..7c3d5e8 Binary files /dev/null and b/weights/yolov5/class9/train_batch0.jpg differ diff --git a/weights/yolov5/class9/train_batch1.jpg b/weights/yolov5/class9/train_batch1.jpg new file mode 100644 index 0000000..3c2a24e Binary files /dev/null and b/weights/yolov5/class9/train_batch1.jpg differ diff --git a/weights/yolov5/class9/train_batch2.jpg b/weights/yolov5/class9/train_batch2.jpg new file mode 100644 index 0000000..84c4db6 Binary files /dev/null and b/weights/yolov5/class9/train_batch2.jpg differ diff --git a/weights/yolov5/class9/val_batch0_labels.jpg b/weights/yolov5/class9/val_batch0_labels.jpg new file mode 100644 index 0000000..95382b7 Binary files /dev/null and b/weights/yolov5/class9/val_batch0_labels.jpg differ diff --git a/weights/yolov5/class9/val_batch0_pred.jpg b/weights/yolov5/class9/val_batch0_pred.jpg new file mode 100644 index 0000000..49d0068 Binary files /dev/null and b/weights/yolov5/class9/val_batch0_pred.jpg differ diff --git a/weights/yolov5/class9/val_batch1_labels.jpg b/weights/yolov5/class9/val_batch1_labels.jpg new file mode 100644 index 0000000..42e060e Binary files /dev/null and b/weights/yolov5/class9/val_batch1_labels.jpg differ diff --git a/weights/yolov5/class9/val_batch1_pred.jpg b/weights/yolov5/class9/val_batch1_pred.jpg new file mode 100644 index 0000000..06a2458 Binary files /dev/null and b/weights/yolov5/class9/val_batch1_pred.jpg differ diff --git a/weights/yolov5/class9/val_batch2_labels.jpg b/weights/yolov5/class9/val_batch2_labels.jpg new file mode 100644 index 0000000..94fb870 Binary files /dev/null and b/weights/yolov5/class9/val_batch2_labels.jpg differ diff --git a/weights/yolov5/class9/val_batch2_pred.jpg b/weights/yolov5/class9/val_batch2_pred.jpg new file mode 100644 index 0000000..574c332 Binary files /dev/null and b/weights/yolov5/class9/val_batch2_pred.jpg differ diff --git a/weights/yolov5/class9/weights/best.pt b/weights/yolov5/class9/weights/best.pt new file mode 100644 index 0000000..9b24f6e Binary files /dev/null and b/weights/yolov5/class9/weights/best.pt differ diff --git a/weights/yolov5/class9/weights/last.pt b/weights/yolov5/class9/weights/last.pt new file mode 100644 index 0000000..baec14a Binary files /dev/null and b/weights/yolov5/class9/weights/last.pt differ
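Finally, a speculative end-to-end sketch (not part of the commit) tying the weights above to the upload SDK: it reads the class5 label file added in this commit and prepares a request for a result video. The video path is hypothetical, and the actual upload call lives in AliyunVodUploader, which ships here only as a .pyc:

import json
from voduploadsdk.UploadVideoRequest import UploadVideoRequest

# Label file added by this commit.
with open('weights/yolov5/class5/labelnames.json', encoding='utf-8') as fp:
    names = json.load(fp)['labelnames']

# Hypothetical result clip produced by the offline post-process stage.
req = UploadVideoRequest('/tmp/ai_result_001.mp4', title='AI result 001')
req.setDescription('classes: ' + ', '.join(names))
req.shutdownWatermark()    # keep the AI overlay free of any global watermark
print(req.fileName, req.title)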