diff --git a/111.jpg b/111.jpg new file mode 100644 index 0000000..0a8a1c9 Binary files /dev/null and b/111.jpg differ diff --git a/AI.py b/AI.py new file mode 100644 index 0000000..68df4e0 --- /dev/null +++ b/AI.py @@ -0,0 +1,267 @@ +''' +这个版本增加了船舶过滤功能 +''' +import time +import sys +from core.models.bisenet import BiSeNet +from models.AIDetector_pytorch import Detector +from models.AIDetector_pytorch import plot_one_box,Colors +from utils.postprocess_utils import center_coordinate,fourcorner_coordinate,remove_simivalue,remove_sameeleme_inalist +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '1' +from models.model_stages import BiSeNet +import cv2 +import torch +import torch.nn.functional as F +from PIL import Image +import numpy as np +import torchvision.transforms as transforms +from utils.segutils import colour_code_segmentation +from utils.segutils import get_label_info +os.environ['KMP_DUPLICATE_LIB_OK']='TRUE' +os.environ["CUDA_VISIBLE_DEVICES"] = "0" +sys.path.append("../") # 为了导入上级目录的,添加一个新路径 + + +def AI_postprocess(pred,_img_cv,_mask_cv): + '''还未考虑船上人过滤''' + '''输入:落水人员的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断人员是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终落水人员(坐标、类别、置信度), + ''' + '''1、最大分割水域作为判断依据''' + t4 = time.time() + img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) + contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + t5=time.time() + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t6 = time.time() + print('t5-t4',t5-t4) + + + '''新增模块:如果水域为空,则返回原图、无落水人员等。''' + if contour_info==[]: + final_img=_img_cv + final_head_person_filterwater=[] + return final_img, final_head_person_filterwater + else: + max_contour = contour_info[0] + print(max_contour) + t7 = time.time() + + + 
'''2.1、pred中head+person取出,boat取出。''' + init_head_person=[] + init_boat = [] + for i in range(len(pred[1])): + if pred[1][i][4]=='head' or pred[1][i][4]=='person': + init_head_person.append(pred[1][i]) + else: + init_boat.append(pred[1][i]) + t8 = time.time() + + '''新增模块:2.2、pred中head+person取出,过滤掉head与person中指向同一人的部分,保留同一人的person标签。''' + init_head=[] + init_person=[] + #head与person标签分开 + for i in range(len(init_head_person)): + if init_head_person[i][4]=='head': + init_head.append(init_head_person[i]) + else: + init_person.append(init_head_person[i]) + # person的框形成contours + person_contour=[] + for i in range(len(init_person)): + boundbxs_temp=[init_person[i][0],init_person[i][1],init_person[i][2],init_person[i][3]] + contour_temp_person=fourcorner_coordinate(boundbxs_temp) #得到person预测框的顺序contour + contour_temp_person=np.array(contour_temp_person) + contour_temp_person=np.float32(contour_temp_person) + person_contour.append(np.array(contour_temp_person)) + # head是否在person的contours内,在说明是同一人,过滤掉。 + list_head=[] + for i in range(len(init_head)): + for j in range(len(person_contour)): + center_x, center_y=center_coordinate(init_head[i]) + flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + pass + else: + list_head.append(init_head[i]) + # person和最终head合并起来 + init_head_person_temp=init_person+list_head + + '''3、pred中head+person,通过1中水域过滤''' + init_head_person_filterwater=init_head_person_temp + final_head_person_filterwater=[] + for i in range(len(init_head_person_filterwater)): + center_x, center_y=center_coordinate(init_head_person_filterwater[i]) + flag = cv2.pointPolygonTest(max_contour[0], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + final_head_person_filterwater.append(init_head_person_filterwater[i]) + else: + pass + t9 = time.time() + + '''4、水域过滤后的head+person,再通过船舶范围过滤''' + init_head_person_filterboat=final_head_person_filterwater + # 
final_head_person_filterboat=[] + #获取船舶范围 + boat_contour=[] + for i in range(len(init_boat)): + boundbxs1=[init_boat[i][0],init_boat[i][1],init_boat[i][2],init_boat[i][3]] + contour_temp=fourcorner_coordinate(boundbxs1) #得到boat预测框的顺序contour + contour_temp_=np.array(contour_temp) + contour_temp_=np.float32(contour_temp_) + boat_contour.append(np.array(contour_temp_)) + t10 = time.time() + # 遍历船舶范围,取出在船舶范围内的head和person(可能有重复元素) + list_headperson_inboat=[] + for i in range(len(init_head_person_filterboat)): + for j in range(len(boat_contour)): + center_x, center_y=center_coordinate(init_head_person_filterboat[i]) + # yyyyyyyy=boat_contour[j] + flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + list_headperson_inboat.append(init_head_person_filterboat[i]) + else: + pass + print('list_headperson_inboat',list_headperson_inboat) + if len(list_headperson_inboat)==0: + pass + else: + list_headperson_inboat=remove_sameeleme_inalist(list_headperson_inboat) #将重复嵌套列表元素删除 + # 过滤船舶范围内的head和person + final_head_person_filterboat=remove_simivalue(init_head_person_filterboat,list_headperson_inboat) + t11 = time.time() + + '''5、输出最终落水人员,并绘制保存检测图''' + colors = Colors() + if final_head_person_filterwater is not None: + for i in range(len(final_head_person_filterboat)): + # lbl = self.names[int(cls_id)] + lbl = final_head_person_filterboat[i][4] + xyxy=[final_head_person_filterboat[i][0],final_head_person_filterboat[i][1],final_head_person_filterboat[i][2],final_head_person_filterboat[i][3]] + c = int(5) + plot_one_box(xyxy, _img_cv, label=lbl, color=colors(c, True), line_thickness=3) + final_img=_img_cv + t12 = time.time() + # cv2.imwrite('final_result.png', _img_cv) + t13 = time.time() + + print('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 
1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + + return final_img,final_head_person_filterwater #返回最终绘制的结果图、最终落水人员(坐标、类别、置信度) + + +def AI_process(model, segmodel, args1,path1): + '''对原图进行目标检测和水域分割''' + '''输入:检测模型、分割模型、配置参数、路径 + 返回:返回目标检测结果、原图像、分割图像, + ''' + '''检测图片''' + t21=time.time() + _img_cv = cv2.imread(path1) # 将这里的送入yolov5 + t22 = time.time() + + # _img_cv=_img_cv.numpy() + pred = model.detect(_img_cv) # 检测结果 + print('pred', pred) + + t23 = time.time() + '''分割图片''' + img = Image.open(path1).convert('RGB') + t231 = time.time() + transf1 = transforms.ToTensor() + transf2 = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) + imgs = transf1(img) + imgs = transf2(imgs) + print(path1) # numpy数组格式为(H,W,C) + + size = [360, 640] + imgs = imgs.unsqueeze(0) + imgs = imgs.cuda() + N, C, H, W = imgs.size() + + self_scale = 360 / H + new_hw = [int(H * self_scale), int(W * self_scale)] + print("line50", new_hw) + imgs = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True) + t24 = time.time() + with torch.no_grad(): + logits = segmodel(imgs)[0] + t241 = time.time() + logits = F.interpolate(logits, size=size, mode='bilinear', align_corners=True) + probs = torch.softmax(logits, dim=1) + preds = torch.argmax(probs, dim=1) + preds_squeeze = preds.squeeze(0) + preds_squeeze_predict = colour_code_segmentation(np.array(preds_squeeze.cpu()), args1['label_info']) + preds_squeeze_predict = cv2.resize(np.uint8(preds_squeeze_predict), (W, H)) + predict_mask = cv2.cvtColor(np.uint8(preds_squeeze_predict), cv2.COLOR_RGB2BGR) + _mask_cv =predict_mask + t25 = time.time() + cv2.imwrite('seg_result.png', _mask_cv) + t26 = time.time() + print('存分割图:%s, 分割后处理:%s ,分割推理:%s ,分割图变小:%s,分割图读图:%s,检测模型推理:%s,读图片:%s' + %((t26-t25) * 1000,(t25-t241) * 1000,(t241-t24) * 1000,(t24-t231) * 1000,(t231-t23) * 1000,(t23-t22) * 1000,(t22-t21) * 1000 ) ) + + return pred, _img_cv, _mask_cv #返回目标检测结果、原图像、分割图像 + +def main(): + + '''配置参数''' + label_info = 
get_label_info('utils/class_dict.csv') + args1={'cuda':'0','crop_size':512,'input_dir':'input_dir','output_dir':'output_dir','workers':16,'label_info':label_info, + 'dspth':'./data/','backbone':'STDCNet813','use_boundary_2':False, 'use_boundary_4':False, 'use_boundary_8':True, 'use_boundary_16':False,'use_conv_last':False} + + + dete_weights='weights/best_luoshui20230608.pt' + '''分割模型权重路径''' + seg_weights = 'weights/model_final.pth' + + '''初始化目标检测模型''' + model = Detector(dete_weights) + + + '''初始化分割模型2''' + n_classes = 2 + segmodel = BiSeNet(backbone=args1['backbone'], n_classes=n_classes, + use_boundary_2=args1['use_boundary_2'], use_boundary_4=args1['use_boundary_4'], + use_boundary_8=args1['use_boundary_8'], use_boundary_16=args1['use_boundary_16'], + use_conv_last=args1['use_conv_last']) + segmodel.load_state_dict(torch.load(seg_weights)) + segmodel.cuda() + segmodel.eval() + + + '''图像测试''' + folders = os.listdir(args1['input_dir']) + for i in range(len(folders)): + path1 = args1['input_dir'] + '/' + folders[i] + + t1=time.time() + + '''对原图进行目标检测和水域分割''' + pred, _img_cv, _mask_cv=AI_process(model,segmodel, args1,path1) + + t2 = time.time() + + '''进入后处理,判断水域内有落水人员''' + hhh=AI_postprocess(pred, _img_cv, _mask_cv) + t3 = time.time() + + print('总时间分布:前处理t2-t1,后处理t3-t2',t2-t1,t3-t2) + +if __name__ == "__main__": + main() + diff --git a/AI20230801.py b/AI20230801.py new file mode 100644 index 0000000..e9245aa --- /dev/null +++ b/AI20230801.py @@ -0,0 +1,279 @@ +''' +这个版本增加了船舶过滤功能 +''' +import time +import sys +from core.models.bisenet import BiSeNet +from models.AIDetector_pytorch import Detector +from models.AIDetector_pytorch import plot_one_box,Colors +from utils.postprocess_utils import center_coordinate,fourcorner_coordinate,remove_simivalue,remove_sameeleme_inalist +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '1' +from models.model_stages import BiSeNet +import cv2 +import torch +import torch.nn.functional as F +from PIL import Image +import numpy as np 
+import torchvision.transforms as transforms +from utils.segutils import colour_code_segmentation +from utils.segutils import get_label_info +os.environ['KMP_DUPLICATE_LIB_OK']='TRUE' +os.environ["CUDA_VISIBLE_DEVICES"] = "0" +sys.path.append("../") # 为了导入上级目录的,添加一个新路径 + + +def AI_postprocess(preds,_mask_cv,pars,_img_cv): + '''考虑船上人过滤''' + '''输入:落水人员的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断人员是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终落水人员(坐标、类别、置信度), + ''' + '''1、最大分割水域作为判断依据''' + zoom_factor=4 #缩小因子设置为4,考虑到numpy中分别遍历xy进行缩放耗时大。 + original_height = _mask_cv.shape[0] + original_width=_mask_cv.shape[1] + zoom_height=int(original_height/zoom_factor) + zoom_width=int(original_width/zoom_factor) + + _mask_cv = cv2.resize(_mask_cv, (zoom_width,zoom_height)) #缩小原图,宽在前,高在后 + t4 = time.time() + img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape)==3 else _mask_cv # + t5 = time.time() + contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t6 = time.time() + + '''新增模块::如果水域为空,则返回原图、无落水人员等。''' + if contour_info==[]: + # final_img=_img_cv + final_head_person_filterwater=[] + timeInfos=0 + # return final_img, final_head_person_filterwater + return final_head_person_filterwater,timeInfos + else: + max_contour = contour_info[0] + max_contour=max_contour[0]*zoom_factor# contours恢复原图尺寸 + print(max_contour) + t7 = time.time() + + + '''2.1、preds中head+person取出,boat取出。''' + init_head_person=[] + init_boat = [] + for i in range(len(preds)): + if preds[i][4]=='head' or preds[i][4]=='person': + init_head_person.append(preds[i]) + else: + init_boat.append(preds[i]) + t8 = time.time() + + '''新增模块:2.2、preds中head+person取出,过滤掉head与person中指向同一人的部分,保留同一人的person标签。''' + 
init_head=[] + init_person=[] + #head与person标签分开 + for i in range(len(init_head_person)): + if init_head_person[i][4]=='head': + init_head.append(init_head_person[i]) + else: + init_person.append(init_head_person[i]) + # person的框形成contours + person_contour=[] + for i in range(len(init_person)): + boundbxs_temp=[init_person[i][0],init_person[i][1],init_person[i][2],init_person[i][3]] + contour_temp_person=fourcorner_coordinate(boundbxs_temp) #得到person预测框的顺序contour + contour_temp_person=np.array(contour_temp_person) + contour_temp_person=np.float32(contour_temp_person) + person_contour.append(np.array(contour_temp_person)) + # head是否在person的contours内,在说明是同一人,过滤掉。 + list_head=[] + for i in range(len(init_head)): + for j in range(len(person_contour)): + center_x, center_y=center_coordinate(init_head[i]) + flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + pass + else: + list_head.append(init_head[i]) + # person和最终head合并起来 + init_head_person_temp=init_person+list_head + + '''3、preds中head+person,通过1中水域过滤''' + init_head_person_filterwater=init_head_person_temp + final_head_person_filterwater=[] + for i in range(len(init_head_person_filterwater)): + center_x, center_y=center_coordinate(init_head_person_filterwater[i]) + flag = cv2.pointPolygonTest(max_contour, (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + final_head_person_filterwater.append(init_head_person_filterwater[i]) + else: + pass + t9 = time.time() + + '''4、水域过滤后的head+person,再通过船舶范围过滤''' + init_head_person_filterboat=final_head_person_filterwater + # final_head_person_filterboat=[] + #获取船舶范围 + boat_contour=[] + for i in range(len(init_boat)): + boundbxs1=[init_boat[i][0],init_boat[i][1],init_boat[i][2],init_boat[i][3]] + contour_temp=fourcorner_coordinate(boundbxs1) #得到boat预测框的顺序contour + contour_temp_=np.array(contour_temp) + contour_temp_=np.float32(contour_temp_) + 
boat_contour.append(np.array(contour_temp_)) + t10 = time.time() + # 遍历船舶范围,取出在船舶范围内的head和person(可能有重复元素) + list_headperson_inboat=[] + for i in range(len(init_head_person_filterboat)): + for j in range(len(boat_contour)): + center_x, center_y=center_coordinate(init_head_person_filterboat[i]) + # yyyyyyyy=boat_contour[j] + flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + list_headperson_inboat.append(init_head_person_filterboat[i]) + else: + pass + print('list_headperson_inboat',list_headperson_inboat) + if len(list_headperson_inboat)==0: + pass + else: + list_headperson_inboat=remove_sameeleme_inalist(list_headperson_inboat) #将重复嵌套列表元素删除 + # 过滤船舶范围内的head和person + final_head_person_filterboat=remove_simivalue(init_head_person_filterboat,list_headperson_inboat) + t11 = time.time() + + '''5、输出最终落水人员,并绘制保存检测图''' + colors = Colors() + if final_head_person_filterwater is not None: + for i in range(len(final_head_person_filterboat)): + # lbl = self.names[int(cls_id)] + lbl = final_head_person_filterboat[i][4] + xyxy=[final_head_person_filterboat[i][0],final_head_person_filterboat[i][1],final_head_person_filterboat[i][2],final_head_person_filterboat[i][3]] + c = int(5) + plot_one_box(xyxy, _img_cv, label=lbl, color=colors(c, True), line_thickness=3) + final_img=_img_cv + t12 = time.time() + # cv2.imwrite('final_result.png', _img_cv) + t13 = time.time() + + print('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + timeInfos=('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + 
return final_head_person_filterwater,timeInfos #返回最终绘制的结果图、最终落水人员(坐标、类别、置信度) + + +def AI_process(model, segmodel, args1,path1): + '''对原图进行目标检测和水域分割''' + '''输入:检测模型、分割模型、配置参数、路径 + 返回:返回目标检测结果、原图像、分割图像, + ''' + '''检测图片''' + t21=time.time() + _img_cv = cv2.imread(path1) # 将这里的送入yolov5 + t22 = time.time() + + # _img_cv=_img_cv.numpy() + pred = model.detect(_img_cv) # 检测结果 + #对pred处理,处理成list嵌套 + pred=[[*x[0:4],x[4],x[5].cpu().tolist()] for x in pred[1]] + # pred=[[x[0],*x[1:5],x[5].cpu().float()] for x in pred[1]] + print('pred', pred) + + t23 = time.time() + '''分割图片''' + img = Image.open(path1).convert('RGB') + t231 = time.time() + transf1 = transforms.ToTensor() + transf2 = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) + imgs = transf1(img) + imgs = transf2(imgs) + print(path1) # numpy数组格式为(H,W,C) + + size = [360, 640] + imgs = imgs.unsqueeze(0) + imgs = imgs.cuda() + N, C, H, W = imgs.size() + + self_scale = 360 / H + new_hw = [int(H * self_scale), int(W * self_scale)] + print("line50", new_hw) + imgs = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True) + t24 = time.time() + with torch.no_grad(): + logits = segmodel(imgs)[0] + t241 = time.time() + logits = F.interpolate(logits, size=size, mode='bilinear', align_corners=True) + probs = torch.softmax(logits, dim=1) + preds = torch.argmax(probs, dim=1) + preds_squeeze = preds.squeeze(0) + preds_squeeze_predict = colour_code_segmentation(np.array(preds_squeeze.cpu()), args1['label_info']) + preds_squeeze_predict = cv2.resize(np.uint8(preds_squeeze_predict), (W, H)) + predict_mask = cv2.cvtColor(np.uint8(preds_squeeze_predict), cv2.COLOR_RGB2BGR) + _mask_cv =predict_mask + t25 = time.time() + cv2.imwrite('seg_result.png', _mask_cv) + t26 = time.time() + print('存分割图:%s, 分割后处理:%s ,分割推理:%s ,分割图变小:%s,分割图读图:%s,检测模型推理:%s,读图片:%s' + %((t26-t25) * 1000,(t25-t241) * 1000,(t241-t24) * 1000,(t24-t231) * 1000,(t231-t23) * 1000,(t23-t22) * 1000,(t22-t21) * 1000 ) ) + + return pred, _img_cv, _mask_cv 
#返回目标检测结果、原图像、分割图像 + +def main(): + + '''配置参数''' + label_info = get_label_info('utils/class_dict.csv') + pars={'cuda':'0','crop_size':512,'input_dir':'input_dir','output_dir':'output_dir','workers':16,'label_info':label_info, + 'dspth':'./data/','backbone':'STDCNet813','use_boundary_2':False, 'use_boundary_4':False, 'use_boundary_8':True, 'use_boundary_16':False,'use_conv_last':False} + + + dete_weights='weights/best_luoshui20230608.pt' + '''分割模型权重路径''' + seg_weights = 'weights/model_final.pth' + + '''初始化目标检测模型''' + model = Detector(dete_weights) + + + '''初始化分割模型2''' + n_classes = 2 + segmodel = BiSeNet(backbone=pars['backbone'], n_classes=n_classes, + use_boundary_2=pars['use_boundary_2'], use_boundary_4=pars['use_boundary_4'], + use_boundary_8=pars['use_boundary_8'], use_boundary_16=pars['use_boundary_16'], + use_conv_last=pars['use_conv_last']) + segmodel.load_state_dict(torch.load(seg_weights)) + segmodel.cuda() + segmodel.eval() + + + '''图像测试''' + folders = os.listdir(pars['input_dir']) + for i in range(len(folders)): + path1 = pars['input_dir'] + '/' + folders[i] + + t1=time.time() + + '''对原图进行目标检测和水域分割''' + pred, _img_cv, _mask_cv=AI_process(model,segmodel, pars,path1) + + t2 = time.time() + + '''进入后处理,判断水域内有落水人员''' + haha,zzzz=AI_postprocess(pred, _mask_cv,pars,_img_cv ) + t3 = time.time() + + print('总时间分布:前处理t2-t1,后处理t3-t2',(t2-t1)*1000,(t3-t2)*1000) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/AI20230801_caogao.py b/AI20230801_caogao.py new file mode 100644 index 0000000..94453c1 --- /dev/null +++ b/AI20230801_caogao.py @@ -0,0 +1,282 @@ +''' +这个版本增加了船舶过滤功能 +''' +import time +import sys +from core.models.bisenet import BiSeNet +from models.AIDetector_pytorch import Detector +from models.AIDetector_pytorch import plot_one_box,Colors +from utils.postprocess_utils import center_coordinate,fourcorner_coordinate,remove_simivalue,remove_sameeleme_inalist +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '1' +from 
models.model_stages import BiSeNet +import cv2 +import torch +import torch.nn.functional as F +from PIL import Image +import numpy as np +import torchvision.transforms as transforms +from utils.segutils import colour_code_segmentation +from utils.segutils import get_label_info +os.environ['KMP_DUPLICATE_LIB_OK']='TRUE' +os.environ["CUDA_VISIBLE_DEVICES"] = "0" +sys.path.append("../") # 为了导入上级目录的,添加一个新路径 + + +def AI_postprocess(preds,_mask_cv,pars,_img_cv): + '''考虑船上人过滤''' + '''输入:落水人员的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断人员是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终落水人员(坐标、类别、置信度), + ''' + '''1、最大分割水域作为判断依据''' + zoom_factor=4 #缩小因子设置为4,考虑到numpy中分别遍历xy进行缩放耗时大。 + original_height = _mask_cv.shape[0] + original_width=_mask_cv.shape[1] + zoom_height=int(original_height/zoom_factor) + zoom_width=int(original_width/zoom_factor) + + _mask_cv = cv2.resize(_mask_cv, (zoom_width,zoom_height)) #缩小原图,宽在前,高在后 + t4 = time.time() + img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape)==3 else _mask_cv # + t5 = time.time() + contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t6 = time.time() + + '''新增模块::如果水域为空,则返回原图、无落水人员等。''' + if contour_info==[]: + # final_img=_img_cv + final_head_person_filterwater=[] + timeInfos=0 + # return final_img, final_head_person_filterwater + return final_head_person_filterwater,timeInfos + else: + max_contour = contour_info[0] + max_contour1=max_contour[0] + max_contour_X=max_contour1[0][0][:] + max_contour=max_contour[0]*zoom_factor# contours恢复原图尺寸 + # max_contour=max_contour[0]*zoom_factor# contours恢复原图尺寸 + print(max_contour) + t7 = time.time() + + + '''2.1、preds中head+person取出,boat取出。''' + init_head_person=[] + 
init_boat = [] + for i in range(len(preds)): + if preds[i][4]=='head' or preds[i][4]=='person': + init_head_person.append(preds[i]) + else: + init_boat.append(preds[i]) + t8 = time.time() + + '''新增模块:2.2、preds中head+person取出,过滤掉head与person中指向同一人的部分,保留同一人的person标签。''' + init_head=[] + init_person=[] + #head与person标签分开 + for i in range(len(init_head_person)): + if init_head_person[i][4]=='head': + init_head.append(init_head_person[i]) + else: + init_person.append(init_head_person[i]) + # person的框形成contours + person_contour=[] + for i in range(len(init_person)): + boundbxs_temp=[init_person[i][0],init_person[i][1],init_person[i][2],init_person[i][3]] + contour_temp_person=fourcorner_coordinate(boundbxs_temp) #得到person预测框的顺序contour + contour_temp_person=np.array(contour_temp_person) + contour_temp_person=np.float32(contour_temp_person) + person_contour.append(np.array(contour_temp_person)) + # head是否在person的contours内,在说明是同一人,过滤掉。 + list_head=[] + for i in range(len(init_head)): + for j in range(len(person_contour)): + center_x, center_y=center_coordinate(init_head[i]) + flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + pass + else: + list_head.append(init_head[i]) + # person和最终head合并起来 + init_head_person_temp=init_person+list_head + + '''3、preds中head+person,通过1中水域过滤''' + init_head_person_filterwater=init_head_person_temp + final_head_person_filterwater=[] + for i in range(len(init_head_person_filterwater)): + center_x, center_y=center_coordinate(init_head_person_filterwater[i]) + flag = cv2.pointPolygonTest(max_contour, (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + final_head_person_filterwater.append(init_head_person_filterwater[i]) + else: + pass + t9 = time.time() + + '''4、水域过滤后的head+person,再通过船舶范围过滤''' + init_head_person_filterboat=final_head_person_filterwater + # final_head_person_filterboat=[] + #获取船舶范围 + boat_contour=[] + for i in 
range(len(init_boat)): + boundbxs1=[init_boat[i][0],init_boat[i][1],init_boat[i][2],init_boat[i][3]] + contour_temp=fourcorner_coordinate(boundbxs1) #得到boat预测框的顺序contour + contour_temp_=np.array(contour_temp) + contour_temp_=np.float32(contour_temp_) + boat_contour.append(np.array(contour_temp_)) + t10 = time.time() + # 遍历船舶范围,取出在船舶范围内的head和person(可能有重复元素) + list_headperson_inboat=[] + for i in range(len(init_head_person_filterboat)): + for j in range(len(boat_contour)): + center_x, center_y=center_coordinate(init_head_person_filterboat[i]) + # yyyyyyyy=boat_contour[j] + flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + list_headperson_inboat.append(init_head_person_filterboat[i]) + else: + pass + print('list_headperson_inboat',list_headperson_inboat) + if len(list_headperson_inboat)==0: + pass + else: + list_headperson_inboat=remove_sameeleme_inalist(list_headperson_inboat) #将重复嵌套列表元素删除 + # 过滤船舶范围内的head和person + final_head_person_filterboat=remove_simivalue(init_head_person_filterboat,list_headperson_inboat) + t11 = time.time() + + '''5、输出最终落水人员,并绘制保存检测图''' + colors = Colors() + if final_head_person_filterwater is not None: + for i in range(len(final_head_person_filterboat)): + # lbl = self.names[int(cls_id)] + lbl = final_head_person_filterboat[i][4] + xyxy=[final_head_person_filterboat[i][0],final_head_person_filterboat[i][1],final_head_person_filterboat[i][2],final_head_person_filterboat[i][3]] + c = int(5) + plot_one_box(xyxy, _img_cv, label=lbl, color=colors(c, True), line_thickness=3) + final_img=_img_cv + t12 = time.time() + # cv2.imwrite('final_result.png', _img_cv) + t13 = time.time() + + print('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + timeInfos=('存图:%s, 
过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + return final_head_person_filterwater,timeInfos #返回最终绘制的结果图、最终落水人员(坐标、类别、置信度) + + +def AI_process(model, segmodel, args1,path1): + '''对原图进行目标检测和水域分割''' + '''输入:检测模型、分割模型、配置参数、路径 + 返回:返回目标检测结果、原图像、分割图像, + ''' + '''检测图片''' + t21=time.time() + _img_cv = cv2.imread(path1) # 将这里的送入yolov5 + t22 = time.time() + + # _img_cv=_img_cv.numpy() + pred = model.detect(_img_cv) # 检测结果 + #对pred处理,处理成list嵌套 + pred=[[*x[0:4],x[4],x[5].cpu().tolist()] for x in pred[1]] + # pred=[[x[0],*x[1:5],x[5].cpu().float()] for x in pred[1]] + print('pred', pred) + + t23 = time.time() + '''分割图片''' + img = Image.open(path1).convert('RGB') + t231 = time.time() + transf1 = transforms.ToTensor() + transf2 = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) + imgs = transf1(img) + imgs = transf2(imgs) + print(path1) # numpy数组格式为(H,W,C) + + size = [360, 640] + imgs = imgs.unsqueeze(0) + imgs = imgs.cuda() + N, C, H, W = imgs.size() + + self_scale = 360 / H + new_hw = [int(H * self_scale), int(W * self_scale)] + print("line50", new_hw) + imgs = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True) + t24 = time.time() + with torch.no_grad(): + logits = segmodel(imgs)[0] + t241 = time.time() + logits = F.interpolate(logits, size=size, mode='bilinear', align_corners=True) + probs = torch.softmax(logits, dim=1) + preds = torch.argmax(probs, dim=1) + preds_squeeze = preds.squeeze(0) + preds_squeeze_predict = colour_code_segmentation(np.array(preds_squeeze.cpu()), args1['label_info']) + preds_squeeze_predict = cv2.resize(np.uint8(preds_squeeze_predict), (W, H)) + predict_mask = cv2.cvtColor(np.uint8(preds_squeeze_predict), cv2.COLOR_RGB2BGR) + _mask_cv =predict_mask + t25 = time.time() + cv2.imwrite('seg_result.png', _mask_cv) + 
t26 = time.time() + print('存分割图:%s, 分割后处理:%s ,分割推理:%s ,分割图变小:%s,分割图读图:%s,检测模型推理:%s,读图片:%s' + %((t26-t25) * 1000,(t25-t241) * 1000,(t241-t24) * 1000,(t24-t231) * 1000,(t231-t23) * 1000,(t23-t22) * 1000,(t22-t21) * 1000 ) ) + + return pred, _img_cv, _mask_cv #返回目标检测结果、原图像、分割图像 + +def main(): + + '''配置参数''' + label_info = get_label_info('utils/class_dict.csv') + pars={'cuda':'0','crop_size':512,'input_dir':'input_dir','output_dir':'output_dir','workers':16,'label_info':label_info, + 'dspth':'./data/','backbone':'STDCNet813','use_boundary_2':False, 'use_boundary_4':False, 'use_boundary_8':True, 'use_boundary_16':False,'use_conv_last':False} + + + dete_weights='weights/best_luoshui20230608.pt' + '''分割模型权重路径''' + seg_weights = 'weights/model_final.pth' + + '''初始化目标检测模型''' + model = Detector(dete_weights) + + + '''初始化分割模型2''' + n_classes = 2 + segmodel = BiSeNet(backbone=pars['backbone'], n_classes=n_classes, + use_boundary_2=pars['use_boundary_2'], use_boundary_4=pars['use_boundary_4'], + use_boundary_8=pars['use_boundary_8'], use_boundary_16=pars['use_boundary_16'], + use_conv_last=pars['use_conv_last']) + segmodel.load_state_dict(torch.load(seg_weights)) + segmodel.cuda() + segmodel.eval() + + + '''图像测试''' + folders = os.listdir(pars['input_dir']) + for i in range(len(folders)): + path1 = pars['input_dir'] + '/' + folders[i] + + t1=time.time() + + '''对原图进行目标检测和水域分割''' + pred, _img_cv, _mask_cv=AI_process(model,segmodel, pars,path1) + + t2 = time.time() + + '''进入后处理,判断水域内有落水人员''' + haha,zzzz=AI_postprocess(pred, _mask_cv,pars,_img_cv ) + t3 = time.time() + + print('总时间分布:前处理t2-t1,后处理t3-t2',(t2-t1)*1000,(t3-t2)*1000) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/AIqq.py b/AIqq.py new file mode 100644 index 0000000..b450bd8 --- /dev/null +++ b/AIqq.py @@ -0,0 +1,279 @@ +''' +这个版本增加了船舶过滤功能 +''' +import time +import sys +from core.models.bisenet import BiSeNet +from models.AIDetector_pytorch import Detector +from models.AIDetector_pytorch 
import plot_one_box,Colors +from utils.postprocess_utils import center_coordinate,fourcorner_coordinate,remove_simivalue,remove_sameeleme_inalist +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '1' +from models.model_stages import BiSeNet +import cv2 +import torch +import torch.nn.functional as F +from PIL import Image +import numpy as np +import torchvision.transforms as transforms +from utils.segutils import colour_code_segmentation +from utils.segutils import get_label_info +os.environ['KMP_DUPLICATE_LIB_OK']='TRUE' +os.environ["CUDA_VISIBLE_DEVICES"] = "0" +sys.path.append("../") # 为了导入上级目录的,添加一个新路径 + + +def AI_postprocess(preds,_mask_cv,pars,_img_cv): + '''还未考虑船上人过滤''' + '''输入:落水人员的结果(类别+坐标)、原图、mask图像 + 过程:获得mask的轮廓,判断人员是否在轮廓内。 + 在,则保留且绘制;不在,舍弃。 + 返回:最终绘制的结果图、最终落水人员(坐标、类别、置信度), + ''' + '''1、最大分割水域作为判断依据''' + zoom_factor=4 #缩小因子设置为4,考虑到numpy中分别遍历xy进行缩放耗时大。 + original_height = _mask_cv.shape[0] + original_width=_mask_cv.shape[1] + zoom_height=int(original_height/zoom_factor) + zoom_width=int(original_width/zoom_factor) + + _mask_cv = cv2.resize(_mask_cv, (zoom_width,zoom_height)) #缩小原图,宽在前,高在后 + t4 = time.time() + img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape)==3 else _mask_cv # + t5 = time.time() + contours, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU) + + # 寻找轮廓(多边界) + contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2) + contour_info = [] + for c in contours: + contour_info.append(( + c, + cv2.isContourConvex(c), + cv2.contourArea(c), + )) + contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True) + t6 = time.time() + + '''新增模块::如果水域为空,则返回原图、无落水人员等。''' + if contour_info==[]: + # final_img=_img_cv + final_head_person_filterwater=[] + timeInfos=0 + # return final_img, final_head_person_filterwater + return final_head_person_filterwater,timeInfos + else: + max_contour = contour_info[0] + max_contour=max_contour[0]*zoom_factor# contours恢复原图尺寸 + print(max_contour) + t7 = 
time.time() + + + '''2.1、preds中head+person取出,boat取出。''' + init_head_person=[] + init_boat = [] + for i in range(len(preds)): + if preds[i][4]=='head' or preds[i][4]=='person': + init_head_person.append(preds[i]) + else: + init_boat.append(preds[i]) + t8 = time.time() + + '''新增模块:2.2、preds中head+person取出,过滤掉head与person中指向同一人的部分,保留同一人的person标签。''' + init_head=[] + init_person=[] + #head与person标签分开 + for i in range(len(init_head_person)): + if init_head_person[i][4]=='head': + init_head.append(init_head_person[i]) + else: + init_person.append(init_head_person[i]) + # person的框形成contours + person_contour=[] + for i in range(len(init_person)): + boundbxs_temp=[init_person[i][0],init_person[i][1],init_person[i][2],init_person[i][3]] + contour_temp_person=fourcorner_coordinate(boundbxs_temp) #得到person预测框的顺序contour + contour_temp_person=np.array(contour_temp_person) + contour_temp_person=np.float32(contour_temp_person) + person_contour.append(np.array(contour_temp_person)) + # head是否在person的contours内,在说明是同一人,过滤掉。 + list_head=[] + for i in range(len(init_head)): + for j in range(len(person_contour)): + center_x, center_y=center_coordinate(init_head[i]) + flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + pass + else: + list_head.append(init_head[i]) + # person和最终head合并起来 + init_head_person_temp=init_person+list_head + + '''3、preds中head+person,通过1中水域过滤''' + init_head_person_filterwater=init_head_person_temp + final_head_person_filterwater=[] + for i in range(len(init_head_person_filterwater)): + center_x, center_y=center_coordinate(init_head_person_filterwater[i]) + flag = cv2.pointPolygonTest(max_contour, (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + final_head_person_filterwater.append(init_head_person_filterwater[i]) + else: + pass + t9 = time.time() + + '''4、水域过滤后的head+person,再通过船舶范围过滤''' + init_head_person_filterboat=final_head_person_filterwater + # 
final_head_person_filterboat=[] + #获取船舶范围 + boat_contour=[] + for i in range(len(init_boat)): + boundbxs1=[init_boat[i][0],init_boat[i][1],init_boat[i][2],init_boat[i][3]] + contour_temp=fourcorner_coordinate(boundbxs1) #得到boat预测框的顺序contour + contour_temp_=np.array(contour_temp) + contour_temp_=np.float32(contour_temp_) + boat_contour.append(np.array(contour_temp_)) + t10 = time.time() + # 遍历船舶范围,取出在船舶范围内的head和person(可能有重复元素) + list_headperson_inboat=[] + for i in range(len(init_head_person_filterboat)): + for j in range(len(boat_contour)): + center_x, center_y=center_coordinate(init_head_person_filterboat[i]) + # yyyyyyyy=boat_contour[j] + flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False) #若为False,会找点是否在内,外,或轮廓上(相应返回+1, -1, 0)。 + if flag==1: + list_headperson_inboat.append(init_head_person_filterboat[i]) + else: + pass + print('list_headperson_inboat',list_headperson_inboat) + if len(list_headperson_inboat)==0: + pass + else: + list_headperson_inboat=remove_sameeleme_inalist(list_headperson_inboat) #将重复嵌套列表元素删除 + # 过滤船舶范围内的head和person + final_head_person_filterboat=remove_simivalue(init_head_person_filterboat,list_headperson_inboat) + t11 = time.time() + + '''5、输出最终落水人员,并绘制保存检测图''' + colors = Colors() + if final_head_person_filterwater is not None: + for i in range(len(final_head_person_filterboat)): + # lbl = self.names[int(cls_id)] + lbl = final_head_person_filterboat[i][4] + xyxy=[final_head_person_filterboat[i][0],final_head_person_filterboat[i][1],final_head_person_filterboat[i][2],final_head_person_filterboat[i][3]] + c = int(5) + plot_one_box(xyxy, _img_cv, label=lbl, color=colors(c, True), line_thickness=3) + final_img=_img_cv + t12 = time.time() + # cv2.imwrite('final_result.png', _img_cv) + t13 = time.time() + + print('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 
1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + timeInfos=('存图:%s, 过滤标签:%s ,遍历船舶范围:%s,水域过滤后的head+person:%s,水域过滤:%s,head+person、boat取出:%s,新增如果水域为空:%s,找contours:%s,图像改变:%s' + %((t13-t12) * 1000,(t12-t11) * 1000,(t11-t10) * 1000,(t10-t9) * 1000,(t9-t8) * 1000,(t8-t7) * 1000,(t7-t6) * 1000,(t6-t5) * 1000,(t5-t4) * 1000 ) ) + return final_head_person_filterwater,timeInfos #返回最终绘制的结果图、最终落水人员(坐标、类别、置信度) + + +def AI_process(model, segmodel, args1,path1): + '''对原图进行目标检测和水域分割''' + '''输入:检测模型、分割模型、配置参数、路径 + 返回:返回目标检测结果、原图像、分割图像, + ''' + '''检测图片''' + t21=time.time() + _img_cv = cv2.imread(path1) # 将这里的送入yolov5 + t22 = time.time() + + # _img_cv=_img_cv.numpy() + pred = model.detect(_img_cv) # 检测结果 + #对pred处理,处理成list嵌套 + pred=[[*x[0:4],x[4],x[5].cpu().tolist()] for x in pred[1]] + # pred=[[x[0],*x[1:5],x[5].cpu().float()] for x in pred[1]] + print('pred', pred) + + t23 = time.time() + '''分割图片''' + img = Image.open(path1).convert('RGB') + t231 = time.time() + transf1 = transforms.ToTensor() + transf2 = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) + imgs = transf1(img) + imgs = transf2(imgs) + print(path1) # numpy数组格式为(H,W,C) + + size = [360, 640] + imgs = imgs.unsqueeze(0) + imgs = imgs.cuda() + N, C, H, W = imgs.size() + + self_scale = 360 / H + new_hw = [int(H * self_scale), int(W * self_scale)] + print("line50", new_hw) + imgs = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True) + t24 = time.time() + with torch.no_grad(): + logits = segmodel(imgs)[0] + t241 = time.time() + logits = F.interpolate(logits, size=size, mode='bilinear', align_corners=True) + probs = torch.softmax(logits, dim=1) + preds = torch.argmax(probs, dim=1) + preds_squeeze = preds.squeeze(0) + preds_squeeze_predict = colour_code_segmentation(np.array(preds_squeeze.cpu()), args1['label_info']) + preds_squeeze_predict = cv2.resize(np.uint8(preds_squeeze_predict), (W, H)) + predict_mask = cv2.cvtColor(np.uint8(preds_squeeze_predict), cv2.COLOR_RGB2BGR) + _mask_cv 
=predict_mask + t25 = time.time() + cv2.imwrite('seg_result.png', _mask_cv) + t26 = time.time() + print('存分割图:%s, 分割后处理:%s ,分割推理:%s ,分割图变小:%s,分割图读图:%s,检测模型推理:%s,读图片:%s' + %((t26-t25) * 1000,(t25-t241) * 1000,(t241-t24) * 1000,(t24-t231) * 1000,(t231-t23) * 1000,(t23-t22) * 1000,(t22-t21) * 1000 ) ) + + return pred, _img_cv, _mask_cv #返回目标检测结果、原图像、分割图像 + +def main(): + + '''配置参数''' + label_info = get_label_info('utils/class_dict.csv') + pars={'cuda':'0','crop_size':512,'input_dir':'input_dir','output_dir':'output_dir','workers':16,'label_info':label_info, + 'dspth':'./data/','backbone':'STDCNet813','use_boundary_2':False, 'use_boundary_4':False, 'use_boundary_8':True, 'use_boundary_16':False,'use_conv_last':False} + + + dete_weights='weights/best_luoshui20230608.pt' + '''分割模型权重路径''' + seg_weights = 'weights/model_final.pth' + + '''初始化目标检测模型''' + model = Detector(dete_weights) + + + '''初始化分割模型2''' + n_classes = 2 + segmodel = BiSeNet(backbone=pars['backbone'], n_classes=n_classes, + use_boundary_2=pars['use_boundary_2'], use_boundary_4=pars['use_boundary_4'], + use_boundary_8=pars['use_boundary_8'], use_boundary_16=pars['use_boundary_16'], + use_conv_last=pars['use_conv_last']) + segmodel.load_state_dict(torch.load(seg_weights)) + segmodel.cuda() + segmodel.eval() + + + '''图像测试''' + folders = os.listdir(pars['input_dir']) + for i in range(len(folders)): + path1 = pars['input_dir'] + '/' + folders[i] + + t1=time.time() + + '''对原图进行目标检测和水域分割''' + pred, _img_cv, _mask_cv=AI_process(model,segmodel, pars,path1) + + t2 = time.time() + + '''进入后处理,判断水域内有落水人员''' + haha,zzzz=AI_postprocess(pred, _mask_cv,pars,_img_cv ) + t3 = time.time() + + print('总时间分布:前处理t2-t1,后处理t3-t2',(t2-t1)*1000,(t3-t2)*1000) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/DJI_20221108135632_0001_Z.jpg b/DJI_20221108135632_0001_Z.jpg new file mode 100644 index 0000000..385c36e Binary files /dev/null and b/DJI_20221108135632_0001_Z.jpg differ diff --git 
a/__pycache__/cityscapes.cpython-37.pyc b/__pycache__/cityscapes.cpython-37.pyc new file mode 100644 index 0000000..e5aa247 Binary files /dev/null and b/__pycache__/cityscapes.cpython-37.pyc differ diff --git a/__pycache__/cityscapes.cpython-38.pyc b/__pycache__/cityscapes.cpython-38.pyc new file mode 100644 index 0000000..4e2cf16 Binary files /dev/null and b/__pycache__/cityscapes.cpython-38.pyc differ diff --git a/__pycache__/evaluation.cpython-37.pyc b/__pycache__/evaluation.cpython-37.pyc new file mode 100644 index 0000000..e84fa80 Binary files /dev/null and b/__pycache__/evaluation.cpython-37.pyc differ diff --git a/__pycache__/evaluation.cpython-38.pyc b/__pycache__/evaluation.cpython-38.pyc new file mode 100644 index 0000000..10595d3 Binary files /dev/null and b/__pycache__/evaluation.cpython-38.pyc differ diff --git a/__pycache__/evaluation_process.cpython-37.pyc b/__pycache__/evaluation_process.cpython-37.pyc new file mode 100644 index 0000000..3f48e19 Binary files /dev/null and b/__pycache__/evaluation_process.cpython-37.pyc differ diff --git a/__pycache__/evaluation_process.cpython-38.pyc b/__pycache__/evaluation_process.cpython-38.pyc new file mode 100644 index 0000000..b9fae40 Binary files /dev/null and b/__pycache__/evaluation_process.cpython-38.pyc differ diff --git a/__pycache__/heliushuju.cpython-37.pyc b/__pycache__/heliushuju.cpython-37.pyc new file mode 100644 index 0000000..48361dd Binary files /dev/null and b/__pycache__/heliushuju.cpython-37.pyc differ diff --git a/__pycache__/heliushuju.cpython-38.pyc b/__pycache__/heliushuju.cpython-38.pyc new file mode 100644 index 0000000..06abb8d Binary files /dev/null and b/__pycache__/heliushuju.cpython-38.pyc differ diff --git a/__pycache__/heliushuju_process.cpython-37.pyc b/__pycache__/heliushuju_process.cpython-37.pyc new file mode 100644 index 0000000..7588f86 Binary files /dev/null and b/__pycache__/heliushuju_process.cpython-37.pyc differ diff --git 
a/__pycache__/heliushuju_process.cpython-38.pyc b/__pycache__/heliushuju_process.cpython-38.pyc new file mode 100644 index 0000000..bae8f0b Binary files /dev/null and b/__pycache__/heliushuju_process.cpython-38.pyc differ diff --git a/__pycache__/logger.cpython-37.pyc b/__pycache__/logger.cpython-37.pyc new file mode 100644 index 0000000..9b6c0ac Binary files /dev/null and b/__pycache__/logger.cpython-37.pyc differ diff --git a/__pycache__/logger.cpython-38.pyc b/__pycache__/logger.cpython-38.pyc new file mode 100644 index 0000000..9a20c7f Binary files /dev/null and b/__pycache__/logger.cpython-38.pyc differ diff --git a/__pycache__/logger.cpython-39.pyc b/__pycache__/logger.cpython-39.pyc new file mode 100644 index 0000000..afe197d Binary files /dev/null and b/__pycache__/logger.cpython-39.pyc differ diff --git a/__pycache__/optimizer_loss.cpython-37.pyc b/__pycache__/optimizer_loss.cpython-37.pyc new file mode 100644 index 0000000..4f71662 Binary files /dev/null and b/__pycache__/optimizer_loss.cpython-37.pyc differ diff --git a/__pycache__/optimizer_loss.cpython-38.pyc b/__pycache__/optimizer_loss.cpython-38.pyc new file mode 100644 index 0000000..5d5b25b Binary files /dev/null and b/__pycache__/optimizer_loss.cpython-38.pyc differ diff --git a/__pycache__/transform.cpython-37.pyc b/__pycache__/transform.cpython-37.pyc new file mode 100644 index 0000000..02fb02b Binary files /dev/null and b/__pycache__/transform.cpython-37.pyc differ diff --git a/__pycache__/transform.cpython-38.pyc b/__pycache__/transform.cpython-38.pyc new file mode 100644 index 0000000..42702de Binary files /dev/null and b/__pycache__/transform.cpython-38.pyc differ diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 0000000..453f410 --- /dev/null +++ b/core/__init__.py @@ -0,0 +1 @@ +from . 
import nn, models, utils, data \ No newline at end of file diff --git a/core/__pycache__/__init__.cpython-37.pyc b/core/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..2e74a57 Binary files /dev/null and b/core/__pycache__/__init__.cpython-37.pyc differ diff --git a/core/__pycache__/__init__.cpython-38.pyc b/core/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..cebd812 Binary files /dev/null and b/core/__pycache__/__init__.cpython-38.pyc differ diff --git a/core/__pycache__/__init__.cpython-39.pyc b/core/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..edc5127 Binary files /dev/null and b/core/__pycache__/__init__.cpython-39.pyc differ diff --git a/core/data/__init__.py b/core/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/data/__pycache__/__init__.cpython-37.pyc b/core/data/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..1833b52 Binary files /dev/null and b/core/data/__pycache__/__init__.cpython-37.pyc differ diff --git a/core/data/__pycache__/__init__.cpython-38.pyc b/core/data/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..fc56711 Binary files /dev/null and b/core/data/__pycache__/__init__.cpython-38.pyc differ diff --git a/core/data/dataloader/__init__.py b/core/data/dataloader/__init__.py new file mode 100644 index 0000000..b22f962 --- /dev/null +++ b/core/data/dataloader/__init__.py @@ -0,0 +1,23 @@ +""" +This module provides data loaders and transformers for popular vision datasets. 
+""" +from .mscoco import COCOSegmentation +from .cityscapes import CitySegmentation +from .ade import ADE20KSegmentation +from .pascal_voc import VOCSegmentation +from .pascal_aug import VOCAugSegmentation +from .sbu_shadow import SBUSegmentation + +datasets = { + 'ade20k': ADE20KSegmentation, + 'pascal_voc': VOCSegmentation, + 'pascal_aug': VOCAugSegmentation, + 'coco': COCOSegmentation, + 'citys': CitySegmentation, + 'sbu': SBUSegmentation, +} + + +def get_segmentation_dataset(name, **kwargs): + """Segmentation Datasets""" + return datasets[name.lower()](**kwargs) diff --git a/core/data/dataloader/ade.py b/core/data/dataloader/ade.py new file mode 100644 index 0000000..522ecbd --- /dev/null +++ b/core/data/dataloader/ade.py @@ -0,0 +1,172 @@ +"""Pascal ADE20K Semantic Segmentation Dataset.""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class ADE20KSegmentation(SegmentationDataset): + """ADE20K Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to ADE20K folder. 
Default is './datasets/ade' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = ADE20KSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'ADEChallengeData2016' + NUM_CLASS = 150 + + def __init__(self, root='../datasets/ade', split='test', mode=None, transform=None, **kwargs): + super(ADE20KSegmentation, self).__init__(root, split, mode, transform, **kwargs) + root = os.path.join(root, self.BASE_DIR) + assert os.path.exists(root), "Please setup the dataset using ../datasets/ade20k.py" + self.images, self.masks = _get_ade20k_pairs(root, split) + assert (len(self.images) == len(self.masks)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + print('Found {} images in the folder {}'.format(len(self.images), root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and to Tensor + if self.transform is not None: + img = self.transform(img) + 
return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + return torch.LongTensor(np.array(mask).astype('int32') - 1) + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 1 + + @property + def classes(self): + """Category names.""" + return ("wall", "building, edifice", "sky", "floor, flooring", "tree", + "ceiling", "road, route", "bed", "windowpane, window", "grass", + "cabinet", "sidewalk, pavement", + "person, individual, someone, somebody, mortal, soul", + "earth, ground", "door, double door", "table", "mountain, mount", + "plant, flora, plant life", "curtain, drape, drapery, mantle, pall", + "chair", "car, auto, automobile, machine, motorcar", + "water", "painting, picture", "sofa, couch, lounge", "shelf", + "house", "sea", "mirror", "rug, carpet, carpeting", "field", "armchair", + "seat", "fence, fencing", "desk", "rock, stone", "wardrobe, closet, press", + "lamp", "bathtub, bathing tub, bath, tub", "railing, rail", "cushion", + "base, pedestal, stand", "box", "column, pillar", "signboard, sign", + "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", + "skyscraper", "fireplace, hearth, open fireplace", "refrigerator, icebox", + "grandstand, covered stand", "path", "stairs, steps", "runway", + "case, display case, showcase, vitrine", + "pool table, billiard table, snooker table", "pillow", + "screen door, screen", "stairway, staircase", "river", "bridge, span", + "bookcase", "blind, screen", "coffee table, cocktail table", + "toilet, can, commode, crapper, pot, potty, stool, throne", + "flower", "book", "hill", "bench", "countertop", + "stove, kitchen stove, range, kitchen range, cooking stove", + "palm, palm tree", "kitchen island", + "computer, computing machine, computing device, data processor, " + "electronic computer, information processing system", + "swivel chair", "boat", "bar", "arcade machine", + "hovel, hut, hutch, shack, shanty", + "bus, autobus, 
coach, charabanc, double-decker, jitney, motorbus, " + "motorcoach, omnibus, passenger vehicle", + "towel", "light, light source", "truck, motortruck", "tower", + "chandelier, pendant, pendent", "awning, sunshade, sunblind", + "streetlight, street lamp", "booth, cubicle, stall, kiosk", + "television receiver, television, television set, tv, tv set, idiot " + "box, boob tube, telly, goggle box", + "airplane, aeroplane, plane", "dirt track", + "apparel, wearing apparel, dress, clothes", + "pole", "land, ground, soil", + "bannister, banister, balustrade, balusters, handrail", + "escalator, moving staircase, moving stairway", + "ottoman, pouf, pouffe, puff, hassock", + "bottle", "buffet, counter, sideboard", + "poster, posting, placard, notice, bill, card", + "stage", "van", "ship", "fountain", + "conveyer belt, conveyor belt, conveyer, conveyor, transporter", + "canopy", "washer, automatic washer, washing machine", + "plaything, toy", "swimming pool, swimming bath, natatorium", + "stool", "barrel, cask", "basket, handbasket", "waterfall, falls", + "tent, collapsible shelter", "bag", "minibike, motorbike", "cradle", + "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", + "trade name, brand name, brand, marque", "microwave, microwave oven", + "pot, flowerpot", "animal, animate being, beast, brute, creature, fauna", + "bicycle, bike, wheel, cycle", "lake", + "dishwasher, dish washer, dishwashing machine", + "screen, silver screen, projection screen", + "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", + "traffic light, traffic signal, stoplight", "tray", + "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, " + "dustbin, trash barrel, trash bin", + "fan", "pier, wharf, wharfage, dock", "crt screen", + "plate", "monitor, monitoring device", "bulletin board, notice board", + "shower", "radiator", "glass, drinking glass", "clock", "flag") + + +def _get_ade20k_pairs(folder, mode='train'): + img_paths = [] + 
mask_paths = [] + if mode == 'train': + img_folder = os.path.join(folder, 'images/training') + mask_folder = os.path.join(folder, 'annotations/training') + else: + img_folder = os.path.join(folder, 'images/validation') + mask_folder = os.path.join(folder, 'annotations/validation') + for filename in os.listdir(img_folder): + basename, _ = os.path.splitext(filename) + if filename.endswith(".jpg"): + imgpath = os.path.join(img_folder, filename) + maskname = basename + '.png' + maskpath = os.path.join(mask_folder, maskname) + if os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask:', maskpath) + + return img_paths, mask_paths + + +if __name__ == '__main__': + train_dataset = ADE20KSegmentation() diff --git a/core/data/dataloader/cityscapes.py b/core/data/dataloader/cityscapes.py new file mode 100644 index 0000000..7d5de71 --- /dev/null +++ b/core/data/dataloader/cityscapes.py @@ -0,0 +1,137 @@ +"""Prepare Cityscapes dataset""" +import os +import torch +import numpy as np + +from PIL import Image +from .segbase import SegmentationDataset + + +class CitySegmentation(SegmentationDataset): + """Cityscapes Semantic Segmentation Dataset. + + Parameters + ---------- + root : string + Path to Cityscapes folder. 
Default is './datasets/citys' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)), + >>> ]) + >>> # Create Dataset + >>> trainset = CitySegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'cityscapes' + NUM_CLASS = 19 + + def __init__(self, root='../datasets/citys', split='train', mode=None, transform=None, **kwargs): + super(CitySegmentation, self).__init__(root, split, mode, transform, **kwargs) + # self.root = os.path.join(root, self.BASE_DIR) + assert os.path.exists(self.root), "Please setup the dataset using ../datasets/cityscapes.py" + self.images, self.mask_paths = _get_city_pairs(self.root, self.split) + assert (len(self.images) == len(self.mask_paths)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 31, 32, 33] + self._key = np.array([-1, -1, -1, -1, -1, -1, + -1, -1, 0, 1, -1, -1, + 2, 3, 4, -1, -1, -1, + 5, -1, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, + -1, -1, 16, 17, 18]) + self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32') + + def _class_to_index(self, mask): + # assert the value + values = np.unique(mask) + for value in values: + assert (value in self._mapping) + index = np.digitize(mask.ravel(), self._mapping, right=True) + return self._key[index].reshape(mask.shape) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + if self.transform is not None: + img = 
self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.mask_paths[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + target = self._class_to_index(np.array(mask).astype('int32')) + return torch.LongTensor(np.array(target).astype('int32')) + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 0 + + +def _get_city_pairs(folder, split='train'): + def get_path_pairs(img_folder, mask_folder): + img_paths = [] + mask_paths = [] + for root, _, files in os.walk(img_folder): + for filename in files: + if filename.endswith('.png'): + imgpath = os.path.join(root, filename) + foldername = os.path.basename(os.path.dirname(imgpath)) + maskname = filename.replace('leftImg8bit', 'gtFine_labelIds') + maskpath = os.path.join(mask_folder, foldername, maskname) + if os.path.isfile(imgpath) and os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask or image:', imgpath, maskpath) + print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) + return img_paths, mask_paths + + if split in ('train', 'val'): + img_folder = os.path.join(folder, 'leftImg8bit/' + split) + mask_folder = os.path.join(folder, 'gtFine/' + split) + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + return img_paths, mask_paths + else: + assert split == 'trainval' + print('trainval set') + train_img_folder = os.path.join(folder, 'leftImg8bit/train') + train_mask_folder = os.path.join(folder, 
'gtFine/train') + val_img_folder = os.path.join(folder, 'leftImg8bit/val') + val_mask_folder = os.path.join(folder, 'gtFine/val') + train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder) + val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder) + img_paths = train_img_paths + val_img_paths + mask_paths = train_mask_paths + val_mask_paths + return img_paths, mask_paths + + +if __name__ == '__main__': + dataset = CitySegmentation() diff --git a/core/data/dataloader/lip_parsing.py b/core/data/dataloader/lip_parsing.py new file mode 100644 index 0000000..245beda --- /dev/null +++ b/core/data/dataloader/lip_parsing.py @@ -0,0 +1,90 @@ +"""Look into Person Dataset""" +import os +import torch +import numpy as np + +from PIL import Image +from core.data.dataloader.segbase import SegmentationDataset + + +class LIPSegmentation(SegmentationDataset): + """Look into person parsing dataset """ + + BASE_DIR = 'LIP' + NUM_CLASS = 20 + + def __init__(self, root='../datasets/LIP', split='train', mode=None, transform=None, **kwargs): + super(LIPSegmentation, self).__init__(root, split, mode, transform, **kwargs) + _trainval_image_dir = os.path.join(root, 'TrainVal_images') + _testing_image_dir = os.path.join(root, 'Testing_images') + _trainval_mask_dir = os.path.join(root, 'TrainVal_parsing_annotations') + if split == 'train': + _image_dir = os.path.join(_trainval_image_dir, 'train_images') + _mask_dir = os.path.join(_trainval_mask_dir, 'train_segmentations') + _split_f = os.path.join(_trainval_image_dir, 'train_id.txt') + elif split == 'val': + _image_dir = os.path.join(_trainval_image_dir, 'val_images') + _mask_dir = os.path.join(_trainval_mask_dir, 'val_segmentations') + _split_f = os.path.join(_trainval_image_dir, 'val_id.txt') + elif split == 'test': + _image_dir = os.path.join(_testing_image_dir, 'testing_images') + _split_f = os.path.join(_testing_image_dir, 'test_id.txt') + else: + raise RuntimeError('Unknown dataset 
split.') + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), 'r') as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + '.jpg') + assert os.path.isfile(_image) + self.images.append(_image) + if split != 'test': + _mask = os.path.join(_mask_dir, line.rstrip('\n') + '.png') + assert os.path.isfile(_mask) + self.masks.append(_mask) + + if split != 'test': + assert (len(self.images) == len(self.masks)) + print('Found {} {} images in the folder {}'.format(len(self.images), split, root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + img = self._img_transform(img) + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchronized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + + return img, mask, os.path.basename(self.images[index]) + + def __len__(self): + return len(self.images) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + return torch.from_numpy(target).long() + + @property + def classes(self): + """Category name.""" + return ('background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes', + 'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt', + 'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe', + 'rightShoe') + + +if __name__ == '__main__': + dataset = LIPSegmentation(base_size=280, crop_size=256) \ No newline at end of file diff --git a/core/data/dataloader/mscoco.py b/core/data/dataloader/mscoco.py new file mode 100644 index 0000000..6e280c8 --- /dev/null 
class COCOSegmentation(SegmentationDataset):
    """COCO Semantic Segmentation Dataset for VOC Pre-training.

    Parameters
    ----------
    root : string
        Path to ADE20K folder. Default is './datasets/coco'
    split: string
        'train', 'val' or 'test'
    transform : callable, optional
        A function that transforms the image
    Examples
    --------
    >>> from torchvision import transforms
    >>> import torch.utils.data as data
    >>> # Transforms for Normalization
    >>> input_transform = transforms.Compose([
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
    >>> ])
    >>> # Create Dataset
    >>> trainset = COCOSegmentation(split='train', transform=input_transform)
    >>> # Create Training Loader
    >>> train_data = data.DataLoader(
    >>>     trainset, 4, shuffle=True,
    >>>     num_workers=4)
    """
    # COCO category ids remapped to the 21 VOC classes (index = VOC label).
    CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
                1, 64, 20, 63, 7, 72]
    NUM_CLASS = 21

    def __init__(self, root='../datasets/coco', split='train', mode=None, transform=None, **kwargs):
        super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs)
        # lazy import pycocotools
        from pycocotools.coco import COCO
        from pycocotools import mask
        if split == 'train':
            print('train set')
            ann_file = os.path.join(root, 'annotations/instances_train2017.json')
            ids_file = os.path.join(root, 'annotations/train_ids.mx')
            self.root = os.path.join(root, 'train2017')
        else:
            print('val set')
            ann_file = os.path.join(root, 'annotations/instances_val2017.json')
            ids_file = os.path.join(root, 'annotations/val_ids.mx')
            self.root = os.path.join(root, 'val2017')
        self.coco = COCO(ann_file)
        self.coco_mask = mask
        # Image-id lists are expensive to filter, so cache them on disk.
        if os.path.exists(ids_file):
            with open(ids_file, 'rb') as f:
                self.ids = pickle.load(f)
        else:
            ids = list(self.coco.imgs.keys())
            self.ids = self._preprocess(ids, ids_file)
        self.transform = transform

    def __getitem__(self, index):
        """Return (img, mask, filename) for sample *index*.

        The third element is the basename of the image file.  The original
        code called ``os.path.basename`` on the integer image id, which
        raises TypeError; we use the actual file name, matching the other
        dataset classes in this package.
        """
        coco = self.coco
        img_id = self.ids[index]
        img_metadata = coco.loadImgs(img_id)[0]
        path = img_metadata['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')
        cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
        mask = Image.fromarray(self._gen_seg_mask(
            cocotarget, img_metadata['height'], img_metadata['width']))
        # synchrosized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        return img, mask, os.path.basename(path)

    def _mask_transform(self, mask):
        """Convert a PIL mask to a LongTensor of class indices."""
        return torch.LongTensor(np.array(mask).astype('int32'))

    def _gen_seg_mask(self, target, h, w):
        """Rasterize COCO instance annotations into a (h, w) class-id mask.

        Later instances never overwrite earlier ones (the ``mask == 0``
        guard); categories outside CAT_LIST are skipped.
        """
        mask = np.zeros((h, w), dtype=np.uint8)
        coco_mask = self.coco_mask
        for instance in target:
            # COCO annotations store polygons/RLE under the lowercase
            # 'segmentation' key; the original 'Segmentation' raised KeyError.
            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
            m = coco_mask.decode(rle)
            cat = instance['category_id']
            if cat in self.CAT_LIST:
                c = self.CAT_LIST.index(cat)
            else:
                continue
            if len(m.shape) < 3:
                mask[:, :] += (mask == 0) * (m * c)
            else:
                mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
        return mask

    def _preprocess(self, ids, ids_file):
        """Filter out images with <1k labeled pixels; cache ids to *ids_file*."""
        print("Preprocessing mask, this will take a while." + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'], img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                                 format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        with open(ids_file, 'wb') as f:
            pickle.dump(new_ids, f)
        return new_ids

    @property
    def classes(self):
        """Category names."""
        return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
                'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
                'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
                'tv')
Default is './datasets/voc' + split: string + 'train', 'val' or 'test' + transform : callable, optional + A function that transforms the image + Examples + -------- + >>> from torchvision import transforms + >>> import torch.utils.data as data + >>> # Transforms for Normalization + >>> input_transform = transforms.Compose([ + >>> transforms.ToTensor(), + >>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]), + >>> ]) + >>> # Create Dataset + >>> trainset = VOCAugSegmentation(split='train', transform=input_transform) + >>> # Create Training Loader + >>> train_data = data.DataLoader( + >>> trainset, 4, shuffle=True, + >>> num_workers=4) + """ + BASE_DIR = 'VOCaug/dataset/' + NUM_CLASS = 21 + + def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs): + super(VOCAugSegmentation, self).__init__(root, split, mode, transform, **kwargs) + # train/val/test splits are pre-cut + _voc_root = os.path.join(root, self.BASE_DIR) + _mask_dir = os.path.join(_voc_root, 'cls') + _image_dir = os.path.join(_voc_root, 'img') + if split == 'train': + _split_f = os.path.join(_voc_root, 'trainval.txt') + elif split == 'val': + _split_f = os.path.join(_voc_root, 'val.txt') + else: + raise RuntimeError('Unknown dataset split: {}'.format(split)) + + self.images = [] + self.masks = [] + with open(os.path.join(_split_f), "r") as lines: + for line in lines: + _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg") + assert os.path.isfile(_image) + self.images.append(_image) + _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".mat") + assert os.path.isfile(_mask) + self.masks.append(_mask) + + assert (len(self.images) == len(self.masks)) + print('Found {} images in the folder {}'.format(len(self.images), _voc_root)) + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + target = self._load_mat(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, target = 
class VOCSegmentation(SegmentationDataset):
    """Pascal VOC Semantic Segmentation Dataset.

    Parameters
    ----------
    root : string
        Path to VOCdevkit folder. Default is './datasets/VOCdevkit'
    split: string
        'train', 'val' or 'test'
    transform : callable, optional
        A function that transforms the image
    Examples
    --------
    >>> from torchvision import transforms
    >>> import torch.utils.data as data
    >>> # Transforms for Normalization
    >>> input_transform = transforms.Compose([
    >>>     transforms.ToTensor(),
    >>>     transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
    >>> ])
    >>> # Create Dataset
    >>> trainset = VOCSegmentation(split='train', transform=input_transform)
    >>> # Create Training Loader
    >>> train_data = data.DataLoader(
    >>>     trainset, 4, shuffle=True,
    >>>     num_workers=4)
    """
    BASE_DIR = 'VOC2012'
    NUM_CLASS = 21

    def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs):
        # Resolve the VOC directory layout and read the split file; every
        # listed image (and, except for 'test', its mask) must exist.
        super(VOCSegmentation, self).__init__(root, split, mode, transform, **kwargs)
        _voc_root = os.path.join(root, self.BASE_DIR)
        _mask_dir = os.path.join(_voc_root, 'SegmentationClass')
        _image_dir = os.path.join(_voc_root, 'JPEGImages')
        # train/val/test splits are pre-cut
        _splits_dir = os.path.join(_voc_root, 'ImageSets/Segmentation')
        if split == 'train':
            _split_f = os.path.join(_splits_dir, 'train.txt')
        elif split == 'val':
            _split_f = os.path.join(_splits_dir, 'val.txt')
        elif split == 'test':
            _split_f = os.path.join(_splits_dir, 'test.txt')
        else:
            raise RuntimeError('Unknown dataset split.')

        self.images = []
        self.masks = []
        with open(os.path.join(_split_f), "r") as lines:
            for line in lines:
                _image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg")
                assert os.path.isfile(_image)
                self.images.append(_image)
                if split != 'test':
                    _mask = os.path.join(_mask_dir, line.rstrip('\n') + ".png")
                    assert os.path.isfile(_mask)
                    self.masks.append(_mask)

        if split != 'test':
            assert (len(self.images) == len(self.masks))
        print('Found {} images in the folder {}'.format(len(self.images), _voc_root))

    def __getitem__(self, index):
        # In 'test' mode there is no mask: return (img, filename) only.
        # Otherwise return (img, mask, filename) after the mode-specific
        # synchronized transform (random aug for 'train', center crop for
        # 'val', plain array conversion for 'testval').
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            img = self._img_transform(img)
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = Image.open(self.masks[index])
        # synchronized transform
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)

        return img, mask, os.path.basename(self.images[index])

    def __len__(self):
        """Number of images in the selected split."""
        return len(self.images)

    def _mask_transform(self, mask):
        # VOC uses 255 for 'void'/boundary pixels; remap to -1 so losses
        # configured with ignore_index=-1 skip them.
        target = np.array(mask).astype('int32')
        target[target == 255] = -1
        return torch.from_numpy(target).long()

    @property
    def classes(self):
        """Category names."""
        return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
                'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
                'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
                'tv')


if __name__ == '__main__':
    dataset = VOCSegmentation()
self.split) + assert (len(self.images) == len(self.masks)) + if len(self.images) == 0: + raise RuntimeError("Found 0 images in subfolders of:" + root + "\n") + + def __getitem__(self, index): + img = Image.open(self.images[index]).convert('RGB') + if self.mode == 'test': + if self.transform is not None: + img = self.transform(img) + return img, os.path.basename(self.images[index]) + mask = Image.open(self.masks[index]) + # synchrosized transform + if self.mode == 'train': + img, mask = self._sync_transform(img, mask) + elif self.mode == 'val': + img, mask = self._val_sync_transform(img, mask) + else: + assert self.mode == 'testval' + img, mask = self._img_transform(img), self._mask_transform(mask) + # general resize, normalize and toTensor + if self.transform is not None: + img = self.transform(img) + return img, mask, os.path.basename(self.images[index]) + + def _mask_transform(self, mask): + target = np.array(mask).astype('int32') + target[target > 0] = 1 + return torch.from_numpy(target).long() + + def __len__(self): + return len(self.images) + + @property + def pred_offset(self): + return 0 + + +def _get_sbu_pairs(folder, split='train'): + def get_path_pairs(img_folder, mask_folder): + img_paths = [] + mask_paths = [] + for root, _, files in os.walk(img_folder): + print(root) + for filename in files: + if filename.endswith('.jpg'): + imgpath = os.path.join(root, filename) + maskname = filename.replace('.jpg', '.png') + maskpath = os.path.join(mask_folder, maskname) + if os.path.isfile(imgpath) and os.path.isfile(maskpath): + img_paths.append(imgpath) + mask_paths.append(maskpath) + else: + print('cannot find the mask or image:', imgpath, maskpath) + print('Found {} images in the folder {}'.format(len(img_paths), img_folder)) + return img_paths, mask_paths + + if split == 'train': + img_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowImages') + mask_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowMasks') + img_paths, mask_paths = 
get_path_pairs(img_folder, mask_folder) + else: + assert split in ('val', 'test') + img_folder = os.path.join(folder, 'SBU-Test/ShadowImages') + mask_folder = os.path.join(folder, 'SBU-Test/ShadowMasks') + img_paths, mask_paths = get_path_pairs(img_folder, mask_folder) + return img_paths, mask_paths + + +if __name__ == '__main__': + dataset = SBUSegmentation(base_size=280, crop_size=256) \ No newline at end of file diff --git a/core/data/dataloader/segbase.py b/core/data/dataloader/segbase.py new file mode 100644 index 0000000..823436d --- /dev/null +++ b/core/data/dataloader/segbase.py @@ -0,0 +1,93 @@ +"""Base segmentation dataset""" +import random +import numpy as np + +from PIL import Image, ImageOps, ImageFilter + +__all__ = ['SegmentationDataset'] + + +class SegmentationDataset(object): + """Segmentation Base Dataset""" + + def __init__(self, root, split, mode, transform, base_size=520, crop_size=480): + super(SegmentationDataset, self).__init__() + self.root = root + self.transform = transform + self.split = split + self.mode = mode if mode is not None else split + self.base_size = base_size + self.crop_size = crop_size + + def _val_sync_transform(self, img, mask): + outsize = self.crop_size + short_size = outsize + w, h = img.size + if w > h: + oh = short_size + ow = int(1.0 * w * oh / h) + else: + ow = short_size + oh = int(1.0 * h * ow / w) + img = img.resize((ow, oh), Image.BILINEAR) + mask = mask.resize((ow, oh), Image.NEAREST) + # center crop + w, h = img.size + x1 = int(round((w - outsize) / 2.)) + y1 = int(round((h - outsize) / 2.)) + img = img.crop((x1, y1, x1 + outsize, y1 + outsize)) + mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize)) + # final transform + img, mask = self._img_transform(img), self._mask_transform(mask) + return img, mask + + def _sync_transform(self, img, mask): + # random mirror + if random.random() < 0.5: + img = img.transpose(Image.FLIP_LEFT_RIGHT) + mask = mask.transpose(Image.FLIP_LEFT_RIGHT) + crop_size = 
def gen_bar_updater():
    """Create a ``reporthook`` for ``urllib.request.urlretrieve``.

    The returned callable(count, block_size, total_size) drives a tqdm
    progress bar, learning the total size lazily from the first callback
    that reports one.
    """
    progress = tqdm(total=None)

    def _report(count, block_size, total_size):
        if total_size and progress.total is None:
            progress.total = total_size
        downloaded = count * block_size
        progress.update(downloaded - progress.n)

    return _report
def makedir_exist_ok(dirpath):
    """Create *dirpath* (including parents), tolerating its prior existence.

    Equivalent to ``os.makedirs(dirpath, exist_ok=True)``.  The original
    implementation passed on EVERY OSError, silently hiding real failures
    such as permission errors; only EEXIST is benign, so anything else is
    re-raised.
    """
    try:
        os.makedirs(dirpath)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
def download_extract(url, root, filename, md5):
    """Download a tar archive from *url* into *root* and extract it there.

    Arguments are forwarded to ``download_url``; *md5* is the expected
    checksum (or None to skip verification).

    NOTE(review): ``tar.extractall`` trusts archive member paths; for
    untrusted URLs consider Python 3.12's ``filter='data'`` argument to
    guard against path-traversal entries — confirm the project's minimum
    Python version before adding it.
    """
    download_url(url, root, filename, md5)
    with tarfile.open(os.path.join(root, filename), "r") as tar:
        tar.extractall(path=root)
def download_city(path, overwrite=False):
    """Verify and extract the (manually obtained) Cityscapes archives.

    Cityscapes requires a registered login, so unlike the sibling dataset
    scripts this function does not download anything: it expects
    ``gtFine_trainvaltest.zip`` and ``leftImg8bit_trainvaltest.zip`` to be
    reachable by bare filename (i.e. relative to the current working
    directory), checks their SHA1 hashes, and extracts them into *path*.

    NOTE(review): *overwrite* and the created ``downloads`` sub-directory
    are currently unused, and the hash check looks at the bare filename
    rather than a path under ``download_dir`` — confirm this matches the
    documented placement of the archives.
    """
    _CITY_DOWNLOAD_URLS = [
        ('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'),
        ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for filename, checksum in _CITY_DOWNLOAD_URLS:
        if not check_sha1(filename, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(filename))
        # extract
        with zipfile.ZipFile(filename, "r") as zip_ref:
            zip_ref.extractall(path=path)
        print("Extracted", filename)
if __name__ == '__main__':
    args = parse_args()
    path = os.path.expanduser(args.download_dir)
    # Download only when the expected COCO directory layout is missing.
    if not os.path.isdir(path) or not os.path.isdir(os.path.join(path, 'train2017')) \
            or not os.path.isdir(os.path.join(path, 'val2017')) \
            or not os.path.isdir(os.path.join(path, 'annotations')):
        if args.no_download:
            raise ValueError(('{} is not a valid directory, make sure it is present.'
                              ' Or you should not disable "--no-download" to grab it'.format(path)))
        else:
            download_coco(path, overwrite=args.overwrite)

    # make symlink
    # NOTE(review): os.remove only succeeds here if _TARGET_DIR is a symlink
    # (os.path.isdir follows symlinks); a real directory would need rmtree —
    # confirm _TARGET_DIR is never created as a plain directory.
    makedirs(os.path.expanduser('~/.torch/datasets'))
    if os.path.isdir(_TARGET_DIR):
        os.remove(_TARGET_DIR)
    os.symlink(path, _TARGET_DIR)
    try_import_pycocotools()
def download_aug(path, overwrite=False):
    """Download and extract the SBD ('VOCaug') augmented segmentation set.

    After extraction the ``benchmark_RELEASE`` directory is renamed to
    ``VOCaug``, then a combined ``trainval.txt`` split list is generated by
    concatenating ``train.txt`` and ``val.txt``.
    """
    _AUG_DOWNLOAD_URLS = [
        ('http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz',
         '7129e0a480c2d6afb02b517bb18ac54283bfaa35')]
    makedirs(path)
    for url, checksum in _AUG_DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # extract
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
        shutil.move(os.path.join(path, 'benchmark_RELEASE'),
                    os.path.join(path, 'VOCaug'))
    filenames = ['VOCaug/dataset/train.txt', 'VOCaug/dataset/val.txt']
    # generate trainval.txt
    with open(os.path.join(path, 'VOCaug/dataset/trainval.txt'), 'w') as outfile:
        for fname in filenames:
            fname = os.path.join(path, fname)
            with open(fname) as infile:
                for line in infile:
                    outfile.write(line)
+ ' Or you should not disable "--no-download" to grab it'.format(path))) + else: + download_voc(path, overwrite=args.overwrite) + shutil.move(os.path.join(path, 'VOCdevkit', 'VOC2007'), os.path.join(path, 'VOC2007')) + shutil.move(os.path.join(path, 'VOCdevkit', 'VOC2012'), os.path.join(path, 'VOC2012')) + shutil.rmtree(os.path.join(path, 'VOCdevkit')) + + if not os.path.isdir(os.path.join(path, 'VOCaug')): + if args.no_download: + raise ValueError(('{} is not a valid directory, make sure it is present.' + ' Or you should not disable "--no-download" to grab it'.format(path))) + else: + download_aug(path, overwrite=args.overwrite) + + # make symlink + makedirs(os.path.expanduser('~/.torch/datasets')) + if os.path.isdir(_TARGET_DIR): + os.remove(_TARGET_DIR) + os.symlink(path, _TARGET_DIR) diff --git a/core/data/downloader/sbu_shadow.py b/core/data/downloader/sbu_shadow.py new file mode 100644 index 0000000..cdcbdde --- /dev/null +++ b/core/data/downloader/sbu_shadow.py @@ -0,0 +1,56 @@ +"""Prepare SBU Shadow datasets""" +import os +import sys +import argparse +import zipfile + +# TODO: optim code +cur_path = os.path.abspath(os.path.dirname(__file__)) +root_path = os.path.split(os.path.split(os.path.split(cur_path)[0])[0])[0] +sys.path.append(root_path) + +from core.utils import download, makedirs + +_TARGET_DIR = os.path.expanduser('~/.torch/datasets/sbu') + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Initialize SBU Shadow dataset.', + epilog='Example: python sbu_shadow.py --download-dir ~/SBU-shadow', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('--download-dir', type=str, default=None, help='dataset directory on disk') + parser.add_argument('--no-download', action='store_true', help='disable automatic download if set') + parser.add_argument('--overwrite', action='store_true', + help='overwrite downloaded files if set, in case they are corrupted') + args = parser.parse_args() + return args + + 
#####################################################################################
# Download and extract SBU shadow datasets into ``path``

def download_sbu(path, overwrite=False):
    """Download the SBU shadow dataset zip and extract it into *path*.

    NOTE(review): a ``downloads`` sub-directory is created but the archive
    is actually saved to *path* itself (``download(url, path=path)``) —
    confirm which location is intended before relying on either.
    """
    _DOWNLOAD_URLS = [
        ('http://www3.cs.stonybrook.edu/~cvl/content/datasets/shadow_db/SBU-shadow.zip'),
    ]
    download_dir = os.path.join(path, 'downloads')
    makedirs(download_dir)
    for url in _DOWNLOAD_URLS:
        filename = download(url, path=path, overwrite=overwrite)
        # extract
        with zipfile.ZipFile(filename, "r") as zf:
            zf.extractall(path=path)
        print("Extracted", filename)
import src + + +class PSAMask(Function): + @staticmethod + def forward(ctx, input, psa_type=0, mask_H_=None, mask_W_=None): + assert psa_type in [0, 1] # 0-col, 1-dis + assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) + num_, channels_, feature_H_, feature_W_ = input.size() + if mask_H_ is None and mask_W_ is None: + mask_H_, mask_W_ = 2 * feature_H_ - 1, 2 * feature_W_ - 1 + assert (mask_H_ % 2 == 1) and (mask_W_ % 2 == 1) + assert channels_ == mask_H_ * mask_W_ + half_mask_H_, half_mask_W_ = (mask_H_ - 1) // 2, (mask_W_ - 1) // 2 + output = torch.zeros([num_, feature_H_ * feature_W_, feature_H_, feature_W_], dtype=input.dtype, device=input.device) + if not input.is_cuda: + src.cpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + else: + output = output.cuda() + src.gpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ = psa_type, num_, channels_, feature_H_, feature_W_ + ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ = mask_H_, mask_W_, half_mask_H_, half_mask_W_ + return output + + @staticmethod + def backward(ctx, grad_output): + psa_type, num_, channels_, feature_H_, feature_W_ = ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ + mask_H_, mask_W_, half_mask_H_, half_mask_W_ = ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ + grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device) + if not grad_output.is_cuda: + src.cpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + else: + src.gpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) + return 
grad_input, None, None, None + + +psa_mask = PSAMask.apply diff --git a/core/lib/psa/modules/__init__.py b/core/lib/psa/modules/__init__.py new file mode 100644 index 0000000..1b4726b --- /dev/null +++ b/core/lib/psa/modules/__init__.py @@ -0,0 +1 @@ +from .psamask import * diff --git a/core/lib/psa/modules/psamask.py b/core/lib/psa/modules/psamask.py new file mode 100644 index 0000000..58ea4d9 --- /dev/null +++ b/core/lib/psa/modules/psamask.py @@ -0,0 +1,15 @@ +from torch import nn +from .. import functional as F + + +class PSAMask(nn.Module): + def __init__(self, psa_type=0, mask_H_=None, mask_W_=None): + super(PSAMask, self).__init__() + assert psa_type in [0, 1] # 0-col, 1-dis + assert (mask_H_ in None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) + self.psa_type = psa_type + self.mask_H_ = mask_H_ + self.mask_W_ = mask_W_ + + def forward(self, input): + return F.psa_mask(input, self.psa_type, self.mask_H_, self.mask_W_) diff --git a/core/lib/psa/src/__init__.py b/core/lib/psa/src/__init__.py new file mode 100644 index 0000000..ead1cfe --- /dev/null +++ b/core/lib/psa/src/__init__.py @@ -0,0 +1,18 @@ +import os +import torch +from torch.utils.cpp_extension import load + +cwd = os.path.dirname(os.path.realpath(__file__)) +cpu_path = os.path.join(cwd, 'cpu') +gpu_path = os.path.join(cwd, 'gpu') +print(cpu_path,gpu_path) +cpu = load('psamask_cpu', [ + os.path.join(cpu_path, 'operator.cpp'), + os.path.join(cpu_path, 'psamask.cpp'), +], build_directory=cpu_path, verbose=False) + +if torch.cuda.is_available(): + gpu = load('psamask_gpu', [ + os.path.join(gpu_path, 'operator.cpp'), + os.path.join(gpu_path, 'psamask_cuda.cu'), + ], build_directory=gpu_path, verbose=False) \ No newline at end of file diff --git a/core/lib/psa/src/cpu/operator.cpp b/core/lib/psa/src/cpu/operator.cpp new file mode 100644 index 0000000..e7b9f6c --- /dev/null +++ b/core/lib/psa/src/cpu/operator.cpp @@ -0,0 +1,6 @@ +#include "operator.h" + 
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("psamask_forward", &psamask_forward_cpu, "PSAMASK forward (CPU)"); + m.def("psamask_backward", &psamask_backward_cpu, "PSAMASK backward (CPU)"); +} diff --git a/core/lib/psa/src/cpu/operator.h b/core/lib/psa/src/cpu/operator.h new file mode 100644 index 0000000..abc43cb --- /dev/null +++ b/core/lib/psa/src/cpu/operator.h @@ -0,0 +1,4 @@ +#include + +void psamask_forward_cpu(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); +void psamask_backward_cpu(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); \ No newline at end of file diff --git a/core/lib/psa/src/cpu/psamask.cpp b/core/lib/psa/src/cpu/psamask.cpp new file mode 100644 index 0000000..eb33694 --- /dev/null +++ b/core/lib/psa/src/cpu/psamask.cpp @@ -0,0 +1,133 @@ +#include + +#ifndef min +#define min(a,b) (((a) < (b)) ? (a) : (b)) +#endif + +#ifndef max +#define max(a,b) (((a) > (b)) ? 
(a) : (b)) +#endif + +void psamask_collect_forward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_distribute_forward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = 
wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_collect_backward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w]; + } + } + } + } + } +} + +void psamask_distribute_backward(const int num_, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + for(int n = 0; n < num_; n++) { + for(int h = 0; h < feature_H_; h++) { + for(int w = 0; w < feature_W_; w++) { + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); 
+ const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)]; + } + } + } + } + } +} + +void psamask_forward_cpu(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + const float* input_data = input.data(); + float* output_data = output.data(); + if(psa_type == 0) + psamask_collect_forward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); + else + psamask_distribute_forward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); +} + +void psamask_backward_cpu(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + const float* grad_output_data = grad_output.data(); + float* grad_input_data = grad_input.data(); + if(psa_type == 0) + psamask_collect_backward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); + else + psamask_distribute_backward(num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); +} diff --git a/core/lib/psa/src/gpu/operator.cpp b/core/lib/psa/src/gpu/operator.cpp new file mode 100644 index 
0000000..5a52f4a --- /dev/null +++ b/core/lib/psa/src/gpu/operator.cpp @@ -0,0 +1,6 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("psamask_forward", &psamask_forward_cuda, "PSAMASK forward (GPU)"); + m.def("psamask_backward", &psamask_backward_cuda, "PSAMASK backward (GPU)"); +} diff --git a/core/lib/psa/src/gpu/operator.h b/core/lib/psa/src/gpu/operator.h new file mode 100644 index 0000000..235a9e1 --- /dev/null +++ b/core/lib/psa/src/gpu/operator.h @@ -0,0 +1,4 @@ +#include + +void psamask_forward_cuda(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); +void psamask_backward_cuda(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_); diff --git a/core/lib/psa/src/gpu/psamask_cuda.cu b/core/lib/psa/src/gpu/psamask_cuda.cu new file mode 100644 index 0000000..f3fcb93 --- /dev/null +++ b/core/lib/psa/src/gpu/psamask_cuda.cu @@ -0,0 +1,128 @@ +#include + +// CUDA: grid stride looping +#ifndef CUDA_KERNEL_LOOP +#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) +#endif + +__global__ void psamask_collect_forward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, 
feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } +} + +__global__ void psamask_distribute_forward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* mask_data, float* buffer_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + buffer_data[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)] = + mask_data[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w]; + } + } + } +} + +__global__ void psamask_collect_backward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int 
mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)) * feature_H_ * feature_W_ + h * feature_W_ + w]; + } + } + } +} + +__global__ void psamask_distribute_backward_cuda(const int nthreads, + const int feature_H_, const int feature_W_, + const int mask_H_, const int mask_W_, + const int half_mask_H_, const int half_mask_W_, + const float* buffer_diff, float* mask_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int w = index % feature_W_; + const int h = (index / feature_W_) % feature_H_; + const int n = index / feature_W_ / feature_H_; + // effective mask region : [hstart, hend) x [wstart, wend) with mask-indexed + const int hstart = max(0, half_mask_H_ - h); + const int hend = min(mask_H_, feature_H_ + half_mask_H_ - h); + const int wstart = max(0, half_mask_W_ - w); + const int wend = min(mask_W_, feature_W_ + half_mask_W_ - w); + // (hidx, widx ) with mask-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < 
wend; widx++) { + mask_diff[((n * mask_H_ * mask_W_ + hidx * mask_W_ + widx) * feature_H_ + h) * feature_W_ + w] = + buffer_diff[(n * feature_H_ * feature_W_ + h * feature_W_ + w) * feature_H_ * feature_W_ + (hidx + h - half_mask_H_) * feature_W_ + (widx + w - half_mask_W_)]; + } + } + } +} + +void psamask_forward_cuda(const int psa_type, const at::Tensor& input, at::Tensor& output, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + int nthreads = num_ * feature_H_ * feature_W_; + const float* input_data = input.data(); + float* output_data = output.data(); + if(psa_type == 0) + psamask_collect_forward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); + else + psamask_distribute_forward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, input_data, output_data); +} + +void psamask_backward_cuda(const int psa_type, const at::Tensor& grad_output, at::Tensor& grad_input, const int num_, const int feature_H_, const int feature_W_, const int mask_H_, const int mask_W_, const int half_mask_H_, const int half_mask_W_) +{ + int nthreads = num_ * feature_H_ * feature_W_; + const float* grad_output_data = grad_output.data(); + float* grad_input_data = grad_input.data(); + if(psa_type == 0) + psamask_collect_backward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); + else + psamask_distribute_backward_cuda<<>>(nthreads, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_, grad_output_data, grad_input_data); +} diff --git a/core/models/__init__.py b/core/models/__init__.py new file mode 100644 index 0000000..2a8b222 --- /dev/null +++ b/core/models/__init__.py @@ -0,0 +1,2 @@ +"""Model Zoo""" +from .model_zoo import get_model, get_model_list \ No newline at end of file diff --git 
a/core/models/__pycache__/__init__.cpython-37.pyc b/core/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..16faf62 Binary files /dev/null and b/core/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/core/models/__pycache__/__init__.cpython-38.pyc b/core/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..c7a567b Binary files /dev/null and b/core/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/core/models/__pycache__/bisenet.cpython-37.pyc b/core/models/__pycache__/bisenet.cpython-37.pyc new file mode 100644 index 0000000..1459701 Binary files /dev/null and b/core/models/__pycache__/bisenet.cpython-37.pyc differ diff --git a/core/models/__pycache__/bisenet.cpython-38.pyc b/core/models/__pycache__/bisenet.cpython-38.pyc new file mode 100644 index 0000000..314673f Binary files /dev/null and b/core/models/__pycache__/bisenet.cpython-38.pyc differ diff --git a/core/models/__pycache__/ccnet.cpython-37.pyc b/core/models/__pycache__/ccnet.cpython-37.pyc new file mode 100644 index 0000000..e2642ae Binary files /dev/null and b/core/models/__pycache__/ccnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/ccnet.cpython-38.pyc b/core/models/__pycache__/ccnet.cpython-38.pyc new file mode 100644 index 0000000..d2c7628 Binary files /dev/null and b/core/models/__pycache__/ccnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/cgnet.cpython-37.pyc b/core/models/__pycache__/cgnet.cpython-37.pyc new file mode 100644 index 0000000..51fc359 Binary files /dev/null and b/core/models/__pycache__/cgnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/cgnet.cpython-38.pyc b/core/models/__pycache__/cgnet.cpython-38.pyc new file mode 100644 index 0000000..1f423e8 Binary files /dev/null and b/core/models/__pycache__/cgnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/danet.cpython-37.pyc b/core/models/__pycache__/danet.cpython-37.pyc new file mode 100644 index 
0000000..44e00f8 Binary files /dev/null and b/core/models/__pycache__/danet.cpython-37.pyc differ diff --git a/core/models/__pycache__/danet.cpython-38.pyc b/core/models/__pycache__/danet.cpython-38.pyc new file mode 100644 index 0000000..a115c71 Binary files /dev/null and b/core/models/__pycache__/danet.cpython-38.pyc differ diff --git a/core/models/__pycache__/deeplabv3.cpython-37.pyc b/core/models/__pycache__/deeplabv3.cpython-37.pyc new file mode 100644 index 0000000..b6c2e2f Binary files /dev/null and b/core/models/__pycache__/deeplabv3.cpython-37.pyc differ diff --git a/core/models/__pycache__/deeplabv3.cpython-38.pyc b/core/models/__pycache__/deeplabv3.cpython-38.pyc new file mode 100644 index 0000000..ffb48f2 Binary files /dev/null and b/core/models/__pycache__/deeplabv3.cpython-38.pyc differ diff --git a/core/models/__pycache__/deeplabv3_plus.cpython-37.pyc b/core/models/__pycache__/deeplabv3_plus.cpython-37.pyc new file mode 100644 index 0000000..79b2804 Binary files /dev/null and b/core/models/__pycache__/deeplabv3_plus.cpython-37.pyc differ diff --git a/core/models/__pycache__/deeplabv3_plus.cpython-38.pyc b/core/models/__pycache__/deeplabv3_plus.cpython-38.pyc new file mode 100644 index 0000000..e3b690d Binary files /dev/null and b/core/models/__pycache__/deeplabv3_plus.cpython-38.pyc differ diff --git a/core/models/__pycache__/denseaspp.cpython-37.pyc b/core/models/__pycache__/denseaspp.cpython-37.pyc new file mode 100644 index 0000000..a64b245 Binary files /dev/null and b/core/models/__pycache__/denseaspp.cpython-37.pyc differ diff --git a/core/models/__pycache__/denseaspp.cpython-38.pyc b/core/models/__pycache__/denseaspp.cpython-38.pyc new file mode 100644 index 0000000..8751ae7 Binary files /dev/null and b/core/models/__pycache__/denseaspp.cpython-38.pyc differ diff --git a/core/models/__pycache__/dfanet.cpython-37.pyc b/core/models/__pycache__/dfanet.cpython-37.pyc new file mode 100644 index 0000000..e19ebc7 Binary files /dev/null and 
b/core/models/__pycache__/dfanet.cpython-37.pyc differ diff --git a/core/models/__pycache__/dfanet.cpython-38.pyc b/core/models/__pycache__/dfanet.cpython-38.pyc new file mode 100644 index 0000000..28575db Binary files /dev/null and b/core/models/__pycache__/dfanet.cpython-38.pyc differ diff --git a/core/models/__pycache__/dunet.cpython-37.pyc b/core/models/__pycache__/dunet.cpython-37.pyc new file mode 100644 index 0000000..20fb196 Binary files /dev/null and b/core/models/__pycache__/dunet.cpython-37.pyc differ diff --git a/core/models/__pycache__/dunet.cpython-38.pyc b/core/models/__pycache__/dunet.cpython-38.pyc new file mode 100644 index 0000000..529ded0 Binary files /dev/null and b/core/models/__pycache__/dunet.cpython-38.pyc differ diff --git a/core/models/__pycache__/encnet.cpython-37.pyc b/core/models/__pycache__/encnet.cpython-37.pyc new file mode 100644 index 0000000..422539d Binary files /dev/null and b/core/models/__pycache__/encnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/encnet.cpython-38.pyc b/core/models/__pycache__/encnet.cpython-38.pyc new file mode 100644 index 0000000..525d1d3 Binary files /dev/null and b/core/models/__pycache__/encnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/enet.cpython-37.pyc b/core/models/__pycache__/enet.cpython-37.pyc new file mode 100644 index 0000000..89a9825 Binary files /dev/null and b/core/models/__pycache__/enet.cpython-37.pyc differ diff --git a/core/models/__pycache__/enet.cpython-38.pyc b/core/models/__pycache__/enet.cpython-38.pyc new file mode 100644 index 0000000..6cb84b1 Binary files /dev/null and b/core/models/__pycache__/enet.cpython-38.pyc differ diff --git a/core/models/__pycache__/espnet.cpython-37.pyc b/core/models/__pycache__/espnet.cpython-37.pyc new file mode 100644 index 0000000..4f8378c Binary files /dev/null and b/core/models/__pycache__/espnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/espnet.cpython-38.pyc 
b/core/models/__pycache__/espnet.cpython-38.pyc new file mode 100644 index 0000000..440f6ac Binary files /dev/null and b/core/models/__pycache__/espnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/fcn.cpython-37.pyc b/core/models/__pycache__/fcn.cpython-37.pyc new file mode 100644 index 0000000..cda56fd Binary files /dev/null and b/core/models/__pycache__/fcn.cpython-37.pyc differ diff --git a/core/models/__pycache__/fcn.cpython-38.pyc b/core/models/__pycache__/fcn.cpython-38.pyc new file mode 100644 index 0000000..8960f96 Binary files /dev/null and b/core/models/__pycache__/fcn.cpython-38.pyc differ diff --git a/core/models/__pycache__/fcnv2.cpython-37.pyc b/core/models/__pycache__/fcnv2.cpython-37.pyc new file mode 100644 index 0000000..e65a6d1 Binary files /dev/null and b/core/models/__pycache__/fcnv2.cpython-37.pyc differ diff --git a/core/models/__pycache__/fcnv2.cpython-38.pyc b/core/models/__pycache__/fcnv2.cpython-38.pyc new file mode 100644 index 0000000..171d37d Binary files /dev/null and b/core/models/__pycache__/fcnv2.cpython-38.pyc differ diff --git a/core/models/__pycache__/icnet.cpython-37.pyc b/core/models/__pycache__/icnet.cpython-37.pyc new file mode 100644 index 0000000..afa032c Binary files /dev/null and b/core/models/__pycache__/icnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/icnet.cpython-38.pyc b/core/models/__pycache__/icnet.cpython-38.pyc new file mode 100644 index 0000000..295d494 Binary files /dev/null and b/core/models/__pycache__/icnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/lednet.cpython-37.pyc b/core/models/__pycache__/lednet.cpython-37.pyc new file mode 100644 index 0000000..af1196a Binary files /dev/null and b/core/models/__pycache__/lednet.cpython-37.pyc differ diff --git a/core/models/__pycache__/lednet.cpython-38.pyc b/core/models/__pycache__/lednet.cpython-38.pyc new file mode 100644 index 0000000..7a970e1 Binary files /dev/null and 
b/core/models/__pycache__/lednet.cpython-38.pyc differ diff --git a/core/models/__pycache__/model_zoo.cpython-37.pyc b/core/models/__pycache__/model_zoo.cpython-37.pyc new file mode 100644 index 0000000..d085779 Binary files /dev/null and b/core/models/__pycache__/model_zoo.cpython-37.pyc differ diff --git a/core/models/__pycache__/model_zoo.cpython-38.pyc b/core/models/__pycache__/model_zoo.cpython-38.pyc new file mode 100644 index 0000000..37f7118 Binary files /dev/null and b/core/models/__pycache__/model_zoo.cpython-38.pyc differ diff --git a/core/models/__pycache__/ocnet.cpython-37.pyc b/core/models/__pycache__/ocnet.cpython-37.pyc new file mode 100644 index 0000000..c6307e9 Binary files /dev/null and b/core/models/__pycache__/ocnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/ocnet.cpython-38.pyc b/core/models/__pycache__/ocnet.cpython-38.pyc new file mode 100644 index 0000000..04e13d7 Binary files /dev/null and b/core/models/__pycache__/ocnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/psanet.cpython-37.pyc b/core/models/__pycache__/psanet.cpython-37.pyc new file mode 100644 index 0000000..0ad89dd Binary files /dev/null and b/core/models/__pycache__/psanet.cpython-37.pyc differ diff --git a/core/models/__pycache__/psanet.cpython-38.pyc b/core/models/__pycache__/psanet.cpython-38.pyc new file mode 100644 index 0000000..66a604c Binary files /dev/null and b/core/models/__pycache__/psanet.cpython-38.pyc differ diff --git a/core/models/__pycache__/pspnet.cpython-37.pyc b/core/models/__pycache__/pspnet.cpython-37.pyc new file mode 100644 index 0000000..ca977c7 Binary files /dev/null and b/core/models/__pycache__/pspnet.cpython-37.pyc differ diff --git a/core/models/__pycache__/pspnet.cpython-38.pyc b/core/models/__pycache__/pspnet.cpython-38.pyc new file mode 100644 index 0000000..63d49ac Binary files /dev/null and b/core/models/__pycache__/pspnet.cpython-38.pyc differ diff --git a/core/models/__pycache__/segbase.cpython-37.pyc 
b/core/models/__pycache__/segbase.cpython-37.pyc new file mode 100644 index 0000000..60f6ae5 Binary files /dev/null and b/core/models/__pycache__/segbase.cpython-37.pyc differ diff --git a/core/models/__pycache__/segbase.cpython-38.pyc b/core/models/__pycache__/segbase.cpython-38.pyc new file mode 100644 index 0000000..2a16ca8 Binary files /dev/null and b/core/models/__pycache__/segbase.cpython-38.pyc differ diff --git a/core/models/base_models/__init__.py b/core/models/base_models/__init__.py new file mode 100644 index 0000000..562aa28 --- /dev/null +++ b/core/models/base_models/__init__.py @@ -0,0 +1,6 @@ +from .densenet import * +from .resnet import * +from .resnetv1b import * +from .vgg import * +from .eespnet import * +from .xception import * diff --git a/core/models/base_models/__pycache__/__init__.cpython-37.pyc b/core/models/base_models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..806d7d6 Binary files /dev/null and b/core/models/base_models/__pycache__/__init__.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/__init__.cpython-38.pyc b/core/models/base_models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..8009a87 Binary files /dev/null and b/core/models/base_models/__pycache__/__init__.cpython-38.pyc differ diff --git a/core/models/base_models/__pycache__/densenet.cpython-37.pyc b/core/models/base_models/__pycache__/densenet.cpython-37.pyc new file mode 100644 index 0000000..39155b5 Binary files /dev/null and b/core/models/base_models/__pycache__/densenet.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/densenet.cpython-38.pyc b/core/models/base_models/__pycache__/densenet.cpython-38.pyc new file mode 100644 index 0000000..d79392c Binary files /dev/null and b/core/models/base_models/__pycache__/densenet.cpython-38.pyc differ diff --git a/core/models/base_models/__pycache__/eespnet.cpython-37.pyc b/core/models/base_models/__pycache__/eespnet.cpython-37.pyc new file 
mode 100644 index 0000000..4ed4b9d Binary files /dev/null and b/core/models/base_models/__pycache__/eespnet.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/eespnet.cpython-38.pyc b/core/models/base_models/__pycache__/eespnet.cpython-38.pyc new file mode 100644 index 0000000..99fee82 Binary files /dev/null and b/core/models/base_models/__pycache__/eespnet.cpython-38.pyc differ diff --git a/core/models/base_models/__pycache__/resnet.cpython-37.pyc b/core/models/base_models/__pycache__/resnet.cpython-37.pyc new file mode 100644 index 0000000..e3fa288 Binary files /dev/null and b/core/models/base_models/__pycache__/resnet.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/resnet.cpython-38.pyc b/core/models/base_models/__pycache__/resnet.cpython-38.pyc new file mode 100644 index 0000000..222de4c Binary files /dev/null and b/core/models/base_models/__pycache__/resnet.cpython-38.pyc differ diff --git a/core/models/base_models/__pycache__/resnetv1b.cpython-37.pyc b/core/models/base_models/__pycache__/resnetv1b.cpython-37.pyc new file mode 100644 index 0000000..c47abc3 Binary files /dev/null and b/core/models/base_models/__pycache__/resnetv1b.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/resnetv1b.cpython-38.pyc b/core/models/base_models/__pycache__/resnetv1b.cpython-38.pyc new file mode 100644 index 0000000..dda80b5 Binary files /dev/null and b/core/models/base_models/__pycache__/resnetv1b.cpython-38.pyc differ diff --git a/core/models/base_models/__pycache__/vgg.cpython-37.pyc b/core/models/base_models/__pycache__/vgg.cpython-37.pyc new file mode 100644 index 0000000..c8d7521 Binary files /dev/null and b/core/models/base_models/__pycache__/vgg.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/vgg.cpython-38.pyc b/core/models/base_models/__pycache__/vgg.cpython-38.pyc new file mode 100644 index 0000000..ab2a128 Binary files /dev/null and 
b/core/models/base_models/__pycache__/vgg.cpython-38.pyc differ diff --git a/core/models/base_models/__pycache__/xception.cpython-37.pyc b/core/models/base_models/__pycache__/xception.cpython-37.pyc new file mode 100644 index 0000000..4c7b415 Binary files /dev/null and b/core/models/base_models/__pycache__/xception.cpython-37.pyc differ diff --git a/core/models/base_models/__pycache__/xception.cpython-38.pyc b/core/models/base_models/__pycache__/xception.cpython-38.pyc new file mode 100644 index 0000000..189ac53 Binary files /dev/null and b/core/models/base_models/__pycache__/xception.cpython-38.pyc differ diff --git a/core/models/base_models/densenet.py b/core/models/base_models/densenet.py new file mode 100644 index 0000000..733f21d --- /dev/null +++ b/core/models/base_models/densenet.py @@ -0,0 +1,237 @@ +import re +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo + +from collections import OrderedDict + +__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', + 'dilated_densenet121', 'dilated_densenet161', 'dilated_densenet169', 'dilated_densenet201'] + +model_urls = { + 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth', + 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', + 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth', + 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth', +} + + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dilation=1, norm_layer=nn.BatchNorm2d): + super(_DenseLayer, self).__init__() + self.add_module('norm1', norm_layer(num_input_features)), + self.add_module('relu1', nn.ReLU(True)), + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate, 1, 1, bias=False)), + self.add_module('norm2', norm_layer(bn_size * growth_rate)), + self.add_module('relu2', 
nn.ReLU(True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, 3, 1, dilation, dilation, bias=False)), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, + growth_rate, drop_rate, dilation=1, norm_layer=nn.BatchNorm2d): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, + growth_rate, bn_size, drop_rate, dilation, norm_layer) + self.add_module('denselayer%d' % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d): + super(_Transition, self).__init__() + self.add_module('norm', norm_layer(num_input_features)) + self.add_module('relu', nn.ReLU(True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, 1, 1, bias=False)) + self.add_module('pool', nn.AvgPool2d(2, 2)) + + +# Net +class DenseNet(nn.Module): #这是一个全新的构建模型的方法,<先构造模块后两步传递数据:features和classifier>;另一种常见的是,<边构造边传递数据> + + def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16), num_init_features=64, + bn_size=4, drop_rate=0, num_classes=1000, norm_layer=nn.BatchNorm2d, **kwargs): + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, 7, 2, 3, bias=False)), + ('norm0', norm_layer(num_init_features)), + ('relu0', nn.ReLU(True)), + ('pool0', nn.MaxPool2d(3, 2, 1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock(num_layers, num_features, bn_size, growth_rate, drop_rate, norm_layer=norm_layer) + 
self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_features, num_features // 2, norm_layer=norm_layer) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + self.num_features = num_features + + # Final batch norm + self.features.add_module('norm5', norm_layer(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x): + features = self.features(x) + print('11',features.shape) + out = F.relu(features, True) + out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1) + out = self.classifier(out) + return out + + +class DilatedDenseNet(DenseNet): + def __init__(self, growth_rate=12, block_config=(6, 12, 24, 16), num_init_features=64, + bn_size=4, drop_rate=0, num_classes=1000, dilate_scale=8, norm_layer=nn.BatchNorm2d, **kwargs): + super(DilatedDenseNet, self).__init__(growth_rate, block_config, num_init_features, + bn_size, drop_rate, num_classes, norm_layer) + assert (dilate_scale == 8 or dilate_scale == 16), "dilate_scale can only set as 8 or 16" + from functools import partial + if dilate_scale == 8: # output_stride + self.features.denseblock3.apply(partial(self._conv_dilate, dilate=2))#利用partial函数给 + self.features.denseblock4.apply(partial(self._conv_dilate, dilate=4)) + del self.features.transition2.pool + del self.features.transition3.pool + elif dilate_scale == 16: + self.features.denseblock4.apply(partial(self._conv_dilate, dilate=2)) + del self.features.transition3.pool + + def _conv_dilate(self, m, dilate): + classname = 
m.__class__.__name__ + if classname.find('Conv') != -1: + if m.kernel_size == (3, 3): + m.padding = (dilate, dilate) + m.dilation = (dilate, dilate) + + +# Specification +densenet_spec = {121: (64, 32, [6, 12, 24, 16]), + 161: (96, 48, [6, 12, 36, 24]), + 169: (64, 32, [6, 12, 32, 32]), + 201: (64, 32, [6, 12, 48, 32])} + + +# Constructor +def get_densenet(num_layers, pretrained=False, **kwargs): + r"""Densenet-BC model from the + `"Densely Connected Convolutional Networks" `_ paper. + + Parameters + ---------- + num_layers : int + Number of layers for the variant of densenet. Options are 121, 161, 169, 201. + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default $TORCH_HOME/models + Location for keeping the model parameters. + """ + num_init_features, growth_rate, block_config = densenet_spec[num_layers] + model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) + if pretrained: + # '.'s are no longer allowed in module names, but pervious _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. + # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. 
+ pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + state_dict = model_zoo.load_url(model_urls['densenet%d' % num_layers]) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) #初始化(加载权重) + return model + + +def get_dilated_densenet(num_layers, dilate_scale, pretrained=False, **kwargs): + num_init_features, growth_rate, block_config = densenet_spec[num_layers] + model = DilatedDenseNet(growth_rate, block_config, num_init_features, dilate_scale=dilate_scale) + if pretrained: + # '.'s are no longer allowed in module names, but pervious _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. + # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + state_dict = model_zoo.load_url(model_urls['densenet%d' % num_layers]) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) + return model + + +def densenet121(**kwargs): + return get_densenet(121, **kwargs) + + +def densenet161(**kwargs): + return get_densenet(161, **kwargs) + + +def densenet169(**kwargs): + return get_densenet(169, **kwargs) + + +def densenet201(**kwargs): + return get_densenet(201, **kwargs) + + +def dilated_densenet121(dilate_scale, **kwargs): + return get_dilated_densenet(121, dilate_scale, **kwargs) + + +def dilated_densenet161(dilate_scale, **kwargs): + return get_dilated_densenet(161, dilate_scale, **kwargs) + + +def dilated_densenet169(dilate_scale, **kwargs): + return get_dilated_densenet(169, dilate_scale, **kwargs) + + 
+def dilated_densenet201(dilate_scale, **kwargs): + return get_dilated_densenet(201, dilate_scale, **kwargs) + + +if __name__ == '__main__': + img = torch.randn(2, 3, 512, 512).cuda() + model = dilated_densenet121(8).cuda() + outputs = model(img) + print(outputs.shape) + from torchsummary import summary + + summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + for name, parameters in model.named_parameters(): + print(name, ':', parameters.size()) diff --git a/core/models/base_models/eespnet.py b/core/models/base_models/eespnet.py new file mode 100644 index 0000000..7d087fd --- /dev/null +++ b/core/models/base_models/eespnet.py @@ -0,0 +1,202 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNPReLU, _ConvBN, _BNPReLU + +__all__ = ['EESP', 'EESPNet', 'eespnet'] + + +class EESP(nn.Module): + + def __init__(self, in_channels, out_channels, stride=1, k=4, r_lim=7, down_method='esp', norm_layer=nn.BatchNorm2d): + super(EESP, self).__init__() + self.stride = stride + n = int(out_channels / k) + n1 = out_channels - (k - 1) * n + assert down_method in ['avg', 'esp'], 'One of these is suppported (avg or esp)' + assert n == n1, "n(={}) and n1(={}) should be equal for Depth-wise Convolution ".format(n, n1) + self.proj_1x1 = _ConvBNPReLU(in_channels, n, 1, stride=1, groups=k, norm_layer=norm_layer) + + map_receptive_ksize = {3: 1, 5: 2, 7: 3, 9: 4, 11: 5, 13: 6, 15: 7, 17: 8} + self.k_sizes = list() + for i in range(k): + ksize = int(3 + 2 * i) + ksize = ksize if ksize <= r_lim else 3 + self.k_sizes.append(ksize) + self.k_sizes.sort() + self.spp_dw = nn.ModuleList() + for i in range(k): + dilation = map_receptive_ksize[self.k_sizes[i]] + self.spp_dw.append(nn.Conv2d(n, n, 3, stride, dilation, dilation=dilation, groups=n, bias=False)) + self.conv_1x1_exp = _ConvBN(out_channels, out_channels, 1, 1, groups=k, norm_layer=norm_layer) + self.br_after_cat = _BNPReLU(out_channels, norm_layer) + self.module_act = 
nn.PReLU(out_channels) + self.downAvg = True if down_method == 'avg' else False + + def forward(self, x): + output1 = self.proj_1x1(x) + output = [self.spp_dw[0](output1)] + for k in range(1, len(self.spp_dw)): + out_k = self.spp_dw[k](output1) + out_k = out_k + output[k - 1] + output.append(out_k) + expanded = self.conv_1x1_exp(self.br_after_cat(torch.cat(output, 1))) + del output + if self.stride == 2 and self.downAvg: + return expanded + + if expanded.size() == x.size(): + expanded = expanded + x + + return self.module_act(expanded) + + +class DownSampler(nn.Module): + + def __init__(self, in_channels, out_channels, k=4, r_lim=9, reinf=True, inp_reinf=3, norm_layer=None): + super(DownSampler, self).__init__() + channels_diff = out_channels - in_channels + self.eesp = EESP(in_channels, channels_diff, stride=2, k=k, + r_lim=r_lim, down_method='avg', norm_layer=norm_layer) + self.avg = nn.AvgPool2d(kernel_size=3, padding=1, stride=2) + if reinf: + self.inp_reinf = nn.Sequential( + _ConvBNPReLU(inp_reinf, inp_reinf, 3, 1, 1), + _ConvBN(inp_reinf, out_channels, 1, 1)) + self.act = nn.PReLU(out_channels) + + def forward(self, x, x2=None): + avg_out = self.avg(x) + eesp_out = self.eesp(x) + output = torch.cat([avg_out, eesp_out], 1) + if x2 is not None: + w1 = avg_out.size(2) + while True: + x2 = F.avg_pool2d(x2, kernel_size=3, padding=1, stride=2) + w2 = x2.size(2) + if w2 == w1: + break + output = output + self.inp_reinf(x2) + + return self.act(output) + + +class EESPNet(nn.Module): + def __init__(self, num_classes=1000, scale=1, reinf=True, norm_layer=nn.BatchNorm2d): + super(EESPNet, self).__init__() + inp_reinf = 3 if reinf else None + reps = [0, 3, 7, 3] + r_lim = [13, 11, 9, 7, 5] + K = [4] * len(r_lim) + + # set out_channels + base, levels, base_s = 32, 5, 0 + out_channels = [base] * levels + for i in range(levels): + if i == 0: + base_s = int(base * scale) + base_s = math.ceil(base_s / K[0]) * K[0] + out_channels[i] = base if base_s > base else base_s + else: 
+ out_channels[i] = base_s * pow(2, i) + if scale <= 1.5: + out_channels.append(1024) + elif scale in [1.5, 2]: + out_channels.append(1280) + else: + raise ValueError("Unknown scale value.") + + self.level1 = _ConvBNPReLU(3, out_channels[0], 3, 2, 1, norm_layer=norm_layer) + + self.level2_0 = DownSampler(out_channels[0], out_channels[1], k=K[0], r_lim=r_lim[0], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + + self.level3_0 = DownSampler(out_channels[1], out_channels[2], k=K[1], r_lim=r_lim[1], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level3 = nn.ModuleList() + for i in range(reps[1]): + self.level3.append(EESP(out_channels[2], out_channels[2], k=K[2], r_lim=r_lim[2], + norm_layer=norm_layer)) + + self.level4_0 = DownSampler(out_channels[2], out_channels[3], k=K[2], r_lim=r_lim[2], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level4 = nn.ModuleList() + for i in range(reps[2]): + self.level4.append(EESP(out_channels[3], out_channels[3], k=K[3], r_lim=r_lim[3], + norm_layer=norm_layer)) + + self.level5_0 = DownSampler(out_channels[3], out_channels[4], k=K[3], r_lim=r_lim[3], + reinf=reinf, inp_reinf=inp_reinf, norm_layer=norm_layer) + self.level5 = nn.ModuleList() + for i in range(reps[2]): + self.level5.append(EESP(out_channels[4], out_channels[4], k=K[4], r_lim=r_lim[4], + norm_layer=norm_layer)) + + self.level5.append(_ConvBNPReLU(out_channels[4], out_channels[4], 3, 1, 1, + groups=out_channels[4], norm_layer=norm_layer)) + self.level5.append(_ConvBNPReLU(out_channels[4], out_channels[5], 1, 1, 0, + groups=K[4], norm_layer=norm_layer)) + + self.fc = nn.Linear(out_channels[5], num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): 
+ nn.init.normal_(m.weight, std=0.001) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x, seg=True): + out_l1 = self.level1(x) + + out_l2 = self.level2_0(out_l1, x) + + out_l3_0 = self.level3_0(out_l2, x) + for i, layer in enumerate(self.level3): + if i == 0: + out_l3 = layer(out_l3_0) + else: + out_l3 = layer(out_l3) + + out_l4_0 = self.level4_0(out_l3, x) + for i, layer in enumerate(self.level4): + if i == 0: + out_l4 = layer(out_l4_0) + else: + out_l4 = layer(out_l4) + + if not seg: + out_l5_0 = self.level5_0(out_l4) # down-sampled + for i, layer in enumerate(self.level5): + if i == 0: + out_l5 = layer(out_l5_0) + else: + out_l5 = layer(out_l5) + + output_g = F.adaptive_avg_pool2d(out_l5, output_size=1) + output_g = F.dropout(output_g, p=0.2, training=self.training) + output_1x1 = output_g.view(output_g.size(0), -1) + + return self.fc(output_1x1) + return out_l1, out_l2, out_l3, out_l4 + + +def eespnet(pretrained=False, **kwargs): + model = EESPNet(**kwargs) + if pretrained: + raise ValueError("Don't support pretrained") + return model + + +if __name__ == '__main__': + img = torch.randn(1, 3, 224, 224) + model = eespnet() + out = model(img) diff --git a/core/models/base_models/hrnet.py b/core/models/base_models/hrnet.py new file mode 100644 index 0000000..775b809 --- /dev/null +++ b/core/models/base_models/hrnet.py @@ -0,0 +1,371 @@ +import torch +import torch.nn as nn + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(BasicBlock, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, padding=1, bias=False) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(True) + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = 
self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = norm_layer(planes) + self.conv2 = nn.Conv2d(planes, planes, 3, stride, 1, bias=False) + self.bn2 = norm_layer(planes) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class HighResolutionModule(nn.Module): + def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, + fuse_method, multi_scale_output=True, norm_layer=nn.BatchNorm2d): + super(HighResolutionModule, self).__init__() + assert num_branches == len(num_blocks) + assert num_branches == len(num_channels) + assert num_branches == len(num_inchannels) + + self.num_inchannels = num_inchannels + self.fuse_method = fuse_method + self.num_branches = num_branches + self.multi_scale_output = multi_scale_output + + self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels, norm_layer=norm_layer) + self.fuse_layers = self._make_fuse_layers(norm_layer) + self.relu = nn.ReLU(True) + + def _make_one_branch(self, branch_index, block, num_blocks, num_channels, + stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 
or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.num_inchannels[branch_index], num_channels[branch_index] * block.expansion, + 1, stride, bias=False), + norm_layer(num_channels[branch_index] * block.expansion)) + + layers = list() + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], + stride, downsample, norm_layer=norm_layer)) + self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion + for i in range(1, num_blocks[branch_index]): + layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _make_branches(self, num_branches, block, num_blocks, num_channels, norm_layer=nn.BatchNorm2d): + branches = list() + for i in range(num_branches): + branches.append( + self._make_one_branch(i, block, num_blocks, num_channels, norm_layer=norm_layer)) + + return nn.ModuleList(branches) + + def _make_fuse_layers(self, norm_layer=nn.BatchNorm2d): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + num_inchannels = self.num_inchannels + fuse_layers = [] + for i in range(num_branches if self.multi_scale_output else 1): + fuse_layer = list() + for j in range(num_branches): + if j > i: + fuse_layer.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, bias=False), + norm_layer(num_inchannels[i]), + nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) + elif j == i: + fuse_layer.append(None) + else: + conv3x3s = list() + for k in range(i - j): + if k == i - j - 1: + num_outchannels_conv3x3 = num_inchannels[i] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), + norm_layer(num_outchannels_conv3x3))) + else: + num_outchannels_conv3x3 = num_inchannels[j] + conv3x3s.append(nn.Sequential( + nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, 
bias=False), + norm_layer(num_outchannels_conv3x3), + nn.ReLU(False))) + fuse_layer.append(nn.Sequential(*conv3x3s)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def get_num_inchannels(self): + return self.num_inchannels + + def forward(self, x): + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = list() + for i in range(len(self.fuse_layers)): + y = x[0] if i == 0 else self.fuse_layers[i][0](x[0]) + for j in range(1, self.num_branches): + if i == j: + y = y + x[j] + else: + y = y + self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + + return x_fuse + + +class HighResolutionNet(nn.Module): + def __init__(self, blocks, num_channels, num_modules, num_branches, num_blocks, + fuse_method, norm_layer=nn.BatchNorm2d, **kwargs): + super(HighResolutionNet, self).__init__() + self.num_branches = num_branches + + # deep stem + self.conv1 = nn.Sequential( + nn.Conv2d(3, 64, 3, 2, 1, bias=False), + norm_layer(64), + nn.ReLU(True), + nn.Conv2d(64, 64, 3, 2, 1, bias=False), + norm_layer(64), + nn.ReLU(True)) + + self.layer1 = self._make_layer(Bottleneck, 64, 64, 4, norm_layer=norm_layer) + + # stage 2 + num_channel, block = num_channels[0], blocks[0] + channels = [channel * block.expansion for channel in num_channel] + self.transition1 = self._make_transition_layer([256], channels, norm_layer) + self.stage2, pre_stage_channels = self._make_stage(num_modules[0], num_branches[0], + num_blocks[0], channels, block, + fuse_method[0], channels, + norm_layer=norm_layer) + + # stage 3 + num_channel, block = num_channels[1], blocks[1] + channels = [channel * block.expansion for channel in num_channel] + self.transition1 = self._make_transition_layer(pre_stage_channels, channels, norm_layer) + self.stage3, pre_stage_channels = self._make_stage(num_modules[1], num_branches[1], + num_blocks[1], channels, block, + fuse_method[1], channels, + 
norm_layer=norm_layer) + + # stage 4 + num_channel, block = num_channels[2], blocks[2] + channels = [channel * block.expansion for channel in num_channel] + self.transition1 = self._make_transition_layer(pre_stage_channels, channels, norm_layer) + self.stage4, pre_stage_channels = self._make_stage(num_modules[2], num_branches[2], + num_blocks[2], channels, block, + fuse_method[2], channels, + norm_layer=norm_layer) + + self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(pre_stage_channels, norm_layer) + + self.classifier = nn.Linear(2048, 1000) + + def _make_layer(self, block, inplanes, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(inplanes, planes * block.expansion, 1, stride, bias=False), + norm_layer(planes * block.expansion)) + + layers = list() + layers.append(block(inplanes, planes, stride, downsample=downsample, norm_layer=norm_layer)) + inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(inplanes, planes, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer, norm_layer=nn.BatchNorm2d): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = list() + for i in range(num_branches_cur): + if i < num_branches_pre: + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append(nn.Sequential( + nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, padding=1, bias=False), + norm_layer(num_channels_cur_layer[i]), + nn.ReLU(True))) + else: + transition_layers.append(None) + else: + conv3x3s = list() + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] if j == i - num_branches_pre else in_channels + 
conv3x3s.append(nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True))) + transition_layers.append(nn.Sequential(*conv3x3s)) + + return nn.ModuleList(transition_layers) + + def _make_stage(self, num_modules, num_branches, num_blocks, num_channels, block, + fuse_method, num_inchannels, multi_scale_output=True, norm_layer=nn.BatchNorm2d): + modules = list() + for i in range(num_modules): + # multi_scale_output is only used last module + if not multi_scale_output and i == num_modules - 1: + reset_multi_scale_output = False + else: + reset_multi_scale_output = True + + modules.append(HighResolutionModule(num_branches, block, num_blocks, num_inchannels, num_channels, + fuse_method, reset_multi_scale_output, norm_layer=norm_layer)) + num_inchannels = modules[-1].get_num_inchannels() + + return nn.Sequential(*modules), num_inchannels + + def _make_head(self, pre_stage_channels, norm_layer=nn.BatchNorm2d): + head_block = Bottleneck + head_channels = [32, 64, 128, 256] + + # Increasing the #channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + incre_modules = list() + for i, channels in enumerate(pre_stage_channels): + incre_module = self._make_layer(head_block, channels, head_channels[i], 1) + incre_modules.append(incre_module) + incre_modules = nn.ModuleList(incre_modules) + + # downsampling modules + downsamp_modules = [] + for i in range(len(pre_stage_channels) - 1): + in_channels = head_channels[i] * head_block.expansion + out_channels = head_channels[i + 1] * head_block.expansion + + downsamp_module = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, 2, 1), + norm_layer(out_channels), + nn.ReLU(True)) + + downsamp_modules.append(downsamp_module) + downsamp_modules = nn.ModuleList(downsamp_modules) + + final_layer = nn.Sequential( + nn.Conv2d(head_channels[3] * head_block.expansion, 2048, 1), + norm_layer(2048), + nn.ReLU(True)) + + return incre_modules, downsamp_modules, 
final_layer + + def forward(self, x): + x = self.conv1(x) + x = self.layer1(x) + + x_list = list() + for i in range(self.num_branches[0]): + if self.transition1[i] is not None: + tmp = self.transition1[i](x) + print(tmp.size()) + x_list.append(self.transition1[i](x)) + else: + x_list.append(x) + y_list = self.stage2(x_list) + + x_list = [] + for i in range(self.num_branches[1]): + if self.transition2[i] is not None: + x_list.append(self.transition2[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage3(x_list) + + x_list = [] + for i in range(self.num_branches[2]): + if self.transition3[i] is not None: + x_list.append(self.transition3[i](y_list[-1])) + else: + x_list.append(y_list[i]) + y_list = self.stage4(x_list) + + # Classification Head + y = self.incre_modules[0](y_list[0]) + for i in range(len(self.downsamp_modules)): + y = self.incre_modules[i + 1](y_list[i + 1]) + self.downsamp_modules[i](y) + + y = self.final_layer(y) + + y = F.avg_pool2d(y, kernel_size=y.size() + [2:]).view(y.size(0), -1) + + y = self.classifier(y) + + return y + + +blocks = [BasicBlock, BasicBlock, BasicBlock] +num_modules = [1, 1, 1] +num_branches = [2, 3, 4] +num_blocks = [[4, 4], [4, 4, 4], [4, 4, 4, 4]] +num_channels = [[256, 256], [32, 64, 128], [32, 64, 128, 256]] +fuse_method = ['sum', 'sum', 'sum'] + +if __name__ == '__main__': + img = torch.randn(1, 3, 256, 256) + model = HighResolutionNet(blocks, num_channels, num_modules, num_branches, num_blocks, fuse_method) + output = model(img) diff --git a/core/models/base_models/mobilenetv2.py b/core/models/base_models/mobilenetv2.py new file mode 100644 index 0000000..4e4c093 --- /dev/null +++ b/core/models/base_models/mobilenetv2.py @@ -0,0 +1,158 @@ +"""MobileNet and MobileNetV2.""" +import torch +import torch.nn as nn + +from core.nn import _ConvBNReLU, _DepthwiseConv, InvertedResidual + +__all__ = ['MobileNet', 'MobileNetV2', 'get_mobilenet', 'get_mobilenet_v2', + 'mobilenet1_0', 'mobilenet_v2_1_0', 
'mobilenet0_75', 'mobilenet_v2_0_75', + 'mobilenet0_5', 'mobilenet_v2_0_5', 'mobilenet0_25', 'mobilenet_v2_0_25'] + + +class MobileNet(nn.Module): + def __init__(self, num_classes=1000, multiplier=1.0, norm_layer=nn.BatchNorm2d, **kwargs): + super(MobileNet, self).__init__() + conv_dw_setting = [ + [64, 1, 1], + [128, 2, 2], + [256, 2, 2], + [512, 6, 2], + [1024, 2, 2]] + input_channels = int(32 * multiplier) if multiplier > 1.0 else 32 + features = [_ConvBNReLU(3, input_channels, 3, 2, 1, norm_layer=norm_layer)] + + for c, n, s in conv_dw_setting: + out_channels = int(c * multiplier) + for i in range(n): + stride = s if i == 0 else 1 + features.append(_DepthwiseConv(input_channels, out_channels, stride, norm_layer)) + input_channels = out_channels + features.append(nn.AdaptiveAvgPool2d(1)) + self.features = nn.Sequential(*features) + + self.classifier = nn.Linear(int(1024 * multiplier), num_classes) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0), x.size(1))) + return x + + +class MobileNetV2(nn.Module): + def __init__(self, num_classes=1000, multiplier=1.0, norm_layer=nn.BatchNorm2d, **kwargs): + super(MobileNetV2, self).__init__() + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1]] + # building first layer + input_channels = int(32 * multiplier) if multiplier > 1.0 else 32 + last_channels = int(1280 * multiplier) if multiplier > 1.0 else 1280 + features = [_ConvBNReLU(3, input_channels, 3, 2, 1, relu6=True, norm_layer=norm_layer)] + + # 
building inverted residual blocks + for t, c, n, s in inverted_residual_setting: + out_channels = int(c * multiplier) + for i in range(n): + stride = s if i == 0 else 1 + features.append(InvertedResidual(input_channels, out_channels, stride, t, norm_layer)) + input_channels = out_channels + + # building last several layers + features.append(_ConvBNReLU(input_channels, last_channels, 1, relu6=True, norm_layer=norm_layer)) + features.append(nn.AdaptiveAvgPool2d(1)) + self.features = nn.Sequential(*features) + + self.classifier = nn.Sequential( + nn.Dropout2d(0.2), + nn.Linear(last_channels, num_classes)) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + if m.bias is not None: + nn.init.zeros_(m.bias) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0), x.size(1))) + return x + + +# Constructor +def get_mobilenet(multiplier=1.0, pretrained=False, root='~/.torch/models', **kwargs): + model = MobileNet(multiplier=multiplier, **kwargs) + + if pretrained: + raise ValueError("Not support pretrained") + return model + + +def get_mobilenet_v2(multiplier=1.0, pretrained=False, root='~/.torch/models', **kwargs): + model = MobileNetV2(multiplier=multiplier, **kwargs) + + if pretrained: + raise ValueError("Not support pretrained") + return model + + +def mobilenet1_0(**kwargs): + return get_mobilenet(1.0, **kwargs) + + +def mobilenet_v2_1_0(**kwargs): + return get_mobilenet_v2(1.0, **kwargs) + + +def mobilenet0_75(**kwargs): + return get_mobilenet(0.75, **kwargs) + + +def mobilenet_v2_0_75(**kwargs): + return get_mobilenet_v2(0.75, **kwargs) + + +def mobilenet0_5(**kwargs): + return get_mobilenet(0.5, **kwargs) + + +def mobilenet_v2_0_5(**kwargs): + 
return get_mobilenet_v2(0.5, **kwargs) + + +def mobilenet0_25(**kwargs): + return get_mobilenet(0.25, **kwargs) + + +def mobilenet_v2_0_25(**kwargs): + return get_mobilenet_v2(0.25, **kwargs) + + +if __name__ == '__main__': + model = mobilenet0_5() diff --git a/core/models/base_models/resnet.py b/core/models/base_models/resnet.py new file mode 100644 index 0000000..d1c5fe2 --- /dev/null +++ b/core/models/base_models/resnet.py @@ -0,0 +1,233 @@ +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): # ResNet的网络深度有18,34,50,101,152。50层以下的网络基础块是BasicBlock,50层及以上的网络基础块是BottleNeck。 + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(BasicBlock, self).__init__() + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x # identity代表残差 + + out 
= self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=nn.BatchNorm2d): + super(Bottleneck, self).__init__() + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = norm_layer(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = norm_layer(planes) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=nn.BatchNorm2d): + + super(ResNet, self).__init__() + self.inplanes = 64 + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + self.avgpool = nn.AdaptiveAvgPool2d((1, 
1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) # 原始 + # model.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='../../../runs/segmentation/BiSeNet_test/experiment_0')) # 改动 + return model + + +def resnet34(pretrained=False, **kwargs): + """Constructs a ResNet-34 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) + return model + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) + return model + + +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) + return model + + +def resnet152(pretrained=False, **kwargs): + """Constructs a ResNet-152 model. 
+ Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) + return model + + +if __name__ == '__main__': + import torch + + img = torch.randn(4, 3, 224, 224) # 原始 + # img = torch.randn(4, 3, 512, 512) # 改动 + + model = resnet50(True) # 原始 + # model = resnet18(True) # 改动 + + output = model(img) diff --git a/core/models/base_models/resnetv1b.py b/core/models/base_models/resnetv1b.py new file mode 100644 index 0000000..21d67b7 --- /dev/null +++ b/core/models/base_models/resnetv1b.py @@ -0,0 +1,264 @@ +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNetV1b', 'resnet18_v1b', 'resnet34_v1b', 'resnet50_v1b', + 'resnet101_v1b', 'resnet152_v1b', 'resnet152_v1s', 'resnet101_v1s', 'resnet50_v1s'] + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +class BasicBlockV1b(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, + previous_dilation=1, norm_layer=nn.BatchNorm2d): + super(BasicBlockV1b, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 3, stride, + dilation, dilation, bias=False) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(True) + self.conv2 = nn.Conv2d(planes, planes, 3, 1, previous_dilation, + dilation=previous_dilation, bias=False) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = 
self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class BottleneckV1b(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, + previous_dilation=1, norm_layer=nn.BatchNorm2d): + super(BottleneckV1b, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = norm_layer(planes) + self.conv2 = nn.Conv2d(planes, planes, 3, stride, + dilation, dilation, bias=False) + self.bn2 = norm_layer(planes) + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNetV1b(nn.Module): + + def __init__(self, block, layers, num_classes=1000, dilated=True, deep_stem=False, + zero_init_residual=False, norm_layer=nn.BatchNorm2d): + self.inplanes = 128 if deep_stem else 64 + super(ResNetV1b, self).__init__() + if deep_stem: + self.conv1 = nn.Sequential( + nn.Conv2d(3, 64, 3, 2, 1, bias=False), + norm_layer(64), + nn.ReLU(True), + nn.Conv2d(64, 64, 3, 1, 1, bias=False), + norm_layer(64), + nn.ReLU(True), + nn.Conv2d(64, 128, 3, 1, 1, bias=False) + ) + else: + self.conv1 = nn.Conv2d(3, 64, 7, 2, 3, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(True) + self.maxpool = nn.MaxPool2d(3, 2, 1) + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + if 
dilated: + self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer) + else: + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, BottleneckV1b): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlockV1b): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False), + norm_layer(planes * block.expansion), + ) + + layers = [] + if dilation in (1, 2): + layers.append(block(self.inplanes, planes, stride, dilation=1, downsample=downsample, + previous_dilation=dilation, norm_layer=norm_layer)) + elif dilation == 4: + layers.append(block(self.inplanes, planes, stride, dilation=2, downsample=downsample, + previous_dilation=dilation, norm_layer=norm_layer)) + else: + raise RuntimeError("=> unknown dilation size: {}".format(dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, dilation=dilation, + previous_dilation=dilation, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = 
self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnet18_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BasicBlockV1b, [2, 2, 2, 2], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet18']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet34_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BasicBlockV1b, [3, 4, 6, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet34']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet50_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet50']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet101_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet101']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + model.load_state_dict(model_dict) + return model + + +def resnet152_v1b(pretrained=False, **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], **kwargs) + if pretrained: + old_dict = model_zoo.load_url(model_urls['resnet152']) + model_dict = model.state_dict() + old_dict = {k: v for k, v in old_dict.items() if (k in model_dict)} + model_dict.update(old_dict) + 
model.load_state_dict(model_dict) + return model + + +def resnet50_v1s(pretrained=False, root='~/.torch/models', **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], deep_stem=True, **kwargs) + if pretrained: + from ..model_store import get_resnet_file + model.load_state_dict(torch.load(get_resnet_file('resnet50', root=root)), strict=False) + return model + + +def resnet101_v1s(pretrained=False, root='~/.torch/models', **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 4, 23, 3], deep_stem=True, **kwargs) + if pretrained: + from ..model_store import get_resnet_file + model.load_state_dict(torch.load(get_resnet_file('resnet101', root=root)), strict=False) + return model + + +def resnet152_v1s(pretrained=False, root='~/.torch/models', **kwargs): + model = ResNetV1b(BottleneckV1b, [3, 8, 36, 3], deep_stem=True, **kwargs) + if pretrained: + from ..model_store import get_resnet_file + model.load_state_dict(torch.load(get_resnet_file('resnet152', root=root)), strict=False) + return model + + +if __name__ == '__main__': + import torch + + img = torch.randn(4, 3, 224, 224) + model = resnet50_v1b(True) + output = model(img) diff --git a/core/models/base_models/resnext.py b/core/models/base_models/resnext.py new file mode 100644 index 0000000..8daf287 --- /dev/null +++ b/core/models/base_models/resnext.py @@ -0,0 +1,154 @@ +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = ['ResNext', 'resnext50_32x4d', 'resnext101_32x8d'] + +model_urls = { + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', +} + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None, **kwargs): + super(Bottleneck, self).__init__() + width = int(planes * (base_width / 64.)) * groups + + self.conv1 = nn.Conv2d(inplanes, width, 1, 
bias=False) + self.bn1 = norm_layer(width) + self.conv2 = nn.Conv2d(width, width, 3, stride, dilation, dilation, groups, bias=False) + self.bn2 = norm_layer(width) + self.conv3 = nn.Conv2d(width, planes * self.expansion, 1, bias=False) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNext(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, + width_per_group=64, dilated=False, norm_layer=nn.BatchNorm2d, **kwargs): + super(ResNext, self).__init__() + self.inplanes = 64 + self.groups = groups + self.base_width = width_per_group + + self.conv1 = nn.Conv2d(3, self.inplanes, 7, 2, 3, bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(True) + self.maxpool = nn.MaxPool2d(3, 2, 1) + + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + if dilated: + self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, norm_layer=norm_layer) + else: + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', 
nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=nn.BatchNorm2d): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, 1, stride, bias=False), + norm_layer(planes * block.expansion) + ) + + layers = list() + if dilation in (1, 2): + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, norm_layer=norm_layer)) + elif dilation == 4: + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, dilation=2, norm_layer=norm_layer)) + else: + raise RuntimeError("=> unknown dilation size: {}".format(dilation)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, + dilation=dilation, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnext50_32x4d(pretrained=False, **kwargs): + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + model = ResNext(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + state_dict = model_zoo.load_url(model_urls['resnext50_32x4d']) + model.load_state_dict(state_dict) + return model + + +def resnext101_32x8d(pretrained=False, **kwargs): + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + model = ResNext(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + state_dict = 
model_zoo.load_url(model_urls['resnext101_32x8d']) + model.load_state_dict(state_dict) + return model + + +if __name__ == '__main__': + model = resnext101_32x8d() diff --git a/core/models/base_models/vgg.py b/core/models/base_models/vgg.py new file mode 100644 index 0000000..fe5c163 --- /dev/null +++ b/core/models/base_models/vgg.py @@ -0,0 +1,191 @@ +import torch +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + +model_urls = { + 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth', + 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth', + 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', + 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', + 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth', + 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth', + 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth', + 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth', +} + + +class VGG(nn.Module): + def __init__(self, features, num_classes=1000, init_weights=True): + super(VGG, self).__init__() + self.features = features + self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes) + ) + if init_weights: + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + 
nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def make_layers(cfg, batch_norm=False): + layers = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += (conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)) + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + +cfg = { + 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +def vgg11(pretrained=False, **kwargs): + """VGG 11-layer model (configuration "A") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['A']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg11'])) + return model + + +def vgg11_bn(pretrained=False, **kwargs): + """VGG 11-layer model (configuration "A") with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn'])) + return model + + +def vgg13(pretrained=False, **kwargs): + """VGG 13-layer model (configuration "B") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['B']), 
**kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg13'])) + return model + + +def vgg13_bn(pretrained=False, **kwargs): + """VGG 13-layer model (configuration "B") with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn'])) + return model + + +def vgg16(pretrained=False, **kwargs): + """VGG 16-layer model (configuration "D") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['D']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg16'])) + return model + + +def vgg16_bn(pretrained=False, **kwargs): + """VGG 16-layer model (configuration "D") with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn'])) + return model + + +def vgg19(pretrained=False, **kwargs): + """VGG 19-layer model (configuration "E") + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['E']), **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['vgg19'])) + return model + + +def vgg19_bn(pretrained=False, **kwargs): + """VGG 19-layer model (configuration 'E') with batch normalization + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs) + if pretrained: + 
model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn'])) + return model + + +if __name__ == '__main__': + img = torch.randn((4, 3, 480, 480)) + model = vgg16(pretrained=False) + out = model(img) diff --git a/core/models/base_models/xception.py b/core/models/base_models/xception.py new file mode 100644 index 0000000..51832f1 --- /dev/null +++ b/core/models/base_models/xception.py @@ -0,0 +1,411 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['Enc', 'FCAttention', 'Xception65', 'Xception71', 'get_xception', 'get_xception_71', 'get_xception_a'] + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None): + super(SeparableConv2d, self).__init__() + self.kernel_size = kernel_size + self.dilation = dilation + + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, 0, dilation, groups=in_channels, + bias=bias) + self.bn = norm_layer(in_channels) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=bias) + + def forward(self, x): + x = self.fix_padding(x, self.kernel_size, self.dilation) + x = self.conv1(x) + x = self.bn(x) + x = self.pointwise(x) + + return x + + def fix_padding(self, x, kernel_size, dilation): + kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + padded_inputs = F.pad(x, (pad_beg, pad_end, pad_beg, pad_end)) + return padded_inputs + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, stride=1, dilation=1, norm_layer=None, + start_with_relu=True, grow_first=True, is_last=False): + super(Block, self).__init__() + if out_channels != in_channels or stride != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False) + self.skipbn = norm_layer(out_channels) + else: + self.skip = None + self.relu = nn.ReLU(True) + rep = list() + 
filters = in_channels + if grow_first: + if start_with_relu: + rep.append(self.relu) + rep.append(SeparableConv2d(in_channels, out_channels, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + filters = out_channels + for i in range(reps - 1): + if grow_first or start_with_relu: + rep.append(self.relu) + rep.append(SeparableConv2d(filters, filters, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(filters)) + if not grow_first: + rep.append(self.relu) + rep.append(SeparableConv2d(in_channels, out_channels, 3, 1, dilation, norm_layer=norm_layer)) + if stride != 1: + rep.append(self.relu) + rep.append(SeparableConv2d(out_channels, out_channels, 3, stride, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + elif is_last: + rep.append(self.relu) + rep.append(SeparableConv2d(out_channels, out_channels, 3, 1, dilation, norm_layer=norm_layer)) + rep.append(norm_layer(out_channels)) + self.rep = nn.Sequential(*rep) + + def forward(self, x): + out = self.rep(x) + if self.skip is not None: + skip = self.skipbn(self.skip(x)) + else: + skip = x + out = out + skip + return out + + +class Xception65(nn.Module): + """Modified Aligned Xception + """ + + def __init__(self, num_classes=1000, output_stride=32, norm_layer=nn.BatchNorm2d): + super(Xception65, self).__init__() + if output_stride == 32: + entry_block3_stride = 2 + exit_block20_stride = 2 + middle_block_dilation = 1 + exit_block_dilations = (1, 1) + elif output_stride == 16: + entry_block3_stride = 2 + exit_block20_stride = 1 + middle_block_dilation = 1 + exit_block_dilations = (1, 2) + elif output_stride == 8: + entry_block3_stride = 1 + exit_block20_stride = 1 + middle_block_dilation = 2 + exit_block_dilations = (2, 4) + else: + raise NotImplementedError + # Entry flow + self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False) + self.bn1 = norm_layer(32) + self.relu = nn.ReLU(True) + + self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False) + self.bn2 = norm_layer(64) + + 
self.block1 = Block(64, 128, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False) + self.block2 = Block(128, 256, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True) + self.block3 = Block(256, 728, reps=2, stride=entry_block3_stride, norm_layer=norm_layer, + start_with_relu=True, grow_first=True, is_last=True) + + # Middle flow + midflow = list() + for i in range(4, 20): + midflow.append(Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation, norm_layer=norm_layer, + start_with_relu=True, grow_first=True)) + self.midflow = nn.Sequential(*midflow) + + # Exit flow + self.block20 = Block(728, 1024, reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0], + norm_layer=norm_layer, start_with_relu=True, grow_first=False, is_last=True) + self.conv3 = SeparableConv2d(1024, 1536, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn3 = norm_layer(1536) + self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn4 = norm_layer(1536) + self.conv5 = SeparableConv2d(1536, 2048, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer) + self.bn5 = norm_layer(2048) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(2048, num_classes) + + def forward(self, x): + # Entry flow + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + + x = self.conv2(x) + x = self.bn2(x) + x = self.relu(x) + + x = self.block1(x) + x = self.relu(x) + # c1 = x + x = self.block2(x) + # c2 = x + x = self.block3(x) + + # Middle flow + x = self.midflow(x) + # c3 = x + + # Exit flow + x = self.block20(x) + x = self.relu(x) + x = self.conv3(x) + x = self.bn3(x) + x = self.relu(x) + + x = self.conv4(x) + x = self.bn4(x) + x = self.relu(x) + + x = self.conv5(x) + x = self.bn5(x) + x = self.relu(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class Xception71(nn.Module): + """Modified Aligned Xception + """ + 
# NOTE(review): this chunk is a whitespace-mangled `git diff` paste; the code
# below is the reconstructed, properly indented Python for this span.
# `Block` and `SeparableConv2d` are defined earlier in this file (outside this
# chunk).  The `class Xception71` header also appears, mangled, just above
# this span; it is restated here so the reconstruction is self-contained.
class Xception71(nn.Module):
    """Modified Aligned Xception, 71-layer variant (DeepLab-style backbone).

    Parameters
    ----------
    num_classes : int
        Size of the final classification layer.
    output_stride : int
        Overall downsampling factor of the network (32, 16 or 8); controls
        the strides/dilations chosen below.
    norm_layer : callable
        Normalization layer constructor (default ``nn.BatchNorm2d``).
    """

    def __init__(self, num_classes=1000, output_stride=32, norm_layer=nn.BatchNorm2d):
        super(Xception71, self).__init__()
        # Entry/exit strides and block dilations are picked so the overall
        # output stride of the feature extractor matches `output_stride`.
        if output_stride == 32:
            entry_block3_stride = 2
            exit_block20_stride = 2
            middle_block_dilation = 1
            exit_block_dilations = (1, 1)
        elif output_stride == 16:
            entry_block3_stride = 2
            exit_block20_stride = 1
            middle_block_dilation = 1
            exit_block_dilations = (1, 2)
        elif output_stride == 8:
            entry_block3_stride = 1
            exit_block20_stride = 1
            middle_block_dilation = 2
            exit_block_dilations = (2, 4)
        else:
            raise NotImplementedError
        # Entry flow
        self.conv1 = nn.Conv2d(3, 32, 3, 2, 1, bias=False)
        self.bn1 = norm_layer(32)
        self.relu = nn.ReLU(True)

        self.conv2 = nn.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = norm_layer(64)

        self.block1 = Block(64, 128, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False)
        # Xception-71 differs from Xception-65 by the extra 256->728 block here.
        self.block2 = nn.Sequential(
            Block(128, 256, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True),
            Block(256, 728, reps=2, stride=2, norm_layer=norm_layer, start_with_relu=False, grow_first=True))
        self.block3 = Block(728, 728, reps=2, stride=entry_block3_stride, norm_layer=norm_layer,
                            start_with_relu=True, grow_first=True, is_last=True)

        # Middle flow: 16 identical 728-channel blocks.
        midflow = list()
        for i in range(4, 20):
            midflow.append(Block(728, 728, reps=3, stride=1, dilation=middle_block_dilation,
                                 norm_layer=norm_layer, start_with_relu=True, grow_first=True))
        self.midflow = nn.Sequential(*midflow)

        # Exit flow
        self.block20 = Block(728, 1024, reps=2, stride=exit_block20_stride, dilation=exit_block_dilations[0],
                             norm_layer=norm_layer, start_with_relu=True, grow_first=False, is_last=True)
        self.conv3 = SeparableConv2d(1024, 1536, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer)
        self.bn3 = norm_layer(1536)
        self.conv4 = SeparableConv2d(1536, 1536, 3, stride=1, dilation=exit_block_dilations[1], norm_layer=norm_layer)
        self.bn4 = norm_layer(1536)
        self.conv5 = SeparableConv2d(1536, 2048, 3, 1, dilation=exit_block_dilations[1], norm_layer=norm_layer)
        self.bn5 = norm_layer(2048)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        """Run the full entry/middle/exit flow and return class logits."""
        # Entry flow
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        x = self.relu(x)
        # c1 = x
        x = self.block2(x)
        # c2 = x
        x = self.block3(x)

        # Middle flow
        x = self.midflow(x)
        # c3 = x

        # Exit flow
        x = self.block20(x)
        x = self.relu(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu(x)

        x = self.conv4(x)
        x = self.bn4(x)
        x = self.relu(x)

        x = self.conv5(x)
        x = self.bn5(x)
        x = self.relu(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x


# -------------------------------------------------
# For DFANet
# -------------------------------------------------
class BlockA(nn.Module):
    """Residual separable-conv block used by the DFANet backbone.

    A 1x1 (strided) projection is used on the skip path whenever channel
    count or stride changes; otherwise the identity is used.
    """

    def __init__(self, in_channels, out_channels, stride=1, dilation=1, norm_layer=None, start_with_relu=True):
        super(BlockA, self).__init__()
        if out_channels != in_channels or stride != 1:
            self.skip = nn.Conv2d(in_channels, out_channels, 1, stride, bias=False)
            self.skipbn = norm_layer(out_channels)
        else:
            self.skip = None
        self.relu = nn.ReLU(False)
        rep = list()
        # Bottleneck: the two inner convs run at out_channels // 4.
        inter_channels = out_channels // 4

        if start_with_relu:
            rep.append(self.relu)
        rep.append(SeparableConv2d(in_channels, inter_channels, 3, 1, dilation, norm_layer=norm_layer))
        rep.append(norm_layer(inter_channels))

        rep.append(self.relu)
        rep.append(SeparableConv2d(inter_channels, inter_channels, 3, 1, dilation, norm_layer=norm_layer))
        rep.append(norm_layer(inter_channels))

        if stride != 1:
            # Downsampling happens in the last separable conv of the block.
            rep.append(self.relu)
            rep.append(SeparableConv2d(inter_channels, out_channels, 3, stride, norm_layer=norm_layer))
            rep.append(norm_layer(out_channels))
        else:
            rep.append(self.relu)
            rep.append(SeparableConv2d(inter_channels, out_channels, 3, 1, norm_layer=norm_layer))
            rep.append(norm_layer(out_channels))
        self.rep = nn.Sequential(*rep)

    def forward(self, x):
        out = self.rep(x)
        if self.skip is not None:
            skip = self.skipbn(self.skip(x))
        else:
            skip = x
        out = out + skip
        return out


class Enc(nn.Module):
    """Encoder stage: one stride-2 BlockA followed by `blocks - 1` stride-1 BlockAs."""

    def __init__(self, in_channels, out_channels, blocks, norm_layer=nn.BatchNorm2d):
        super(Enc, self).__init__()
        block = list()
        block.append(BlockA(in_channels, out_channels, 2, norm_layer=norm_layer))
        for i in range(blocks - 1):
            block.append(BlockA(out_channels, out_channels, 1, norm_layer=norm_layer))
        self.block = nn.Sequential(*block)

    def forward(self, x):
        return self.block(x)


class FCAttention(nn.Module):
    """Fully-connected channel attention (DFANet): GAP -> fc(1000) -> 1x1 conv gate."""

    def __init__(self, in_channels, norm_layer=nn.BatchNorm2d):
        super(FCAttention, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # The 1000-dim bottleneck matches the ImageNet-pretrained classifier head.
        self.fc = nn.Linear(in_channels, 1000)
        self.conv = nn.Sequential(
            nn.Conv2d(1000, in_channels, 1, bias=False),
            norm_layer(in_channels),
            nn.ReLU(False))

    def forward(self, x):
        n, c, _, _ = x.size()
        att = self.avgpool(x).view(n, c)
        att = self.fc(att).view(n, 1000, 1, 1)
        att = self.conv(att)
        # Broadcast the per-channel gate over the spatial dimensions.
        return x * att.expand_as(x)


class XceptionA(nn.Module):
    """Lightweight Xception-A backbone (DFANet) with FC attention head."""

    def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d):
        super(XceptionA, self).__init__()
        self.conv1 = nn.Sequential(nn.Conv2d(3, 8, 3, 2, 1, bias=False),
                                   norm_layer(8),
                                   nn.ReLU(True))

        self.enc2 = Enc(8, 48, 4, norm_layer=norm_layer)
        self.enc3 = Enc(48, 96, 6, norm_layer=norm_layer)
        self.enc4 = Enc(96, 192, 4, norm_layer=norm_layer)

        self.fca = FCAttention(192, norm_layer=norm_layer)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(192, num_classes)

    def forward(self, x):
        x = self.conv1(x)

        x = self.enc2(x)
        x = self.enc3(x)
        x = self.enc4(x)
        x = self.fca(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
# NOTE(review): this span of the mangled diff contains the end of xception.py
# plus the entire new files bisenet-ziji.py, bisenet.py and ccnet.py.  Diff
# headers are kept as section comments; Chinese comments are translated.

# Constructor
def get_xception(pretrained=False, root='~/.torch/models', **kwargs):
    """Build Xception65; optionally load weights from the local model store."""
    model = Xception65(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(get_model_file('xception', root=root)))
    return model


def get_xception_71(pretrained=False, root='~/.torch/models', **kwargs):
    """Build Xception71; optionally load weights from the local model store."""
    model = Xception71(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(get_model_file('xception71', root=root)))
    return model


def get_xception_a(pretrained=False, root='~/.torch/models', **kwargs):
    """Build XceptionA; optionally load weights from the local model store."""
    model = XceptionA(**kwargs)
    if pretrained:
        from ..model_store import get_model_file
        model.load_state_dict(torch.load(get_model_file('xception_a', root=root)))
    return model


if __name__ == '__main__':
    model = get_xception_a()


# ===== diff header (new file): core/models/bisenet-ziji.py =====
"""Bilateral Segmentation Network"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from core.models.bisenet import BiSeNet as _BiSeNetCanonical  # NOTE(review): placeholder removed — see below
from core.models.base_models.resnet import resnet18, resnet50
from core.nn import _ConvBNReLU

__all__ = ['BiSeNet', 'get_bisenet', 'get_bisenet_resnet18_citys']


class BiSeNet(nn.Module):
    """BiSeNet: spatial path + context path fused by FFM, 64-ch segmentation head."""

    def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs):
        super(BiSeNet, self).__init__()
        self.aux = aux
        self.spatial_path = SpatialPath(3, 128, **kwargs)
        self.context_path = ContextPath(backbone, pretrained_base, **kwargs)
        self.ffm = FeatureFusion(256, 256, 4, **kwargs)
        self.head = _BiSeHead(256, 64, nclass, **kwargs)
        if aux:
            self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs)
            self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs)

        self.__setattr__('exclusive',
                         ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [
                             'spatial_path', 'context_path', 'ffm', 'head'])

    def forward(self, x):
        size = x.size()[2:]
        spatial_out = self.spatial_path(x)
        context_out = self.context_path(x)
        fusion_out = self.ffm(spatial_out, context_out[-1])
        outputs = []
        x = self.head(fusion_out)
        # x is the input; size is the output size; mode selects the upsampling algorithm
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)

        if self.aux:
            auxout1 = self.auxlayer1(context_out[0])
            auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True)
            outputs.append(auxout1)
            auxout2 = self.auxlayer2(context_out[1])
            auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True)
            outputs.append(auxout2)
        # return tuple(outputs)
        return outputs[0]


class _BiSeHead(nn.Module):
    """3x3 conv-BN-ReLU + dropout + 1x1 conv producing per-class logits."""

    def __init__(self, in_channels, inter_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_BiSeHead, self).__init__()
        self.block = nn.Sequential(
            _ConvBNReLU(in_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, nclass, 1)
        )

    def forward(self, x):
        x = self.block(x)
        return x


class SpatialPath(nn.Module):
    """Spatial path"""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(SpatialPath, self).__init__()
        inter_channels = 64
        self.conv7x7 = _ConvBNReLU(in_channels, inter_channels, 7, 2, 3, norm_layer=norm_layer)
        self.conv3x3_1 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv3x3_2 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv1x1 = _ConvBNReLU(inter_channels, out_channels, 1, 1, 0, norm_layer=norm_layer)

    def forward(self, x):
        x = self.conv7x7(x)
        x = self.conv3x3_1(x)
        x = self.conv3x3_2(x)
        x = self.conv1x1(x)

        return x


class _GlobalAvgPooling(nn.Module):
    """Global average pool -> 1x1 conv -> BN/ReLU, upsampled back to input size."""

    def __init__(self, in_channels, out_channels, norm_layer, **kwargs):
        super(_GlobalAvgPooling, self).__init__()
        self.gap = nn.Sequential(
            # AdaptiveAvgPool2d(output_size): target output size H x W; for a
            # square output it may be a tuple (H, W) or a single int H.  H and W
            # may be int or None (None keeps that input dimension).
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels),
            nn.ReLU(True)
        )

    def forward(self, x):
        size = x.size()[2:]
        pool = self.gap(x)
        out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
        return out


class AttentionRefinmentModule(nn.Module):
    """ARM: 3x3 conv then per-channel sigmoid gate from global pooling."""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(AttentionRefinmentModule, self).__init__()
        self.conv3x3 = _ConvBNReLU(in_channels, out_channels, 3, 1, 1, norm_layer=norm_layer)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels, 1, 1, 0, norm_layer=norm_layer),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.conv3x3(x)
        attention = self.channel_attention(x)
        x = x * attention
        return x


class ContextPath(nn.Module):
    """ResNet-backed context path with global context and two ARM refinement stages."""

    def __init__(self, backbone='resnet18', pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs):

        super(ContextPath, self).__init__()
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained_base, **kwargs)
        elif backbone == 'resnet50':
            pretrained = resnet50(pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))

        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

        inter_channels = 128
        self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer)

        self.arms = nn.ModuleList(
            [AttentionRefinmentModule(512, inter_channels, norm_layer, **kwargs),
             AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)]
        )
        self.refines = nn.ModuleList(
            [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
             _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)]
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)

        context_blocks = []
        context_blocks.append(x)
        x = self.layer2(x)
        context_blocks.append(x)
        c3 = self.layer3(x)
        context_blocks.append(c3)
        c4 = self.layer4(c3)
        context_blocks.append(c4)
        # Deepest feature first: [c4, c3, c2, c1].
        context_blocks.reverse()

        global_context = self.global_context(c4)
        last_feature = global_context
        context_outputs = []
        for i, (feature, arm, refine) in enumerate(zip(context_blocks[:2], self.arms, self.refines)):
            feature = arm(feature)
            feature += last_feature
            last_feature = F.interpolate(feature, size=context_blocks[i + 1].size()[2:],
                                         mode='bilinear', align_corners=True)
            last_feature = refine(last_feature)
            context_outputs.append(last_feature)

        return context_outputs


class FeatureFusion(nn.Module):
    """FFM: concat spatial/context features, 1x1 conv, channel-attention residual."""

    def __init__(self, in_channels, out_channels, reduction=1, norm_layer=nn.BatchNorm2d, **kwargs):
        super(FeatureFusion, self).__init__()
        self.conv1x1 = _ConvBNReLU(in_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels // reduction, 1, 1, 0, norm_layer=norm_layer),
            _ConvBNReLU(out_channels // reduction, out_channels, 1, 1, 0, norm_layer=norm_layer),
            nn.Sigmoid()
        )

    def forward(self, x1, x2):
        fusion = torch.cat([x1, x2], dim=1)
        out = self.conv1x1(fusion)
        attention = self.channel_attention(out)
        out = out + out * attention
        return out


def get_bisenet(dataset='citys', backbone='resnet18', pretrained=True, root='~/.torch/models',  # original
                pretrained_base=True, **kwargs):
# def get_bisenet(dataset='segmentation', backbone='resnet18', pretrained=True, root='~/.torch/models',  # modified
#                 pretrained_base=True, **kwargs):

    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    # from ..data.dataloader import datasets
    from ..data.dataloader import datasets
    model = BiSeNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        # NOTE(review): kwargs['local_rank'] raises KeyError when the caller does
        # not pass local_rank — confirm callers always supply it.
        device = torch.device(kwargs['local_rank'])
        model.load_state_dict(torch.load(get_model_file('bisenet_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_bisenet_resnet18_citys(**kwargs):

    return get_bisenet('citys', 'resnet18', **kwargs)  # original
    # return get_bisenet('segmentation', 'resnet18', **kwargs)  # modified


if __name__ == '__main__':
    # img = torch.randn(2, 3, 224, 224)
    # model = BiSeNet(19, backbone='resnet18')
    # print(model.exclusive)

    input = torch.rand(2, 3, 224, 224)

    model = BiSeNet(4, pretrained_base=True)

    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)
    print(loss, loss.shape)

    # from torchsummary import summary
    # summary(model, (3, 224, 224))  # print each layer's output shape and parameter count
    import torch
    from thop import profile  # count the model's FLOPs and parameters

    # from torchsummary import summary  # original

    flop, params = profile(model, input_size=(1, 3, 512, 512))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))


# ===== diff header (new file): core/models/bisenet.py =====
"""Bilateral Segmentation Network"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from core.models.base_models.resnet import resnet18, resnet50
from core.nn import _ConvBNReLU

__all__ = ['BiSeNet', 'get_bisenet', 'get_bisenet_resnet18_citys']


class BiSeNet(nn.Module):
    """BiSeNet variant with optional output resize and argmax test mode."""

    def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs):
        super(BiSeNet, self).__init__()
        self.aux = aux
        self.spatial_path = SpatialPath(3, 128, **kwargs)
        self.context_path = ContextPath(backbone, pretrained_base, **kwargs)
        self.ffm = FeatureFusion(256, 256, 4, **kwargs)
        self.head = _BiSeHead(256, 64, nclass, **kwargs)
        if aux:
            self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs)
            self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs)

        self.__setattr__('exclusive',
                         ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [
                             'spatial_path', 'context_path', 'ffm', 'head'])

    def forward(self, x, outsize=None, test_flag=False):
        """Segment `x`; optionally resize logits to `outsize` and, in test
        mode, return per-pixel argmax labels instead of logits."""
        size = x.size()[2:]
        spatial_out = self.spatial_path(x)
        context_out = self.context_path(x)
        fusion_out = self.ffm(spatial_out, context_out[-1])
        outputs = []
        x = self.head(fusion_out)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)

        if outsize:
            print('######using torch resize#######', outsize)
            x = F.interpolate(x, outsize, mode='bilinear', align_corners=True)
        outputs.append(x)

        if self.aux:
            auxout1 = self.auxlayer1(context_out[0])
            auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True)
            outputs.append(auxout1)
            auxout2 = self.auxlayer2(context_out[1])
            auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True)
            outputs.append(auxout2)
        if test_flag:
            outputs = [torch.argmax(outputx, axis=1) for outputx in outputs]
        # return tuple(outputs)
        return outputs[0]


class BiSeNet_MultiOutput(nn.Module):
    """BiSeNet with one segmentation head per entry of the `nclass` list."""

    def __init__(self, nclass, backbone='resnet18', aux=False, jpu=False, pretrained_base=True, **kwargs):
        super(BiSeNet_MultiOutput, self).__init__()
        self.aux = aux
        self.spatial_path = SpatialPath(3, 128, **kwargs)
        self.context_path = ContextPath(backbone, pretrained_base, **kwargs)
        self.ffm = FeatureFusion(256, 256, 4, **kwargs)
        assert isinstance(nclass, list)
        self.outCnt = len(nclass)
        for ii, nclassii in enumerate(nclass):
            setattr(self, 'head%d' % (ii), _BiSeHead(256, 64, nclassii, **kwargs))

        if aux:
            # NOTE(review): `nclass` is a list here, so these aux heads would
            # receive a list where an int is expected — latent bug, verify
            # before enabling aux=True on this class.
            self.auxlayer1 = _BiSeHead(128, 256, nclass, **kwargs)
            self.auxlayer2 = _BiSeHead(128, 256, nclass, **kwargs)

        self.__setattr__('exclusive',
                         ['spatial_path', 'context_path', 'ffm', 'head', 'auxlayer1', 'auxlayer2'] if aux else [
                             'spatial_path', 'context_path', 'ffm', 'head'])

    def forward(self, x, outsize=None, test_flag=False, smooth_kernel=0):
        """Run every head on the shared fused feature; in test mode return
        argmax label maps, optionally box-smoothed with `smooth_kernel`."""
        size = x.size()[2:]
        spatial_out = self.spatial_path(x)
        context_out = self.context_path(x)
        fusion_out = self.ffm(spatial_out, context_out[-1])
        outputs = []
        for ii in range(self.outCnt):
            x = getattr(self, 'head%d' % (ii))(fusion_out)
            x = F.interpolate(x, size, mode='bilinear', align_corners=True)
            outputs.append(x)

        if self.aux:
            auxout1 = self.auxlayer1(context_out[0])
            auxout1 = F.interpolate(auxout1, size, mode='bilinear', align_corners=True)
            outputs.append(auxout1)
            auxout2 = self.auxlayer2(context_out[1])
            auxout2 = F.interpolate(auxout2, size, mode='bilinear', align_corners=True)
            outputs.append(auxout2)
        if test_flag:
            outputs = [torch.argmax(outputx, axis=1) for outputx in outputs]
            # NOTE(review): original indentation lost in the diff paste — the
            # smoothing branch is reconstructed as nested under test_flag since
            # it consumes argmax label maps; confirm against the repo history.
            if smooth_kernel > 0:
                gaussian_kernel = torch.from_numpy(np.ones((1, 1, smooth_kernel, smooth_kernel)))

                pad = int((smooth_kernel - 1) / 2)
                if not gaussian_kernel.is_cuda:
                    gaussian_kernel = gaussian_kernel.to(x.device)
                # print(gaussian_kernel.dtype, gaussian_kernel, outputs[0].dtype)
                outputs = [x.unsqueeze(1).double() for x in outputs]
                outputs = [torch.conv2d(x, gaussian_kernel, padding=pad) for x in outputs]
                outputs = [x.squeeze(1).long() for x in outputs]
        # return tuple(outputs)
        return outputs


class _BiSeHead(nn.Module):
    """3x3 conv-BN-ReLU + dropout + 1x1 conv producing per-class logits."""

    def __init__(self, in_channels, inter_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_BiSeHead, self).__init__()
        self.block = nn.Sequential(
            _ConvBNReLU(in_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, nclass, 1)
        )

    def forward(self, x):
        x = self.block(x)
        return x


class SpatialPath(nn.Module):
    """Spatial path"""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(SpatialPath, self).__init__()
        inter_channels = 64
        self.conv7x7 = _ConvBNReLU(in_channels, inter_channels, 7, 2, 3, norm_layer=norm_layer)
        self.conv3x3_1 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv3x3_2 = _ConvBNReLU(inter_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer)
        self.conv1x1 = _ConvBNReLU(inter_channels, out_channels, 1, 1, 0, norm_layer=norm_layer)

    def forward(self, x):
        x = self.conv7x7(x)
        x = self.conv3x3_1(x)
        x = self.conv3x3_2(x)
        x = self.conv1x1(x)

        return x


class _GlobalAvgPooling(nn.Module):
    """Global average pool -> 1x1 conv -> BN/ReLU, upsampled back to input size."""

    def __init__(self, in_channels, out_channels, norm_layer, **kwargs):
        super(_GlobalAvgPooling, self).__init__()
        self.gap = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels),
            nn.ReLU(True)
        )

    def forward(self, x):
        size = x.size()[2:]
        pool = self.gap(x)
        out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
        return out


class AttentionRefinmentModule(nn.Module):
    """ARM: 3x3 conv then per-channel sigmoid gate from global pooling."""

    def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(AttentionRefinmentModule, self).__init__()
        self.conv3x3 = _ConvBNReLU(in_channels, out_channels, 3, 1, 1, norm_layer=norm_layer)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels, 1, 1, 0, norm_layer=norm_layer),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.conv3x3(x)
        attention = self.channel_attention(x)
        x = x * attention
        return x


class ContextPath(nn.Module):
    """ResNet-backed context path with global context and two ARM refinement stages."""

    def __init__(self, backbone='resnet18', pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs):
        super(ContextPath, self).__init__()
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained_base, **kwargs)
        elif backbone == 'resnet50':
            pretrained = resnet50(pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

        inter_channels = 128
        self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer)

        self.arms = nn.ModuleList(
            [AttentionRefinmentModule(512, inter_channels, norm_layer, **kwargs),
             AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)]
        )
        self.refines = nn.ModuleList(
            [_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer),
             _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)]
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)

        context_blocks = []
        context_blocks.append(x)
        x = self.layer2(x)
        context_blocks.append(x)
        c3 = self.layer3(x)
        context_blocks.append(c3)
        c4 = self.layer4(c3)
        context_blocks.append(c4)
        context_blocks.reverse()

        global_context = self.global_context(c4)
        last_feature = global_context
        context_outputs = []
        for i, (feature, arm, refine) in enumerate(zip(context_blocks[:2], self.arms, self.refines)):
            feature = arm(feature)
            feature += last_feature
            last_feature = F.interpolate(feature, size=context_blocks[i + 1].size()[2:],
                                         mode='bilinear', align_corners=True)
            last_feature = refine(last_feature)
            context_outputs.append(last_feature)

        return context_outputs


class FeatureFusion(nn.Module):
    """FFM: concat spatial/context features, 1x1 conv, channel-attention residual."""

    def __init__(self, in_channels, out_channels, reduction=1, norm_layer=nn.BatchNorm2d, **kwargs):
        super(FeatureFusion, self).__init__()
        self.conv1x1 = _ConvBNReLU(in_channels, out_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs)
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            _ConvBNReLU(out_channels, out_channels // reduction, 1, 1, 0, norm_layer=norm_layer),
            _ConvBNReLU(out_channels // reduction, out_channels, 1, 1, 0, norm_layer=norm_layer),
            nn.Sigmoid()
        )

    def forward(self, x1, x2):
        fusion = torch.cat([x1, x2], dim=1)
        out = self.conv1x1(fusion)
        attention = self.channel_attention(out)
        out = out + out * attention
        return out


def get_bisenet(dataset='citys', backbone='resnet18', pretrained=False, root='~/.torch/models',
                pretrained_base=True, **kwargs):
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from ..data.dataloader import datasets
    model = BiSeNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        device = torch.device(kwargs['local_rank'])
        model.load_state_dict(torch.load(get_model_file('bisenet_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_bisenet_resnet18_citys(**kwargs):
    return get_bisenet('citys', 'resnet18', **kwargs)


if __name__ == '__main__':
    # img = torch.randn(2, 3, 224, 224)
    # model = BiSeNet(19, backbone='resnet18')
    # print(model.exclusive)
    input = torch.rand(2, 3, 224, 224)
    model = BiSeNet(4, pretrained_base=True)
    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)
    print(loss, loss.shape)

    # from torchsummary import summary
    #
    # summary(model, (3, 224, 224))  # print each layer's output shape and parameter count
    import torch
    from thop import profile
    from torchsummary import summary

    flop, params = profile(model, input_size=(1, 3, 512, 512))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))


# ===== diff header (new file): core/models/ccnet.py =====
"""Criss-Cross Network"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from core.nn import CrissCrossAttention
from core.models.segbase import SegBaseModel
from core.models.fcn import _FCNHead

# Known failure: NameError: name '_C' is not defined

__all__ = ['CCNet', 'get_ccnet', 'get_ccnet_resnet50_citys', 'get_ccnet_resnet101_citys',
           'get_ccnet_resnet152_citys', 'get_ccnet_resnet50_ade', 'get_ccnet_resnet101_ade',
           'get_ccnet_resnet152_ade']


class CCNet(SegBaseModel):
    r"""CCNet

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
        for Synchronized Cross-GPU BachNormalization).
    aux : bool
        Auxiliary loss.

    Reference:
        Zilong Huang, et al. "CCNet: Criss-Cross Attention for Semantic Segmentation."
        arXiv preprint arXiv:1811.11721 (2018).
    """

    def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs):
        super(CCNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
        self.head = _CCHead(nclass, **kwargs)
        if aux:
            self.auxlayer = _FCNHead(1024, nclass, **kwargs)

        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])

    def forward(self, x):
        size = x.size()[2:]
        _, _, c3, c4 = self.base_forward(x)
        outputs = list()
        x = self.head(c4)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)

        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)
        return tuple(outputs)


class _CCHead(nn.Module):
    """RCCA module followed by a 1x1 classifier conv."""

    def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_CCHead, self).__init__()
        self.rcca = _RCCAModule(2048, 512, norm_layer, **kwargs)
        self.out = nn.Conv2d(512, nclass, 1)

    def forward(self, x):
        x = self.rcca(x)
        x = self.out(x)
        return x


class _RCCAModule(nn.Module):
    """Recurrent criss-cross attention: conv -> CCA (x recurrence) -> conv -> bottleneck."""

    def __init__(self, in_channels, out_channels, norm_layer, **kwargs):
        super(_RCCAModule, self).__init__()
        inter_channels = in_channels // 4
        self.conva = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels),
            nn.ReLU(True))
        self.cca = CrissCrossAttention(inter_channels)
        self.convb = nn.Sequential(
            nn.Conv2d(inter_channels, inter_channels, 3, padding=1, bias=False),
            norm_layer(inter_channels),
            nn.ReLU(True))

        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels + inter_channels, out_channels, 3, padding=1, bias=False),
            norm_layer(out_channels),
            nn.Dropout2d(0.1))

    def forward(self, x, recurrence=1):
        out = self.conva(x)
        for i in range(recurrence):
            out = self.cca(out)
        out = self.convb(out)
        out = torch.cat([x, out], dim=1)
        out = self.bottleneck(out)

        return out


def get_ccnet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models',
              pretrained_base=True, **kwargs):
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from ..data.dataloader import datasets
    model = CCNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        device = torch.device(kwargs['local_rank'])
        model.load_state_dict(torch.load(get_model_file('ccnet_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_ccnet_resnet50_citys(**kwargs):
    return get_ccnet('citys', 'resnet50', **kwargs)


def get_ccnet_resnet101_citys(**kwargs):
    return get_ccnet('citys', 'resnet101', **kwargs)


def get_ccnet_resnet152_citys(**kwargs):
    return get_ccnet('citys', 'resnet152', **kwargs)


def get_ccnet_resnet50_ade(**kwargs):
    return get_ccnet('ade20k', 'resnet50', **kwargs)


def get_ccnet_resnet101_ade(**kwargs):
    return get_ccnet('ade20k', 'resnet101', **kwargs)


def get_ccnet_resnet152_ade(**kwargs):
    return get_ccnet('ade20k', 'resnet152', **kwargs)


if __name__ == '__main__':
    # model = get_ccnet_resnet50_citys()
    # img = torch.randn(1, 3, 480, 480)
    # outputs = model(img)
    input = torch.rand(2, 3, 224, 224)
    model = CCNet(4, pretrained_base=False)
    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)
    print(loss, loss.shape)

    # from torchsummary import summary
    #
    # summary(model, (3, 224, 224))  # print each layer's output shape and parameter count
    import torch
    from thop import profile
    from torchsummary import summary

    flop, params = profile(model, input_size=(1, 3, 512, 512))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
# (no newline at end of original ccnet.py)
# ===== diff header (new file): core/models/cgnet.py — content continues past this chunk =====
100644 index 0000000..85cb4e6 --- /dev/null +++ b/core/models/cgnet.py @@ -0,0 +1,228 @@ +"""Context Guided Network for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNPReLU, _BNPReLU + +__all__ = ['CGNet', 'get_cgnet', 'get_cgnet_citys'] + + +class CGNet(nn.Module): + r"""CGNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Tianyi Wu, et al. "CGNet: A Light-weight Context Guided Network for Semantic Segmentation." + arXiv preprint arXiv:1811.08201 (2018). + """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=True, M=3, N=21, **kwargs): + super(CGNet, self).__init__() + # stage 1 + self.stage1_0 = _ConvBNPReLU(3, 32, 3, 2, 1, **kwargs) + self.stage1_1 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs) + self.stage1_2 = _ConvBNPReLU(32, 32, 3, 1, 1, **kwargs) + + self.sample1 = _InputInjection(1) + self.sample2 = _InputInjection(2) + self.bn_prelu1 = _BNPReLU(32 + 3, **kwargs) + + # stage 2 + self.stage2_0 = ContextGuidedBlock(32 + 3, 64, dilation=2, reduction=8, down=True, residual=False, **kwargs) + self.stage2 = nn.ModuleList() + for i in range(0, M - 1): + self.stage2.append(ContextGuidedBlock(64, 64, dilation=2, reduction=8, **kwargs)) + self.bn_prelu2 = _BNPReLU(128 + 3, **kwargs) + + # stage 3 + self.stage3_0 = ContextGuidedBlock(128 + 3, 128, dilation=4, reduction=16, down=True, residual=False, **kwargs) + self.stage3 = nn.ModuleList() + for i in range(0, N - 1): + self.stage3.append(ContextGuidedBlock(128, 128, dilation=4, reduction=16, **kwargs)) + self.bn_prelu3 = _BNPReLU(256, **kwargs) + + self.head = nn.Sequential( + nn.Dropout2d(0.1, False), + nn.Conv2d(256, nclass, 1)) + + self.__setattr__('exclusive', 
['stage1_0', 'stage1_1', 'stage1_2', 'sample1', 'sample2', + 'bn_prelu1', 'stage2_0', 'stage2', 'bn_prelu2', 'stage3_0', + 'stage3', 'bn_prelu3', 'head']) + + def forward(self, x): + size = x.size()[2:] + # stage1 + out0 = self.stage1_0(x) + out0 = self.stage1_1(out0) + out0 = self.stage1_2(out0) + + inp1 = self.sample1(x) + inp2 = self.sample2(x) + + # stage 2 + out0_cat = self.bn_prelu1(torch.cat([out0, inp1], dim=1)) + out1_0 = self.stage2_0(out0_cat) + for i, layer in enumerate(self.stage2): + if i == 0: + out1 = layer(out1_0) + else: + out1 = layer(out1) + out1_cat = self.bn_prelu2(torch.cat([out1, out1_0, inp2], dim=1)) + + # stage 3 + out2_0 = self.stage3_0(out1_cat) + for i, layer in enumerate(self.stage3): + if i == 0: + out2 = layer(out2_0) + else: + out2 = layer(out2) + out2_cat = self.bn_prelu3(torch.cat([out2_0, out2], dim=1)) + + outputs = [] + out = self.head(out2_cat) + out = F.interpolate(out, size, mode='bilinear', align_corners=True) + outputs.append(out) + #return tuple(outputs) + return outputs[0] + + +class _ChannelWiseConv(nn.Module): + def __init__(self, in_channels, out_channels, dilation=1, **kwargs): + super(_ChannelWiseConv, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, 3, 1, dilation, dilation, groups=in_channels, bias=False) + + def forward(self, x): + x = self.conv(x) + return x + + +class _FGlo(nn.Module): + def __init__(self, in_channels, reduction=16, **kwargs): + super(_FGlo, self).__init__() + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(in_channels, in_channels // reduction), + nn.ReLU(True), + nn.Linear(in_channels // reduction, in_channels), + nn.Sigmoid()) + + def forward(self, x): + n, c, _, _ = x.size() + out = self.gap(x).view(n, c) + out = self.fc(out).view(n, c, 1, 1) + return x * out + + +class _InputInjection(nn.Module): + def __init__(self, ratio): + super(_InputInjection, self).__init__() + self.pool = nn.ModuleList() + for i in range(0, ratio): + 
class _ConcatInjection(nn.Module):
    # Concatenate two feature maps along channels, then BN + PReLU.
    # NOTE(review): this class appears unused within cgnet.py (CGNet fuses with
    # the _BNPReLU layers instead) — kept as-is.
    def __init__(self, in_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_ConcatInjection, self).__init__()
        self.bn = norm_layer(in_channels)
        self.prelu = nn.PReLU(in_channels)

    def forward(self, x1, x2):
        out = torch.cat([x1, x2], dim=1)
        out = self.bn(out)
        out = self.prelu(out)
        return out


class ContextGuidedBlock(nn.Module):
    """Context Guided block: a local (f_loc) and a surrounding dilated (f_sur)
    depthwise conv run on the same reduced features; their concatenation is
    normalized and re-weighted by a global-context gate (_FGlo).

    With down=False, inter_channels = out_channels // 2, so the concatenated
    [loc, sur] tensor already has out_channels channels.
    With down=True, the input conv is stride-2 and a 1x1 `reduce` maps the
    2*out_channels concat back to out_channels; callers in CGNet pass
    residual=False together with down=True (a stride-2 branch cannot be added
    to the full-resolution input).
    """

    def __init__(self, in_channels, out_channels, dilation=2, reduction=16, down=False,
                 residual=True, norm_layer=nn.BatchNorm2d, **kwargs):
        super(ContextGuidedBlock, self).__init__()
        inter_channels = out_channels // 2 if not down else out_channels
        if down:
            self.conv = _ConvBNPReLU(in_channels, inter_channels, 3, 2, 1, norm_layer=norm_layer, **kwargs)
            self.reduce = nn.Conv2d(inter_channels * 2, out_channels, 1, bias=False)
        else:
            self.conv = _ConvBNPReLU(in_channels, inter_channels, 1, 1, 0, norm_layer=norm_layer, **kwargs)
        self.f_loc = _ChannelWiseConv(inter_channels, inter_channels, **kwargs)
        self.f_sur = _ChannelWiseConv(inter_channels, inter_channels, dilation, **kwargs)
        self.bn = norm_layer(inter_channels * 2)
        self.prelu = nn.PReLU(inter_channels * 2)
        self.f_glo = _FGlo(out_channels, reduction, **kwargs)
        self.down = down
        self.residual = residual

    def forward(self, x):
        out = self.conv(x)
        loc = self.f_loc(out)
        sur = self.f_sur(out)

        # Joint feature: local + surrounding context, then BN/PReLU.
        joi_feat = torch.cat([loc, sur], dim=1)
        joi_feat = self.prelu(self.bn(joi_feat))
        if self.down:
            joi_feat = self.reduce(joi_feat)

        out = self.f_glo(joi_feat)
        if self.residual:
            out = out + x

        return out
def get_cgnet_citys(**kwargs):
    """CGNet for Cityscapes (thin wrapper over get_cgnet)."""
    return get_cgnet('citys', '', **kwargs)


if __name__ == '__main__':
    # Smoke test: forward a random batch through an untrained CGNet.
    # model = get_cgnet_citys()
    # print(model)
    input = torch.rand(2, 3, 224, 224)
    model = CGNet(4, pretrained_base=True)
    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)  # NOTE(review): raw network output, not a loss value
    print(loss, loss.shape)

    # from torchsummary import summary
    #
    # summary(model, (3, 224, 224))  # prints a table of per-layer output shapes and parameter counts
    import torch
    from thop import profile
    from torchsummary import summary

    flop, params = profile(model, input_size=(1, 3, 512, 512))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
+ Reference: + Jun Fu, Jing Liu, Haijie Tian, Yong Li, Yongjun Bao, Zhiwei Fang,and Hanqing Lu. + "Dual Attention Network for Scene Segmentation." *CVPR*, 2019 + """ + + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(DANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _DAHead(2048, nclass, aux, **kwargs) + + self.__setattr__('exclusive', ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x0 = F.interpolate(x[0], size, mode='bilinear', align_corners=True) + outputs.append(x0) + + if self.aux: + x1 = F.interpolate(x[1], size, mode='bilinear', align_corners=True) + x2 = F.interpolate(x[2], size, mode='bilinear', align_corners=True) + outputs.append(x1) + outputs.append(x2) + #return outputs + return outputs[0] + +class _PositionAttentionModule(nn.Module): + """ Position attention module""" + + def __init__(self, in_channels, **kwargs): + super(_PositionAttentionModule, self).__init__() + self.conv_b = nn.Conv2d(in_channels, in_channels // 8, 1) + self.conv_c = nn.Conv2d(in_channels, in_channels // 8, 1) + self.conv_d = nn.Conv2d(in_channels, in_channels, 1) + self.alpha = nn.Parameter(torch.zeros(1)) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x): + batch_size, _, height, width = x.size() + feat_b = self.conv_b(x).view(batch_size, -1, height * width).permute(0, 2, 1) + feat_c = self.conv_c(x).view(batch_size, -1, height * width) + attention_s = self.softmax(torch.bmm(feat_b, feat_c)) + feat_d = self.conv_d(x).view(batch_size, -1, height * width) + feat_e = torch.bmm(feat_d, attention_s.permute(0, 2, 1)).view(batch_size, -1, height, width) + out = self.alpha * feat_e + x + + return out + + +class _ChannelAttentionModule(nn.Module): + """Channel attention module""" + + def __init__(self, **kwargs): + super(_ChannelAttentionModule, self).__init__() + self.beta = 
    def forward(self, x):
        """Channel attention: channels attend over channels via the Gram matrix
        of the flattened feature map; output is x + beta * attended features."""
        batch_size, _, height, width = x.size()
        feat_a = x.view(batch_size, -1, height * width)  # (N, C, HW)
        feat_a_transpose = x.view(batch_size, -1, height * width).permute(0, 2, 1)  # (N, HW, C)
        attention = torch.bmm(feat_a, feat_a_transpose)  # (N, C, C) channel affinities
        # Energies are replaced by (row max - energy) before the softmax.
        # NOTE(review): this inversion matches the released DANet code rather
        # than a plain softmax — presumed deliberate; confirm before changing.
        attention_new = torch.max(attention, dim=-1, keepdim=True)[0].expand_as(attention) - attention
        attention = self.softmax(attention_new)

        feat_e = torch.bmm(attention, feat_a).view(batch_size, -1, height, width)
        # beta is a learned scalar initialized to zero, so the module starts as identity.
        out = self.beta * feat_e + x

        return out
    def forward(self, x):
        """Run the position (PAM) and channel (CAM) branches in parallel and
        fuse them by elementwise sum.

        Returns a tuple: (fused_logits,) or, when aux is enabled,
        (fused_logits, position_branch_logits, channel_branch_logits).
        """
        feat_p = self.conv_p1(x)
        feat_p = self.pam(feat_p)
        feat_p = self.conv_p2(feat_p)

        feat_c = self.conv_c1(x)
        feat_c = self.cam(feat_c)
        feat_c = self.conv_c2(feat_c)

        # Sum-fusion of the two attention branches.
        feat_fusion = feat_p + feat_c

        outputs = []
        fusion_out = self.out(feat_fusion)
        outputs.append(fusion_out)
        if self.aux:
            p_out = self.conv_p3(feat_p)
            c_out = self.conv_c3(feat_c)
            outputs.append(p_out)
            outputs.append(c_out)

        return tuple(outputs)
def get_danet_resnet50_citys(**kwargs):
    """DANet with a ResNet-50 backbone for Cityscapes."""
    return get_danet('citys', 'resnet50', **kwargs)


def get_danet_resnet101_citys(**kwargs):
    """DANet with a ResNet-101 backbone for Cityscapes."""
    return get_danet('citys', 'resnet101', **kwargs)


def get_danet_resnet152_citys(**kwargs):
    """DANet with a ResNet-152 backbone for Cityscapes."""
    return get_danet('citys', 'resnet152', **kwargs)


if __name__ == '__main__':
    # Smoke test: forward a random batch through an untrained DANet.
    # img = torch.randn(2, 3, 480, 480)
    # model = get_danet_resnet50_citys()
    # outputs = model(img)
    input = torch.rand(2, 3,512,512)
    model = DANet(4, pretrained_base=False)
    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)  # NOTE(review): raw network output, not a loss value
    print(loss, loss.shape)

    # from torchsummary import summary
    #
    # summary(model, (3, 224, 224))  # prints a table of per-layer output shapes and parameter counts
    import torch
    from thop import profile
    from torchsummary import summary

    flop, params = profile(model, input_size=(1, 3, 512, 512))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
class DeepLabV3(SegBaseModel):
    r"""DeepLabV3

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
        for Synchronized Cross-GPU BachNormalization).
    aux : bool
        Auxiliary loss.

    Reference:
        Chen, Liang-Chieh, et al. "Rethinking atrous convolution for semantic image segmentation."
        arXiv preprint arXiv:1706.05587 (2017).
    """

    def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs):
        super(DeepLabV3, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
        # ASPP head on the 2048-channel c4 feature map from the backbone.
        self.head = _DeepLabHead(nclass, **kwargs)
        if self.aux:
            # Auxiliary FCN head on the 1024-channel c3 feature map.
            self.auxlayer = _FCNHead(1024, nclass, **kwargs)

        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])

    def forward(self, x):
        size = x.size()[2:]
        _, _, c3, c4 = self.base_forward(x)
        outputs = []
        x = self.head(c4)
        # Upsample logits back to the input resolution.
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)

        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)
        # NOTE(review): returns a tuple, while the sibling models in this change
        # (CGNet, DANet, DenseASPP, DFANet) were edited to return a single
        # tensor — confirm which convention downstream code expects.
        return tuple(outputs)
nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(256, nclass, 1) + ) + + def forward(self, x): + x = self.aspp(x) + return self.block(x) + + +class _ASPPConv(nn.Module): + def __init__(self, in_channels, out_channels, atrous_rate, norm_layer, norm_kwargs): + super(_ASPPConv, self).__init__() + self.block = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + def forward(self, x): + return self.block(x) + + +class _AsppPooling(nn.Module): + def __init__(self, in_channels, out_channels, norm_layer, norm_kwargs, **kwargs): + super(_AsppPooling, self).__init__() + self.gap = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + def forward(self, x): + size = x.size()[2:] + pool = self.gap(x) + out = F.interpolate(pool, size, mode='bilinear', align_corners=True) + return out + + +class _ASPP(nn.Module): + def __init__(self, in_channels, atrous_rates, norm_layer, norm_kwargs, **kwargs): + super(_ASPP, self).__init__() + out_channels = 256 + self.b0 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + rate1, rate2, rate3 = tuple(atrous_rates) + self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer, norm_kwargs) + self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer, norm_kwargs) + self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer, norm_kwargs) + self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs) + + self.project = nn.Sequential( + nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + 
def get_deeplabv3(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models',
                  pretrained_base=True, **kwargs):
    """Build a DeepLabV3 model.

    Parameters
    ----------
    dataset : str
        Dataset key ('pascal_voc', 'pascal_aug', 'ade20k', 'coco', 'citys');
        determines the number of output classes.
    backbone : str
        Backbone name ('resnet50', 'resnet101' or 'resnet152').
    pretrained : bool
        Load pretrained segmentation weights from the model store.
    root : str
        Location of cached model parameters.
    pretrained_base : bool
        Load ImageNet-pretrained backbone weights.

    Returns
    -------
    DeepLabV3
    """
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from ..data.dataloader import datasets
    model = DeepLabV3(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        # Fix: kwargs['local_rank'] raised KeyError whenever pretrained=True and
        # the caller did not pass local_rank; default to CPU mapping instead.
        device = torch.device(kwargs.get('local_rank', 'cpu'))
        model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_deeplabv3_resnet50_voc(**kwargs):
    """DeepLabV3 / ResNet-50 for Pascal VOC."""
    return get_deeplabv3('pascal_voc', 'resnet50', **kwargs)


def get_deeplabv3_resnet101_voc(**kwargs):
    """DeepLabV3 / ResNet-101 for Pascal VOC."""
    return get_deeplabv3('pascal_voc', 'resnet101', **kwargs)


def get_deeplabv3_resnet152_voc(**kwargs):
    """DeepLabV3 / ResNet-152 for Pascal VOC."""
    return get_deeplabv3('pascal_voc', 'resnet152', **kwargs)


def get_deeplabv3_resnet50_ade(**kwargs):
    """DeepLabV3 / ResNet-50 for ADE20K."""
    return get_deeplabv3('ade20k', 'resnet50', **kwargs)


def get_deeplabv3_resnet101_ade(**kwargs):
    """DeepLabV3 / ResNet-101 for ADE20K."""
    return get_deeplabv3('ade20k', 'resnet101', **kwargs)


def get_deeplabv3_resnet152_ade(**kwargs):
    """DeepLabV3 / ResNet-152 for ADE20K."""
    return get_deeplabv3('ade20k', 'resnet152', **kwargs)


if __name__ == '__main__':
    # Smoke test: build the VOC model and run a random batch through it.
    model = get_deeplabv3_resnet50_voc()
    img = torch.randn(2, 3, 480, 480)
    output = model(img)
    def __init__(self, nclass, backbone='xception', aux=True, pretrained_base=True, dilated=True, **kwargs):
        """DeepLabV3+ over an Xception backbone.

        nclass: number of output classes; dilated=True keeps output stride 8
        (atrous backbone) versus 32; aux adds an FCN head on the 728-channel
        middle-flow features.
        """
        super(DeepLabV3Plus, self).__init__()
        self.aux = aux
        self.nclass = nclass
        output_stride = 8 if dilated else 32

        self.pretrained = get_xception(pretrained=pretrained_base, output_stride=output_stride, **kwargs)

        # deeplabv3 plus
        self.head = _DeepLabHead(nclass, **kwargs)
        if aux:
            self.auxlayer = _FCNHead(728, nclass, **kwargs)
class _DeepLabHead(nn.Module):
    """DeepLabV3+ decoder head: ASPP on the high-level features, fused with a
    channel-reduced low-level feature map (c1), then refined by two 3x3 conv
    blocks and projected to class logits."""

    def __init__(self, nclass, c1_channels=128, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_DeepLabHead, self).__init__()
        self.aspp = _ASPP(2048, [12, 24, 36], norm_layer=norm_layer, **kwargs)
        # Reduce the low-level features to 48 channels before fusion.
        # NOTE(review): c1_channels=128 presumably matches the xception block1
        # output consumed in DeepLabV3Plus.base_forward — confirm against caller.
        self.c1_block = _ConvBNReLU(c1_channels, 48, 3, padding=1, norm_layer=norm_layer)
        self.block = nn.Sequential(
            _ConvBNReLU(304, 256, 3, padding=1, norm_layer=norm_layer),  # 304 = 256 (ASPP) + 48 (c1)
            nn.Dropout(0.5),
            _ConvBNReLU(256, 256, 3, padding=1, norm_layer=norm_layer),
            nn.Dropout(0.1),
            nn.Conv2d(256, nclass, 1))

    def forward(self, x, c1):
        size = c1.size()[2:]
        c1 = self.c1_block(c1)
        x = self.aspp(x)
        # Upsample the ASPP output to the low-level resolution before concat.
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        return self.block(torch.cat([x, c1], dim=1))
    def __init__(self, nclass, backbone='densenet121', aux=False, jpu=False,
                 pretrained_base=True, dilate_scale=8, **kwargs):
        """DenseASPP over a dilated DenseNet backbone.

        nclass: number of output classes; backbone: densenet121/161/169/201;
        aux: add an auxiliary FCN head on the backbone features;
        dilate_scale: backbone output stride (values > 8 trigger an extra 2x
        upsampling in forward()).
        """
        super(DenseASPP, self).__init__()
        self.nclass = nclass
        self.aux = aux
        self.dilate_scale = dilate_scale
        # Dispatch on the backbone name; each builder returns a dilated DenseNet.
        if backbone == 'densenet121':
            self.pretrained = dilated_densenet121(dilate_scale, pretrained=pretrained_base, **kwargs)
        elif backbone == 'densenet161':
            self.pretrained = dilated_densenet161(dilate_scale, pretrained=pretrained_base, **kwargs)
        elif backbone == 'densenet169':
            self.pretrained = dilated_densenet169(dilate_scale, pretrained=pretrained_base, **kwargs)
        elif backbone == 'densenet201':
            self.pretrained = dilated_densenet201(dilate_scale, pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        in_channels = self.pretrained.num_features

        self.head = _DenseASPPHead(in_channels, nclass)

        if aux:
            self.auxlayer = _FCNHead(in_channels, nclass, **kwargs)

        # 'exclusive' lists the newly initialized (non-backbone) modules,
        # e.g. so a training script can give them a separate learning rate.
        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])
align_corners=True) + outputs = [] + x = self.head(features) #torch.Size([2, 4, 64, 64]) + #print('x.shape',x.shape) + x = F.interpolate(x, size, mode='bilinear', align_corners=True)#直接64到512。。。。效果还这么好! + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(features) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _DenseASPPHead(nn.Module): + def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_DenseASPPHead, self).__init__() + self.dense_aspp_block = _DenseASPPBlock(in_channels, 256, 64, norm_layer, norm_kwargs) + self.block = nn.Sequential( + nn.Dropout(0.1), + nn.Conv2d(in_channels + 5 * 64, nclass, 1) + ) + + def forward(self, x): + x = self.dense_aspp_block(x) + return self.block(x) + + +class _DenseASPPConv(nn.Sequential): + def __init__(self, in_channels, inter_channels, out_channels, atrous_rate, + drop_rate=0.1, norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(_DenseASPPConv, self).__init__() + self.add_module('conv1', nn.Conv2d(in_channels, inter_channels, 1)), + self.add_module('bn1', norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs))), + self.add_module('relu1', nn.ReLU(True)), + self.add_module('conv2', nn.Conv2d(inter_channels, out_channels, 3, dilation=atrous_rate, padding=atrous_rate)), + self.add_module('bn2', norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs))), + self.add_module('relu2', nn.ReLU(True)), + self.drop_rate = drop_rate + + def forward(self, x): + features = super(_DenseASPPConv, self).forward(x) + if self.drop_rate > 0: + features = F.dropout(features, p=self.drop_rate, training=self.training) + return features + + +class _DenseASPPBlock(nn.Module): + def __init__(self, in_channels, inter_channels1, inter_channels2, + norm_layer=nn.BatchNorm2d, norm_kwargs=None): + super(_DenseASPPBlock, self).__init__() + self.aspp_3 = 
    def forward(self, x):
        """Densely cascade the dilated branches: each branch receives the
        original input concatenated with all previous branch outputs (the
        running tensor grows by inter_channels2 per step), and the returned
        tensor keeps every branch output plus the input."""
        aspp3 = self.aspp_3(x)
        x = torch.cat([aspp3, x], dim=1)

        aspp6 = self.aspp_6(x)
        x = torch.cat([aspp6, x], dim=1)

        aspp12 = self.aspp_12(x)
        x = torch.cat([aspp12, x], dim=1)

        aspp18 = self.aspp_18(x)
        x = torch.cat([aspp18, x], dim=1)

        aspp24 = self.aspp_24(x)
        x = torch.cat([aspp24, x], dim=1)

        return x
def get_denseaspp_densenet121_citys(**kwargs):
    """DenseASPP with a DenseNet-121 backbone for Cityscapes."""
    return get_denseaspp('citys', 'densenet121', **kwargs)


def get_denseaspp_densenet161_citys(**kwargs):
    """DenseASPP with a DenseNet-161 backbone for Cityscapes."""
    return get_denseaspp('citys', 'densenet161', **kwargs)


def get_denseaspp_densenet169_citys(**kwargs):
    """DenseASPP with a DenseNet-169 backbone for Cityscapes."""
    return get_denseaspp('citys', 'densenet169', **kwargs)


def get_denseaspp_densenet201_citys(**kwargs):
    """DenseASPP with a DenseNet-201 backbone for Cityscapes."""
    return get_denseaspp('citys', 'densenet201', **kwargs)


if __name__ == '__main__':
    # Smoke test: forward a random batch through an untrained DenseASPP.
    # img = torch.randn(2, 3, 480, 480)
    # model = get_denseaspp_densenet121_citys()
    # outputs = model(img)
    input = torch.rand(2, 3, 512, 512)
    model = DenseASPP(4, pretrained_base=True)
    # target = torch.zeros(4, 512, 512).cuda()
    # model.eval()
    # print(model)
    loss = model(input)  # NOTE(review): raw network output, not a loss value
    print(loss, loss.shape)

    # from torchsummary import summary
    #
    # summary(model, (3, 224, 224))  # prints a table of per-layer output shapes and parameter counts
    import torch
    from thop import profile
    from torchsummary import summary

    flop, params = profile(model, input_size=(1, 3, 512, 512))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
    def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=False, **kwargs):
        """DFANet: three cascaded encoder stages over an Xception-A backbone,
        fused in the decoder at both the enc2 (high-res) and fca (low-res) levels."""
        super(DFANet, self).__init__()
        self.pretrained = get_xception_a(pretrained_base, **kwargs)

        # Stage-2 encoder; in forward() each Enc receives the previous stage's
        # features concatenated with an upsampled deeper output, hence the
        # widened input channel counts (240/144/288).
        self.enc2_2 = Enc(240, 48, 4, **kwargs)
        self.enc3_2 = Enc(144, 96, 6, **kwargs)
        self.enc4_2 = Enc(288, 192, 4, **kwargs)
        self.fca_2 = FCAttention(192, **kwargs)

        # Stage-3 encoder.
        # NOTE(review): 'enc3_4' presumably should be named 'enc4_3' (it plays
        # the enc4 role of stage 3); renaming would break existing checkpoints,
        # so it is kept and used consistently in forward().
        self.enc2_3 = Enc(240, 48, 4, **kwargs)
        self.enc3_3 = Enc(144, 96, 6, **kwargs)
        self.enc3_4 = Enc(288, 192, 4, **kwargs)
        self.fca_3 = FCAttention(192, **kwargs)

        # 1x1 reductions to a common 32-channel space for the decoder sums.
        self.enc2_1_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
        self.enc2_2_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
        self.enc2_3_reduce = _ConvBNReLU(48, 32, 1, **kwargs)
        self.conv_fusion = _ConvBNReLU(32, 32, 1, **kwargs)

        self.fca_1_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
        self.fca_2_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
        self.fca_3_reduce = _ConvBNReLU(192, 32, 1, **kwargs)
        self.conv_out = nn.Conv2d(32, nclass, 1)

        # 'exclusive' lists the newly initialized (non-backbone) modules,
        # e.g. so a training script can give them a separate learning rate.
        self.__setattr__('exclusive', ['enc2_2', 'enc3_2', 'enc4_2', 'fca_2', 'enc2_3', 'enc3_3', 'enc3_4', 'fca_3',
                                       'enc2_1_reduce', 'enc2_2_reduce', 'enc2_3_reduce', 'conv_fusion', 'fca_1_reduce',
                                       'fca_2_reduce', 'fca_3_reduce', 'conv_out'])
self.enc3_2(torch.cat([stage1_enc3, stage2_enc2], dim=1)) + stage2_enc4 = self.enc4_2(torch.cat([stage1_enc4, stage2_enc3], dim=1)) + stage2_fca = self.fca_2(stage2_enc4) + stage2_out = F.interpolate(stage2_fca, scale_factor=4, mode='bilinear', align_corners=True) + + # stage3 + stage3_enc2 = self.enc2_3(torch.cat([stage2_enc2, stage2_out], dim=1)) + stage3_enc3 = self.enc3_3(torch.cat([stage2_enc3, stage3_enc2], dim=1)) + stage3_enc4 = self.enc3_4(torch.cat([stage2_enc4, stage3_enc3], dim=1)) + stage3_fca = self.fca_3(stage3_enc4) + + stage1_enc2_decoder = self.enc2_1_reduce(stage1_enc2) + stage2_enc2_docoder = F.interpolate(self.enc2_2_reduce(stage2_enc2), scale_factor=2, + mode='bilinear', align_corners=True) + stage3_enc2_decoder = F.interpolate(self.enc2_3_reduce(stage3_enc2), scale_factor=4, + mode='bilinear', align_corners=True) + fusion = stage1_enc2_decoder + stage2_enc2_docoder + stage3_enc2_decoder + fusion1 = self.conv_fusion(fusion) + + stage1_fca_decoder = F.interpolate(self.fca_1_reduce(stage1_fca), scale_factor=4, + mode='bilinear', align_corners=True) + stage2_fca_decoder = F.interpolate(self.fca_2_reduce(stage2_fca), scale_factor=8, + mode='bilinear', align_corners=True) + stage3_fca_decoder = F.interpolate(self.fca_3_reduce(stage3_fca), scale_factor=16, + mode='bilinear', align_corners=True) + #print(fusion.shape,stage1_fca_decoder.shape,stage2_fca_decoder.shape,stage3_fca_decoder.shape) + fusion2 = fusion1 + stage1_fca_decoder + stage2_fca_decoder + stage3_fca_decoder + + outputs = list() + out = self.conv_out(fusion2) + out1 = F.interpolate(out, scale_factor=4, mode='bilinear', align_corners=True) + outputs.append(out1) + + #return tuple(outputs) + return outputs[0] + +def get_dfanet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from 
..data.dataloader import datasets + model = DFANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('dfanet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_dfanet_citys(**kwargs): + return get_dfanet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_dfanet_citys() + input = torch.rand(2, 3, 512, 512) + model = DFANet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) diff --git a/core/models/dunet.py b/core/models/dunet.py new file mode 100644 index 0000000..affc476 --- /dev/null +++ b/core/models/dunet.py @@ -0,0 +1,172 @@ +"""Decoders Matter for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['DUNet', 'get_dunet', 'get_dunet_resnet50_pascal_voc', + 'get_dunet_resnet101_pascal_voc', 'get_dunet_resnet152_pascal_voc'] + + +# The model may be wrong because lots of details missing in paper. +class DUNet(SegBaseModel): + """Decoders Matter for Semantic Segmentation + + Reference: + Zhi Tian, Tong He, Chunhua Shen, and Youliang Yan. + "Decoders Matter for Semantic Segmentation: + Data-Dependent Decoding Enables Flexible Feature Aggregation." 
class DUNet(SegBaseModel):
    """Decoders Matter for Semantic Segmentation.

    Reference:
        Zhi Tian, Tong He, Chunhua Shen, and Youliang Yan.
        "Decoders Matter for Semantic Segmentation:
        Data-Dependent Decoding Enables Flexible Feature Aggregation." CVPR, 2019
    """

    def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs):
        super(DUNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
        self.head = _DUHead(2144, **kwargs)
        self.dupsample = DUpsampling(256, nclass, scale_factor=8, **kwargs)
        if aux:
            self.auxlayer = _FCNHead(1024, 256, **kwargs)
            self.aux_dupsample = DUpsampling(256, nclass, scale_factor=8, **kwargs)

        self.__setattr__('exclusive',
                         ['dupsample', 'head', 'auxlayer', 'aux_dupsample'] if aux else ['dupsample', 'head'])

    def forward(self, x):
        # base_forward (inherited from SegBaseModel) yields the four resnet stage outputs.
        c1, c2, c3, c4 = self.base_forward(x)
        main_out = self.dupsample(self.head(c2, c3, c4))
        outputs = [main_out]
        if self.aux:
            outputs.append(self.aux_dupsample(self.auxlayer(c3)))
        # This fork returns only the main prediction (originally a tuple).
        return outputs[0]


class FeatureFused(nn.Module):
    """Project c2/c3 to `inter_channels` at c4's resolution and concat with c4."""

    def __init__(self, inter_channels=48, norm_layer=nn.BatchNorm2d, **kwargs):
        super(FeatureFused, self).__init__()
        self.conv2 = nn.Sequential(
            nn.Conv2d(512, inter_channels, 1, bias=False),
            norm_layer(inter_channels),
            nn.ReLU(True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(1024, inter_channels, 1, bias=False),
            norm_layer(inter_channels),
            nn.ReLU(True)
        )

    def forward(self, c2, c3, c4):
        target_size = c4.size()[2:]
        c2_proj = self.conv2(F.interpolate(c2, target_size, mode='bilinear', align_corners=True))
        c3_proj = self.conv3(F.interpolate(c3, target_size, mode='bilinear', align_corners=True))
        return torch.cat([c4, c3_proj, c2_proj], dim=1)


class _DUHead(nn.Module):
    """Fuse multi-level features, then refine with two 3x3 conv-BN-ReLU blocks."""

    def __init__(self, in_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_DUHead, self).__init__()
        self.fuse = FeatureFused(norm_layer=norm_layer, **kwargs)
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, 256, 3, padding=1, bias=False),
            norm_layer(256),
            nn.ReLU(True),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            norm_layer(256),
            nn.ReLU(True)
        )

    def forward(self, c2, c3, c4):
        return self.block(self.fuse(c2, c3, c4))
class DUpsampling(nn.Module):
    """Data-dependent upsampling (DUpsampling).

    A 1x1 conv expands channels by scale_factor**2, then pixel rearrangement
    trades those channels for an (H*scale, W*scale) spatial grid.
    """

    def __init__(self, in_channels, out_channels, scale_factor=2, **kwargs):
        super(DUpsampling, self).__init__()
        self.scale_factor = scale_factor
        self.conv_w = nn.Conv2d(in_channels, out_channels * scale_factor * scale_factor, 1, bias=False)

    def forward(self, x):
        x = self.conv_w(x)
        n, c, h, w = x.size()
        scale = self.scale_factor
        # N,C,H,W -> N,W,H,C -> N,W,H*s,C/s -> N,H*s,W,C/s -> N,H*s,W*s,C/s^2 -> N,C/s^2,H*s,W*s
        x = x.permute(0, 3, 2, 1).contiguous()
        x = x.view(n, w, h * scale, c // scale)
        x = x.permute(0, 2, 1, 3).contiguous()
        x = x.view(n, h * scale, w * scale, c // (scale * scale))
        return x.permute(0, 3, 1, 2)


def get_dunet(dataset='pascal_voc', backbone='resnet50', pretrained=False,
              root='~/.torch/models', pretrained_base=True, **kwargs):
    """Factory: build a DUNet, optionally loading a pretrained checkpoint."""
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from ..data.dataloader import datasets
    model = DUNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        device = torch.device(kwargs['local_rank'])
        checkpoint = torch.load(get_model_file('dunet_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                map_location=device)
        model.load_state_dict(checkpoint)
    return model


def get_dunet_resnet50_pascal_voc(**kwargs):
    return get_dunet('pascal_voc', 'resnet50', **kwargs)


def get_dunet_resnet101_pascal_voc(**kwargs):
    return get_dunet('pascal_voc', 'resnet101', **kwargs)


def get_dunet_resnet152_pascal_voc(**kwargs):
    return get_dunet('pascal_voc', 'resnet152', **kwargs)


if __name__ == '__main__':
    input = torch.rand(2, 3, 224, 224)
    model = DUNet(4, pretrained_base=False)
    loss = model(input)
    print(loss, loss.shape)

    from thop import profile

    input = torch.randn(1, 3, 512, 512)
    flop, params = profile(model, inputs=(input, ))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
class EncNet(SegBaseModel):
    """Context Encoding for Semantic Segmentation (EncNet)."""

    def __init__(self, nclass, backbone='resnet50', aux=True, se_loss=True, lateral=False,
                 pretrained_base=True, **kwargs):
        super(EncNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs)
        self.head = _EncHead(2048, nclass, se_loss=se_loss, lateral=lateral, **kwargs)
        if aux:
            self.auxlayer = _FCNHead(1024, nclass, **kwargs)

        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])

    def forward(self, x):
        full_size = x.size()[2:]
        features = self.base_forward(x)

        outs = list(self.head(*features))
        outs[0] = F.interpolate(outs[0], full_size, mode='bilinear', align_corners=True)
        if self.aux:
            aux_out = self.auxlayer(features[2])
            outs.append(F.interpolate(aux_out, full_size, mode='bilinear', align_corners=True))
        return tuple(outs)


class _EncHead(nn.Module):
    """EncNet head: 3x3 projection, optional lateral fusion, EncModule, 1x1 classifier."""

    def __init__(self, in_channels, nclass, se_loss=True, lateral=True,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_EncHead, self).__init__()
        nkw = {} if norm_kwargs is None else norm_kwargs
        self.lateral = lateral
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels, 512, 3, padding=1, bias=False),
            norm_layer(512, **nkw),
            nn.ReLU(True)
        )
        if lateral:
            self.connect = nn.ModuleList([
                nn.Sequential(
                    nn.Conv2d(512, 512, 1, bias=False),
                    norm_layer(512, **nkw),
                    nn.ReLU(True)),
                nn.Sequential(
                    nn.Conv2d(1024, 512, 1, bias=False),
                    norm_layer(512, **nkw),
                    nn.ReLU(True)),
            ])
            self.fusion = nn.Sequential(
                nn.Conv2d(3 * 512, 512, 3, padding=1, bias=False),
                norm_layer(512, **nkw),
                nn.ReLU(True)
            )
        self.encmodule = EncModule(512, nclass, ncodes=32, se_loss=se_loss,
                                   norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
        self.conv6 = nn.Sequential(
            nn.Dropout(0.1, False),
            nn.Conv2d(512, nclass, 1)
        )

    def forward(self, *inputs):
        feat = self.conv5(inputs[-1])
        if self.lateral:
            c2 = self.connect[0](inputs[1])
            c3 = self.connect[1](inputs[2])
            feat = self.fusion(torch.cat([feat, c2, c3], 1))
        outs = list(self.encmodule(feat))
        outs[0] = self.conv6(outs[0])
        return tuple(outs)
class EncModule(nn.Module):
    """Context encoding module: channel re-weighting plus optional SE-loss logits."""

    def __init__(self, in_channels, nclass, ncodes=32, se_loss=True,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(EncModule, self).__init__()
        self.se_loss = se_loss
        # Conv/BN/ReLU, then the encoding layer, then reduce codewords to a vector.
        self.encoding = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 1, bias=False),
            norm_layer(in_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True),
            Encoding(D=in_channels, K=ncodes),
            nn.BatchNorm1d(ncodes),
            nn.ReLU(True),
            Mean(dim=1)
        )
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels),
            nn.Sigmoid()
        )
        if self.se_loss:
            self.selayer = nn.Linear(in_channels, nclass)

    def forward(self, x):
        en = self.encoding(x)
        b, c, _, _ = x.size()
        # Per-channel attention from the encoded context.
        gamma = self.fc(en)
        y = gamma.view(b, c, 1, 1)
        outputs = [F.relu_(x + x * y)]
        if self.se_loss:
            outputs.append(self.selayer(en))
        return tuple(outputs)


class Encoding(nn.Module):
    """Learnable residual encoding layer (the "Encoding Layer" of EncNet)."""

    def __init__(self, D, K):
        super(Encoding, self).__init__()
        # D feature dims, K codewords with per-codeword smoothing factors.
        self.D, self.K = D, K
        self.codewords = nn.Parameter(torch.Tensor(K, D), requires_grad=True)
        self.scale = nn.Parameter(torch.Tensor(K), requires_grad=True)
        self.reset_params()

    def reset_params(self):
        std1 = 1. / ((self.K * self.D) ** (1 / 2))
        self.codewords.data.uniform_(-std1, std1)
        self.scale.data.uniform_(-1, 0)

    def forward(self, X):
        assert (X.size(1) == self.D)
        B, D = X.size(0), self.D
        if X.dim() == 3:
            # BxDxN -> BxNxD
            X = X.transpose(1, 2).contiguous()
        elif X.dim() == 4:
            # BxDxHxW -> Bx(HW)xD
            X = X.view(B, D, -1).transpose(1, 2).contiguous()
        else:
            raise RuntimeError('Encoding Layer unknown input dims!')
        # Soft-assignment weights BxNxK, then residual aggregation -> BxKxD.
        A = F.softmax(self.scale_l2(X, self.codewords, self.scale), dim=2)
        return self.aggregate(A, X, self.codewords)

    def __repr__(self):
        return self.__class__.__name__ + '(' \
               + 'N x' + str(self.D) + '=>' + str(self.K) + 'x' \
               + str(self.D) + ')'

    @staticmethod
    def scale_l2(X, C, S):
        S = S.view(1, 1, C.size(0), 1)
        X = X.unsqueeze(2).expand(X.size(0), X.size(1), C.size(0), C.size(1))
        C = C.unsqueeze(0).unsqueeze(0)
        return (S * (X - C)).pow(2).sum(3)

    @staticmethod
    def aggregate(A, X, C):
        A = A.unsqueeze(3)
        X = X.unsqueeze(2).expand(X.size(0), X.size(1), C.size(0), C.size(1))
        C = C.unsqueeze(0).unsqueeze(0)
        return (A * (X - C)).sum(1)


class Mean(nn.Module):
    """Average the input along a fixed dimension (module form of Tensor.mean)."""

    def __init__(self, dim, keep_dim=False):
        super(Mean, self).__init__()
        self.dim = dim
        self.keep_dim = keep_dim

    def forward(self, input):
        return input.mean(self.dim, self.keep_dim)


def get_encnet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models',
               pretrained_base=True, **kwargs):
    """Factory for EncNet; optionally loads a pretrained checkpoint."""
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from ..data.dataloader import datasets
    model = EncNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        device = torch.device(kwargs['local_rank'])
        model.load_state_dict(torch.load(get_model_file('encnet_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_encnet_resnet50_ade(**kwargs):
    return get_encnet('ade20k', 'resnet50', **kwargs)


def get_encnet_resnet101_ade(**kwargs):
    return get_encnet('ade20k', 'resnet101', **kwargs)


def get_encnet_resnet152_ade(**kwargs):
    return get_encnet('ade20k', 'resnet152', **kwargs)


if __name__ == '__main__':
    img = torch.randn(2, 3, 224, 224)
    model = get_encnet_resnet50_ade()
    outputs = model(img)
class ENet(nn.Module):
    """Efficient Neural Network (ENet) encoder-decoder."""

    def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=None, **kwargs):
        super(ENet, self).__init__()
        self.initial = InitialBlock(13, **kwargs)

        # Stage 1: one downsampling + four regular bottlenecks.
        self.bottleneck1_0 = Bottleneck(16, 16, 64, downsampling=True, **kwargs)
        self.bottleneck1_1 = Bottleneck(64, 16, 64, **kwargs)
        self.bottleneck1_2 = Bottleneck(64, 16, 64, **kwargs)
        self.bottleneck1_3 = Bottleneck(64, 16, 64, **kwargs)
        self.bottleneck1_4 = Bottleneck(64, 16, 64, **kwargs)

        # Stage 2: downsampling, then alternating dilated/asymmetric bottlenecks.
        self.bottleneck2_0 = Bottleneck(64, 32, 128, downsampling=True, **kwargs)
        self.bottleneck2_1 = Bottleneck(128, 32, 128, **kwargs)
        self.bottleneck2_2 = Bottleneck(128, 32, 128, dilation=2, **kwargs)
        self.bottleneck2_3 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
        self.bottleneck2_4 = Bottleneck(128, 32, 128, dilation=4, **kwargs)
        self.bottleneck2_5 = Bottleneck(128, 32, 128, **kwargs)
        self.bottleneck2_6 = Bottleneck(128, 32, 128, dilation=8, **kwargs)
        self.bottleneck2_7 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
        self.bottleneck2_8 = Bottleneck(128, 32, 128, dilation=16, **kwargs)

        # Stage 3: repeats stage 2's pattern without the downsampling bottleneck.
        self.bottleneck3_1 = Bottleneck(128, 32, 128, **kwargs)
        self.bottleneck3_2 = Bottleneck(128, 32, 128, dilation=2, **kwargs)
        self.bottleneck3_3 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
        self.bottleneck3_4 = Bottleneck(128, 32, 128, dilation=4, **kwargs)
        self.bottleneck3_5 = Bottleneck(128, 32, 128, **kwargs)
        self.bottleneck3_6 = Bottleneck(128, 32, 128, dilation=8, **kwargs)
        self.bottleneck3_7 = Bottleneck(128, 32, 128, asymmetric=True, **kwargs)
        self.bottleneck3_8 = Bottleneck(128, 32, 128, dilation=16, **kwargs)

        # Decoder stages (max-unpool skip connections from stages 1 and 2).
        self.bottleneck4_0 = UpsamplingBottleneck(128, 16, 64, **kwargs)
        self.bottleneck4_1 = Bottleneck(64, 16, 64, **kwargs)
        self.bottleneck4_2 = Bottleneck(64, 16, 64, **kwargs)

        self.bottleneck5_0 = UpsamplingBottleneck(64, 4, 16, **kwargs)
        self.bottleneck5_1 = Bottleneck(16, 4, 16, **kwargs)

        self.fullconv = nn.ConvTranspose2d(16, nclass, 2, 2, bias=False)

        self.__setattr__('exclusive', ['bottleneck1_0', 'bottleneck1_1', 'bottleneck1_2', 'bottleneck1_3',
                                       'bottleneck1_4', 'bottleneck2_0', 'bottleneck2_1', 'bottleneck2_2',
                                       'bottleneck2_3', 'bottleneck2_4', 'bottleneck2_5', 'bottleneck2_6',
                                       'bottleneck2_7', 'bottleneck2_8', 'bottleneck3_1', 'bottleneck3_2',
                                       'bottleneck3_3', 'bottleneck3_4', 'bottleneck3_5', 'bottleneck3_6',
                                       'bottleneck3_7', 'bottleneck3_8', 'bottleneck4_0', 'bottleneck4_1',
                                       'bottleneck4_2', 'bottleneck5_0', 'bottleneck5_1', 'fullconv'])

    def forward(self, x):
        # init
        x = self.initial(x)

        # stage 1
        x, max_indices1 = self.bottleneck1_0(x)
        x = self.bottleneck1_1(x)
        x = self.bottleneck1_2(x)
        x = self.bottleneck1_3(x)
        x = self.bottleneck1_4(x)

        # stage 2
        x, max_indices2 = self.bottleneck2_0(x)
        x = self.bottleneck2_1(x)
        x = self.bottleneck2_2(x)
        x = self.bottleneck2_3(x)
        x = self.bottleneck2_4(x)
        x = self.bottleneck2_5(x)
        x = self.bottleneck2_6(x)
        x = self.bottleneck2_7(x)
        x = self.bottleneck2_8(x)

        # stage 3
        x = self.bottleneck3_1(x)
        x = self.bottleneck3_2(x)
        x = self.bottleneck3_3(x)
        x = self.bottleneck3_4(x)
        # BUG FIX: bottleneck3_5 was constructed in __init__ but never called,
        # leaving a dead (but parameterized) module; stage 3 mirrors stage 2,
        # so apply it between 3_4 and 3_6.
        x = self.bottleneck3_5(x)
        x = self.bottleneck3_6(x)
        x = self.bottleneck3_7(x)
        x = self.bottleneck3_8(x)

        # stage 4
        x = self.bottleneck4_0(x, max_indices2)
        x = self.bottleneck4_1(x)
        x = self.bottleneck4_2(x)

        # stage 5
        x = self.bottleneck5_0(x, max_indices1)
        x = self.bottleneck5_1(x)

        # out
        x = self.fullconv(x)
        return tuple([x])


class InitialBlock(nn.Module):
    """ENet initial block: stride-2 conv concatenated with a 2x2 max-pool of the input."""

    def __init__(self, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(InitialBlock, self).__init__()
        self.conv = nn.Conv2d(3, out_channels, 3, 2, 1, bias=False)
        self.maxpool = nn.MaxPool2d(2, 2)
        # BN/PReLU act on conv channels + the 3 pooled input channels.
        self.bn = norm_layer(out_channels + 3)
        self.act = nn.PReLU()

    def forward(self, x):
        x_conv = self.conv(x)
        x_pool = self.maxpool(x)
        x = torch.cat([x_conv, x_pool], dim=1)
        x = self.bn(x)
        x = self.act(x)
        return x
class Bottleneck(nn.Module):
    """ENet bottleneck: regular, dilated, asymmetric (5x1/1x5) or downsampling.

    Downsampling variants additionally return the max-pool indices needed by
    the matching UpsamplingBottleneck in the decoder.
    """

    def __init__(self, in_channels, inter_channels, out_channels, dilation=1, asymmetric=False,
                 downsampling=False, norm_layer=nn.BatchNorm2d, **kwargs):
        super(Bottleneck, self).__init__()
        # FIX: flag attribute was misspelled `downsamping`; renamed.  It is a
        # plain bool (not a submodule/parameter), so checkpoints are unaffected.
        self.downsampling = downsampling
        if downsampling:
            self.maxpool = nn.MaxPool2d(2, 2, return_indices=True)
            self.conv_down = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                norm_layer(out_channels)
            )

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 1, bias=False),
            norm_layer(inter_channels),
            nn.PReLU()
        )

        if downsampling:
            self.conv2 = nn.Sequential(
                nn.Conv2d(inter_channels, inter_channels, 2, stride=2, bias=False),
                norm_layer(inter_channels),
                nn.PReLU()
            )
        else:
            if asymmetric:
                self.conv2 = nn.Sequential(
                    nn.Conv2d(inter_channels, inter_channels, (5, 1), padding=(2, 0), bias=False),
                    nn.Conv2d(inter_channels, inter_channels, (1, 5), padding=(0, 2), bias=False),
                    norm_layer(inter_channels),
                    nn.PReLU()
                )
            else:
                self.conv2 = nn.Sequential(
                    nn.Conv2d(inter_channels, inter_channels, 3, dilation=dilation, padding=dilation, bias=False),
                    norm_layer(inter_channels),
                    nn.PReLU()
                )
        self.conv3 = nn.Sequential(
            nn.Conv2d(inter_channels, out_channels, 1, bias=False),
            norm_layer(out_channels),
            nn.Dropout2d(0.1)
        )
        self.act = nn.PReLU()

    def forward(self, x):
        identity = x
        if self.downsampling:
            # Pool the skip path too, and project it to the new channel count.
            identity, max_indices = self.maxpool(identity)
            identity = self.conv_down(identity)

        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.act(out + identity)

        if self.downsampling:
            return out, max_indices
        return out


class UpsamplingBottleneck(nn.Module):
    """ENet decoder block: max-unpool skip path plus a learned upsampling path."""

    def __init__(self, in_channels, inter_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs):
        super(UpsamplingBottleneck, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels)
        )
        self.upsampling = nn.MaxUnpool2d(2)

        self.block = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, 1, bias=False),
            norm_layer(inter_channels),
            nn.PReLU(),
            nn.ConvTranspose2d(inter_channels, inter_channels, 2, 2, bias=False),
            norm_layer(inter_channels),
            nn.PReLU(),
            nn.Conv2d(inter_channels, out_channels, 1, bias=False),
            norm_layer(out_channels),
            nn.Dropout2d(0.1)
        )
        self.act = nn.PReLU()

    def forward(self, x, max_indices):
        # Skip path: project, then reverse the encoder's max-pool.
        out_up = self.conv(x)
        out_up = self.upsampling(out_up, max_indices)
        # Learned path.
        out_ext = self.block(x)
        return self.act(out_up + out_ext)


def get_enet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', pretrained_base=True, **kwargs):
    """Factory for ENet; optionally loads a pretrained checkpoint."""
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from core.data.dataloader import datasets
    model = ENet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        device = torch.device(kwargs['local_rank'])
        model.load_state_dict(torch.load(get_model_file('enet_%s' % (acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_enet_citys(**kwargs):
    return get_enet('citys', '', **kwargs)


if __name__ == '__main__':
    img = torch.randn(1, 3, 512, 512)
    model = get_enet_citys()
    output = model(img)
class ESPNetV2(nn.Module):
    r"""ESPNetV2

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
        for Synchronized Cross-GPU BachNormalization).
    aux : bool
        Auxiliary loss.

    Reference:
        Sachin Mehta, et al. "ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network."
        arXiv preprint arXiv:1811.11431 (2018).
    """

    def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=False, **kwargs):
        super(ESPNetV2, self).__init__()
        self.pretrained = eespnet(pretrained=pretrained_base, **kwargs)
        self.proj_L4_C = _ConvBNPReLU(256, 128, 1, **kwargs)
        self.pspMod = nn.Sequential(
            EESP(256, 128, stride=1, k=4, r_lim=7, **kwargs),
            _PSPModule(128, 128, **kwargs))
        self.project_l3 = nn.Sequential(
            nn.Dropout2d(0.1),
            nn.Conv2d(128, nclass, 1, bias=False))
        self.act_l3 = _BNPReLU(nclass, **kwargs)
        self.project_l2 = _ConvBNPReLU(64 + nclass, nclass, 1, **kwargs)
        self.project_l1 = nn.Sequential(
            nn.Dropout2d(0.1),
            nn.Conv2d(32 + nclass, nclass, 1, bias=False))

        self.aux = aux

        self.__setattr__('exclusive', ['proj_L4_C', 'pspMod', 'project_l3', 'act_l3', 'project_l2', 'project_l1'])

    def forward(self, x):
        size = x.size()[2:]
        # Four backbone levels at decreasing resolution.
        out_l1, out_l2, out_l3, out_l4 = self.pretrained(x, seg=True)
        # Merge l4 into l3, then decode upward level by level.
        out_l4_proj = self.proj_L4_C(out_l4)
        up_l4_to_l3 = F.interpolate(out_l4_proj, scale_factor=2, mode='bilinear', align_corners=True)
        merged_l3_upl4 = self.pspMod(torch.cat([out_l3, up_l4_to_l3], 1))
        proj_merge_l3_bef_act = self.project_l3(merged_l3_upl4)
        proj_merge_l3 = self.act_l3(proj_merge_l3_bef_act)
        out_up_l3 = F.interpolate(proj_merge_l3, scale_factor=2, mode='bilinear', align_corners=True)
        merge_l2 = self.project_l2(torch.cat([out_l2, out_up_l3], 1))
        out_up_l2 = F.interpolate(merge_l2, scale_factor=2, mode='bilinear', align_corners=True)
        merge_l1 = self.project_l1(torch.cat([out_l1, out_up_l2], 1))

        outputs = list()
        merge1_l1 = F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True)
        outputs.append(merge1_l1)
        if self.aux:
            # different from paper
            auxout = F.interpolate(proj_merge_l3_bef_act, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)

        # This fork returns only the main prediction (originally a tuple).
        return outputs[0]


# different from PSPNet
class _PSPModule(nn.Module):
    """Depthwise pyramid pooling used inside ESPNetV2's pspMod."""

    def __init__(self, in_channels, out_channels=1024, sizes=(1, 2, 4, 8), **kwargs):
        super(_PSPModule, self).__init__()
        self.stages = nn.ModuleList(
            [nn.Conv2d(in_channels, in_channels, 3, 1, 1, groups=in_channels, bias=False) for _ in sizes])
        self.project = _ConvBNPReLU(in_channels * (len(sizes) + 1), out_channels, 1, 1, **kwargs)

    def forward(self, x):
        size = x.size()[2:]
        feats = [x]
        for stage in self.stages:
            # NOTE: x itself is re-pooled each iteration, so successive stages
            # operate on progressively smaller maps.
            x = F.avg_pool2d(x, kernel_size=3, stride=2, padding=1)
            feats.append(F.interpolate(stage(x), size, mode='bilinear', align_corners=True))
        return self.project(torch.cat(feats, dim=1))


def get_espnet(dataset='pascal_voc', backbone='', pretrained=False, root='~/.torch/models',
               pretrained_base=False, **kwargs):
    """Factory for ESPNetV2; optionally loads a pretrained checkpoint."""
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    from core.data.dataloader import datasets
    model = ESPNetV2(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    if pretrained:
        from .model_store import get_model_file
        device = torch.device(kwargs['local_rank'])
        model.load_state_dict(torch.load(get_model_file('espnet_%s_%s' % (backbone, acronyms[dataset]), root=root),
                                         map_location=device))
    return model


def get_espnet_citys(**kwargs):
    return get_espnet('citys', **kwargs)


if __name__ == '__main__':
    input = torch.rand(2, 3, 224, 224)
    model = ESPNetV2(4, pretrained_base=False)
    loss = model(input)
    print(loss, loss.shape)

    from thop import profile

    # BUG FIX: thop.profile expects sample inputs via `inputs=`, not `input_size`
    # (dunet.py's __main__ already uses the correct call).
    flop, params = profile(model, inputs=(input,))
    print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6))
import os
import torch
import torch.nn as nn
import torch.nn.functional as F

from core.models.base_models.vgg import vgg16

__all__ = ['get_fcn32s', 'get_fcn16s', 'get_fcn8s',
           'get_fcn32s_vgg16_voc', 'get_fcn16s_vgg16_voc', 'get_fcn8s_vgg16_voc']


class FCN32s(nn.Module):
    """FCN-32s on a VGG16 backbone.

    There are some differences from the original FCN: the final score map is
    bilinearly upsampled straight back to the input resolution.
    """

    def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True,
                 norm_layer=nn.BatchNorm2d, **kwargs):
        super(FCN32s, self).__init__()
        self.aux = aux
        if backbone != 'vgg16':
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.pretrained = vgg16(pretrained=pretrained_base).features
        self.head = _FCNHead(512, nclass, norm_layer)
        if aux:
            self.auxlayer = _FCNHead(512, nclass, norm_layer)

        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])

    def forward(self, x):
        size = x.size()[2:]
        pool5 = self.pretrained(x)

        outputs = []
        out = F.interpolate(self.head(pool5), size, mode='bilinear', align_corners=True)
        outputs.append(out)

        if self.aux:
            auxout = F.interpolate(self.auxlayer(pool5), size, mode='bilinear', align_corners=True)
            outputs.append(auxout)

        return tuple(outputs)
class FCN16s(nn.Module):
    """FCN-16s: fuses the pool4 score with the 2x-upsampled pool5 score."""

    def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True,
                 norm_layer=nn.BatchNorm2d, **kwargs):
        super(FCN16s, self).__init__()
        self.aux = aux
        if backbone != 'vgg16':
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.pretrained = vgg16(pretrained=pretrained_base).features
        # Split the VGG feature stack: layers up to pool4, then the remainder.
        self.pool4 = nn.Sequential(*self.pretrained[:24])
        self.pool5 = nn.Sequential(*self.pretrained[24:])
        self.head = _FCNHead(512, nclass, norm_layer)
        self.score_pool4 = nn.Conv2d(512, nclass, 1)
        if aux:
            self.auxlayer = _FCNHead(512, nclass, norm_layer)

        self.__setattr__('exclusive', ['head', 'score_pool4', 'auxlayer'] if aux else ['head', 'score_pool4'])

    def forward(self, x):
        pool4 = self.pool4(x)
        pool5 = self.pool5(pool4)

        score_fr = self.head(pool5)
        score_pool4 = self.score_pool4(pool4)

        # Upsample the deep score to pool4 resolution and fuse by addition.
        upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', align_corners=True)
        fuse_pool4 = upscore2 + score_pool4

        out = F.interpolate(fuse_pool4, x.size()[2:], mode='bilinear', align_corners=True)
        outputs = [out]

        if self.aux:
            auxout = F.interpolate(self.auxlayer(pool5), x.size()[2:], mode='bilinear', align_corners=True)
            outputs.append(auxout)

        # This fork returns only the main prediction (originally a tuple).
        return outputs[0]
['head', 'score_pool3', + 'score_pool4']) + + def forward(self, x): + pool3 = self.pool3(x) + pool4 = self.pool4(pool3) + pool5 = self.pool5(pool4) + + outputs = [] + score_fr = self.head(pool5) + + score_pool4 = self.score_pool4(pool4) + score_pool3 = self.score_pool3(pool3) + + upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', align_corners=True) + fuse_pool4 = upscore2 + score_pool4 + + upscore_pool4 = F.interpolate(fuse_pool4, score_pool3.size()[2:], mode='bilinear', align_corners=True) + fuse_pool3 = upscore_pool4 + score_pool3 + + out = F.interpolate(fuse_pool3, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(out) + + if self.aux: + auxout = self.auxlayer(pool5) + auxout = F.interpolate(auxout, x.size()[2:], mode='bilinear', align_corners=True) + outputs.append(auxout) + + return tuple(outputs) + + +class _FCNHead(nn.Module): + def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_FCNHead, self).__init__() + inter_channels = in_channels // 4 + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels), + nn.ReLU(inplace=True), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ) + + def forward(self, x): + return self.block(x) + + +def get_fcn32s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN32s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn32s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def 
get_fcn16s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN16s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn16s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn8s(dataset='pascal_voc', backbone='vgg16', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN8s(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn8s_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn32s_vgg16_voc(**kwargs): + return get_fcn32s('pascal_voc', 'vgg16', **kwargs) + + +def get_fcn16s_vgg16_voc(**kwargs): + return get_fcn16s('pascal_voc', 'vgg16', **kwargs) + + +def get_fcn8s_vgg16_voc(**kwargs): + return get_fcn8s('pascal_voc', 'vgg16', **kwargs) + + +if __name__ == "__main__": + model = FCN16s(21) + print(model) + input = torch.rand(2, 3, 224,224) + #target = torch.zeros(4, 512, 512).cuda() + #model.eval() + #print(model) + loss = model(input) + print(loss) + print(loss.shape) + import torch + from thop import profile + from torchsummary import summary + flop,params=profile(model,input_size=(1,3,512,512)) + 
print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop/1e9, params/1e6)) \ No newline at end of file diff --git a/core/models/fcnv2.py b/core/models/fcnv2.py new file mode 100644 index 0000000..6bc4954 --- /dev/null +++ b/core/models/fcnv2.py @@ -0,0 +1,82 @@ +"""Fully Convolutional Network with Stride of 8""" +from __future__ import division + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .segbase import SegBaseModel + +__all__ = ['FCN', 'get_fcn', 'get_fcn_resnet50_voc', + 'get_fcn_resnet101_voc', 'get_fcn_resnet152_voc'] + + +class FCN(SegBaseModel): + def __init__(self, nclass, backbone='resnet50', aux=True, pretrained_base=True, **kwargs): + super(FCN, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _FCNHead(2048, nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _FCNHead(nn.Module): + def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_FCNHead, self).__init__() + inter_channels = in_channels // 4 + self.block = nn.Sequential( + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + norm_layer(inter_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ) + + def forward(self, x): + return self.block(x) + + +def get_fcn(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 
'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = FCN(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('fcn_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_fcn_resnet50_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet50', **kwargs) + + +def get_fcn_resnet101_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet101', **kwargs) + + +def get_fcn_resnet152_voc(**kwargs): + return get_fcn('pascal_voc', 'resnet152', **kwargs) diff --git a/core/models/hrnet.py b/core/models/hrnet.py new file mode 100644 index 0000000..8ad08e3 --- /dev/null +++ b/core/models/hrnet.py @@ -0,0 +1,29 @@ +"""High-Resolution Representations for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +class HRNet(nn.Module): + """HRNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + Reference: + Ke Sun. "High-Resolution Representations for Labeling Pixels and Regions." + arXiv preprint arXiv:1904.04514 (2019). 
+ """ + def __init__(self, nclass, backbone='', aux=False, pretrained_base=False, **kwargs): + super(HRNet, self).__init__() + + def forward(self, x): + pass \ No newline at end of file diff --git a/core/models/icnet.py b/core/models/icnet.py new file mode 100644 index 0000000..fed14a4 --- /dev/null +++ b/core/models/icnet.py @@ -0,0 +1,180 @@ +"""Image Cascade Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel + +__all__ = ['ICNet', 'get_icnet', 'get_icnet_resnet50_citys', + 'get_icnet_resnet101_citys', 'get_icnet_resnet152_citys'] + + +class ICNet(SegBaseModel): + """Image Cascade Network""" + + def __init__(self, nclass, backbone='resnet50', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(ICNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.conv_sub1 = nn.Sequential( + _ConvBNReLU(3, 32, 3, 2, **kwargs), + _ConvBNReLU(32, 32, 3, 2, **kwargs), + _ConvBNReLU(32, 64, 3, 2, **kwargs) + ) + + self.ppm = PyramidPoolingModule() + + self.head = _ICHead(nclass, **kwargs) + + self.__setattr__('exclusive', ['conv_sub1', 'head']) + + def forward(self, x): + # sub 1 + x_sub1 = self.conv_sub1(x) + + # sub 2 + x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear', align_corners=True) + _, x_sub2, _, _ = self.base_forward(x_sub2) + + # sub 4 + x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear', align_corners=True) + _, _, _, x_sub4 = self.base_forward(x_sub4) + # add PyramidPoolingModule + x_sub4 = self.ppm(x_sub4) + outputs = self.head(x_sub1, x_sub2, x_sub4) + + return tuple(outputs) + +class PyramidPoolingModule(nn.Module): + def __init__(self, pyramids=[1,2,3,6]): + super(PyramidPoolingModule, self).__init__() + self.pyramids = pyramids + + def forward(self, input): + feat = input + height, width = input.shape[2:] + for bin_size in self.pyramids: + x = F.adaptive_avg_pool2d(input, output_size=bin_size) + x = F.interpolate(x, 
size=(height, width), mode='bilinear', align_corners=True) + feat = feat + x + return feat + +class _ICHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ICHead, self).__init__() + #self.cff_12 = CascadeFeatureFusion(512, 64, 128, nclass, norm_layer, **kwargs) + self.cff_12 = CascadeFeatureFusion(128, 64, 128, nclass, norm_layer, **kwargs) + self.cff_24 = CascadeFeatureFusion(2048, 512, 128, nclass, norm_layer, **kwargs) + + self.conv_cls = nn.Conv2d(128, nclass, 1, bias=False) + + def forward(self, x_sub1, x_sub2, x_sub4): + outputs = list() + x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2) + outputs.append(x_24_cls) + #x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1) + x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1) + outputs.append(x_12_cls) + + up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear', align_corners=True) + up_x2 = self.conv_cls(up_x2) + outputs.append(up_x2) + up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear', align_corners=True) + outputs.append(up_x8) + # 1 -> 1/4 -> 1/8 -> 1/16 + outputs.reverse() + + return outputs + + +class _ConvBNReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, + groups=1, norm_layer=nn.BatchNorm2d, bias=False, **kwargs): + super(_ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) + self.bn = norm_layer(out_channels) + self.relu = nn.ReLU(True) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class CascadeFeatureFusion(nn.Module): + """CFF Unit""" + + def __init__(self, low_channels, high_channels, out_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(CascadeFeatureFusion, self).__init__() + self.conv_low = nn.Sequential( + nn.Conv2d(low_channels, out_channels, 3, padding=2, dilation=2, bias=False), + norm_layer(out_channels) + ) + self.conv_high = 
nn.Sequential( + nn.Conv2d(high_channels, out_channels, 1, bias=False), + norm_layer(out_channels) + ) + self.conv_low_cls = nn.Conv2d(out_channels, nclass, 1, bias=False) + + def forward(self, x_low, x_high): + x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True) + x_low = self.conv_low(x_low) + x_high = self.conv_high(x_high) + x = x_low + x_high + x = F.relu(x, inplace=True) + x_low_cls = self.conv_low_cls(x_low) + + return x, x_low_cls + + +def get_icnet(dataset='citys', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = ICNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('icnet_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_icnet_resnet50_citys(**kwargs): + return get_icnet('citys', 'resnet50', **kwargs) + + +def get_icnet_resnet101_citys(**kwargs): + return get_icnet('citys', 'resnet101', **kwargs) + + +def get_icnet_resnet152_citys(**kwargs): + return get_icnet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + # img = torch.randn(1, 3, 256, 256) + # model = get_icnet_resnet50_citys() + # outputs = model(img) + input = torch.rand(2, 3, 224, 224) + model = ICNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + #print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, 
input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/core/models/lednet.py b/core/models/lednet.py new file mode 100644 index 0000000..03c05bd --- /dev/null +++ b/core/models/lednet.py @@ -0,0 +1,211 @@ +"""LEDNet: A Lightweight Encoder-Decoder Network for Real-time Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNReLU + +__all__ = ['LEDNet', 'get_lednet', 'get_lednet_citys'] + +class LEDNet(nn.Module): + r"""LEDNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Yu Wang, et al. "LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation." + arXiv preprint arXiv:1905.02423 (2019). 
+ """ + + def __init__(self, nclass, backbone='', aux=False, jpu=False, pretrained_base=True, **kwargs): + super(LEDNet, self).__init__() + self.encoder = nn.Sequential( + Downsampling(3, 32), + SSnbt(32, **kwargs), SSnbt(32, **kwargs), SSnbt(32, **kwargs), + Downsampling(32, 64), + SSnbt(64, **kwargs), SSnbt(64, **kwargs), + Downsampling(64, 128), + SSnbt(128, **kwargs), + SSnbt(128, 2, **kwargs), + SSnbt(128, 5, **kwargs), + SSnbt(128, 9, **kwargs), + SSnbt(128, 2, **kwargs), + SSnbt(128, 5, **kwargs), + SSnbt(128, 9, **kwargs), + SSnbt(128, 17, **kwargs), + ) + self.decoder = APNModule(128, nclass) + + self.__setattr__('exclusive', ['encoder', 'decoder']) + + def forward(self, x): + size = x.size()[2:] + x = self.encoder(x) + x = self.decoder(x) + outputs = list() + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + #return tuple(outputs) + return outputs[0] + +class Downsampling(nn.Module): + def __init__(self, in_channels, out_channels, **kwargs): + super(Downsampling, self).__init__() + self.conv1 = nn.Conv2d(in_channels, out_channels // 2, 3, 2, 2, bias=False) + self.conv2 = nn.Conv2d(in_channels, out_channels // 2, 3, 2, 2, bias=False) + self.pool = nn.MaxPool2d(kernel_size=2, stride=1) + + def forward(self, x): + x1 = self.conv1(x) + x1 = self.pool(x1) + + x2 = self.conv2(x) + x2 = self.pool(x2) + + return torch.cat([x1, x2], dim=1) + + +class SSnbt(nn.Module): + def __init__(self, in_channels, dilation=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(SSnbt, self).__init__() + inter_channels = in_channels // 2 + self.branch1 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(1, 0), bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, 1), bias=False), + norm_layer(inter_channels), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1), + bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, 
inter_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation), + bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.branch2 = nn.Sequential( + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, 1), bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(1, 0), bias=False), + norm_layer(inter_channels), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (1, 3), padding=(0, dilation), dilation=(1, dilation), + bias=False), + nn.ReLU(True), + nn.Conv2d(inter_channels, inter_channels, (3, 1), padding=(dilation, 0), dilation=(dilation, 1), + bias=False), + norm_layer(inter_channels), + nn.ReLU(True)) + + self.relu = nn.ReLU(True) + + @staticmethod + def channel_shuffle(x, groups): + n, c, h, w = x.size() + + channels_per_group = c // groups + x = x.view(n, groups, channels_per_group, h, w) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(n, -1, h, w) + + return x + + def forward(self, x): + # channels split + x1, x2 = x.split(x.size(1) // 2, 1) + + x1 = self.branch1(x1) + x2 = self.branch2(x2) + + out = torch.cat([x1, x2], dim=1) + out = self.relu(out + x) + out = self.channel_shuffle(out, groups=2) + + return out + + +class APNModule(nn.Module): + def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(APNModule, self).__init__() + self.conv1 = _ConvBNReLU(in_channels, in_channels, 3, 2, 1, norm_layer=norm_layer) + self.conv2 = _ConvBNReLU(in_channels, in_channels, 5, 2, 2, norm_layer=norm_layer) + self.conv3 = _ConvBNReLU(in_channels, in_channels, 7, 2, 3, norm_layer=norm_layer) + self.level1 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level2 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level3 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level4 = _ConvBNReLU(in_channels, nclass, 1, norm_layer=norm_layer) + self.level5 = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + 
_ConvBNReLU(in_channels, nclass, 1)) + + def forward(self, x): + w, h = x.size()[2:] + branch3 = self.conv1(x) + branch2 = self.conv2(branch3) + branch1 = self.conv3(branch2) + + out = self.level1(branch1) + out = F.interpolate(out, ((w + 3) // 4, (h + 3) // 4), mode='bilinear', align_corners=True) + out = self.level2(branch2) + out + out = F.interpolate(out, ((w + 1) // 2, (h + 1) // 2), mode='bilinear', align_corners=True) + out = self.level3(branch3) + out + out = F.interpolate(out, (w, h), mode='bilinear', align_corners=True) + out = self.level4(x) * out + out = self.level5(x) + out + return out + + +def get_lednet(dataset='citys', backbone='', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = LEDNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('lednet_%s' % (acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_lednet_citys(**kwargs): + return get_lednet('citys', **kwargs) + + +if __name__ == '__main__': + #model = get_lednet_citys() + input = torch.rand(2, 3, 224, 224) + model =LEDNet(4, pretrained_base=True) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/core/models/model_store.py 
b/core/models/model_store.py new file mode 100644 index 0000000..9e64675 --- /dev/null +++ b/core/models/model_store.py @@ -0,0 +1,68 @@ +"""Model store which provides pretrained models.""" +from __future__ import print_function + +import os +import zipfile + +from ..utils.download import download, check_sha1 + +__all__ = ['get_model_file', 'get_resnet_file'] + +_model_sha1 = {name: checksum for checksum, name in [ + ('25c4b50959ef024fcc050213a06b614899f94b3d', 'resnet50'), + ('2a57e44de9c853fa015b172309a1ee7e2d0e4e2a', 'resnet101'), + ('0d43d698c66aceaa2bc0309f55efdd7ff4b143af', 'resnet152'), +]} + +encoding_repo_url = 'https://hangzh.s3.amazonaws.com/' +_url_format = '{repo_url}encoding/models/{file_name}.zip' + + +def short_hash(name): + if name not in _model_sha1: + raise ValueError('Pretrained model for {name} is not available.'.format(name=name)) + return _model_sha1[name][:8] + + +def get_resnet_file(name, root='~/.torch/models'): + file_name = '{name}-{short_hash}'.format(name=name, short_hash=short_hash(name)) + root = os.path.expanduser(root) + + file_path = os.path.join(root, file_name + '.pth') + sha1_hash = _model_sha1[name] + if os.path.exists(file_path): + if check_sha1(file_path, sha1_hash): + return file_path + else: + print('Mismatch in the content of model file {} detected.' + + ' Downloading again.'.format(file_path)) + else: + print('Model file {} is not found. Downloading.'.format(file_path)) + + if not os.path.exists(root): + os.makedirs(root) + + zip_file_path = os.path.join(root, file_name + '.zip') + repo_url = os.environ.get('ENCODING_REPO', encoding_repo_url) + if repo_url[-1] != '/': + repo_url = repo_url + '/' + download(_url_format.format(repo_url=repo_url, file_name=file_name), + path=zip_file_path, + overwrite=True) + with zipfile.ZipFile(zip_file_path) as zf: + zf.extractall(root) + os.remove(zip_file_path) + + if check_sha1(file_path, sha1_hash): + return file_path + else: + raise ValueError('Downloaded file has different hash. 
Please try again.') + + +def get_model_file(name, root='~/.torch/models'): + root = os.path.expanduser(root) + file_path = os.path.join(root, name + '.pth') + if os.path.exists(file_path): + return file_path + else: + raise ValueError('Model file is not found. Downloading or trainning.') diff --git a/core/models/model_zoo.py b/core/models/model_zoo.py new file mode 100644 index 0000000..5c69f50 --- /dev/null +++ b/core/models/model_zoo.py @@ -0,0 +1,126 @@ +"""Model store which handles pretrained models """ +from .fcn import * +from .fcnv2 import * +from .pspnet import * +from .deeplabv3 import * +from .deeplabv3_plus import * +from .danet import * +from .denseaspp import * +from .bisenet import * +from .encnet import * +from .dunet import * +from .icnet import * +from .enet import * +from .ocnet import * +from .ccnet import * +from .psanet import * +from .cgnet import * +from .espnet import * +from .lednet import * +from .dfanet import * + +__all__ = ['get_model', 'get_model_list', 'get_segmentation_model'] + +_models = { + 'fcn32s_vgg16_voc': get_fcn32s_vgg16_voc, + 'fcn16s_vgg16_voc': get_fcn16s_vgg16_voc, + 'fcn8s_vgg16_voc': get_fcn8s_vgg16_voc, + 'fcn_resnet50_voc': get_fcn_resnet50_voc, + 'fcn_resnet101_voc': get_fcn_resnet101_voc, + 'fcn_resnet152_voc': get_fcn_resnet152_voc, + 'psp_resnet50_voc': get_psp_resnet50_voc, + 'psp_resnet50_ade': get_psp_resnet50_ade, + 'psp_resnet101_voc': get_psp_resnet101_voc, + 'psp_resnet101_ade': get_psp_resnet101_ade, + 'psp_resnet101_citys': get_psp_resnet101_citys, + 'psp_resnet101_coco': get_psp_resnet101_coco, + 'deeplabv3_resnet50_voc': get_deeplabv3_resnet50_voc, + 'deeplabv3_resnet101_voc': get_deeplabv3_resnet101_voc, + 'deeplabv3_resnet152_voc': get_deeplabv3_resnet152_voc, + 'deeplabv3_resnet50_ade': get_deeplabv3_resnet50_ade, + 'deeplabv3_resnet101_ade': get_deeplabv3_resnet101_ade, + 'deeplabv3_resnet152_ade': get_deeplabv3_resnet152_ade, + 'deeplabv3_plus_xception_voc': get_deeplabv3_plus_xception_voc, + 
'danet_resnet50_ciyts': get_danet_resnet50_citys, + 'danet_resnet101_citys': get_danet_resnet101_citys, + 'danet_resnet152_citys': get_danet_resnet152_citys, + 'denseaspp_densenet121_citys': get_denseaspp_densenet121_citys, + 'denseaspp_densenet161_citys': get_denseaspp_densenet161_citys, + 'denseaspp_densenet169_citys': get_denseaspp_densenet169_citys, + 'denseaspp_densenet201_citys': get_denseaspp_densenet201_citys, + + 'bisenet_resnet18_citys': get_bisenet_resnet18_citys, # 原始 + + 'encnet_resnet50_ade': get_encnet_resnet50_ade, + 'encnet_resnet101_ade': get_encnet_resnet101_ade, + 'encnet_resnet152_ade': get_encnet_resnet152_ade, + 'dunet_resnet50_pascal_voc': get_dunet_resnet50_pascal_voc, + 'dunet_resnet101_pascal_voc': get_dunet_resnet101_pascal_voc, + 'dunet_resnet152_pascal_voc': get_dunet_resnet152_pascal_voc, + 'icnet_resnet50_citys': get_icnet_resnet50_citys, + 'icnet_resnet101_citys': get_icnet_resnet101_citys, + 'icnet_resnet152_citys': get_icnet_resnet152_citys, + 'enet_citys': get_enet_citys, + 'base_ocnet_resnet101_citys': get_base_ocnet_resnet101_citys, + 'pyramid_ocnet_resnet101_citys': get_pyramid_ocnet_resnet101_citys, + 'asp_ocnet_resnet101_citys': get_asp_ocnet_resnet101_citys, + 'ccnet_resnet50_citys': get_ccnet_resnet50_citys, + 'ccnet_resnet101_citys': get_ccnet_resnet101_citys, + 'ccnet_resnet152_citys': get_ccnet_resnet152_citys, + 'ccnet_resnet50_ade': get_ccnet_resnet50_ade, + 'ccnet_resnet101_ade': get_ccnet_resnet101_ade, + 'ccnet_resnet152_ade': get_ccnet_resnet152_ade, + 'psanet_resnet50_voc': get_psanet_resnet50_voc, + 'psanet_resnet101_voc': get_psanet_resnet101_voc, + 'psanet_resnet152_voc': get_psanet_resnet152_voc, + 'psanet_resnet50_citys': get_psanet_resnet50_citys, + 'psanet_resnet101_citys': get_psanet_resnet101_citys, + 'psanet_resnet152_citys': get_psanet_resnet152_citys, + 'cgnet_citys': get_cgnet_citys, + 'espnet_citys': get_espnet_citys, + 'lednet_citys': get_lednet_citys, + 'dfanet_citys': get_dfanet_citys, +} + + 
+def get_model(name, **kwargs): + name = name.lower() + if name not in _models: + err_str = '"%s" is not among the following model list:\n\t' % (name) + err_str += '%s' % ('\n\t'.join(sorted(_models.keys()))) + raise ValueError(err_str) + net = _models[name](**kwargs) + return net + + +def get_model_list(): + return _models.keys() + + +def get_segmentation_model(model, **kwargs): + models = { + 'fcn32s': get_fcn32s, + 'fcn16s': get_fcn16s, + 'fcn8s': get_fcn8s, + 'fcn': get_fcn, + 'psp': get_psp, + 'deeplabv3': get_deeplabv3, + 'deeplabv3_plus': get_deeplabv3_plus, + 'danet': get_danet, + 'denseaspp': get_denseaspp, + + 'bisenet': get_bisenet, # 原始 + + 'encnet': get_encnet, + 'dunet': get_dunet, + 'icnet': get_icnet, + 'enet': get_enet, + 'ocnet': get_ocnet, + 'ccnet': get_ccnet, + 'psanet': get_psanet, + 'cgnet': get_cgnet, + 'espnet': get_espnet, + 'lednet': get_lednet, + 'dfanet': get_dfanet, + } + return models[model](**kwargs) diff --git a/core/models/ocnet.py b/core/models/ocnet.py new file mode 100644 index 0000000..1e1e85c --- /dev/null +++ b/core/models/ocnet.py @@ -0,0 +1,361 @@ +""" Object Context Network for Scene Parsing""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['OCNet', 'get_ocnet', 'get_base_ocnet_resnet101_citys', + 'get_pyramid_ocnet_resnet101_citys', 'get_asp_ocnet_resnet101_citys'] + + +class OCNet(SegBaseModel): + r"""OCNet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + Reference: + Yuhui Yuan, Jingdong Wang. "OCNet: Object Context Network for Scene Parsing." 
+ arXiv preprint arXiv:1809.00916 (2018). + """ + + def __init__(self, nclass, backbone='resnet101', oc_arch='base', aux=False, pretrained_base=True, **kwargs): + super(OCNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _OCHead(nclass, oc_arch, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _OCHead(nn.Module): + def __init__(self, nclass, oc_arch, norm_layer=nn.BatchNorm2d, **kwargs): + super(_OCHead, self).__init__() + if oc_arch == 'base': + self.context = nn.Sequential( + nn.Conv2d(2048, 512, 3, 1, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + BaseOCModule(512, 512, 256, 256, scales=([1]), norm_layer=norm_layer, **kwargs)) + elif oc_arch == 'pyramid': + self.context = nn.Sequential( + nn.Conv2d(2048, 512, 3, 1, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + PyramidOCModule(512, 512, 256, 512, scales=([1, 2, 3, 6]), norm_layer=norm_layer, **kwargs)) + elif oc_arch == 'asp': + self.context = ASPOCModule(2048, 512, 256, 512, norm_layer=norm_layer, **kwargs) + else: + raise ValueError("Unknown OC architecture!") + + self.out = nn.Conv2d(512, nclass, 1) + + def forward(self, x): + x = self.context(x) + return self.out(x) + + +class BaseAttentionBlock(nn.Module): + """The basic implementation for self-attention block/non-local block.""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scale=1, norm_layer=nn.BatchNorm2d, **kwargs): + 
super(BaseAttentionBlock, self).__init__() + self.scale = scale + self.key_channels = key_channels + self.value_channels = value_channels + if scale > 1: + self.pool = nn.MaxPool2d(scale) + + self.f_value = nn.Conv2d(in_channels, value_channels, 1) + self.f_key = nn.Sequential( + nn.Conv2d(in_channels, key_channels, 1), + norm_layer(key_channels), + nn.ReLU(True) + ) + self.f_query = self.f_key + self.W = nn.Conv2d(value_channels, out_channels, 1) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + def forward(self, x): + batch_size, c, w, h = x.size() + if self.scale > 1: + x = self.pool(x) + + value = self.f_value(x).view(batch_size, self.value_channels, -1).permute(0, 2, 1) + query = self.f_query(x).view(batch_size, self.key_channels, -1).permute(0, 2, 1) + key = self.f_key(x).view(batch_size, self.key_channels, -1) + + sim_map = torch.bmm(query, key) * (self.key_channels ** -.5) + sim_map = F.softmax(sim_map, dim=-1) + + context = torch.bmm(sim_map, value).permute(0, 2, 1).contiguous() + context = context.view(batch_size, self.value_channels, *x.size()[2:]) + context = self.W(context) + if self.scale > 1: + context = F.interpolate(context, size=(w, h), mode='bilinear', align_corners=True) + + return context + + +class BaseOCModule(nn.Module): + """Base-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scales=([1]), norm_layer=nn.BatchNorm2d, concat=True, **kwargs): + super(BaseOCModule, self).__init__() + self.stages = nn.ModuleList([ + BaseAttentionBlock(in_channels, out_channels, key_channels, value_channels, scale, norm_layer, **kwargs) + for scale in scales]) + in_channels = in_channels * 2 if concat else in_channels + self.project = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.05) + ) + self.concat = concat + + def forward(self, x): + priors = [stage(x) for stage in self.stages] + context = priors[0] + for i in range(1, 
len(priors)): + context += priors[i] + if self.concat: + context = torch.cat([context, x], 1) + out = self.project(context) + return out + + +class PyramidAttentionBlock(nn.Module): + """The basic implementation for pyramid self-attention block/non-local block""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scale=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(PyramidAttentionBlock, self).__init__() + self.scale = scale + self.value_channels = value_channels + self.key_channels = key_channels + + self.f_value = nn.Conv2d(in_channels, value_channels, 1) + self.f_key = nn.Sequential( + nn.Conv2d(in_channels, key_channels, 1), + norm_layer(key_channels), + nn.ReLU(True) + ) + self.f_query = self.f_key + self.W = nn.Conv2d(value_channels, out_channels, 1) + nn.init.constant_(self.W.weight, 0) + nn.init.constant_(self.W.bias, 0) + + def forward(self, x): + batch_size, c, w, h = x.size() + + local_x = list() + local_y = list() + step_w, step_h = w // self.scale, h // self.scale + for i in range(self.scale): + for j in range(self.scale): + start_x, start_y = step_w * i, step_h * j + end_x, end_y = min(start_x + step_w, w), min(start_y + step_h, h) + if i == (self.scale - 1): + end_x = w + if j == (self.scale - 1): + end_y = h + local_x += [start_x, end_x] + local_y += [start_y, end_y] + + value = self.f_value(x) + query = self.f_query(x) + key = self.f_key(x) + + local_list = list() + local_block_cnt = (self.scale ** 2) * 2 + for i in range(0, local_block_cnt, 2): + value_local = value[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + query_local = query[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + key_local = key[:, :, local_x[i]:local_x[i + 1], local_y[i]:local_y[i + 1]] + + w_local, h_local = value_local.size(2), value_local.size(3) + value_local = value_local.contiguous().view(batch_size, self.value_channels, -1).permute(0, 2, 1) + query_local = query_local.contiguous().view(batch_size, 
self.key_channels, -1).permute(0, 2, 1) + key_local = key_local.contiguous().view(batch_size, self.key_channels, -1) + + sim_map = torch.bmm(query_local, key_local) * (self.key_channels ** -.5) + sim_map = F.softmax(sim_map, dim=-1) + + context_local = torch.bmm(sim_map, value_local).permute(0, 2, 1).contiguous() + context_local = context_local.view(batch_size, self.value_channels, w_local, h_local) + local_list.append(context_local) + + context_list = list() + for i in range(0, self.scale): + row_tmp = list() + for j in range(self.scale): + row_tmp.append(local_list[j + i * self.scale]) + context_list.append(torch.cat(row_tmp, 3)) + + context = torch.cat(context_list, 2) + context = self.W(context) + + return context + + +class PyramidOCModule(nn.Module): + """Pyramid-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + scales=([1]), norm_layer=nn.BatchNorm2d, **kwargs): + super(PyramidOCModule, self).__init__() + self.stages = nn.ModuleList([ + PyramidAttentionBlock(in_channels, out_channels, key_channels, value_channels, scale, norm_layer, **kwargs) + for scale in scales]) + self.up_dr = nn.Sequential( + nn.Conv2d(in_channels, in_channels * len(scales), 1), + norm_layer(in_channels * len(scales)), + nn.ReLU(True) + ) + self.project = nn.Sequential( + nn.Conv2d(in_channels * len(scales) * 2, out_channels, 1), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.05) + ) + + def forward(self, x): + priors = [stage(x) for stage in self.stages] + context = [self.up_dr(x)] + for i in range(len(priors)): + context += [priors[i]] + context = torch.cat(context, 1) + out = self.project(context) + return out + + +class ASPOCModule(nn.Module): + """ASP-OC""" + + def __init__(self, in_channels, out_channels, key_channels, value_channels, + atrous_rates=(12, 24, 36), norm_layer=nn.BatchNorm2d, **kwargs): + super(ASPOCModule, self).__init__() + self.context = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=1), + 
norm_layer(out_channels), + nn.ReLU(True), + BaseOCModule(out_channels, out_channels, key_channels, value_channels, ([2]), norm_layer, False, **kwargs)) + + rate1, rate2, rate3 = tuple(atrous_rates) + self.b1 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate1, dilation=rate1, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b2 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate2, dilation=rate2, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b3 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 3, padding=rate3, dilation=rate3, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + self.b4 = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True)) + + self.project = nn.Sequential( + nn.Conv2d(out_channels * 5, out_channels, 1, bias=False), + norm_layer(out_channels), + nn.ReLU(True), + nn.Dropout2d(0.1) + ) + + def forward(self, x): + feat1 = self.context(x) + feat2 = self.b1(x) + feat3 = self.b2(x) + feat4 = self.b3(x) + feat5 = self.b4(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + out = self.project(out) + return out + + +def get_ocnet(dataset='citys', backbone='resnet50', oc_arch='base', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = OCNet(datasets[dataset].NUM_CLASS, backbone=backbone, oc_arch=oc_arch, + pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('%s_ocnet_%s_%s' % ( + oc_arch, backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_base_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 
'resnet101', 'base', **kwargs) + + +def get_pyramid_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'pyramid', **kwargs) + + +def get_asp_ocnet_resnet101_citys(**kwargs): + return get_ocnet('citys', 'resnet101', 'asp', **kwargs) + + +if __name__ == '__main__': + #img = torch.randn(1, 3, 256, 256) + #model = get_asp_ocnet_resnet101_citys() + # outputs = model(img) + input = torch.rand(1, 3, 224,224) + model=OCNet(4,pretrained_base=False) + #target = torch.zeros(4, 512, 512).cuda() + #model.eval() + #print(model) + loss = model(input) + print(loss,loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + flop,params=profile(model,input_size=(1,3,512,512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop/1e9, params/1e6)) \ No newline at end of file diff --git a/core/models/psanet.py b/core/models/psanet.py new file mode 100644 index 0000000..82361f3 --- /dev/null +++ b/core/models/psanet.py @@ -0,0 +1,163 @@ +"""Point-wise Spatial Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import _ConvBNReLU +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['PSANet', 'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc', + 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys', + 'get_psanet_resnet152_citys'] + + +class PSANet(SegBaseModel): + r"""PSANet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. 
+ + Reference: + Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network for Scene Parsing." + ECCV-2018. + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _PSAHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +class _PSAHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PSAHead, self).__init__() + # psa_out_channels = crop_size // 8 ** 2 + self.psa = _PointwiseSpatialAttention(2048, 3600, norm_layer) + + self.conv_post = _ConvBNReLU(1024, 2048, 1, norm_layer=norm_layer) + self.project = nn.Sequential( + _ConvBNReLU(4096, 512, 3, padding=1, norm_layer=norm_layer), + nn.Dropout2d(0.1, False), + nn.Conv2d(512, nclass, 1)) + + def forward(self, x): + global_feature = self.psa(x) + out = self.conv_post(global_feature) + out = torch.cat([x, out], dim=1) + out = self.project(out) + + return out + + +class _PointwiseSpatialAttention(nn.Module):# + def __init__(self, in_channels, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PointwiseSpatialAttention, self).__init__() + reduced_channels = 512 + self.collect_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer) + self.distribute_attention = _AttentionGeneration(in_channels, reduced_channels, out_channels, norm_layer) + + def forward(self, x): + 
collect_fm = self.collect_attention(x) + distribute_fm = self.distribute_attention(x) + psa_fm = torch.cat([collect_fm, distribute_fm], dim=1) + return psa_fm + + +class _AttentionGeneration(nn.Module):#-->Z:(n,C2,H,W),不是原文over-completed的做法。 + def __init__(self, in_channels, reduced_channels, out_channels, norm_layer, **kwargs): + super(_AttentionGeneration, self).__init__() + self.conv_reduce = _ConvBNReLU(in_channels, reduced_channels, 1, norm_layer=norm_layer) + self.attention = nn.Sequential( + _ConvBNReLU(reduced_channels, reduced_channels, 1, norm_layer=norm_layer), + nn.Conv2d(reduced_channels, out_channels, 1, bias=False)) + + self.reduced_channels = reduced_channels + + def forward(self, x): + reduce_x = self.conv_reduce(x) + attention = self.attention(reduce_x) + n, c, h, w = attention.size()#c=out_channels=3600, + attention = attention.view(n, c, -1)#(n,3600,H*W) + reduce_x = reduce_x.view(n, self.reduced_channels, -1)#(n,512,H*W) + print(reduce_x.shape,attention.shape) + fm = torch.bmm(reduce_x, torch.softmax(attention, dim=1)) + fm = fm.view(n, self.reduced_channels, h, w)#(n,512,60,60) + + return fm + + +def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=False, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from core.data.dataloader import datasets + model = PSANet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_psanet_resnet50_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet50', **kwargs) + + +def get_psanet_resnet101_voc(**kwargs): + return get_psanet('pascal_voc', 
'resnet101', **kwargs) + + +def get_psanet_resnet152_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet152', **kwargs) + + +def get_psanet_resnet50_citys(**kwargs): + return get_psanet('citys', 'resnet50', **kwargs) + + +def get_psanet_resnet101_citys(**kwargs): + return get_psanet('citys', 'resnet101', **kwargs) + + +def get_psanet_resnet152_citys(**kwargs): + return get_psanet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_psanet_resnet50_voc() + img = torch.randn(1, 3, 480, 480) + output = model(img) diff --git a/core/models/psanet_offical.py b/core/models/psanet_offical.py new file mode 100644 index 0000000..54531a3 --- /dev/null +++ b/core/models/psanet_offical.py @@ -0,0 +1,255 @@ +import torch +from torch import nn +import torch.nn.functional as F +import core.lib.psa.functional as PF +import modeling.backbone.resnet_real as models + +#运行失败,compact可以运行,但over-completed运行不了。也是跟psamask的实现有关:用到了自定义的torch.autograd.Function(里面用到了cpp文件,导入不了_C模块出错) +# +# from . import functions +# +# +# def psa_mask(input, psa_type=0, mask_H_=None, mask_W_=None): +# return functions.psa_mask(input, psa_type, mask_H_, mask_W_) +# +# +# import torch +# from torch.autograd import Function +# from .. 
import src + + +# class PSAMask(Function): +# @staticmethod +# def forward(ctx, input, psa_type=0, mask_H_=None, mask_W_=None): +# assert psa_type in [0, 1] # 0-col, 1-dis +# assert (mask_H_ is None and mask_W_ is None) or (mask_H_ is not None and mask_W_ is not None) +# num_, channels_, feature_H_, feature_W_ = input.size() +# if mask_H_ is None and mask_W_ is None: +# mask_H_, mask_W_ = 2 * feature_H_ - 1, 2 * feature_W_ - 1 +# assert (mask_H_ % 2 == 1) and (mask_W_ % 2 == 1) +# assert channels_ == mask_H_ * mask_W_ +# half_mask_H_, half_mask_W_ = (mask_H_ - 1) // 2, (mask_W_ - 1) // 2 +# output = torch.zeros([num_, feature_H_ * feature_W_, feature_H_, feature_W_], dtype=input.dtype, device=input.device) +# if not input.is_cuda: +# src.cpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# else: +# output = output.cuda() +# src.gpu.psamask_forward(psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ = psa_type, num_, channels_, feature_H_, feature_W_ +# ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ = mask_H_, mask_W_, half_mask_H_, half_mask_W_ +# return output +# +# @staticmethod +# def backward(ctx, grad_output): +# psa_type, num_, channels_, feature_H_, feature_W_ = ctx.psa_type, ctx.num_, ctx.channels_, ctx.feature_H_, ctx.feature_W_ +# mask_H_, mask_W_, half_mask_H_, half_mask_W_ = ctx.mask_H_, ctx.mask_W_, ctx.half_mask_H_, ctx.half_mask_W_ +# grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device) +# if not grad_output.is_cuda: +# src.cpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) +# else: +# src.gpu.psamask_backward(psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, 
half_mask_H_, half_mask_W_) +# return grad_input, None, None, None + + +# psa_mask = PSAMask.apply + + +class PSA(nn.Module): + def __init__(self, in_channels=2048, mid_channels=512, psa_type=2, compact=False, shrink_factor=2, mask_h=59, + mask_w=59, normalization_factor=1.0, psa_softmax=True): + super(PSA, self).__init__() + assert psa_type in [0, 1, 2] + self.psa_type = psa_type + self.compact = compact + self.shrink_factor = shrink_factor + self.mask_h = mask_h + self.mask_w = mask_w + self.psa_softmax = psa_softmax + if normalization_factor is None: + normalization_factor = mask_h * mask_w + self.normalization_factor = normalization_factor + + self.reduce = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + self.attention = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + nn.Conv2d(mid_channels, mask_h*mask_w, kernel_size=1, bias=False), + ) + if psa_type == 2: + self.reduce_p = nn.Sequential( + nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True) + ) + self.attention_p = nn.Sequential( + nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + nn.Conv2d(mid_channels, mask_h*mask_w, kernel_size=1, bias=False), + ) + self.proj = nn.Sequential( + nn.Conv2d(mid_channels * (2 if psa_type == 2 else 1), in_channels, kernel_size=1, bias=False), + nn.BatchNorm2d(in_channels), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + out = x + if self.psa_type in [0, 1]: + x = self.reduce(x) + n, c, h, w = x.size() + if self.shrink_factor != 1: + h = (h - 1) // self.shrink_factor + 1#可以理解为这样做的目的是向上取整。 + w = (w - 1) // self.shrink_factor + 1 + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + y = self.attention(x) + if self.compact: + 
if self.psa_type == 1: + y = y.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y = PF.psa_mask(y, self.psa_type, self.mask_h, self.mask_w) + if self.psa_softmax: + y = F.softmax(y, dim=1) + x = torch.bmm(x.view(n, c, h * w), y.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + elif self.psa_type == 2: + x_col = self.reduce(x) + x_dis = self.reduce_p(x) + n, c, h, w = x_col.size() + if self.shrink_factor != 1: + h = (h - 1) // self.shrink_factor + 1 + w = (w - 1) // self.shrink_factor + 1 + x_col = F.interpolate(x_col, size=(h, w), mode='bilinear', align_corners=True) + x_dis = F.interpolate(x_dis, size=(h, w), mode='bilinear', align_corners=True) + y_col = self.attention(x_col) + y_dis = self.attention_p(x_dis) + if self.compact: + y_dis = y_dis.view(n, h * w, h * w).transpose(1, 2).view(n, h * w, h, w) + else: + y_col = PF.psa_mask(y_col, 0, self.mask_h, self.mask_w) + y_dis = PF.psa_mask(y_dis, 1, self.mask_h, self.mask_w) + if self.psa_softmax: + y_col = F.softmax(y_col, dim=1) + y_dis = F.softmax(y_dis, dim=1) + x_col = torch.bmm(x_col.view(n, c, h * w), y_col.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + x_dis = torch.bmm(x_dis.view(n, c, h * w), y_dis.view(n, h * w, h * w)).view(n, c, h, w) * (1.0 / self.normalization_factor) + x = torch.cat([x_col, x_dis], 1) + x = self.proj(x) + if self.shrink_factor != 1: + h = (h - 1) * self.shrink_factor + 1 + w = (w - 1) * self.shrink_factor + 1 + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + return torch.cat((out, x), 1) + + +class PSANet(nn.Module): + def __init__(self, layers=50, dropout=0.1, classes=2, zoom_factor=8, use_psa=True, psa_type=2, compact=False, + shrink_factor=2, mask_h=59, mask_w=59, normalization_factor=1.0, psa_softmax=True, + criterion=nn.CrossEntropyLoss(ignore_index=255), pretrained=True): + super(PSANet, self).__init__() + assert layers in [50, 101, 152] + assert classes > 1 + assert 
zoom_factor in [1, 2, 4, 8] + assert psa_type in [0, 1, 2] + self.zoom_factor = zoom_factor + self.use_psa = use_psa + self.criterion = criterion + + if layers == 50: + resnet = models.resnet50(pretrained=pretrained,deep_base=True) + elif layers == 101: + resnet = models.resnet101(pretrained=pretrained) + else: + resnet = models.resnet152(pretrained=pretrained) + self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool) + self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4 + + for n, m in self.layer3.named_modules(): + if 'conv2' in n: + m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) + elif 'downsample.0' in n: + m.stride = (1, 1) + for n, m in self.layer4.named_modules(): + if 'conv2' in n: + m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) + elif 'downsample.0' in n: + m.stride = (1, 1) + + fea_dim = 2048 + if use_psa: + self.psa = PSA(fea_dim, 512, psa_type, compact, shrink_factor, mask_h, mask_w, normalization_factor, psa_softmax) + fea_dim *= 2 + self.cls = nn.Sequential( + nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(512), + nn.ReLU(inplace=True), + nn.Dropout2d(p=dropout), + nn.Conv2d(512, classes, kernel_size=1) + ) + if self.training: + self.aux = nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(inplace=True), + nn.Dropout2d(p=dropout), + nn.Conv2d(256, classes, kernel_size=1) + ) + + def forward(self, x, y=None): + x_size = x.size() + assert (x_size[2] - 1) % 8 == 0 and (x_size[3] - 1) % 8 == 0 + h = int((x_size[2] - 1) / 8 * self.zoom_factor + 1) + w = int((x_size[3] - 1) / 8 * self.zoom_factor + 1) + + x = self.layer0(x) + x = self.layer1(x) + x = self.layer2(x) + x_tmp = self.layer3(x) + x = self.layer4(x_tmp) + if self.use_psa: + x = self.psa(x) + x = self.cls(x) + if 
self.zoom_factor != 1: + x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) + + if self.training: + aux = self.aux(x_tmp) + if self.zoom_factor != 1: + aux = F.interpolate(aux, size=(h, w), mode='bilinear', align_corners=True) + main_loss = self.criterion(x, y) + aux_loss = self.criterion(aux, y) + return x.max(1)[1], main_loss, aux_loss + else: + return x + + +if __name__ == '__main__': + import os + os.environ["CUDA_VISIBLE_DEVICES"] = '0' + crop_h = crop_w = 465 + input = torch.rand(4, 3, crop_h, crop_w).cuda() + compact = False + mask_h, mask_w = None, None + shrink_factor = 2 + if compact: + mask_h = (crop_h - 1) // (8 * shrink_factor) + 1 + mask_w = (crop_w - 1) // (8 * shrink_factor) + 1 + else: + assert (mask_h is None and mask_w is None) or (mask_h is not None and mask_w is not None) + if mask_h is None and mask_w is None: + mask_h = 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1 + mask_w = 2 * ((crop_w - 1) // (8 * shrink_factor) + 1) - 1 + else: + assert (mask_h % 2 == 1) and (mask_h >= 3) and (mask_h <= 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1) + assert (mask_w % 2 == 1) and (mask_w >= 3) and (mask_w <= 2 * ((crop_h - 1) // (8 * shrink_factor) + 1) - 1) + + model = PSANet(layers=50, dropout=0.1, classes=21, zoom_factor=8, use_psa=True, psa_type=2, compact=compact, + shrink_factor=shrink_factor, mask_h=mask_h, mask_w=mask_w, psa_softmax=True, pretrained=False).cuda() + print(model) + model.eval() + output = model(input) + print('PSANet', output.size()) diff --git a/core/models/psanet_old.py b/core/models/psanet_old.py new file mode 100644 index 0000000..71a6db7 --- /dev/null +++ b/core/models/psanet_old.py @@ -0,0 +1,208 @@ +"""Point-wise Spatial Attention Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.nn import CollectAttention, DistributeAttention +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + + +#运行失败,name '_C' is not 
defined。也是跟psa_block模块的实现有关:用到了自定义的torch.autograd.Function(里面用到了cpp文件,找不到文件出错) + + +__all__ = ['PSANet', 'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc', + 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys', + 'get_psanet_resnet152_citys'] + + +class PSANet(SegBaseModel): + r"""PSANet + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network for Scene Parsing." + ECCV-2018. + """ + + def __init__(self, nclass, backbone='resnet', aux=False, pretrained_base=False, **kwargs): + super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base, **kwargs) + self.head = _PSAHead(nclass, **kwargs) + if aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = list() + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + return tuple(outputs) + + +class _PSAHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs): + super(_PSAHead, self).__init__() + self.collect = _CollectModule(2048, 512, 60, 60, norm_layer, **kwargs) + self.distribute = _DistributeModule(2048, 512, 60, 60, norm_layer, **kwargs) + + self.conv_post = nn.Sequential( + nn.Conv2d(1024, 2048, 1, bias=False), + norm_layer(2048), + 
nn.ReLU(True)) + self.project = nn.Sequential( + nn.Conv2d(4096, 512, 3, padding=1, bias=False), + norm_layer(512), + nn.ReLU(True), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, x): + global_feature_collect = self.collect(x) + global_feature_distribute = self.distribute(x) + + global_feature = torch.cat([global_feature_collect, global_feature_distribute], dim=1) + out = self.conv_post(global_feature) + out = F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=True) + out = torch.cat([x, out], dim=1) + out = self.project(out) + + return out + + +class _CollectModule(nn.Module): + def __init__(self, in_channels, reduced_channels, feat_w, feat_h, norm_layer, **kwargs): + super(_CollectModule, self).__init__() + self.conv_reduce = nn.Sequential( + nn.Conv2d(in_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True)) + self.conv_adaption = nn.Sequential( + nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True), + nn.Conv2d(reduced_channels, (feat_w - 1) * (feat_h), 1, bias=False)) + self.collect_attention = CollectAttention() + + self.reduced_channels = reduced_channels + self.feat_w = feat_w + self.feat_h = feat_h + + def forward(self, x): + x = self.conv_reduce(x) + # shrink + x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True) + x_adaption = self.conv_adaption(x_shrink) + ca = self.collect_attention(x_adaption) + global_feature_collect_list = list() + for i in range(x_shrink.shape[0]): + x_shrink_i = x_shrink[i].view(self.reduced_channels, -1) + ca_i = ca[i].view(ca.shape[1], -1) + global_feature_collect_list.append( + torch.mm(x_shrink_i, ca_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2)) + global_feature_collect = torch.cat(global_feature_collect_list) + + return global_feature_collect + + +class _DistributeModule(nn.Module): + def __init__(self, in_channels, reduced_channels, feat_w, feat_h, 
norm_layer, **kwargs): + super(_DistributeModule, self).__init__() + self.conv_reduce = nn.Sequential( + nn.Conv2d(in_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True)) + self.conv_adaption = nn.Sequential( + nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False), + norm_layer(reduced_channels), + nn.ReLU(True), + nn.Conv2d(reduced_channels, (feat_w - 1) * (feat_h), 1, bias=False)) + self.distribute_attention = DistributeAttention() + + self.reduced_channels = reduced_channels + self.feat_w = feat_w + self.feat_h = feat_h + + def forward(self, x): + x = self.conv_reduce(x) + x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True) + x_adaption = self.conv_adaption(x_shrink) + da = self.distribute_attention(x_adaption) + global_feature_distribute_list = list() + for i in range(x_shrink.shape[0]): + x_shrink_i = x_shrink[i].view(self.reduced_channels, -1) + da_i = da[i].view(da.shape[1], -1) + global_feature_distribute_list.append( + torch.mm(x_shrink_i, da_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2)) + global_feature_distribute = torch.cat(global_feature_distribute_list) + + return global_feature_distribute + + +def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=False, **kwargs): + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + # from ..data.dataloader import datasets + model = PSANet(4, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + # if pretrained: + # from .model_store import get_model_file + # device = torch.device(kwargs['local_rank']) + # model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root), + # map_location=device)) + return model + + +def get_psanet_resnet50_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet50', **kwargs) + + 
+def get_psanet_resnet101_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet101', **kwargs) + + +def get_psanet_resnet152_voc(**kwargs): + return get_psanet('pascal_voc', 'resnet152', **kwargs) + + +def get_psanet_resnet50_citys(**kwargs): + return get_psanet('citys', 'resnet50', **kwargs) + + +def get_psanet_resnet101_citys(**kwargs): + return get_psanet('citys', 'resnet101', **kwargs) + + +def get_psanet_resnet152_citys(**kwargs): + return get_psanet('citys', 'resnet152', **kwargs) + + +if __name__ == '__main__': + model = get_psanet_resnet50_voc() + img = torch.randn(1, 3, 480, 480) + output = model(img) diff --git a/core/models/pspnet.py b/core/models/pspnet.py new file mode 100644 index 0000000..6960e57 --- /dev/null +++ b/core/models/pspnet.py @@ -0,0 +1,185 @@ +"""Pyramid Scene Parsing Network""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from core.models.segbase import SegBaseModel +from core.models.fcn import _FCNHead + +__all__ = ['PSPNet', 'get_psp', 'get_psp_resnet50_voc', 'get_psp_resnet50_ade', 'get_psp_resnet101_voc', + 'get_psp_resnet101_ade', 'get_psp_resnet101_citys', 'get_psp_resnet101_coco'] + + +class PSPNet(SegBaseModel): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + nclass : int + Number of categories for the training dataset. + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + norm_layer : object + Normalization layer used in backbone network (default: :class:`nn.BatchNorm`; + for Synchronized Cross-GPU BachNormalization). + aux : bool + Auxiliary loss. + + Reference: + Zhao, Hengshuang, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia. + "Pyramid scene parsing network." 
*CVPR*, 2017 + """ + + def __init__(self, nclass, backbone='resnet50', aux=False, pretrained_base=True, **kwargs): + super(PSPNet, self).__init__(nclass, aux, backbone, pretrained_base=pretrained_base, **kwargs) + self.head = _PSPHead(nclass, **kwargs) + if self.aux: + self.auxlayer = _FCNHead(1024, nclass, **kwargs) + + self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head']) + + def forward(self, x): + size = x.size()[2:] + _, _, c3, c4 = self.base_forward(x) + outputs = [] + x = self.head(c4) + x = F.interpolate(x, size, mode='bilinear', align_corners=True) + outputs.append(x) + + if self.aux: + auxout = self.auxlayer(c3) + auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True) + outputs.append(auxout) + #return tuple(outputs) + return outputs[0] + +def _PSP1x1Conv(in_channels, out_channels, norm_layer, norm_kwargs): + return nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True) + ) + + +class _PyramidPooling(nn.Module): + def __init__(self, in_channels, **kwargs): + super(_PyramidPooling, self).__init__() + out_channels = int(in_channels / 4) + self.avgpool1 = nn.AdaptiveAvgPool2d(1) + self.avgpool2 = nn.AdaptiveAvgPool2d(2) + self.avgpool3 = nn.AdaptiveAvgPool2d(3) + self.avgpool4 = nn.AdaptiveAvgPool2d(6) + self.conv1 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv2 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv3 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + self.conv4 = _PSP1x1Conv(in_channels, out_channels, **kwargs) + + def forward(self, x): + size = x.size()[2:] + feat1 = F.interpolate(self.conv1(self.avgpool1(x)), size, mode='bilinear', align_corners=True) + feat2 = F.interpolate(self.conv2(self.avgpool2(x)), size, mode='bilinear', align_corners=True) + feat3 = F.interpolate(self.conv3(self.avgpool3(x)), size, mode='bilinear', align_corners=True) + feat4 = 
F.interpolate(self.conv4(self.avgpool4(x)), size, mode='bilinear', align_corners=True) + return torch.cat([x, feat1, feat2, feat3, feat4], dim=1) + + +class _PSPHead(nn.Module): + def __init__(self, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs): + super(_PSPHead, self).__init__() + self.psp = _PyramidPooling(2048, norm_layer=norm_layer, norm_kwargs=norm_kwargs) + self.block = nn.Sequential( + nn.Conv2d(4096, 512, 3, padding=1, bias=False), + norm_layer(512, **({} if norm_kwargs is None else norm_kwargs)), + nn.ReLU(True), + nn.Dropout(0.1), + nn.Conv2d(512, nclass, 1) + ) + + def forward(self, x): + x = self.psp(x) + return self.block(x) + + +def get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models', + pretrained_base=True, **kwargs): + r"""Pyramid Scene Parsing Network + + Parameters + ---------- + dataset : str, default pascal_voc + The dataset that model pretrained on. (pascal_voc, ade20k) + pretrained : bool or str + Boolean value controls whether to load the default pretrained weights for model. + String value represents the hashtag for a certain version of pretrained weights. + root : str, default '~/.torch/models' + Location for keeping the model parameters. + pretrained_base : bool or str, default True + This will load pretrained backbone network, that was trained on ImageNet. 
+ Examples + -------- + >>> model = get_psp(dataset='pascal_voc', backbone='resnet50', pretrained=False) + >>> print(model) + """ + acronyms = { + 'pascal_voc': 'pascal_voc', + 'pascal_aug': 'pascal_aug', + 'ade20k': 'ade', + 'coco': 'coco', + 'citys': 'citys', + } + from ..data.dataloader import datasets + model = PSPNet(datasets[dataset].NUM_CLASS, backbone=backbone, pretrained_base=pretrained_base, **kwargs) + if pretrained: + from .model_store import get_model_file + device = torch.device(kwargs['local_rank']) + model.load_state_dict(torch.load(get_model_file('psp_%s_%s' % (backbone, acronyms[dataset]), root=root), + map_location=device)) + return model + + +def get_psp_resnet50_voc(**kwargs): + return get_psp('pascal_voc', 'resnet50', **kwargs) + + +def get_psp_resnet50_ade(**kwargs): + return get_psp('ade20k', 'resnet50', **kwargs) + + +def get_psp_resnet101_voc(**kwargs): + return get_psp('pascal_voc', 'resnet101', **kwargs) + + +def get_psp_resnet101_ade(**kwargs): + return get_psp('ade20k', 'resnet101', **kwargs) + + +def get_psp_resnet101_citys(**kwargs): + return get_psp('citys', 'resnet101', **kwargs) + + +def get_psp_resnet101_coco(**kwargs): + return get_psp('coco', 'resnet101', **kwargs) + + +if __name__ == '__main__': + # model = get_psp_resnet50_voc() + # img = torch.randn(4, 3, 480, 480) + # output = model(img) + input = torch.rand(2, 3, 512, 512) + model = PSPNet(4, pretrained_base=False) + # target = torch.zeros(4, 512, 512).cuda() + # model.eval() + # print(model) + loss = model(input) + print(loss, loss.shape) + + # from torchsummary import summary + # + # summary(model, (3, 224, 224)) # 打印表格,按顺序输出每层的输出形状和参数 + import torch + from thop import profile + from torchsummary import summary + + flop, params = profile(model, input_size=(1, 3, 512, 512)) + print('flops:{:.3f}G\nparams:{:.3f}M'.format(flop / 1e9, params / 1e6)) \ No newline at end of file diff --git a/core/models/segbase.py b/core/models/segbase.py new file mode 100644 index 
0000000..dd06266 --- /dev/null +++ b/core/models/segbase.py @@ -0,0 +1,60 @@ +"""Base Model for Semantic Segmentation""" +import torch.nn as nn + +from ..nn import JPU +from .base_models.resnetv1b import resnet50_v1s, resnet101_v1s, resnet152_v1s + +__all__ = ['SegBaseModel'] + + +class SegBaseModel(nn.Module): + r"""Base Model for Semantic Segmentation + + Parameters + ---------- + backbone : string + Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50', + 'resnet101' or 'resnet152'). + """ + + def __init__(self, nclass, aux, backbone='resnet50', jpu=False, pretrained_base=True, **kwargs): + super(SegBaseModel, self).__init__() + dilated = False if jpu else True + self.aux = aux + self.nclass = nclass + if backbone == 'resnet50': + self.pretrained = resnet50_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + elif backbone == 'resnet101': + self.pretrained = resnet101_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + elif backbone == 'resnet152': + self.pretrained = resnet152_v1s(pretrained=pretrained_base, dilated=dilated, **kwargs) + else: + raise RuntimeError('unknown backbone: {}'.format(backbone)) + + self.jpu = JPU([512, 1024, 2048], width=512, **kwargs) if jpu else None + + def base_forward(self, x): + """forwarding pre-trained network""" + x = self.pretrained.conv1(x) + x = self.pretrained.bn1(x) + x = self.pretrained.relu(x) + x = self.pretrained.maxpool(x) + c1 = self.pretrained.layer1(x) + c2 = self.pretrained.layer2(c1) + c3 = self.pretrained.layer3(c2) + c4 = self.pretrained.layer4(c3) + + if self.jpu: + return self.jpu(c1, c2, c3, c4) + else: + return c1, c2, c3, c4 #返回的是layer1,2,3,4的输出 + + def evaluate(self, x): + """evaluating network with inputs and targets""" + return self.forward(x)[0] + + def demo(self, x): + pred = self.forward(x) + if self.aux: + pred = pred[0] + return pred diff --git a/core/nn/__init__.py b/core/nn/__init__.py new file mode 100644 index 0000000..218bee9 --- /dev/null +++ 
b/core/nn/__init__.py @@ -0,0 +1,7 @@ +"""Seg NN Modules""" +#from .sync_bn.syncbn import * +#from .syncbn import * +from .ca_block import * +from .psa_block import * +from .jpu import * +from .basic import * \ No newline at end of file diff --git a/core/nn/__pycache__/__init__.cpython-37.pyc b/core/nn/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..606933d Binary files /dev/null and b/core/nn/__pycache__/__init__.cpython-37.pyc differ diff --git a/core/nn/__pycache__/__init__.cpython-38.pyc b/core/nn/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..b31c615 Binary files /dev/null and b/core/nn/__pycache__/__init__.cpython-38.pyc differ diff --git a/core/nn/__pycache__/__init__.cpython-39.pyc b/core/nn/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000..0c42b60 Binary files /dev/null and b/core/nn/__pycache__/__init__.cpython-39.pyc differ diff --git a/core/nn/__pycache__/basic.cpython-37.pyc b/core/nn/__pycache__/basic.cpython-37.pyc new file mode 100644 index 0000000..f938be9 Binary files /dev/null and b/core/nn/__pycache__/basic.cpython-37.pyc differ diff --git a/core/nn/__pycache__/basic.cpython-38.pyc b/core/nn/__pycache__/basic.cpython-38.pyc new file mode 100644 index 0000000..f853a92 Binary files /dev/null and b/core/nn/__pycache__/basic.cpython-38.pyc differ diff --git a/core/nn/__pycache__/ca_block.cpython-37.pyc b/core/nn/__pycache__/ca_block.cpython-37.pyc new file mode 100644 index 0000000..6827fb3 Binary files /dev/null and b/core/nn/__pycache__/ca_block.cpython-37.pyc differ diff --git a/core/nn/__pycache__/ca_block.cpython-38.pyc b/core/nn/__pycache__/ca_block.cpython-38.pyc new file mode 100644 index 0000000..47523f6 Binary files /dev/null and b/core/nn/__pycache__/ca_block.cpython-38.pyc differ diff --git a/core/nn/__pycache__/ca_block.cpython-39.pyc b/core/nn/__pycache__/ca_block.cpython-39.pyc new file mode 100644 index 0000000..b0e9a44 Binary files /dev/null and 
b/core/nn/__pycache__/ca_block.cpython-39.pyc differ diff --git a/core/nn/__pycache__/jpu.cpython-37.pyc b/core/nn/__pycache__/jpu.cpython-37.pyc new file mode 100644 index 0000000..a4f3fee Binary files /dev/null and b/core/nn/__pycache__/jpu.cpython-37.pyc differ diff --git a/core/nn/__pycache__/jpu.cpython-38.pyc b/core/nn/__pycache__/jpu.cpython-38.pyc new file mode 100644 index 0000000..dda8f98 Binary files /dev/null and b/core/nn/__pycache__/jpu.cpython-38.pyc differ diff --git a/core/nn/__pycache__/psa_block.cpython-37.pyc b/core/nn/__pycache__/psa_block.cpython-37.pyc new file mode 100644 index 0000000..01de387 Binary files /dev/null and b/core/nn/__pycache__/psa_block.cpython-37.pyc differ diff --git a/core/nn/__pycache__/psa_block.cpython-38.pyc b/core/nn/__pycache__/psa_block.cpython-38.pyc new file mode 100644 index 0000000..69dcf84 Binary files /dev/null and b/core/nn/__pycache__/psa_block.cpython-38.pyc differ diff --git a/core/nn/basic.py b/core/nn/basic.py new file mode 100644 index 0000000..3b5a186 --- /dev/null +++ b/core/nn/basic.py @@ -0,0 +1,134 @@ +"""Basic Module for Semantic Segmentation""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['_ConvBNPReLU', '_ConvBN', '_BNPReLU', '_ConvBNReLU', '_DepthwiseConv', 'InvertedResidual'] + + +class _ConvBNReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, + dilation=1, groups=1, relu6=False, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = norm_layer(out_channels) + self.relu = nn.ReLU6(False) if relu6 else nn.ReLU(False) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + +class _ConvBNPReLU(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, + dilation=1, groups=1, 
norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConvBNPReLU, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = norm_layer(out_channels) + self.prelu = nn.PReLU(out_channels) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.prelu(x) + return x + + +class _ConvBN(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, + dilation=1, groups=1, norm_layer=nn.BatchNorm2d, **kwargs): + super(_ConvBN, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False) + self.bn = norm_layer(out_channels) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + +class _BNPReLU(nn.Module): + def __init__(self, out_channels, norm_layer=nn.BatchNorm2d, **kwargs): + super(_BNPReLU, self).__init__() + self.bn = norm_layer(out_channels) + self.prelu = nn.PReLU(out_channels) + + def forward(self, x): + x = self.bn(x) + x = self.prelu(x) + return x + + +# ----------------------------------------------------------------- +# For PSPNet +# ----------------------------------------------------------------- +class _PSPModule(nn.Module): + def __init__(self, in_channels, sizes=(1, 2, 3, 6), **kwargs): + super(_PSPModule, self).__init__() + out_channels = int(in_channels / 4) + self.avgpools = nn.ModuleList() + self.convs = nn.ModuleList() + for size in sizes: + self.avgpool.append(nn.AdaptiveAvgPool2d(size)) + self.convs.append(_ConvBNReLU(in_channels, out_channels, 1, **kwargs)) + + def forward(self, x): + size = x.size()[2:] + feats = [x] + for (avgpool, conv) in enumerate(zip(self.avgpools, self.convs)): + feats.append(F.interpolate(conv(avgpool(x)), size, mode='bilinear', align_corners=True)) + return torch.cat(feats, dim=1) + + +# ----------------------------------------------------------------- +# For MobileNet +# 
----------------------------------------------------------------- +class _DepthwiseConv(nn.Module): + """conv_dw in MobileNet""" + + def __init__(self, in_channels, out_channels, stride, norm_layer=nn.BatchNorm2d, **kwargs): + super(_DepthwiseConv, self).__init__() + self.conv = nn.Sequential( + _ConvBNReLU(in_channels, in_channels, 3, stride, 1, groups=in_channels, norm_layer=norm_layer), + _ConvBNReLU(in_channels, out_channels, 1, norm_layer=norm_layer)) + + def forward(self, x): + return self.conv(x) + + +# ----------------------------------------------------------------- +# For MobileNetV2 +# ----------------------------------------------------------------- +class InvertedResidual(nn.Module): + def __init__(self, in_channels, out_channels, stride, expand_ratio, norm_layer=nn.BatchNorm2d, **kwargs): + super(InvertedResidual, self).__init__() + assert stride in [1, 2] + self.use_res_connect = stride == 1 and in_channels == out_channels + + layers = list() + inter_channels = int(round(in_channels * expand_ratio)) + if expand_ratio != 1: + # pw + layers.append(_ConvBNReLU(in_channels, inter_channels, 1, relu6=True, norm_layer=norm_layer)) + layers.extend([ + # dw + _ConvBNReLU(inter_channels, inter_channels, 3, stride, 1, + groups=inter_channels, relu6=True, norm_layer=norm_layer), + # pw-linear + nn.Conv2d(inter_channels, out_channels, 1, bias=False), + norm_layer(out_channels)]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +if __name__ == '__main__': + x = torch.randn(1, 32, 64, 64) + model = InvertedResidual(32, 64, 2, 1) + out = model(x) diff --git a/core/nn/ca_block.py b/core/nn/ca_block.py new file mode 100644 index 0000000..954c293 --- /dev/null +++ b/core/nn/ca_block.py @@ -0,0 +1,72 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from torch.autograd.function import once_differentiable +#from core.nn import _C + +__all__ = 
['CrissCrossAttention', 'ca_weight', 'ca_map'] + + +class _CAWeight(torch.autograd.Function): + @staticmethod + def forward(ctx, t, f): + weight = _C.ca_forward(t, f) + + ctx.save_for_backward(t, f) + + return weight + + @staticmethod + @once_differentiable + def backward(ctx, dw): + t, f = ctx.saved_tensors + + dt, df = _C.ca_backward(dw, t, f) + return dt, df + + +class _CAMap(torch.autograd.Function): + @staticmethod + def forward(ctx, weight, g): + out = _C.ca_map_forward(weight, g) + + ctx.save_for_backward(weight, g) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + weight, g = ctx.saved_tensors + + dw, dg = _C.ca_map_backward(dout, weight, g) + + return dw, dg + + +ca_weight = _CAWeight.apply +ca_map = _CAMap.apply + + +class CrissCrossAttention(nn.Module): + """Criss-Cross Attention Module""" + + def __init__(self, in_channels): + super(CrissCrossAttention, self).__init__() + self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1) + self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1) + self.value_conv = nn.Conv2d(in_channels, in_channels, 1) + self.gamma = nn.Parameter(torch.zeros(1)) + + def forward(self, x): + proj_query = self.query_conv(x) + proj_key = self.key_conv(x) + proj_value = self.value_conv(x) + + energy = ca_weight(proj_query, proj_key) + attention = F.softmax(energy, 1) + out = ca_map(attention, proj_value) + out = self.gamma * out + x + + return out diff --git a/core/nn/csrc/ca.h b/core/nn/csrc/ca.h new file mode 100644 index 0000000..1a93b36 --- /dev/null +++ b/core/nn/csrc/ca.h @@ -0,0 +1,58 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor ca_forward(const at::Tensor& t, + const at::Tensor& f) { + if (t.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_forward_cuda(t, f); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_forward_cpu(t, f); +} + +std::tuple ca_backward(const 
at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f) { + if (dw.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_backward_cuda(dw, t, f); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_backward_cpu(dw, t, f); +} + +at::Tensor ca_map_forward(const at::Tensor& weight, + const at::Tensor& g) { + if (weight.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_map_forward_cuda(weight, g); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_map_forward_cpu(weight, g); +} + +std::tuple ca_map_backward(const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g) { + if (dout.type().is_cuda()) { + #ifdef WITH_CUDA + return ca_map_backward_cuda(dout, weight, g); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return ca_map_backward_cpu(dout, weight, g); +} \ No newline at end of file diff --git a/core/nn/csrc/cpu/ca_cpu.cpp b/core/nn/csrc/cpu/ca_cpu.cpp new file mode 100644 index 0000000..6029c51 --- /dev/null +++ b/core/nn/csrc/cpu/ca_cpu.cpp @@ -0,0 +1,24 @@ +#include "cpu/vision.h" + + +at::Tensor ca_forward_cpu( + const torch::Tensor& t, + const torch::Tensor& f) { + AT_ERROR("Not implemented on the CPU");} + +std::tuple ca_backward_cpu( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f) { + AT_ERROR("Not implemented on the CPU");} + +at::Tensor ca_map_forward_cpu( + const at::Tensor& weight, + const at::Tensor& g) { + AT_ERROR("Not implemented on the CPU");} + +std::tuple ca_map_backward_cpu( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g) { + AT_ERROR("Not implemented on the CPU");} \ No newline at end of file diff --git a/core/nn/csrc/cpu/psa_cpu.cpp b/core/nn/csrc/cpu/psa_cpu.cpp new file mode 100644 index 0000000..9e0e765 --- /dev/null +++ b/core/nn/csrc/cpu/psa_cpu.cpp @@ -0,0 +1,13 @@ +#include "cpu/vision.h" + + +at::Tensor psa_forward_cpu( + const torch::Tensor& hc, + const int forward_type) { + AT_ERROR("Not 
implemented on the CPU");} + +at::Tensor psa_backward_cpu( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type) { + AT_ERROR("Not implemented on the CPU");} \ No newline at end of file diff --git a/core/nn/csrc/cpu/syncbn_cpu.cpp b/core/nn/csrc/cpu/syncbn_cpu.cpp new file mode 100644 index 0000000..70b5db4 --- /dev/null +++ b/core/nn/csrc/cpu/syncbn_cpu.cpp @@ -0,0 +1,45 @@ +#include +#include +#include + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +at::Tensor batchnorm_forward_cpu( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output = (input_ - broadcast_to(ex_, input_)) / broadcast_to(exs_, input_); + output = output * broadcast_to(gamma_, input_) + broadcast_to(beta_, input_); + return output; +} + +// Not implementing CPU backward for now +std::vector batchnorm_backward_cpu( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + at::Tensor gradinput = at::zeros_like(input_); + at::Tensor gradgamma = at::zeros_like(gamma_); + at::Tensor gradbeta = at::zeros_like(beta_); + at::Tensor gradMean = at::zeros_like(ex_); + at::Tensor gradStd = at::zeros_like(exs_); + return {gradinput, gradMean, gradStd, gradgamma, gradbeta}; +} \ No newline at end of file diff --git a/core/nn/csrc/cpu/vision.h b/core/nn/csrc/cpu/vision.h new file mode 100644 index 0000000..8a824fe --- /dev/null +++ b/core/nn/csrc/cpu/vision.h @@ -0,0 +1,47 @@ +#pragma once +#include + + +at::Tensor ca_forward_cpu( + const at::Tensor& t, + const at::Tensor& f); + +std::tuple ca_backward_cpu( + const at::Tensor& dw, + 
const at::Tensor& t, + const at::Tensor& f); + +at::Tensor ca_map_forward_cpu( + const at::Tensor& weight, + const at::Tensor& g); + +std::tuple ca_map_backward_cpu( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g); + +at::Tensor psa_forward_cpu( + const at::Tensor& hc, + const int forward_type); + +at::Tensor psa_backward_cpu( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type); + +at::Tensor batchnorm_forward_cpu( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector batchnorm_backward_cpu( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); \ No newline at end of file diff --git a/core/nn/csrc/cuda/ca_cuda.cu b/core/nn/csrc/cuda/ca_cuda.cu new file mode 100644 index 0000000..ba459fa --- /dev/null +++ b/core/nn/csrc/cuda/ca_cuda.cu @@ -0,0 +1,324 @@ +#include +#include + +#include +#include +#include + +template +__global__ void ca_forward_kernel(const T *t, const T *f, T *weight, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int z = blockIdx.z; + + if (x < width && y < height && z < height+width-1) { + for (int batch = 0; batch < num; ++batch) { + for (int plane = 0; plane < chn; ++plane) { + T _t = t[(batch * chn + plane) * sp + y * width + x]; + + if (z < width) { + int i = z; + T _f = f[(batch * chn + plane) * sp + y * width + i]; + weight[(batch * len + i) * sp + y*width + x] += _t*_f; + } + else { + int i = z - width; + int j = i +__global__ void ca_backward_kernel_t(const T *dw, const T *t, const T *f, T *dt, + int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * 
blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dw = dw[(batch * len + i) * sp + y*width + x]; + T _f = f[(batch * chn + plane) * sp + y*width + i]; + dt[(batch * chn + plane) * sp + y*width + x] += _dw * _f; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i +__global__ void ca_backward_kernel_f(const T *dw, const T *t, const T *f, T *df, + int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dw = dw[(batch * len + x) * sp + y*width + i]; + T _t = t[(batch * chn + plane) * sp + y*width + i]; + df[(batch * chn + plane) * sp + y*width + x] += _dw * _t; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i>y ? 
y : y-1; + + T _dw = dw[(batch * len + width + j) * sp + i*width + x]; + T _t = t[(batch * chn + plane) * sp + i*width + x]; + df[(batch * chn + plane) * sp + y*width + x] += _dw * _t; + } + } + } +} + +template +__global__ void ca_map_forward_kernel(const T *weight, const T *g, T *out, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _g = g[(batch * chn + plane) * sp + y*width + i]; + T _w = weight[(batch * len + i) * sp + y*width + x]; + out[(batch * chn + plane) * sp + y*width + x] += _g * _w; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + + int j = i +__global__ void ca_map_backward_kernel_w(const T *dout, const T *weight, const T *g, T *dw, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int z = blockIdx.z; + + if (x < width && y < height && z < height+width-1) { + for (int batch = 0; batch < num; ++batch) { + for (int plane = 0; plane < chn; ++plane) { + T _dout = dout[(batch * chn + plane) * sp + y*width + x]; + + if (z < width) { + int i = z; + T _g = g[(batch * chn + plane) * sp + y*width + i]; + dw[(batch * len + i) * sp + y*width + x] += _dout * _g; + } + else { + int i = z - width; + int j = i +__global__ void ca_map_backward_kernel_g(const T *dout, const T *weight, const T *g, T *dg, int num, int chn, int height, int width) { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int sp = height * width; + int len = height + width - 1; + int plane = blockIdx.z; + + if (x < width && y < height && plane < chn) { + for (int batch = 
0; batch < num; ++batch) { + for (int i = 0; i < width; ++i) { + T _dout = dout[(batch * chn + plane) * sp + y*width + i]; + T _w = weight[(batch * len + x) * sp + y*width + i]; + dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w; + } + for (int i = 0; i < height; ++i) { + if (i == y) continue; + int j = i>y ? y : y-1; + + T _dout = dout[(batch * chn + plane) * sp + i*width + x]; + T _w = weight[(batch * len + width + j) * sp + i*width + x]; + dg[(batch * chn + plane) * sp + y*width + x] += _dout * _w; + } + } + } +} + +/* + * Implementations + */ +at::Tensor ca_forward_cuda(const at::Tensor& t, const at::Tensor& f) { + AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = t.size(0); + auto c = t.size(1); + auto h = t.size(2); + auto w = t.size(3); + + at::Tensor weight = at::zeros({n, h + w - 1, h, w}, t.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = h + w; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_forward", [&] { + ca_forward_kernel<<>>( + t.contiguous().data(), + f.contiguous().data(), + weight.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return weight; +} + +std::tuple ca_backward_cuda(const at::Tensor& dw, const at::Tensor& t, const at::Tensor& f) { + AT_ASSERTM(dw.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(t.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(f.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = t.size(0); + auto c = t.size(1); + auto h = t.size(2); + auto w = t.size(3); + + at::Tensor dt = at::zeros_like(t); + at::Tensor df = at::zeros_like(f); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / 
threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = c; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(t.type(), "ca_backward_kernel_t", [&] { + ca_backward_kernel_t<<>> ( + dw.contiguous().data(), + t.contiguous().data(), + f.contiguous().data(), + dt.contiguous().data(), + n, c, h, w); + }); + + AT_DISPATCH_FLOATING_TYPES(f.type(), "ca_backward_kernel_f", [&] { + ca_backward_kernel_f<<>> ( + dw.contiguous().data(), + t.contiguous().data(), + f.contiguous().data(), + df.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(dt, df); +} + +at::Tensor ca_map_forward_cuda(const at::Tensor& weight, const at::Tensor& g) { + AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = g.size(0); + auto c = g.size(1); + auto h = g.size(2); + auto w = g.size(3); + + at::Tensor out = at::zeros_like(g); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = c; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_forward", [&] { + ca_map_forward_kernel<<>>( + weight.contiguous().data(), + g.contiguous().data(), + out.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return out; +} + +std::tuple ca_map_backward_cuda(const at::Tensor& dout, const at::Tensor& weight, const at::Tensor& g) { + AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(weight.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(g.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = dout.size(0); + auto c = dout.size(1); + auto h = dout.size(2); + auto w = dout.size(3); + + at::Tensor dw = at::zeros_like(weight); + at::Tensor dg = at::zeros_like(g); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // 
Run kernel + dim3 threads(32, 32); + int d1 = (w + threads.x - 1) / threads.x; + int d2 = (h + threads.y - 1) / threads.y; + int d3 = h + w; + dim3 blocks(d1, d2, d3); + + AT_DISPATCH_FLOATING_TYPES(weight.type(), "ca_map_backward_kernel_w", [&] { + ca_map_backward_kernel_w<<>> ( + dout.contiguous().data(), + weight.contiguous().data(), + g.contiguous().data(), + dw.contiguous().data(), + n, c, h, w); + }); + + AT_DISPATCH_FLOATING_TYPES(g.type(), "ca_map_backward_kernel_g", [&] { + ca_map_backward_kernel_g<<>> ( + dout.contiguous().data(), + weight.contiguous().data(), + g.contiguous().data(), + dg.contiguous().data(), + n, c, h, w); + }); + THCudaCheck(cudaGetLastError()); + return std::make_tuple(dw, dg); +} \ No newline at end of file diff --git a/core/nn/csrc/cuda/helper.h b/core/nn/csrc/cuda/helper.h new file mode 100644 index 0000000..cc5ea88 --- /dev/null +++ b/core/nn/csrc/cuda/helper.h @@ -0,0 +1,334 @@ +#include +#include +#include + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template +struct ScalarConvert { + static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. 
+__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +// Sum across all threads within a warp +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +struct Float2 { + Acctype v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) : v1(ScalarConvert::to(v1)), v2(ScalarConvert::to(v2)) {} + __device__ Float2(DType v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2(int v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2& operator+=(const Float2& a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ Float2 warpSum(Float2 value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +template +__device__ T reduceD( + Op op, int b, int i, int k, int D) { + T sum = 0; + for (int x = threadIdx.x; x < D; x += blockDim.x) { + sum += op(b,i,k,x); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < 
WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceN( + Op op, int b, int k, int d, int N) { + T sum = 0; + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(b,x,k,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceK( + Op op, int b, int i, int d, int K) { + T sum = 0; + for (int x = threadIdx.x; x < K; x += blockDim.x) { + sum += op(b,i,x,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // 
Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceBN( + Op op, + int k, int d, int B, int N) { + T sum = 0; + for (int batch = 0; batch < B; ++batch) { + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(batch,x,k,d); + } + } + // sum over NumThreads within a warp + sum = warpSum(sum); + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +struct DeviceTensor { + public: + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? 
size[i] : 0; + } + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i < Dim); + return size_[i]; + } + + inline __device__ __host__ int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + inline __device__ __host__ DeviceTensor select(const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ DeviceTensor operator[](const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ size_t InnerSize() const { + assert(Dim >= 3); + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + inline __device__ __host__ size_t ChannelCount() const { + assert(Dim >= 3); + return size_[1]; + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[Dim]; +}; + +template +struct DeviceTensor { + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + size_[0] = size ? 
size[0] : 0; + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i == 0); + return size_[0]; + } + + inline __device__ __host__ int numElements() const { + return size_[0]; + } + + inline __device__ __host__ DType &operator[](const size_t x) const { + return *(dptr_ + x); + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[1]; +}; + +template +static DeviceTensor devicetensor(const at::Tensor &blob) { + DType *data = blob.data(); + DeviceTensor tensor(data, nullptr); + for (int i = 0; i < Dim; ++i) { + tensor.size_[i] = blob.size(i); + } + return tensor; +} \ No newline at end of file diff --git a/core/nn/csrc/cuda/psa_cuda.cu b/core/nn/csrc/cuda/psa_cuda.cu new file mode 100644 index 0000000..c47c98a --- /dev/null +++ b/core/nn/csrc/cuda/psa_cuda.cu @@ -0,0 +1,214 @@ +#include +#include + +#include +#include +#include + +#define PSA_TYPE_COLLECT 1 +#define PSA_TYPE_DISTRIBUTE 2 + +const int CUDA_NUM_THREADS = 512; + +inline int GET_BLOCKS(const int N) { + return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; +} + +template +__global__ void psa_collect_forward_kernel(const T *hc, T *out, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - 
half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + out[(n * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)) * height * width + h * width + w] = + hc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w]; + } + } + } +} + +template +__global__ void psa_distribute_forward_kernel(const T *hc, T *out, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + out[(n * height * width + h * width + w) * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)] = + hc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w]; + } + } + } +} + +template +__global__ void psa_collect_backward_kernel(const T *dout, T *dhc, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * 
gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + dhc[((h * out_h * out_w + hidx * out_w + widx) * height + h) * width + w] = + dout[(n * height * width + (hidx + h - half_out_h) * width + (widx + w - half_out_w)) * height * width + h * width + w]; + } + } + } +} + +template +__global__ void psa_distribute_backward_kernel(const T *dout, T *dhc, int num, int height, int width) { + const int out_h = 2 * height - 1; + const int out_w = 2 * width - 1; + const int half_out_h = (out_h - 1) / 2; + const int half_out_w = (out_w - 1) / 2; + + int x = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = num * height * width; + + for (int i = x; i < nthreads; i += blockDim.x * gridDim.x) { + const int w = i % width; + const int h = (i / width) % height; + const int n = i / width / height; + + // effective mask region : [hstart, hend) x [wstart, wend) with out-indexed + const int hstart = max(0, half_out_h - h); + const int hend = min(out_h, height + half_out_h - h); + const int wstart = max(0, half_out_w - w); + const int wend = min(out_w, width + half_out_w - w); + + // (hidx, widx) with out-indexed + // (hidx + h - half_mask_H_, widx + w - half_mask_W_) with feature-indexed + for (int hidx = hstart; hidx < hend; hidx++) { + for (int widx = wstart; widx < wend; widx++) { + dhc[((n * out_h * out_w + hidx * out_w + widx) * height + h) * width + w] = + dout[(n * height * width + h * width + w) * height * width + (hidx + h - 
half_out_h) * width + (widx + w - half_out_w)]; + } + } + } +} + +at::Tensor psa_forward_cuda(const at::Tensor& hc, const int forward_type) { + AT_ASSERTM(hc.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = hc.size(0); + auto c = hc.size(1); + auto h = hc.size(2); + auto w = hc.size(3); + + at::Tensor out = at::zeros({n, h * w, h * w}, hc.options()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + int nthreads = n * h * w; + + switch (forward_type) { + case PSA_TYPE_COLLECT: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_forward", [&] { + psa_collect_forward_kernel<<>>( + hc.contiguous().data(), + out.contiguous().data(), + n, h, w); + }); + break; + case PSA_TYPE_DISTRIBUTE: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_forward", [&] { + psa_distribute_forward_kernel<<>>( + hc.contiguous().data(), + out.contiguous().data(), + n, h, w); + }); + break; + } + THCudaCheck(cudaGetLastError()); + return out; +} + +at::Tensor psa_backward_cuda(const at::Tensor& dout, const at::Tensor& hc, const int forward_type) { + AT_ASSERTM(dout.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(hc.type().is_cuda(), "input must be a CUDA tensor"); + + auto n = hc.size(0); + auto c = hc.size(1); + auto h = hc.size(2); + auto w = hc.size(3); + + at::Tensor dhc = at::zeros_like(hc); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + int nthreads = n * h * w; + + switch (forward_type) { + case PSA_TYPE_COLLECT: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_backward", [&] { + psa_collect_backward_kernel<<>>( + dout.contiguous().data(), + dhc.contiguous().data(), + n, h, w); + }); + break; + case PSA_TYPE_DISTRIBUTE: + AT_DISPATCH_FLOATING_TYPES(hc.type(), "psa_backward", [&] { + psa_distribute_backward_kernel<<>>( + dout.contiguous().data(), + dhc.contiguous().data(), + n, h, w); + }); + break; + } + THCudaCheck(cudaGetLastError()); + return dhc; +} \ No newline at end of file diff --git a/core/nn/csrc/cuda/syncbn_cuda.cu 
b/core/nn/csrc/cuda/syncbn_cuda.cu new file mode 100644 index 0000000..dcaed67 --- /dev/null +++ b/core/nn/csrc/cuda/syncbn_cuda.cu @@ -0,0 +1,488 @@ +#include +// #include +#include +#include +#include + +#include +#include + +#include "helper.h" + +namespace { + +template +struct GradOp { + __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) + : beta(m), output(i), gradOutput(g) {} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = gradOutput[batch][plane][n]; + DType c = ScalarConvert::to(output[batch][plane][n] - beta); + return Float2(g, g * c); + } + const Acctype beta; + const DeviceTensor3 output; + const DeviceTensor3 gradOutput; +}; + +template +struct SumOp { + __device__ SumOp(DeviceTensor i) : input(i){} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = input[batch][plane][n]; + return Float2(g, g * g); + } + DType mean; + DeviceTensor input; +}; + +// Sum across (batch, x/y/z) applying Op() pointwise +template +__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { + T sum = (T)0; + for (int batch = 0; batch < tensor.getSize(0); ++batch) { + for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__global__ void batchnorm_forward_kernel ( + 
DeviceTensor output, + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + output[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void inp_batchnorm_forward_kernel ( + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + input[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void expectation_forward_kernel ( + DeviceTensor input, + DeviceTensor ex, + DeviceTensor exs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + SumOp g(input); + Float2 res = reduce, + SumOp, DeviceTensor>(g, input, c); + DType xsum = res.v1; + DType xsquare = res.v2; + if (threadIdx.x == 0) { + ex[c] = xsum * norm; + exs[c] = xsquare * norm; + } +} + +template +__global__ void batchnorm_backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor input, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(mean[c], input, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd 
* dotP * gradScale; + gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP * invstd; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void inp_batchnorm_backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor output, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(beta[c], output, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; + gradExs[c] = - 0.5 * invstd * invstd * dotP; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP / gamma[c]; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void expectation_backward_kernel ( + DeviceTensor gradInput, + DeviceTensor input, + DeviceTensor gradEx, + DeviceTensor gradExs, + 
DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * + input[batch][c][x] * norm; + } + } +} + +template +__global__ void inp_expectation_backward_kernel ( + DeviceTensor gradInput, + DeviceTensor output, + DeviceTensor gradEx, + DeviceTensor gradExs, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * + ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; + } + } +} + +} // namespace + +at::Tensor batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output_ = at::zeros_like(input_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "batchnorm_forward_cuda", ([&] { + /* Device tensors */ + DeviceTensor output = devicetensor(output_); + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + batchnorm_forward_kernel<<>>( + output, input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return output_; +} + +at::Tensor inp_batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const 
at::Tensor beta_, + float eps) { + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "inp_batchnorm_forward_cuda", ([&] { + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + inp_batchnorm_forward_kernel<<>>( + input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return input_; +} + +std::vector batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(input_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "batchnorm_backward_cuda", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + 
batchnorm_backward_kernel + <<>>( + gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector inp_batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(output_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "inp_batchnorm_backward_cuda", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor output = devicetensor(output_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + inp_batchnorm_backward_kernel + <<>>( + gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector expectation_forward_cuda( + const at::Tensor input_) { + /* outputs */ + auto ex_ = torch::zeros({input_.size(1)}, input_.options()); + auto exs_ = 
torch::zeros({input_.size(1)}, input_.options()); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "expectation_forward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor exs = devicetensor(exs_); + /* kernel function */ + expectation_forward_kernel + <<>>(input, ex, exs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {ex_, exs_}; +} + +at::Tensor expectation_backward_cuda( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + /* outputs */ + at::Tensor gradInput_ = at::zeros_like(input_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "expectation_backward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + /* kernel function */ + expectation_backward_kernel + <<>>(gradInput, input, gradEx, gradExs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} + +at::Tensor inp_expectation_backward_cuda( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs */ + //auto gradInput_ = at::zeros_like(output_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + /* cuda utils*/ + cudaStream_t stream = 
at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "inp_expectation_backward_cuda", ([&] { + scalar_t norm = scalar_t(1) / (output_.size(0) * output_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(output_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + inp_expectation_backward_kernel + <<>>(gradInput, input, gradEx, gradExs, + ex, std, gamma, beta, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} \ No newline at end of file diff --git a/core/nn/csrc/cuda/vision.h b/core/nn/csrc/cuda/vision.h new file mode 100644 index 0000000..6696840 --- /dev/null +++ b/core/nn/csrc/cuda/vision.h @@ -0,0 +1,84 @@ +#pragma once +#include +#include + + +at::Tensor ca_forward_cuda( + const at::Tensor& t, + const at::Tensor& f); + +std::tuple ca_backward_cuda( + const at::Tensor& dw, + const at::Tensor& t, + const at::Tensor& f); + +at::Tensor ca_map_forward_cuda( + const at::Tensor& weight, + const at::Tensor& g); + +std::tuple ca_map_backward_cuda( + const at::Tensor& dout, + const at::Tensor& weight, + const at::Tensor& g); + +at::Tensor psa_forward_cuda( + const at::Tensor& hc, + const int forward_type); + +at::Tensor psa_backward_cuda( + const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type); + +at::Tensor batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +at::Tensor inp_batchnorm_forward_cuda( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor 
gamma_, + const at::Tensor beta_, + float eps); + +std::vector batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector inp_batchnorm_backward_cuda( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector expectation_forward_cuda( + const at::Tensor input_); + +at::Tensor expectation_backward_cuda( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_); + +at::Tensor inp_expectation_backward_cuda( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); \ No newline at end of file diff --git a/core/nn/csrc/psa.h b/core/nn/csrc/psa.h new file mode 100644 index 0000000..1702581 --- /dev/null +++ b/core/nn/csrc/psa.h @@ -0,0 +1,33 @@ +#pragma once + +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor psa_forward(const at::Tensor& hc, + const int forward_type) { + if (hc.type().is_cuda()) { + #ifdef WITH_CUDA + return psa_forward_cuda(hc, forward_type); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return psa_forward_cpu(hc, forward_type); +} + +at::Tensor psa_backward(const at::Tensor& dout, + const at::Tensor& hc, + const int forward_type) { + if (hc.type().is_cuda()) { + #ifdef WITH_CUDA + return psa_backward_cuda(dout, hc, forward_type); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return psa_backward_cpu(dout, hc, forward_type); +} \ No newline at end of file diff --git a/core/nn/csrc/syncbn.h b/core/nn/csrc/syncbn.h new file mode 100644 index 0000000..fbcf695 --- 
/dev/null +++ b/core/nn/csrc/syncbn.h @@ -0,0 +1,118 @@ +#pragma once + +#include +#include "cpu/vision.h" + +#ifdef WITH_CUDA +#include "cuda/vision.h" +#endif + +// Interface for Python +at::Tensor batchnorm_forward(const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return batchnorm_forward_cuda(input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return batchnorm_forward_cpu(input_, ex_, exs_, gamma_, beta_, eps); +} + +at::Tensor inp_batchnorm_forward(const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_batchnorm_forward_cuda(input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector batchnorm_backward(const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (gradoutput_.type().is_cuda()) { + #ifdef WITH_CUDA + return batchnorm_backward_cuda(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + return batchnorm_backward_cpu(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); +} + +std::vector inp_batchnorm_backward(const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (gradoutput_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_batchnorm_backward_cuda(gradoutput_, input_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented 
on the CPU"); +} + +std::vector expectation_forward(const at::Tensor input_) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return expectation_forward_cuda(input_); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor expectation_backward(const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + if (input_.type().is_cuda()) { + #ifdef WITH_CUDA + return expectation_backward_cuda(input_, gradEx_, gradExs_); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} + +at::Tensor inp_expectation_backward(const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + if (output_.type().is_cuda()) { + #ifdef WITH_CUDA + return inp_expectation_backward_cuda(gradInput_, output_, gradEx_, gradExs_, ex_, exs_, gamma_, beta_, eps); + #else + AT_ERROR("Not compiled with GPU support"); + #endif + } + AT_ERROR("Not implemented on the CPU"); +} \ No newline at end of file diff --git a/core/nn/csrc/vision.cpp b/core/nn/csrc/vision.cpp new file mode 100644 index 0000000..c369176 --- /dev/null +++ b/core/nn/csrc/vision.cpp @@ -0,0 +1,19 @@ +#include "ca.h" +#include "psa.h" +#include "syncbn.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ca_forward", &ca_forward, "ca_forward"); + m.def("ca_backward", &ca_backward, "ca_backward"); + m.def("ca_map_forward", &ca_map_forward, "ca_map_forward"); + m.def("ca_map_backward", &ca_map_backward, "ca_map_backward"); + m.def("psa_forward", &psa_forward, "psa_forward"); + m.def("psa_backward", &psa_backward, "psa_backward"); + m.def("batchnorm_forward", &batchnorm_forward, "batchnorm_forward"); + m.def("inp_batchnorm_forward", &inp_batchnorm_forward, "inp_batchnorm_forward"); + m.def("batchnorm_backward", 
&batchnorm_backward, "batchnorm_backward"); + m.def("inp_batchnorm_backward", &inp_batchnorm_backward, "inp_batchnorm_backward"); + m.def("expectation_forward", &expectation_forward, "expectation_forward"); + m.def("expectation_backward", &expectation_backward, "expectation_backward"); + m.def("inp_expectation_backward", &inp_expectation_backward, "inp_expectation_backward"); +} \ No newline at end of file diff --git a/core/nn/jpu.py b/core/nn/jpu.py new file mode 100644 index 0000000..db23bab --- /dev/null +++ b/core/nn/jpu.py @@ -0,0 +1,68 @@ +"""Joint Pyramid Upsampling""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +__all__ = ['JPU'] + + +class SeparableConv2d(nn.Module): + def __init__(self, inplanes, planes, kernel_size=3, stride=1, padding=1, + dilation=1, bias=False, norm_layer=nn.BatchNorm2d): + super(SeparableConv2d, self).__init__() + self.conv = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias) + self.bn = norm_layer(inplanes) + self.pointwise = nn.Conv2d(inplanes, planes, 1, bias=bias) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.pointwise(x) + return x + + +# copy from: https://github.com/wuhuikai/FastFCN/blob/master/encoding/nn/customize.py +class JPU(nn.Module): + def __init__(self, in_channels, width=512, norm_layer=nn.BatchNorm2d, **kwargs): + super(JPU, self).__init__() + + self.conv5 = nn.Sequential( + nn.Conv2d(in_channels[-1], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.conv4 = nn.Sequential( + nn.Conv2d(in_channels[-2], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.conv3 = nn.Sequential( + nn.Conv2d(in_channels[-3], width, 3, padding=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + + self.dilation1 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=1, dilation=1, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation2 = nn.Sequential( + 
SeparableConv2d(3 * width, width, 3, padding=2, dilation=2, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation3 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=4, dilation=4, bias=False), + norm_layer(width), + nn.ReLU(True)) + self.dilation4 = nn.Sequential( + SeparableConv2d(3 * width, width, 3, padding=8, dilation=8, bias=False), + norm_layer(width), + nn.ReLU(True)) + + def forward(self, *inputs): + feats = [self.conv5(inputs[-1]), self.conv4(inputs[-2]), self.conv3(inputs[-3])] + size = feats[-1].size()[2:] + feats[-2] = F.interpolate(feats[-2], size, mode='bilinear', align_corners=True) + feats[-3] = F.interpolate(feats[-3], size, mode='bilinear', align_corners=True) + feat = torch.cat(feats, dim=1) + feat = torch.cat([self.dilation1(feat), self.dilation2(feat), self.dilation3(feat), self.dilation4(feat)], + dim=1) + + return inputs[0], inputs[1], inputs[2], feat diff --git a/core/nn/psa_block.py b/core/nn/psa_block.py new file mode 100644 index 0000000..c8ff11b --- /dev/null +++ b/core/nn/psa_block.py @@ -0,0 +1,71 @@ +import torch +import torch.nn as nn + +from torch.autograd.function import once_differentiable +#from core.nn import _C + +__all__ = ['CollectAttention', 'DistributeAttention', 'psa_collect', 'psa_distribute'] + + +class _PSACollect(torch.autograd.Function): + @staticmethod + def forward(ctx, hc): + out = _C.psa_forward(hc, 1) + + ctx.save_for_backward(hc) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + hc = ctx.saved_tensors + + dhc = _C.psa_backward(dout, hc[0], 1) + + return dhc + + +class _PSADistribute(torch.autograd.Function): + @staticmethod + def forward(ctx, hc): + out = _C.psa_forward(hc, 2) + + ctx.save_for_backward(hc) + + return out + + @staticmethod + @once_differentiable + def backward(ctx, dout): + hc = ctx.saved_tensors + + dhc = _C.psa_backward(dout, hc[0], 2) + + return dhc + + +psa_collect = _PSACollect.apply +psa_distribute = _PSADistribute.apply + + 
+class CollectAttention(nn.Module): + """Collect Attention Generation Module""" + + def __init__(self): + super(CollectAttention, self).__init__() + + def forward(self, x): + out = psa_collect(x) + return out + + +class DistributeAttention(nn.Module): + """Distribute Attention Generation Module""" + + def __init__(self): + super(DistributeAttention, self).__init__() + + def forward(self, x): + out = psa_distribute(x) + return out diff --git a/core/nn/setup.py b/core/nn/setup.py new file mode 100644 index 0000000..ec800c4 --- /dev/null +++ b/core/nn/setup.py @@ -0,0 +1,56 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. +# !/usr/bin/env python +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/90c226cf10e098263d1df28bda054a5f22513b4f/setup.py + +import os +import glob +import torch + +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME + +requirements = ["torch"] + + +def get_extension(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, "csrc") + + main_file = glob.glob(os.path.join(extensions_dir, "*.cpp")) + source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp")) + source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) + + sources = main_file + source_cpu + extension = CppExtension + + define_macros = [] + + if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1": + extension = CUDAExtension + sources += source_cuda + define_macros += [("WITH_CUDA", None)] + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + "._C", + sources, + include_dirs=include_dirs, + define_macros=define_macros, + ) + ] + + return ext_modules + + +setup( + name="semantic_segmentation", + version="0.1", + author="tramac", + description="semantic segmentation in pytorch", + 
ext_modules=get_extension(), + cmdclass={"build_ext": BuildExtension} +) \ No newline at end of file diff --git a/core/nn/sync_bn/__init__.py b/core/nn/sync_bn/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/nn/sync_bn/functions.py b/core/nn/sync_bn/functions.py new file mode 100644 index 0000000..b0102e6 --- /dev/null +++ b/core/nn/sync_bn/functions.py @@ -0,0 +1,285 @@ +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +## Created by: Hang Zhang +## Email: zhanghang0704@gmail.com +## Copyright (c) 2018 +## +## This source code is licensed under the MIT-style license found in the +## LICENSE file in the root directory of this source tree +##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +"""Synchronized Cross-GPU Batch Normalization functions""" +import torch.cuda.comm as comm + +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from core.nn.sync_bn import lib + +__all__ = ['syncbatchnorm', 'inp_syncbatchnorm'] + + +class syncbatchnorm_(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-05, + activation="none", slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + assert activation == 'none' + + # continous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + if x.is_cuda: + _ex, _exs = lib.gpu.expectation_forward(x) + else: + raise NotImplemented + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsqueeze(0)) + + _ex = 
comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + + # BN forward + if x.is_cuda: + y = lib.gpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + else: + y = lib.cpu.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return y + + @staticmethod + @once_differentiable + def backward(ctx, dz): + x, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # BN backward + if dz.is_cuda: + dx, _dex, _dexs, dgamma, dbeta = lib.gpu.batchnorm_backward(dz, x, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + if x.is_cuda: + dx_ = lib.gpu.expectation_backward(x, _dex, _dexs) + else: + raise 
NotImplemented + dx = dx + dx_ + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def _parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +def _act_forward(ctx, x): + if ctx.activation.lower() == "leaky_relu": + if x.is_cuda: + lib.gpu.leaky_relu_forward(x, ctx.slope) + else: + raise NotImplemented + else: + assert ctx.activation == 'none' + + +def _act_backward(ctx, x, dx): + if ctx.activation.lower() == "leaky_relu": + if x.is_cuda: + lib.gpu.leaky_relu_backward(x, dx, ctx.slope) + else: + raise NotImplemented + else: + assert ctx.activation == 'none' + + +class inp_syncbatchnorm_(Function): + @classmethod + def forward(cls, ctx, x, gamma, beta, running_mean, running_var, + extra, sync=True, training=True, momentum=0.1, eps=1e-5, + activation='none', slope=0.01): + # save context + cls._parse_extra(ctx, extra) + ctx.sync = sync + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + + # continous inputs + x = x.contiguous() + gamma = gamma.contiguous() + beta = beta.contiguous() + + if ctx.training: + if x.is_cuda: + _ex, _exs = lib.gpu.expectation_forward(x) + else: + raise NotImplemented + + if ctx.sync: + if ctx.is_master: + _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _ex_w, _exs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _ex.append(_ex_w.unsqueeze(0)) + _exs.append(_exs_w.unsuqeeze(0)) + + _ex = comm.gather(_ex).mean(0) + _exs = comm.gather(_exs).mean(0) + + tensors = comm.broadcast_coalesced((_ex, _exs), [_ex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + 
ctx.master_queue.put((_ex, _exs)) + _ex, _exs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + # Update running stats + _var = _exs - _ex ** 2 + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + _ex, _var = running_mean.contiguous(), running_var.contiguous() + _exs = _var + _ex ** 2 + ctx.mark_dirty(x) + + # BN forward + activation + if x.is_cuda: + lib.gpu.batchnorm_inp_forward(x, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + _act_forward(ctx, x) + + # Output + ctx.save_for_backward(x, _ex, _exs, gamma, beta) + return x + + @staticmethod + @once_differentiable + def backward(ctx, dz): + z, _ex, _exs, gamma, beta = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + # BN backward + if dz.is_cuda: + dx, _dex, _dexs, dgamma, dbeta = lib.gpu.batchnorm_inp_backward(dz, z, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + if ctx.training: + if ctx.sync: + if ctx.is_master: + _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)] + for _ in range(ctx.master_queue.maxsize): + _dex_w, _dexs_w = ctx.master_queue.get() + ctx.master_queue.task_done() + _dex.append(_dex_w.unsqueeze(0)) + _dexs.append(_dexs_w.unsqueeze(0)) + + _dex = comm.gather(_dex).mean(0) + _dexs = comm.gather(_dexs).mean(0) + + tensors = comm.broadcast_coalesced((_dex, _dexs), [_dex.get_device()] + ctx.worker_ids) + for ts, queue in zip(tensors[1:], ctx.worker_queues): + queue.put(ts) + else: + ctx.master_queue.put((_dex, _dexs)) + _dex, _dexs = ctx.worker_queue.get() + ctx.worker_queue.task_done() + + if z.is_cuda: + lib.gpu.expectation_inp_backward(dx, z, _dex, _dexs, _ex, _exs, gamma, beta, ctx.eps) + else: + raise NotImplemented + + return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None + + @staticmethod + def 
_parse_extra(ctx, extra): + ctx.is_master = extra["is_master"] + if ctx.is_master: + ctx.master_queue = extra["master_queue"] + ctx.worker_queues = extra["worker_queues"] + ctx.worker_ids = extra["worker_ids"] + else: + ctx.master_queue = extra["master_queue"] + ctx.worker_queue = extra["worker_queue"] + + +syncbatchnorm = syncbatchnorm_.apply +inp_syncbatchnorm = inp_syncbatchnorm_.apply diff --git a/core/nn/sync_bn/lib/__init__.py b/core/nn/sync_bn/lib/__init__.py new file mode 100644 index 0000000..98c3374 --- /dev/null +++ b/core/nn/sync_bn/lib/__init__.py @@ -0,0 +1,20 @@ +import os +import torch +from torch.utils.cpp_extension import load + +cwd = os.path.dirname(os.path.realpath(__file__)) +cpu_path = os.path.join(cwd, 'cpu') +gpu_path = os.path.join(cwd, 'gpu') + +cpu = load('sync_cpu', [ + os.path.join(cpu_path, 'operator.cpp'), + os.path.join(cpu_path, 'syncbn_cpu.cpp'), +], build_directory=cpu_path, verbose=False) + +if torch.cuda.is_available(): + gpu = load('sync_gpu', [ + os.path.join(gpu_path, 'operator.cpp'), + os.path.join(gpu_path, 'activation_kernel.cu'), + os.path.join(gpu_path, 'syncbn_kernel.cu'), + ], extra_cuda_cflags=["--expt-extended-lambda"], + build_directory=gpu_path, verbose=False) diff --git a/core/nn/sync_bn/lib/cpu/build.ninja b/core/nn/sync_bn/lib/cpu/build.ninja new file mode 100644 index 0000000..e432f66 --- /dev/null +++ b/core/nn/sync_bn/lib/cpu/build.ninja @@ -0,0 +1,21 @@ +ninja_required_version = 1.3 +cxx = c++ + +cflags = -DTORCH_EXTENSION_NAME=sync_cpu -DTORCH_API_INCLUDE_EXTENSION_H -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/torch/csrc/api/include -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/TH -isystem /home/tramac/.pyenv/versions/anaconda3-4.4.0/lib/python3.6/site-packages/torch/include/THC -isystem 
/home/tramac/.pyenv/versions/anaconda3-4.4.0/include/python3.6m -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++11 +ldflags = -shared + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out + depfile = $out.d + deps = gcc + +rule link + command = $cxx $in $ldflags -o $out + +build operator.o: compile /home/tramac/PycharmProjects/awesome-semantic-segmentation-pytorch/core/nn/sync_bn/lib/cpu/operator.cpp +build syncbn_cpu.o: compile /home/tramac/PycharmProjects/awesome-semantic-segmentation-pytorch/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp + +build sync_cpu.so: link operator.o syncbn_cpu.o + +default sync_cpu.so + diff --git a/core/nn/sync_bn/lib/cpu/operator.cpp b/core/nn/sync_bn/lib/cpu/operator.cpp new file mode 100644 index 0000000..5981ffc --- /dev/null +++ b/core/nn/sync_bn/lib/cpu/operator.cpp @@ -0,0 +1,8 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("batchnorm_forward", &BatchNorm_Forward_CPU, "BatchNorm forward (CPU)"); + m.def("batchnorm_backward", &BatchNorm_Backward_CPU, "BatchNorm backward (CPU)"); + m.def("sumsquare_forward", &Sum_Square_Forward_CPU, "SumSqu forward (CPU)"); + m.def("sumsquare_backward", &Sum_Square_Backward_CPU, "SumSqu backward (CPU)"); +} \ No newline at end of file diff --git a/core/nn/sync_bn/lib/cpu/operator.h b/core/nn/sync_bn/lib/cpu/operator.h new file mode 100644 index 0000000..215fd53 --- /dev/null +++ b/core/nn/sync_bn/lib/cpu/operator.h @@ -0,0 +1,26 @@ +#include +#include + +at::Tensor BatchNorm_Forward_CPU( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_); + +std::vector BatchNorm_Backward_CPU( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + bool train); + +std::vector Sum_Square_Forward_CPU( + const at::Tensor input_); + +at::Tensor Sum_Square_Backward_CPU( + const at::Tensor input_, + 
const at::Tensor gradSum_, + const at::Tensor gradSquare_); \ No newline at end of file diff --git a/core/nn/sync_bn/lib/cpu/operator.o b/core/nn/sync_bn/lib/cpu/operator.o new file mode 100644 index 0000000..e69de29 diff --git a/core/nn/sync_bn/lib/cpu/setup.py b/core/nn/sync_bn/lib/cpu/setup.py new file mode 100644 index 0000000..b0ecd6c --- /dev/null +++ b/core/nn/sync_bn/lib/cpu/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CppExtension + +setup( + name='syncbn_cpu', + ext_modules=[ + CppExtension('syncbn_cpu', [ + 'operator.cpp', + 'syncbn_cpu.cpp', + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) diff --git a/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp b/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp new file mode 100644 index 0000000..6b6bb73 --- /dev/null +++ b/core/nn/sync_bn/lib/cpu/syncbn_cpu.cpp @@ -0,0 +1,61 @@ +#include +#include +#include + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +at::Tensor BatchNorm_Forward_CPU( + const at::Tensor input, + const at::Tensor mean, + const at::Tensor std, + const at::Tensor gamma, + const at::Tensor beta) { + auto output = (input - broadcast_to(mean, input)) / broadcast_to(std, input); + output = output * broadcast_to(gamma, input) + broadcast_to(beta, input); + return output; +} + +// Not implementing CPU backward for now +std::vector BatchNorm_Backward_CPU( + const at::Tensor gradoutput, + const at::Tensor input, + const at::Tensor mean, + const at::Tensor std, + const at::Tensor gamma, + const at::Tensor beta, + bool train) { + /* outputs*/ + at::Tensor gradinput = at::zeros_like(input); + at::Tensor gradgamma = at::zeros_like(gamma); + at::Tensor gradbeta = at::zeros_like(beta); + at::Tensor gradMean = at::zeros_like(mean); + 
at::Tensor gradStd = at::zeros_like(std); + return {gradinput, gradMean, gradStd, gradgamma, gradbeta}; +} + +std::vector Sum_Square_Forward_CPU( + const at::Tensor input) { + /* outputs */ + at::Tensor sum = torch::zeros({input.size(1)}, input.options()); + at::Tensor square = torch::zeros({input.size(1)}, input.options()); + return {sum, square}; +} + +at::Tensor Sum_Square_Backward_CPU( + const at::Tensor input, + const at::Tensor gradSum, + const at::Tensor gradSquare) { + /* outputs */ + at::Tensor gradInput = at::zeros_like(input); + return gradInput; +} \ No newline at end of file diff --git a/core/nn/sync_bn/lib/cpu/syncbn_cpu.o b/core/nn/sync_bn/lib/cpu/syncbn_cpu.o new file mode 100644 index 0000000..e69de29 diff --git a/core/nn/sync_bn/lib/gpu/__init__.py b/core/nn/sync_bn/lib/gpu/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core/nn/sync_bn/lib/gpu/activation_kernel.cu b/core/nn/sync_bn/lib/gpu/activation_kernel.cu new file mode 100644 index 0000000..e696667 --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/activation_kernel.cu @@ -0,0 +1,46 @@ +#include +// #include +#include +#include +#include + +#include + +#include +#include + + +namespace { + +template +inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + thrust::transform_if(th_dz, th_dz + count, th_z, th_dz, + [slope] __device__ (const T& dz) { return dz * slope; }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(th_z, th_z + count, th_z, + [slope] __device__ (const T& z) { return z / slope; }, + [] __device__ (const T& z) { return z < 0; }); +} + +} + +void LeakyRelu_Forward_CUDA(at::Tensor z, float slope) { + at::leaky_relu_(z, slope); +} + +void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope) { + int64_t count = z.numel(); + + 
AT_DISPATCH_FLOATING_TYPES(z.type(), "LeakyRelu_Backward_CUDA", ([&] { + leaky_relu_backward_impl(z.data(), dz.data(), slope, count); + })); + /* + // unstable after scaling + at::leaky_relu_(z, 1.0 / slope); + at::leaky_relu_backward(dz, z, slope); + */ +} \ No newline at end of file diff --git a/core/nn/sync_bn/lib/gpu/common.h b/core/nn/sync_bn/lib/gpu/common.h new file mode 100644 index 0000000..aa38296 --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/common.h @@ -0,0 +1,224 @@ +#include +#include + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template +struct ScalarConvert { + static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static int getNumThreads(int nElem) { + int threadSizes[5] = { 32, 64, 128, 256, MAX_BLOCK_SIZE }; + for (int i = 0; i != 5; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. 
+__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, unsigned int mask = 0xffffffff) +{ +#if CUDA_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +// Sum across all threads within a warp +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + __threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +struct Float2 { + Acctype v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) : v1(ScalarConvert::to(v1)), v2(ScalarConvert::to(v2)) {} + __device__ Float2(DType v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2(int v) : v1(ScalarConvert::to(v)), v2(ScalarConvert::to(v)) {} + __device__ Float2& operator+=(const Float2& a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ Float2 warpSum(Float2 value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +template +__device__ T reduceD( + Op op, int b, int i, int k, int D) { + T sum = 0; + for (int x = threadIdx.x; x < D; x += blockDim.x) { + sum += op(b,i,k,x); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < 
WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceN( + Op op, int b, int k, int d, int N) { + T sum = 0; + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(b,x,k,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceK( + Op op, int b, int i, int d, int K) { + T sum = 0; + for (int x = threadIdx.x; x < K; x += blockDim.x) { + sum += op(b,i,x,d); + } + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // 
Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__device__ T reduceBN( + Op op, + int k, int d, int B, int N) { + T sum = 0; + for (int batch = 0; batch < B; ++batch) { + for (int x = threadIdx.x; x < N; x += blockDim.x) { + sum += op(batch,x,k,d); + } + } + // sum over NumThreads within a warp + sum = warpSum(sum); + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + if (threadIdx.x / WARP_SIZE < 32) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} \ No newline at end of file diff --git a/core/nn/sync_bn/lib/gpu/device_tensor.h b/core/nn/sync_bn/lib/gpu/device_tensor.h new file mode 100644 index 0000000..c67dfae --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/device_tensor.h @@ -0,0 +1,110 @@ +#include + +template +struct DeviceTensor { + public: + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? 
size[i] : 0; + } + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i < Dim); + return size_[i]; + } + + inline __device__ __host__ int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + inline __device__ __host__ DeviceTensor select(const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ DeviceTensor operator[](const size_t x) const { + assert(Dim > 1); + int offset = x; + for (int i = 1; i < Dim; ++i) { + offset *= size_[i]; + } + DeviceTensor tensor(dptr_ + offset, nullptr); + for (int i = 0; i < Dim - 1; ++i) { + tensor.size_[i] = this->size_[i+1]; + } + return tensor; + } + + inline __device__ __host__ size_t InnerSize() const { + assert(Dim >= 3); + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + inline __device__ __host__ size_t ChannelCount() const { + assert(Dim >= 3); + return size_[1]; + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[Dim]; +}; + +template +struct DeviceTensor { + inline __device__ __host__ DeviceTensor(DType *p, const int *size) + : dptr_(p) { + size_[0] = size ? 
size[0] : 0; + } + + inline __device__ __host__ unsigned getSize(const int i) const { + assert(i == 0); + return size_[0]; + } + + inline __device__ __host__ int numElements() const { + return size_[0]; + } + + inline __device__ __host__ DType &operator[](const size_t x) const { + return *(dptr_ + x); + } + + inline __device__ __host__ DType* data_ptr() const { + return dptr_; + } + + DType *dptr_; + int size_[1]; +}; + +template +static DeviceTensor devicetensor(const at::Tensor &blob) { + DType *data = blob.data(); + DeviceTensor tensor(data, nullptr); + for (int i = 0; i < Dim; ++i) { + tensor.size_[i] = blob.size(i); + } + return tensor; +} \ No newline at end of file diff --git a/core/nn/sync_bn/lib/gpu/operator.cpp b/core/nn/sync_bn/lib/gpu/operator.cpp new file mode 100644 index 0000000..48e28fe --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/operator.cpp @@ -0,0 +1,13 @@ +#include "operator.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("batchnorm_forward", &BatchNorm_Forward_CUDA, "BatchNorm forward (CUDA)"); + m.def("batchnorm_inp_forward", &BatchNorm_Forward_Inp_CUDA, "BatchNorm forward (CUDA)"); + m.def("batchnorm_backward", &BatchNorm_Backward_CUDA, "BatchNorm backward (CUDA)"); + m.def("batchnorm_inp_backward", &BatchNorm_Inp_Backward_CUDA, "BatchNorm backward (CUDA)"); + m.def("expectation_forward", &Expectation_Forward_CUDA, "Expectation forward (CUDA)"); + m.def("expectation_backward", &Expectation_Backward_CUDA, "Expectation backward (CUDA)"); + m.def("expectation_inp_backward", &Expectation_Inp_Backward_CUDA, "Inplace Expectation backward (CUDA)"); + m.def("leaky_relu_forward", &LeakyRelu_Forward_CUDA, "Learky ReLU forward (CUDA)"); + m.def("leaky_relu_backward", &LeakyRelu_Backward_CUDA, "Learky ReLU backward (CUDA)"); +} \ No newline at end of file diff --git a/core/nn/sync_bn/lib/gpu/operator.h b/core/nn/sync_bn/lib/gpu/operator.h new file mode 100644 index 0000000..246570d --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/operator.h @@ -0,0 
+1,59 @@ +#include +#include + +at::Tensor BatchNorm_Forward_CUDA( + const at::Tensor input_, + const at::Tensor mean_, + const at::Tensor std_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +at::Tensor BatchNorm_Forward_Inp_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector BatchNorm_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector BatchNorm_Inp_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +std::vector Expectation_Forward_CUDA( + const at::Tensor input_); + +at::Tensor Expectation_Backward_CUDA( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_); + +at::Tensor Expectation_Inp_Backward_CUDA( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps); + +void LeakyRelu_Forward_CUDA(at::Tensor z, float slope); + +void LeakyRelu_Backward_CUDA(at::Tensor z, at::Tensor dz, float slope); \ No newline at end of file diff --git a/core/nn/sync_bn/lib/gpu/setup.py b/core/nn/sync_bn/lib/gpu/setup.py new file mode 100644 index 0000000..14c01f6 --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup( + name='syncbn_gpu', + ext_modules=[ + CUDAExtension('sync_gpu', [ + 'operator.cpp', + 'activation_kernel.cu', + 'syncbn_kernel.cu', + ]), + ], + cmdclass={ + 'build_ext': BuildExtension + }) \ No newline at end of file diff 
--git a/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu b/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu new file mode 100644 index 0000000..2a7e840 --- /dev/null +++ b/core/nn/sync_bn/lib/gpu/syncbn_kernel.cu @@ -0,0 +1,489 @@ +#include +// #include +#include +#include +#include + +#include "common.h" +#include "device_tensor.h" + +namespace { + +template +struct GradOp { + __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) + : beta(m), output(i), gradOutput(g) {} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = gradOutput[batch][plane][n]; + DType c = ScalarConvert::to(output[batch][plane][n] - beta); + return Float2(g, g * c); + } + const Acctype beta; + const DeviceTensor3 output; + const DeviceTensor3 gradOutput; +}; + +template +struct SumOp { + __device__ SumOp(DeviceTensor i) : input(i){} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + DType g = input[batch][plane][n]; + return Float2(g, g * g); + } + DType mean; + DeviceTensor input; +}; + +// Sum across (batch, x/y/z) applying Op() pointwise +template +__device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { + T sum = (T)0; + for (int batch = 0; batch < tensor.getSize(0); ++batch) { + for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return 
shared[0]; +} + +template +__global__ void BatchNorm_Forward_kernel ( + DeviceTensor output, + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + output[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void BatchNorm_Forward_Inp_kernel ( + DeviceTensor input, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta) { + int c = blockIdx.x; + /* main operation */ + for (int b = 0; b < input.getSize(0); ++b) { + for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { + DType inp = input[b][c][x]; + input[b][c][x] = gamma[c] * (inp - mean[c]) / + std[c] + beta[c]; + } + } +} + +template +__global__ void BatchNorm_Backward_Inp_kernel ( + DeviceTensor gradoutput, + DeviceTensor output, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(beta[c], output, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP; + gradExs[c] = - 0.5 * invstd * invstd * dotP; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + 
if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP / gamma[c]; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + +template +__global__ void BatchNorm_Backward_kernel ( + DeviceTensor gradoutput, + DeviceTensor input, + DeviceTensor gradinput, + DeviceTensor gradgamma, + DeviceTensor gradbeta, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DeviceTensor gradEx, + DeviceTensor gradExs) { + /* declarations of the variables */ + /* Get the index and channels */ + int c = blockIdx.x; + /* main operation */ + GradOp> g(mean[c], input, gradoutput); + Float2 res = reduce, + GradOp>, + DeviceTensor>(g, gradoutput, c); + DType gradOutputSum = res.v1; + DType dotP = res.v2; + DType invstd = DType(1.0) / std[c]; + DType gradScale = invstd * gamma[c]; + if (threadIdx.x == 0) { + gradEx[c] = - gradOutputSum * gradScale + mean[c] * invstd * invstd * dotP * gradScale; + gradExs[c] = - 0.5 * invstd * invstd * dotP * gradScale; + } + if (gradinput.numElements() > 0) { + for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { + gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; + } + } + } + if (gradgamma.numElements() > 0) { + if (threadIdx.x == 0) { + gradgamma[c] += dotP * invstd; + } + } + if (gradbeta.numElements() > 0) { + if (threadIdx.x == 0) { + gradbeta[c] += gradOutputSum; + } + } +} + + +template +__global__ void Expectation_Forward_kernel ( + DeviceTensor input, + DeviceTensor ex, + DeviceTensor exs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + SumOp g(input); + Float2 res = reduce, + SumOp, DeviceTensor>(g, input, c); + DType xsum = res.v1; + DType xsquare = res.v2; + if (threadIdx.x == 0) { + ex[c] = xsum * norm; + exs[c] = xsquare * norm; + } +} + +template +__global__ void Expectation_Backward_kernel ( + DeviceTensor 
gradInput, + DeviceTensor input, + DeviceTensor gradEx, + DeviceTensor gradExs, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] = gradEx[c] * norm + 2 * gradExs[c] * + input[batch][c][x] * norm; + } + } +} + +template +__global__ void Expectation_Backward_Inp_kernel ( + DeviceTensor gradInput, + DeviceTensor output, + DeviceTensor gradEx, + DeviceTensor gradExs, + DeviceTensor mean, + DeviceTensor std, + DeviceTensor gamma, + DeviceTensor beta, + DType norm) { + int c = blockIdx.x; + /* main operation */ + for (int batch = 0; batch < gradInput.getSize(0); ++batch) { + for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { + gradInput[batch][c][x] += gradEx[c] * norm + 2 * gradExs[c] * + ((output[batch][c][x] - beta[c]) / gamma[c] * std[c] + mean[c]) * norm; + } + } +} + +} // namespace + +at::Tensor BatchNorm_Forward_CUDA( + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto output_ = at::zeros_like(input_); + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor output = devicetensor(output_); + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + BatchNorm_Forward_kernel<<>>( + output, input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return output_; +} + +at::Tensor BatchNorm_Forward_Inp_CUDA( + const at::Tensor input_, + 
const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + /* kernel function */ + BatchNorm_Forward_Inp_kernel<<>>( + input, ex, std, gamma, beta); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return input_; +} + + +std::vector BatchNorm_Inp_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor output_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(output_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(output_.size(1)); + dim3 threads(getNumThreads(output_.size(2))); + AT_DISPATCH_FLOATING_TYPES(output_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor output = devicetensor(output_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = 
devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + BatchNorm_Backward_Inp_kernel + <<>>( + gradoutput, output, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + + +std::vector BatchNorm_Backward_CUDA( + const at::Tensor gradoutput_, + const at::Tensor input_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs*/ + auto std_ = (exs_ - ex_ * ex_ + eps).sqrt(); + auto gradinput_ = at::zeros_like(input_); + auto gradgamma_ = at::zeros_like(gamma_); + auto gradbeta_ = at::zeros_like(beta_); + auto gradEx_ = at::zeros_like(ex_); + auto gradExs_ = at::zeros_like(std_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Inp_Backward_CUDA", ([&] { + /* Device tensors */ + DeviceTensor gradoutput = devicetensor(gradoutput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradinput = devicetensor(gradinput_); + DeviceTensor gradgamma = devicetensor(gradgamma_); + DeviceTensor gradbeta = devicetensor(gradbeta_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor std = devicetensor(std_); + DeviceTensor gamma = devicetensor(gamma_); + DeviceTensor beta = devicetensor(beta_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs = devicetensor(gradExs_); + /* kernel function */ + BatchNorm_Backward_kernel + <<>>( + gradoutput, input, gradinput, gradgamma, gradbeta, ex, std, + gamma, beta, gradEx, gradExs); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {gradinput_, gradEx_, gradExs_, gradgamma_, gradbeta_}; +} + +std::vector Expectation_Forward_CUDA( + const at::Tensor input_) { + /* outputs */ + auto ex_ = 
torch::zeros({input_.size(1)}, input_.options()); + auto exs_ = torch::zeros({input_.size(1)}, input_.options()); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor input = devicetensor(input_); + DeviceTensor ex = devicetensor(ex_); + DeviceTensor exs = devicetensor(exs_); + /* kernel function */ + Expectation_Forward_kernel + <<>>(input, ex, exs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return {ex_, exs_}; +} + +at::Tensor Expectation_Backward_CUDA( + const at::Tensor input_, + const at::Tensor gradEx_, + const at::Tensor gradExs_) { + /* outputs */ + at::Tensor gradInput_ = at::zeros_like(input_); + /* cuda utils*/ + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + dim3 blocks(input_.size(1)); + dim3 threads(getNumThreads(input_.size(2))); + AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { + scalar_t norm = scalar_t(1) / (input_.size(0) * input_.size(2)); + /* Device tensors */ + DeviceTensor gradInput = devicetensor(gradInput_); + DeviceTensor input = devicetensor(input_); + DeviceTensor gradEx = devicetensor(gradEx_); + DeviceTensor gradExs =devicetensor(gradExs_); + /* kernel function */ + Expectation_Backward_kernel + <<>>(gradInput, input, gradEx, gradExs, norm); + })); + AT_ASSERT(cudaGetLastError() == cudaSuccess); + return gradInput_; +} + +at::Tensor Expectation_Inp_Backward_CUDA( + const at::Tensor gradInput_, + const at::Tensor output_, + const at::Tensor gradEx_, + const at::Tensor gradExs_, + const at::Tensor ex_, + const at::Tensor exs_, + const at::Tensor gamma_, + const at::Tensor beta_, + float eps) { + /* outputs */ + //auto gradInput_ = at::zeros_like(output_); + auto std_ = (exs_ - ex_ * ex_ + 
class SyncBatchNorm(_BatchNorm):
    """Cross-GPU Synchronized Batch Normalization (SyncBN).

    Adopted from PyTorch-Encoding; reconstructed from an
    extraction-collapsed span (newlines and diff markers restored).

    Parameters
    ----------
    num_features : int
        ``C`` from an expected input of size ``(N, C, H, W)``.
    eps : float
        Value added to the denominator for numerical stability. Default: 1e-5.
    momentum : float
        Value used for the running_mean and running_var computation.
        Default: 0.1.
    sync : bool
        When ``True``, synchronize statistics across GPUs; forced to
        ``False`` when fewer than two devices are visible.
    activation : str
        One of ``'leaky_relu'`` or ``'none'``.
    slope : float
        Negative slope for the ``leaky_relu`` activation.
    inplace : bool
        Use the in-place fused kernel; forced to ``False`` when
        ``activation == 'none'``.

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, sync=True,
                 activation='none', slope=0.01, inplace=True):
        super(SyncBatchNorm, self).__init__(num_features, eps=eps,
                                            momentum=momentum, affine=True)
        self.activation = activation
        # In-place evaluation only makes sense with a fused activation.
        self.inplace = False if activation == 'none' else inplace
        self.slope = slope
        self.devices = list(range(torch.cuda.device_count()))
        # Synchronization is pointless on a single (or no) device.
        self.sync = sync if len(self.devices) > 1 else False
        # Master/worker queues used to exchange per-device statistics.
        self.worker_ids = self.devices[1:]
        self.master_queue = Queue(len(self.worker_ids))
        self.worker_queues = [Queue(1) for _ in self.worker_ids]

    def forward(self, x):
        """Normalize ``x``; input is viewed as (B, C, -1) and restored."""
        input_shape = x.size()
        x = x.view(input_shape[0], self.num_features, -1)
        if x.get_device() == self.devices[0]:
            # Master mode: collects statistics from every worker.
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids,
            }
        else:
            # Worker mode: talks to the master through its own queue.
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())],
            }
        # NOTE(review): inp_syncbatchnorm / syncbatchnorm come from the
        # module-level `from .functions import *` in this file.
        fn = inp_syncbatchnorm if self.inplace else syncbatchnorm
        return fn(x, self.weight, self.bias, self.running_mean, self.running_var,
                  extra, self.sync, self.training, self.momentum, self.eps,
                  self.activation, self.slope).view(input_shape)

    def extra_repr(self):
        if self.activation == 'none':
            return 'sync={}'.format(self.sync)
        return 'sync={}, act={}, slope={}, inplace={}'.format(
            self.sync, self.activation, self.slope, self.inplace)


class BatchNorm1d(SyncBatchNorm):
    """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""

    def __init__(self, *args, **kwargs):
        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
                      .format('BatchNorm1d', SyncBatchNorm.__name__), DeprecationWarning)
        super(BatchNorm1d, self).__init__(*args, **kwargs)


class BatchNorm2d(SyncBatchNorm):
    """BatchNorm2d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""

    def __init__(self, *args, **kwargs):
        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
                      .format('BatchNorm2d', SyncBatchNorm.__name__), DeprecationWarning)
        super(BatchNorm2d, self).__init__(*args, **kwargs)


class BatchNorm3d(SyncBatchNorm):
    """BatchNorm3d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""

    def __init__(self, *args, **kwargs):
        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
                      .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning)
        super(BatchNorm3d, self).__init__(*args, **kwargs)
class _SyncBatchNorm(Function):
    """Autograd function implementing synchronized batch normalization.

    Per-device statistics (E[x], E[x^2]) are computed locally, exchanged via
    the master/worker queues carried in ``extra``, averaged on the master and
    broadcast back, so every replica normalizes with identical global
    statistics.

    NOTE(review): relies on the module-level names ``_C`` (compiled CUDA
    extension) and ``comm`` (torch.cuda.comm) imported by the enclosing file.
    """

    @classmethod
    def forward(cls, ctx, x, gamma, beta, running_mean, running_var,
                extra, sync=True, training=True, momentum=0.1, eps=1e-05,
                activation="none", slope=0.01):
        # Save queue handles and hyper-parameters on the autograd context.
        cls._parse_extra(ctx, extra)
        ctx.sync = sync
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope
        assert activation == 'none'  # fused activation is not supported here

        # The extension kernels require contiguous inputs.
        x = x.contiguous()
        gamma = gamma.contiguous()
        beta = beta.contiguous()

        if ctx.training:
            _ex, _exs = _C.expectation_forward(x)

            if ctx.sync:
                if ctx.is_master:
                    # Gather worker statistics, average, broadcast back.
                    _ex, _exs = [_ex.unsqueeze(0)], [_exs.unsqueeze(0)]
                    for _ in range(ctx.master_queue.maxsize):
                        _ex_w, _exs_w = ctx.master_queue.get()
                        ctx.master_queue.task_done()
                        _ex.append(_ex_w.unsqueeze(0))
                        _exs.append(_exs_w.unsqueeze(0))

                    _ex = comm.gather(_ex).mean(0)
                    _exs = comm.gather(_exs).mean(0)

                    tensors = comm.broadcast_coalesced(
                        (_ex, _exs), [_ex.get_device()] + ctx.worker_ids)
                    for ts, queue in zip(tensors[1:], ctx.worker_queues):
                        queue.put(ts)
                else:
                    ctx.master_queue.put((_ex, _exs))
                    _ex, _exs = ctx.worker_queue.get()
                    ctx.worker_queue.task_done()

            # Update running statistics in place and tell autograd about it.
            _var = _exs - _ex ** 2
            running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * _ex)
            running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * _var)
            ctx.mark_dirty(running_mean, running_var)
        else:
            # Evaluation: normalize with the stored running statistics.
            _ex, _var = running_mean.contiguous(), running_var.contiguous()
            _exs = _var + _ex ** 2

        # BN forward pass through the compiled extension.
        y = _C.batchnorm_forward(x, _ex, _exs, gamma, beta, ctx.eps)

        ctx.save_for_backward(x, _ex, _exs, gamma, beta)
        return y

    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        x, _ex, _exs, gamma, beta = ctx.saved_tensors
        dz = dz.contiguous()

        # BN backward pass through the compiled extension.
        dx, _dex, _dexs, dgamma, dbeta = _C.batchnorm_backward(
            dz, x, _ex, _exs, gamma, beta, ctx.eps)

        if ctx.training:
            if ctx.sync:
                # Mirror the forward exchange for the statistic gradients.
                if ctx.is_master:
                    _dex, _dexs = [_dex.unsqueeze(0)], [_dexs.unsqueeze(0)]
                    for _ in range(ctx.master_queue.maxsize):
                        _dex_w, _dexs_w = ctx.master_queue.get()
                        ctx.master_queue.task_done()
                        _dex.append(_dex_w.unsqueeze(0))
                        _dexs.append(_dexs_w.unsqueeze(0))

                    _dex = comm.gather(_dex).mean(0)
                    _dexs = comm.gather(_dexs).mean(0)

                    tensors = comm.broadcast_coalesced(
                        (_dex, _dexs), [_dex.get_device()] + ctx.worker_ids)
                    for ts, queue in zip(tensors[1:], ctx.worker_queues):
                        queue.put(ts)
                else:
                    ctx.master_queue.put((_dex, _dexs))
                    _dex, _dexs = ctx.worker_queue.get()
                    ctx.worker_queue.task_done()

            dx_ = _C.expectation_backward(x, _dex, _dexs)
            dx = dx + dx_

        # One gradient slot per forward argument after ctx.
        return dx, dgamma, dbeta, None, None, None, None, None, None, None, None, None

    @staticmethod
    def _parse_extra(ctx, extra):
        # Stash the inter-device queue handles on the context.
        ctx.is_master = extra["is_master"]
        if ctx.is_master:
            ctx.master_queue = extra["master_queue"]
            ctx.worker_queues = extra["worker_queues"]
            ctx.worker_ids = extra["worker_ids"]
        else:
            ctx.master_queue = extra["master_queue"]
            ctx.worker_queue = extra["worker_queue"]


# Functional alias used by the SyncBatchNorm module below.
syncbatchnorm = _SyncBatchNorm.apply
class SyncBatchNorm(_BatchNorm):
    """Cross-GPU Synchronized Batch Normalization (SyncBN).

    Module wrapper around the ``syncbatchnorm`` autograd function defined in
    this file; reconstructed from an extraction-collapsed span.

    Parameters
    ----------
    num_features : int
        ``C`` from an expected input of size ``(N, C, H, W)``.
    eps : float
        Value added to the denominator for numerical stability. Default: 1e-5.
    momentum : float
        Value used for the running_mean and running_var computation.
        Default: 0.1.
    sync : bool
        When ``True``, synchronize statistics across GPUs; forced to
        ``False`` when fewer than two devices are visible.
    activation : str
        One of ``'leaky_relu'`` or ``'none'``.
    slope : float
        Negative slope for the ``leaky_relu`` activation.

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, sync=True,
                 activation='none', slope=0.01):
        super(SyncBatchNorm, self).__init__(num_features, eps=eps,
                                            momentum=momentum, affine=True)
        self.activation = activation
        self.slope = slope
        self.devices = list(range(torch.cuda.device_count()))
        # Synchronization is pointless on a single (or no) device.
        self.sync = sync if len(self.devices) > 1 else False
        # Master/worker queues used to exchange per-device statistics.
        self.worker_ids = self.devices[1:]
        self.master_queue = Queue(len(self.worker_ids))
        self.worker_queues = [Queue(1) for _ in self.worker_ids]

    def forward(self, x):
        """Normalize ``x``; input is viewed as (B, C, -1) and restored."""
        input_shape = x.size()
        x = x.view(input_shape[0], self.num_features, -1)
        if x.get_device() == self.devices[0]:
            # Master mode: collects statistics from every worker.
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids,
            }
        else:
            # Worker mode: talks to the master through its own queue.
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())],
            }

        return syncbatchnorm(x, self.weight, self.bias, self.running_mean, self.running_var,
                             extra, self.sync, self.training, self.momentum, self.eps,
                             self.activation, self.slope).view(input_shape)

    def extra_repr(self):
        if self.activation == 'none':
            return 'sync={}'.format(self.sync)
        return 'sync={}, act={}, slope={}'.format(
            self.sync, self.activation, self.slope)


class BatchNorm1d(SyncBatchNorm):
    """BatchNorm1d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""

    def __init__(self, *args, **kwargs):
        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
                      .format('BatchNorm1d', SyncBatchNorm.__name__), DeprecationWarning)
        super(BatchNorm1d, self).__init__(*args, **kwargs)


class BatchNorm2d(SyncBatchNorm):
    """BatchNorm2d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""

    def __init__(self, *args, **kwargs):
        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
                      .format('BatchNorm2d', SyncBatchNorm.__name__), DeprecationWarning)
        super(BatchNorm2d, self).__init__(*args, **kwargs)


class BatchNorm3d(SyncBatchNorm):
    """BatchNorm3d is deprecated in favor of :class:`core.nn.sync_bn.SyncBatchNorm`."""

    def __init__(self, *args, **kwargs):
        warnings.warn("core.nn.sync_bn.{} is now deprecated in favor of core.nn.sync_bn.{}."
                      .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning)
        super(BatchNorm3d, self).__init__(*args, **kwargs)
+ .format('BatchNorm3d', SyncBatchNorm.__name__), DeprecationWarning) + super(BatchNorm3d, self).__init__(*args, **kwargs) diff --git a/core/utils/__init__.py b/core/utils/__init__.py new file mode 100644 index 0000000..067a8d0 --- /dev/null +++ b/core/utils/__init__.py @@ -0,0 +1,5 @@ +"""Utility functions.""" +from __future__ import absolute_import + +from .download import download, check_sha1 +from .filesystem import makedirs, try_import_pycocotools diff --git a/core/utils/__pycache__/__init__.cpython-37.pyc b/core/utils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..39d1b79 Binary files /dev/null and b/core/utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/core/utils/__pycache__/__init__.cpython-38.pyc b/core/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..959545a Binary files /dev/null and b/core/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/core/utils/__pycache__/download.cpython-37.pyc b/core/utils/__pycache__/download.cpython-37.pyc new file mode 100644 index 0000000..4460163 Binary files /dev/null and b/core/utils/__pycache__/download.cpython-37.pyc differ diff --git a/core/utils/__pycache__/download.cpython-38.pyc b/core/utils/__pycache__/download.cpython-38.pyc new file mode 100644 index 0000000..c5f4bd0 Binary files /dev/null and b/core/utils/__pycache__/download.cpython-38.pyc differ diff --git a/core/utils/__pycache__/filesystem.cpython-37.pyc b/core/utils/__pycache__/filesystem.cpython-37.pyc new file mode 100644 index 0000000..ed16308 Binary files /dev/null and b/core/utils/__pycache__/filesystem.cpython-37.pyc differ diff --git a/core/utils/__pycache__/filesystem.cpython-38.pyc b/core/utils/__pycache__/filesystem.cpython-38.pyc new file mode 100644 index 0000000..a2fa6aa Binary files /dev/null and b/core/utils/__pycache__/filesystem.cpython-38.pyc differ diff --git a/core/utils/distributed.py b/core/utils/distributed.py new file mode 100644 index 0000000..257cdf9 --- 
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import math
import pickle
import torch
import torch.utils.data as data
import torch.distributed as dist

from torch.utils.data.sampler import Sampler, BatchSampler

__all__ = ['get_world_size', 'get_rank', 'synchronize', 'is_main_process',
           'all_gather', 'make_data_sampler', 'make_batch_data_sampler',
           'reduce_dict', 'reduce_loss_dict']


# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/utils/comm.py
def get_world_size():
    """Return the distributed world size, or 1 when not distributed."""
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    """Return this process's rank, or 0 when not distributed."""
    if not dist.is_available():
        return 0
    if not dist.is_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    """True only on rank 0 (the process that should log / save)."""
    return get_rank() == 0


def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training. No-op outside a distributed context.
    """
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    if dist.get_world_size() == 1:
        return
    dist.barrier()


def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialize to a byte tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain the tensor size of each rank
    local_size = torch.IntTensor([tensor.numel()]).to("cuda")
    size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving tensors from all ranks: torch all_gather does not support
    # gathering tensors of different shapes, so pad to the max size
    tensor_list = [torch.ByteTensor(size=(max_size,)).to("cuda") for _ in size_list]
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    # unpickle, trimming each buffer back to its true size
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list


def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that the
    process with rank 0 has the averaged (or summed) results.

    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Returns:
        dict with the same keys as input_dict, after reduction. Returned
        unchanged when the world size is 1.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # sort the keys so that they are consistent across processes
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[k] for k in names], dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # only the main process accumulates, so only it divides
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict


def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that the process with
    rank 0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction. Returned unchanged when the world size is 1.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        loss_names = sorted(loss_dict.keys())
        all_losses = torch.stack([loss_dict[k] for k in loss_names], dim=0)
        dist.reduce(all_losses, dst=0)
        if dist.get_rank() == 0:
            # only the main process accumulates, so only it divides
            all_losses /= world_size
        reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}
    return reduced_losses


def make_data_sampler(dataset, shuffle, distributed):
    """Build a (possibly distributed) sampler for ``dataset``."""
    if distributed:
        # NOTE(review): DistributedSampler is defined later in this file.
        return DistributedSampler(dataset, shuffle=shuffle)
    if shuffle:
        return data.sampler.RandomSampler(dataset)
    return data.sampler.SequentialSampler(dataset)


def make_batch_data_sampler(sampler, images_per_batch, num_iters=None, start_iter=0):
    """Wrap ``sampler`` into a (possibly iteration-bounded) batch sampler."""
    batch_sampler = data.sampler.BatchSampler(sampler, images_per_batch, drop_last=True)
    if num_iters is not None:
        # NOTE(review): IterationBasedBatchSampler is defined later in this file.
        batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iters, start_iter)
    return batch_sampler
+ """ + + def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True): + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self): + if self.shuffle: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + indices += indices[: (self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset: offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class IterationBasedBatchSampler(BatchSampler): + """ + Wraps a BatchSampler, resampling from it until + a specified number of iterations have been sampled + """ + + def __init__(self, batch_sampler, num_iterations, start_iter=0): + self.batch_sampler = batch_sampler + self.num_iterations = num_iterations + self.start_iter = start_iter + + def __iter__(self): + iteration = self.start_iter + while iteration <= self.num_iterations: + # if the underlying sampler has a set_epoch method, like + # DistributedSampler, used for making each process see + # a different split of the dataset, then set it + if hasattr(self.batch_sampler.sampler, 
"set_epoch"): + self.batch_sampler.sampler.set_epoch(iteration) + for batch in self.batch_sampler: + iteration += 1 + if iteration > self.num_iterations: + break + yield batch + + def __len__(self): + return self.num_iterations + + +if __name__ == '__main__': + pass diff --git a/core/utils/download.py b/core/utils/download.py new file mode 100644 index 0000000..df4d49f --- /dev/null +++ b/core/utils/download.py @@ -0,0 +1,90 @@ +"""Download files with progress bar.""" +import os +import hashlib +import requests +from tqdm import tqdm + + +def check_sha1(filename, sha1_hash): + """Check whether the sha1 hash of the file content matches the expected hash. + Parameters + ---------- + filename : str + Path to the file. + sha1_hash : str + Expected sha1 hash in hexadecimal digits. + Returns + ------- + bool + Whether the file content matches the expected hash. + """ + sha1 = hashlib.sha1() + with open(filename, 'rb') as f: + while True: + data = f.read(1048576) + if not data: + break + sha1.update(data) + + sha1_file = sha1.hexdigest() + l = min(len(sha1_file), len(sha1_hash)) + return sha1.hexdigest()[0:l] == sha1_hash[0:l] + + +def download(url, path=None, overwrite=False, sha1_hash=None): + """Download an given URL + Parameters + ---------- + url : str + URL to download + path : str, optional + Destination path to store downloaded file. By default stores to the + current directory with same name as in url. + overwrite : bool, optional + Whether to overwrite destination file if already exists. + sha1_hash : str, optional + Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified + but doesn't match. + Returns + ------- + str + The file path of the downloaded file. 
+ """ + if path is None: + fname = url.split('/')[-1] + else: + path = os.path.expanduser(path) + if os.path.isdir(path): + fname = os.path.join(path, url.split('/')[-1]) + else: + fname = path + + if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)): + dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname))) + if not os.path.exists(dirname): + os.makedirs(dirname) + + print('Downloading %s from %s...'%(fname, url)) + r = requests.get(url, stream=True) + if r.status_code != 200: + raise RuntimeError("Failed downloading url %s"%url) + total_length = r.headers.get('content-length') + with open(fname, 'wb') as f: + if total_length is None: # no content length header + for chunk in r.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + else: + total_length = int(total_length) + for chunk in tqdm(r.iter_content(chunk_size=1024), + total=int(total_length / 1024. + 0.5), + unit='KB', unit_scale=False, dynamic_ncols=True): + f.write(chunk) + + if sha1_hash and not check_sha1(fname, sha1_hash): + raise UserWarning('File {} is downloaded but the content hash does not match. ' \ + 'The repo may be outdated or download may be incomplete. ' \ + 'If the "repo_url" is overridden, consider switching to ' \ + 'the default repo.'.format(fname)) + + return fname \ No newline at end of file diff --git a/core/utils/filesystem.py b/core/utils/filesystem.py new file mode 100644 index 0000000..ab2510d --- /dev/null +++ b/core/utils/filesystem.py @@ -0,0 +1,123 @@ +"""Filesystem utility functions.""" +from __future__ import absolute_import +import os +import errno + + +def makedirs(path): + """Create directory recursively if not exists. + Similar to `makedir -p`, you can skip checking existence before this function. 
+ Parameters + ---------- + path : str + Path of the desired dir + """ + try: + os.makedirs(path) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + +def try_import(package, message=None): + """Try import specified package, with custom message support. + Parameters + ---------- + package : str + The name of the targeting package. + message : str, default is None + If not None, this function will raise customized error message when import error is found. + Returns + ------- + module if found, raise ImportError otherwise + """ + try: + return __import__(package) + except ImportError as e: + if not message: + raise e + raise ImportError(message) + + +def try_import_cv2(): + """Try import cv2 at runtime. + Returns + ------- + cv2 module if found. Raise ImportError otherwise + """ + msg = "cv2 is required, you can install by package manager, e.g. 'apt-get', \ + or `pip install opencv-python --user` (note that this is unofficial PYPI package)." + return try_import('cv2', msg) + + +def import_try_install(package, extern_url=None): + """Try import the specified package. + If the package not installed, try use pip to install and import if success. + Parameters + ---------- + package : str + The name of the package trying to import. + extern_url : str or None, optional + The external url if package is not hosted on PyPI. + For example, you can install a package using: + "pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx". + In this case, you can pass the url to the extern_url. + Returns + ------- + + The imported python module. 
+ """ + try: + return __import__(package) + except ImportError: + try: + from pip import main as pipmain + except ImportError: + from pip._internal import main as pipmain + + # trying to install package + url = package if extern_url is None else extern_url + pipmain(['install', '--user', url]) # will raise SystemExit Error if fails + + # trying to load again + try: + return __import__(package) + except ImportError: + import sys + import site + user_site = site.getusersitepackages() + if user_site not in sys.path: + sys.path.append(user_site) + return __import__(package) + return __import__(package) + + +"""Import helper for pycocotools""" + + +# NOTE: for developers +# please do not import any pycocotools in __init__ because we are trying to lazy +# import pycocotools to avoid install it for other users who may not use it. +# only import when you actually use it + + +def try_import_pycocotools(): + """Tricks to optionally install and import pycocotools""" + # first we can try import pycocotools + try: + import pycocotools as _ + except ImportError: + import os + # we need to install pycootools, which is a bit tricky + # pycocotools sdist requires Cython, numpy(already met) + import_try_install('cython') + # pypi pycocotools is not compatible with windows + win_url = 'git+https://github.com/zhreshold/cocoapi.git#subdirectory=PythonAPI' + try: + if os.name == 'nt': + import_try_install('pycocotools', win_url) + else: + import_try_install('pycocotools') + except ImportError: + faq = 'cocoapi FAQ' + raise ImportError('Cannot import or install pycocotools, please refer to %s.' % faq) diff --git a/core/utils/logger.py b/core/utils/logger.py new file mode 100644 index 0000000..a2de227 --- /dev/null +++ b/core/utils/logger.py @@ -0,0 +1,30 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
def setup_logger(name, save_dir, distributed_rank, filename="log.txt", mode='w'):
    """Create a DEBUG-level logger that writes to stdout and optionally a file.

    Non-master processes (``distributed_rank > 0``) get a bare logger with
    no handlers so only rank 0 produces output.

    Args:
        name: logger name (usually the module path).
        save_dir: directory for the log file; falsy disables file logging.
        distributed_rank: rank of the calling process.
        filename: log file name inside ``save_dir``.
        mode: file open mode — 'w' overwrites, 'a+' appends.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Keep non-master ranks silent: no handlers attached.
    if distributed_rank > 0:
        return logger

    formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")

    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    logger.addHandler(console)

    if save_dir:
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        file_handler = logging.FileHandler(os.path.join(save_dir, filename), mode=mode)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger
# reference: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/nn/loss.py
class EncNetLoss(nn.CrossEntropyLoss):
    """2D Cross Entropy Loss with SE Loss.

    Combines per-pixel cross entropy with an optional image-level
    "semantic encoding" BCE term (``se_loss``) and an optional auxiliary
    head term (``aux``), weighted by ``se_weight`` / ``aux_weight``.
    """

    def __init__(self, se_loss=True, se_weight=0.2, nclass=19, aux=False,
                 aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
        super(EncNetLoss, self).__init__(weight, None, ignore_index)
        self.se_loss = se_loss
        self.aux = aux
        self.nclass = nclass
        self.se_weight = se_weight
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)

    def forward(self, *inputs):
        """inputs = (predictions_tuple, target); the tuple layout depends
        on which of ``se_loss`` / ``aux`` are enabled.

        Returns a dict ``{'loss': tensor}`` (or a bare tensor when both
        extra terms are disabled, matching the original contract).
        """
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if not self.se_loss and not self.aux:
            return super(EncNetLoss, self).forward(*inputs)
        elif not self.se_loss:
            # (main pred, aux pred, target)
            pred1, pred2, target = tuple(inputs)
            loss1 = super(EncNetLoss, self).forward(pred1, target)
            loss2 = super(EncNetLoss, self).forward(pred2, target)
            return dict(loss=loss1 + self.aux_weight * loss2)
        elif not self.aux:
            # (main pred, se pred, target).
            # FIX: removed a leftover debug `print (inputs)` that dumped
            # full input tensors to stdout on every forward pass.
            pred, se_pred, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred)
            loss1 = super(EncNetLoss, self).forward(pred, target)
            loss2 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return dict(loss=loss1 + self.se_weight * loss2)
        else:
            # (main pred, se pred, aux pred, target)
            pred1, se_pred, pred2, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1)
            loss1 = super(EncNetLoss, self).forward(pred1, target)
            loss2 = super(EncNetLoss, self).forward(pred2, target)
            loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return dict(loss=loss1 + self.aux_weight * loss2 + self.se_weight * loss3)

    @staticmethod
    def _get_batch_label_vector(target, nclass):
        """Per-image multi-label presence vector.

        target is a 3D Variable BxHxW; output is 2D BxnClass where
        ``tvect[i, c] == 1`` iff class ``c`` occurs in ``target[i]``.
        """
        batch = target.size(0)
        tvect = Variable(torch.zeros(batch, nclass))
        for i in range(batch):
            hist = torch.histc(target[i].cpu().data.float(),
                               bins=nclass, min=0,
                               max=nclass - 1)
            vect = hist > 0
            tvect[i] = vect
        return tvect


# TODO: optim function
class ICNetLoss(nn.CrossEntropyLoss):
    """Cascade cross entropy for ICNet.

    Scores the full-resolution prediction plus sub4/sub8/sub16 branch
    predictions, each against a bilinearly resized target.
    """

    def __init__(self, nclass, aux_weight=0.4, ignore_index=-1, **kwargs):
        super(ICNetLoss, self).__init__(ignore_index=ignore_index)
        self.nclass = nclass
        self.aux_weight = aux_weight

    def forward(self, *inputs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])

        pred, pred_sub4, pred_sub8, pred_sub16, target = tuple(inputs)
        # [batch, W, H] -> [batch, 1, W, H] so interpolate can resize it.
        target = target.unsqueeze(1).float()
        target_sub4 = F.interpolate(target, pred_sub4.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
        target_sub8 = F.interpolate(target, pred_sub8.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
        target_sub16 = F.interpolate(target, pred_sub16.size()[2:], mode='bilinear', align_corners=True).squeeze(
            1).long()
        loss1 = super(ICNetLoss, self).forward(pred_sub4, target_sub4)
        loss2 = super(ICNetLoss, self).forward(pred_sub8, target_sub8)
        loss3 = super(ICNetLoss, self).forward(pred_sub16, target_sub16)
        return dict(loss=loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight)
class OhemCrossEntropy2d(nn.Module):
    """Online Hard Example Mining cross entropy for 2D segmentation.

    Keeps only the hardest pixels: those whose predicted probability for
    the ground-truth class falls below ``thresh`` (at least ``min_kept``
    pixels are always retained); the rest are set to ``ignore_index``.

    Args:
        ignore_index: label value excluded from the loss.
        thresh: probability threshold defining "hard" pixels.
        min_kept: minimum number of pixels to keep per batch.
        use_weight: apply the fixed 19-class Cityscapes class weights.
    """

    def __init__(self, ignore_index=-1, thresh=0.7, min_kept=100000, use_weight=True, **kwargs):
        super(OhemCrossEntropy2d, self).__init__()
        self.ignore_index = ignore_index
        self.thresh = float(thresh)
        self.min_kept = int(min_kept)
        if use_weight:
            # Cityscapes per-class weights (19 classes) from the reference repo.
            weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754,
                                        1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
                                        1.0865, 1.1529, 1.0507])
            self.criterion = torch.nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index)
        else:
            self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)

    def forward(self, pred, target):
        """pred: (N, C, H, W) logits; target: (N, H, W) integer labels."""
        n, c, h, w = pred.size()
        target = target.view(-1)
        valid_mask = target.ne(self.ignore_index)
        target = target * valid_mask.long()
        num_valid = valid_mask.sum()

        prob = F.softmax(pred, dim=1)
        prob = prob.transpose(0, 1).reshape(c, -1)

        if self.min_kept > num_valid:
            # FIX: typo "Lables" -> "Labels".
            print("Labels: {}".format(num_valid))
        elif num_valid > 0:
            # FIX: `1 - valid_mask` raises a RuntimeError on bool tensors in
            # modern PyTorch; logical negation is the supported equivalent.
            prob = prob.masked_fill_(~valid_mask, 1)
            mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
            threshold = self.thresh
            if self.min_kept > 0:
                # Raise the threshold if it would keep fewer than min_kept pixels.
                index = mask_prob.argsort()
                threshold_index = index[min(len(index), self.min_kept) - 1]
                if mask_prob[threshold_index] > self.thresh:
                    threshold = mask_prob[threshold_index]
            kept_mask = mask_prob.le(threshold)
            valid_mask = valid_mask * kept_mask
            target = target * kept_mask.long()

        # FIX: same bool-tensor negation as above.
        target = target.masked_fill_(~valid_mask, self.ignore_index)
        target = target.view(n, h, w)

        return self.criterion(pred, target)


class MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d):
    """OHEM cross entropy with optional auxiliary-head mixing."""

    def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
        super(MixSoftmaxCrossEntropyOHEMLoss, self).__init__(ignore_index=ignore_index)
        self.aux = aux
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)

    def _aux_forward(self, *inputs, **kwargs):
        # First prediction is the main head; the rest are aux heads.
        *preds, target = tuple(inputs)

        loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            aux_loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[i], target)
            loss += self.aux_weight * aux_loss
        return loss

    def forward(self, *inputs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if self.aux:
            return dict(loss=self._aux_forward(*inputs))
        else:
            return dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(*inputs))


def get_segmentation_loss(model, use_ohem=False, **kwargs):
    """Pick the loss matching the segmentation model name."""
    if use_ohem:
        return MixSoftmaxCrossEntropyOHEMLoss(**kwargs)

    model = model.lower()
    if model == 'encnet':
        return EncNetLoss(**kwargs)
    elif model == 'icnet':
        # NOTE(review): nclass is hard-coded to 4 here — confirm against the datasets.
        return ICNetLoss(nclass=4, **kwargs)
    else:
        return MixSoftmaxCrossEntropyLoss(**kwargs)
+ """ + + def __init__(self, mode, base_lr=0.01, target_lr=0, niters=0, nepochs=0, iters_per_epoch=0, + offset=0, power=0.9, step_iter=None, step_epoch=None, step_factor=0.1, warmup_epochs=0): + super(LRScheduler, self).__init__() + assert (mode in ['constant', 'step', 'linear', 'poly', 'cosine']) + + if mode == 'step': + assert (step_iter is not None or step_epoch is not None) + self.niters = niters + self.step = step_iter + epoch_iters = nepochs * iters_per_epoch + if epoch_iters > 0: + self.niters = epoch_iters + if step_epoch is not None: + self.step = [s * iters_per_epoch for s in step_epoch] + + self.step_factor = step_factor + self.base_lr = base_lr + self.target_lr = base_lr if mode == 'constant' else target_lr + self.offset = offset + self.power = power + self.warmup_iters = warmup_epochs * iters_per_epoch + self.mode = mode + + def __call__(self, optimizer, num_update): + self.update(num_update) + assert self.learning_rate >= 0 + self._adjust_learning_rate(optimizer, self.learning_rate) + + def update(self, num_update): + N = self.niters - 1 + T = num_update - self.offset + T = min(max(0, T), N) + + if self.mode == 'constant': + factor = 0 + elif self.mode == 'linear': + factor = 1 - T / N + elif self.mode == 'poly': + factor = pow(1 - T / N, self.power) + elif self.mode == 'cosine': + factor = (1 + math.cos(math.pi * T / N)) / 2 + elif self.mode == 'step': + if self.step is not None: + count = sum([1 for s in self.step if s <= T]) + factor = pow(self.step_factor, count) + else: + factor = 1 + else: + raise NotImplementedError + + # warm up lr schedule + if self.warmup_iters > 0 and T < self.warmup_iters: + factor = factor * 1.0 * T / self.warmup_iters + + if self.mode == 'step': + self.learning_rate = self.base_lr * factor + else: + self.learning_rate = self.target_lr + (self.base_lr - self.target_lr) * factor + + def _adjust_learning_rate(self, optimizer, lr): + optimizer.param_groups[0]['lr'] = lr + # enlarge the lr at the head + for i in range(1, 
len(optimizer.param_groups)): + optimizer.param_groups[i]['lr'] = lr * 10 + + +# separating MultiStepLR with WarmupLR +# but the current LRScheduler design doesn't allow it +# reference: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/solver/lr_scheduler.py +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3, + warmup_iters=500, warmup_method="linear", last_epoch=-1): + super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) + if not list(milestones) == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. Got {}", milestones) + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted got {}".format(warmup_method)) + + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + + def get_lr(self): + warmup_factor = 1 + if self.last_epoch < self.warmup_iters: + if self.warmup_method == 'constant': + warmup_factor = self.warmup_factor + elif self.warmup_factor == 'linear': + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + return [base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs] + + +class WarmupPolyLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, target_lr=0, max_iters=0, power=0.9, warmup_factor=1.0 / 3, + warmup_iters=500, warmup_method='linear', last_epoch=-1): + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted " + "got {}".format(warmup_method)) + + self.target_lr = target_lr + self.max_iters = max_iters + self.power = power + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters 
+ self.warmup_method = warmup_method + + super(WarmupPolyLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + N = self.max_iters - self.warmup_iters + T = self.last_epoch - self.warmup_iters + if self.last_epoch < self.warmup_iters: + if self.warmup_method == 'constant': + warmup_factor = self.warmup_factor + elif self.warmup_method == 'linear': + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + else: + raise ValueError("Unknown warmup type.") + return [self.target_lr + (base_lr - self.target_lr) * warmup_factor for base_lr in self.base_lrs] + factor = pow(1 - T / N, self.power) + return [self.target_lr + (base_lr - self.target_lr) * factor for base_lr in self.base_lrs] + + +if __name__ == '__main__': + import torch + import torch.nn as nn + + model = nn.Conv2d(16, 16, 3, 1, 1) + optimizer = torch.optim.Adam(model.parameters(), lr=0.01) + lr_scheduler = WarmupPolyLR(optimizer, niters=1000) diff --git a/core/utils/parallel.py b/core/utils/parallel.py new file mode 100644 index 0000000..cb9e896 --- /dev/null +++ b/core/utils/parallel.py @@ -0,0 +1,162 @@ +"""Utils for Semantic Segmentation""" +import threading +import torch +import torch.cuda.comm as comm +from torch.nn.parallel.data_parallel import DataParallel +from torch.nn.parallel._functions import Broadcast +from torch.autograd import Function + +__all__ = ['DataParallelModel', 'DataParallelCriterion'] + + +class Reduce(Function): + @staticmethod + def forward(ctx, *inputs): + ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))] + inputs = sorted(inputs, key=lambda i: i.get_device()) + return comm.reduce_add(inputs) + + @staticmethod + def backward(ctx, gradOutputs): + return Broadcast.apply(ctx.target_gpus, gradOutputs) + + +class DataParallelModel(DataParallel): + """Data parallelism + + Hide the difference of single/multiple GPUs to the user. 
class DataParallelModel(DataParallel):
    """Data parallelism wrapper whose outputs stay on their own devices.

    The module is replicated on each device and each replica handles a
    slice of the batch; gradients from every replica are summed into the
    original module on the backward pass.  Unlike ``nn.DataParallel``,
    ``gather`` is a no-op so the per-device outputs can be fed straight
    into ``DataParallelCriterion``.

    The batch size should be larger than the number of GPUs used.

    Parameters
    ----------
    module : object
        Network to be parallelized.
    sync : bool
        enable synchronization (default: False).
    Inputs:
        - **inputs**: list of input
    Outputs:
        - **outputs**: list of output
    Example::
        >>> net = DataParallelModel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)  # input_var can be on any device, including CPU
    """

    def gather(self, outputs, output_device):
        # Keep outputs on their own devices instead of gathering.
        return outputs

    def replicate(self, module, device_ids):
        return super(DataParallelModel, self).replicate(module, device_ids)


# Reference: https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/encoding/parallel.py
class DataParallelCriterion(DataParallel):
    """Compute a criterion across multiple GPUs, balancing memory usage.

    Targets are split across the devices by chunking in the batch
    dimension; inputs are expected to be the (already scattered) outputs
    of ``DataParallelModel``.

    Example::
        >>> net = DataParallelModel(model, device_ids=[0, 1, 2])
        >>> criterion = DataParallelCriterion(criterion, device_ids=[0, 1, 2])
        >>> y = net(x)
        >>> loss = criterion(y, target)
    """

    def forward(self, inputs, *targets, **kwargs):
        # `inputs` are the outputs of DataParallelModel (one per device).
        if not self.device_ids:
            return self.module(inputs, *targets, **kwargs)
        targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
        if len(self.device_ids) == 1:
            return self.module(inputs, *targets[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = criterion_parallel_apply(replicas, inputs, targets, kwargs)
        return Reduce.apply(*outputs) / len(outputs)


def get_a_var(obj):
    """Depth-first search for the first torch.Tensor inside ``obj``."""
    if isinstance(obj, torch.Tensor):
        return obj
    if isinstance(obj, (list, tuple)):
        for found in map(get_a_var, obj):
            if isinstance(found, torch.Tensor):
                return found
    if isinstance(obj, dict):
        for found in map(get_a_var, obj.items()):
            if isinstance(found, torch.Tensor):
                return found
    return None


def criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
    r"""Apply each criterion replica to its (input, target) pair in parallel.

    Args:
        modules (Module): modules to be parallelized
        inputs (tensor): inputs to the modules
        targets (tensor): targets to the modules
        devices (list of int or torch.device): CUDA devices

    ``modules``, ``inputs``, ``targets`` (and ``kwargs_tup`` / ``devices``
    when given) must all have the same length.  Results are returned in
    order; an exception raised by any worker is re-raised here.
    """
    assert len(modules) == len(inputs)
    assert len(targets) == len(inputs)
    if kwargs_tup is not None:
        assert len(modules) == len(kwargs_tup)
    else:
        kwargs_tup = ({},) * len(modules)
    if devices is not None:
        assert len(modules) == len(devices)
    else:
        devices = [None] * len(modules)
    lock = threading.Lock()
    results = {}
    grad_enabled = torch.is_grad_enabled()

    def _worker(i, module, input, target, kwargs, device=None):
        # Inherit the caller's grad mode inside the worker thread.
        torch.set_grad_enabled(grad_enabled)
        if device is None:
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                output = module(*(list(input) + target), **kwargs)
            with lock:
                results[i] = output
        except Exception as e:
            with lock:
                results[i] = e

    if len(modules) > 1:
        workers = [threading.Thread(target=_worker,
                                    args=(i, module, input, target, kwargs, device))
                   for i, (module, input, target, kwargs, device) in
                   enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
    else:
        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])

    ordered = []
    for i in range(len(inputs)):
        out = results[i]
        if isinstance(out, Exception):
            raise out
        ordered.append(out)
    return ordered
class SegmentationMetric(object):
    """Accumulates pixel accuracy (pixAcc) and mean IoU over batches."""

    def __init__(self, nclass):
        super(SegmentationMetric, self).__init__()
        self.nclass = nclass
        self.reset()

    def update(self, preds, labels):
        """Fold one batch (or a list of batches) into the running totals.

        Parameters
        ----------
        labels : 'NumpyArray' or list of `NumpyArray`
            The labels of the data.
        preds : 'NumpyArray' or list of `NumpyArray`
            Predicted values.
        """

        def evaluate_worker(self, pred, label):
            # Per-batch counts accumulated into the running totals.
            correct, labeled = batch_pix_accuracy(pred, label)
            inter, union = batch_intersection_union(pred, label, self.nclass)

            self.total_correct += correct
            self.total_label += labeled
            # Keep accumulators on the same device as the incoming batch.
            if self.total_inter.device != inter.device:
                self.total_inter = self.total_inter.to(inter.device)
                self.total_union = self.total_union.to(union.device)
            self.total_inter += inter
            self.total_union += union

        if isinstance(preds, torch.Tensor):
            evaluate_worker(self, preds, labels)
        elif isinstance(preds, (list, tuple)):
            for (pred, label) in zip(preds, labels):
                evaluate_worker(self, pred, label)

    def get(self):
        """Return (pixAcc, mIoU) over everything seen since reset()."""
        eps = 2.220446049250313e-16  # literal stand-in for np.spacing(1)
        pixAcc = 1.0 * self.total_correct / (eps + self.total_label)
        IoU = 1.0 * self.total_inter / (eps + self.total_union)
        return pixAcc, IoU.mean().item()

    def reset(self):
        """Zero all accumulators."""
        self.total_inter = torch.zeros(self.nclass)
        self.total_union = torch.zeros(self.nclass)
        self.total_correct = 0
        self.total_label = 0


# pytorch version
def batch_pix_accuracy(output, target):
    """PixAcc: (correct, labeled) pixel counts for one batch (torch).

    output is 4D (N, C, H, W) scores; target is 3D (N, H, W) labels.
    """
    # NOTE(review): output is truncated via .long() before argmax, so
    # fractional scores are floored first; kept as-is to preserve behavior.
    predict = torch.argmax(output.long(), 1) + 1
    target = target.long() + 1

    pixel_labeled = torch.sum(target > 0).item()
    pixel_correct = torch.sum((predict == target) * (target > 0)).item()
    assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
    return pixel_correct, pixel_labeled


def batch_intersection_union(output, target, nclass):
    """mIoU inputs: per-class (intersection, union) areas for one batch."""
    mini, maxi, nbins = 1, nclass, nclass
    predict = torch.argmax(output, 1) + 1
    target = target.float() + 1

    # Zero-out predictions on unlabeled pixels before intersecting; the
    # +1 shift keeps histc from counting those zeros (the main difference
    # from np.bincount).
    predict = predict.float() * (target > 0).float()
    intersection = predict * (predict == target).float()
    area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi)
    area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi)
    area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi)
    area_union = area_pred + area_lab - area_inter
    assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area"
    return area_inter.float(), area_union.float()


def pixelAccuracy(imPred, imLab):
    """Single-image pixel accuracy (numpy); unlabeled (< 0) pixels excluded.

    Returns (pixel_accuracy, pixel_correct, pixel_labeled).
    """
    pixel_labeled = np.sum(imLab >= 0)
    pixel_correct = np.sum((imPred == imLab) * (imLab >= 0))
    return (1.0 * pixel_correct / pixel_labeled, pixel_correct, pixel_labeled)


def intersectionAndUnion(imPred, imLab, numClass):
    """Single-image per-class (intersection, union) areas (numpy)."""
    # Ignore predictions on unlabeled ground-truth pixels.
    imPred = imPred * (imLab >= 0)

    intersection = imPred * (imPred == imLab)
    (area_intersection, _) = np.histogram(intersection, bins=numClass, range=(1, numClass))
    (area_pred, _) = np.histogram(imPred, bins=numClass, range=(1, numClass))
    (area_lab, _) = np.histogram(imLab, bins=numClass, range=(1, numClass))
    area_union = area_pred + area_lab - area_intersection
    return (area_intersection, area_union)


def hist_info(pred, label, num_cls):
    """Confusion-matrix histogram plus (labeled, correct) pixel counts."""
    assert pred.shape == label.shape
    k = (label >= 0) & (label < num_cls)
    labeled = np.sum(k)
    correct = np.sum(pred[k] == label[k])
    hist = np.bincount(num_cls * label[k].astype(int) + pred[k],
                       minlength=num_cls ** 2).reshape(num_cls, num_cls)
    return hist, labeled, correct


def compute_score(hist, correct, labeled):
    """Derive IoU statistics and mean pixel accuracy from a confusion histogram."""
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    mean_IU = np.nanmean(iu)
    mean_IU_no_back = np.nanmean(iu[1:])
    freq = hist.sum(1) / hist.sum()
    freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
    mean_pixel_acc = correct / labeled
    return iu, mean_IU, mean_IU_no_back, mean_pixel_acc
def set_img_color(img, label, colors, background=0, show255=False):
    """Paint class colors onto img in place; the background class is skipped.

    When show255 is True, pixels labeled 255 are painted white as well.
    Returns the (mutated) img.
    """
    for cls_idx in range(len(colors)):
        if cls_idx != background:
            img[np.where(label == cls_idx)] = colors[cls_idx]
    if show255:
        img[np.where(label == 255)] = 255
    return img


def show_prediction(img, pred, colors, background=0):
    """Return a uint8 copy of img with the prediction overlaid in class colors."""
    canvas = np.array(img, np.uint8)
    set_img_color(canvas, pred, colors, background)
    return np.array(canvas)


def show_colorful_images(prediction, palettes):
    """Display a paletted rendering of the prediction via PIL."""
    Image.fromarray(palettes[prediction.astype('uint8').squeeze()]).show()


def save_colorful_images(prediction, filename, output_dir, palettes):
    """Save a paletted rendering of the prediction.

    :param prediction: [B, H, W, C]
    """
    rendered = Image.fromarray(palettes[prediction.astype('uint8').squeeze()])
    target_path = os.path.join(output_dir, filename)
    parent_dir = os.path.split(target_path)[0]
    if not os.path.exists(parent_dir):
        os.mkdir(parent_dir)
    rendered.save(target_path)


def get_color_pallete(npimg, dataset='pascal_voc'):
    """Visualize image.

    Parameters
    ----------
    npimg : numpy.ndarray
        Single channel image with shape `H, W, 1`.
    dataset : str, default: 'pascal_voc'
        The dataset that model pretrained on. ('pascal_voc', 'ade20k')

    Returns
    -------
    out_img : PIL.Image
        Image with color pallete
    """
    # recover the VOC ignore boundary before palettizing
    if dataset in ('pascal_voc', 'pascal_aug'):
        npimg[npimg == -1] = 255
    if dataset == 'ade20k':
        npimg = npimg + 1  # ADE20K labels are shifted by one
        out_img = Image.fromarray(npimg.astype('uint8'))
        out_img.putpalette(adepallete)
        return out_img
    elif dataset == 'citys':
        out_img = Image.fromarray(npimg.astype('uint8'))
        out_img.putpalette(cityspallete)
        return out_img
    out_img = Image.fromarray(npimg.astype('uint8'))
    out_img.putpalette(vocpallete)
    return out_img


def _getvocpallete(num_cls):
    """Build the standard VOC palette: 3 bytes per class, bit-reversed labels."""
    pallete = [0] * (num_cls * 3)
    for j in range(0, num_cls):
        lab = j
        pallete[j * 3 + 0] = 0
        pallete[j * 3 + 1] = 0
        pallete[j * 3 + 2] = 0
        i = 0
        # spread the low bits of the label across the high bits of each channel
        while lab > 0:
            pallete[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            pallete[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            pallete[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i = i + 1
            lab >>= 3
    return pallete


vocpallete = _getvocpallete(256)
cityspallete = [
    128, 64, 128,
    244, 35, 232,
    70, 70, 70,
    102, 102, 156,
    190, 153, 153,
    153, 153, 153,
    250, 170, 30,
    220, 220, 0,
    107, 142, 35,
    152, 251, 152,
    0, 130, 180,
    220, 20, 60,
    255, 0, 0,
    0, 0, 142,
    0, 0, 70,
    0, 60, 100,
    0, 80, 100,
    0, 0, 230,
    119, 11, 32,
]


def ms(t1, t0):
    """Elapsed time between two time.time() stamps, in milliseconds."""
    return (t1 - t0) * 1000.0


def center_coordinate(boundbxs):
    """Center (x, y) of an xyxy box given as [x1, y1, x2, y2, ...]."""
    x1, y1 = boundbxs[0], boundbxs[1]
    x2, y2 = boundbxs[2], boundbxs[3]
    return 0.5 * (x1 + x2), 0.5 * (y1 + y2)


def fourcorner_coordinate(boundbxs):
    """Four corner points of an xyxy box, in contour order.

    Input: [x1, y1, x2, y2] diagonal corners.
    Output: [[top-left], [top-right], [bottom-right], [bottom-left]].
    """
    x1, y1 = boundbxs[0], boundbxs[1]
    x2, y2 = boundbxs[2], boundbxs[3]
    wid = x2 - x1
    hei = y2 - y1
    # corners derived from width/height exactly as the surrounding code expects
    return [[x1, y1], [x1 + wid, y1], [x2, y2], [x1, y1 + hei]]
def remove_simivalue(list1, list2):
    """Return list1 with every element that also occurs in list2 removed.

    Both arguments are nested (list-of-list) values compared by ==.

    FIX: the original iterated all (i, j) index pairs and called
    list.remove() once per match, so a value appearing twice in list2
    triggered ValueError (removing an element already gone). A filtering
    comprehension is equivalent for every input the old code survived and
    never raises.
    """
    return [elem for elem in list1 if elem not in list2]


def remove_sameeleme_inalist(list3):
    """Deduplicate a nested list, keeping first occurrences in order.

    Elements may be unhashable (lists), so membership uses == scans.

    FIX: the original seeded its result with list3[0] and crashed with
    IndexError on an empty input; this version returns [] for [] and is
    otherwise identical (first occurrence wins).
    """
    deduped = []
    for elem in list3:
        if elem not in deduped:
            deduped.append(elem)
    return deduped


def order_points(pts):
    """Sort the 4 corner points of a rectangle by the clockwise convention
    used elsewhere in this module.

    pts: (4, 2) array. Splits points into the two left-most and two
    right-most by x, orders the left pair by descending y and the right
    pair by ascending y, then concatenates left + right.
    """
    sort_x = pts[np.argsort(pts[:, 0]), :]
    left = sort_x[:2, :]
    right = sort_x[2:, :]
    left = left[np.argsort(left[:, 1])[::-1], :]   # left pair: y descending
    right = right[np.argsort(right[:, 1]), :]      # right pair: y ascending
    return np.concatenate((left, right), axis=0)
def mixDrowing_water_postprocess(preds, _mask_cv, pars):
    """Keep only head/person detections whose centers lie inside the water
    region and outside every boat box (people on boats are not "drowning").

    Parameters: preds — detections where index 0 holds the class id
    (0/1 = head/person, anything else = boat) and indices 0..3 double as
    xyxy coordinates for the geometric helpers, matching the callers of
    this module; _mask_cv — water segmentation mask (BGR or grayscale);
    pars — unused, kept for interface compatibility.
    Returns (filtered detections, timing string).

    FIX: the original indexed contour_info[0] unconditionally and crashed
    with IndexError when the mask contained no contours at all; the newer
    drownUtils20230801 variant guards this case, so this one now does too.
    """
    t0 = time.time()
    # --- step 1: the largest segmented water region is the reference area ---
    img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape) == 3 else _mask_cv
    # (renamed: the original bound threshold's retval to "contours")
    ret, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2)
    contour_info = [(c, cv2.isContourConvex(c), cv2.contourArea(c)) for c in contours]
    contour_info.sort(key=lambda c: c[2], reverse=True)
    if not contour_info:
        # no water region detected: nothing can be in the water
        t1 = t2 = t3 = t4 = time.time()
        timeInfos = '%.1f (step1:%.1f step2:%.2f step3:%.3f step4:%.1f) ' % (
            ms(t4, t0), ms(t1, t0), ms(t2, t1), ms(t3, t2), ms(t4, t3))
        return [], timeInfos
    max_contour = contour_info[0]
    t1 = time.time()

    # --- step 2: split predictions into head/person (class ids 0/1) vs boats ---
    init_head_person = []
    init_boat = []
    for det in preds:
        if det[0] == 0 or det[0] == 1:
            init_head_person.append(det)
        else:
            init_boat.append(det)
    t2 = time.time()

    # --- step 3: keep head/person whose box center lies inside the water ---
    final_head_person_filterwater = []
    for det in init_head_person:
        center_x, center_y = center_coordinate(det)
        # pointPolygonTest with measureDist=False: +1 inside, -1 outside, 0 on edge
        if cv2.pointPolygonTest(max_contour[0], (center_x, center_y), False) == 1:
            final_head_person_filterwater.append(det)
    t3 = time.time()

    # --- step 4: drop detections whose center falls inside any boat box ---
    boat_contour = []
    for det in init_boat:
        corners = fourcorner_coordinate([det[0], det[1], det[2], det[3]])
        boat_contour.append(np.float32(np.array(corners)))
    list_headperson_inboat = []
    for det in final_head_person_filterwater:
        for poly in boat_contour:
            center_x, center_y = center_coordinate(det)
            if cv2.pointPolygonTest(poly, (center_x, center_y), False) == 1:
                list_headperson_inboat.append(det)
    if len(list_headperson_inboat) != 0:
        # duplicates arise when a center lies inside several boat boxes
        list_headperson_inboat = remove_sameeleme_inalist(list_headperson_inboat)
    final_head_person_filterboat = remove_simivalue(final_head_person_filterwater, list_headperson_inboat)

    t4 = time.time()
    timeInfos = '%.1f (step1:%.1f step2:%.2f step3:%.3f step4:%.1f) ' % (
        ms(t4, t0), ms(t1, t0), ms(t2, t1), ms(t3, t2), ms(t4, t3))

    return final_head_person_filterboat, timeInfos
def mixDrowing_water_postprocess(preds, _mask_cv, pars):
    """Filter detections down to people actually in the water, with boat
    occupants removed (this variant also merges head/person duplicates).

    Parameters: preds — detections as [x1, y1, x2, y2, label, conf] with
    label strings 'head' / 'person' / other (boats); _mask_cv — water
    segmentation mask; pars — unused, kept for interface compatibility.
    Returns (filtered detections, timing string), or ([], 0) when no water
    region is found.

    FIX: the timing format string had 9 '%s' placeholders but only 7
    values, so every call that found a water region raised TypeError when
    building timeInfos. Also removed a leftover per-frame debug print of
    the full contour array.
    """
    '''1. the largest segmented water region is the reference area'''
    zoom_factor = 4  # shrink the mask first; contour search on the full mask is slow
    original_height = _mask_cv.shape[0]
    original_width = _mask_cv.shape[1]
    zoom_height = int(original_height / zoom_factor)
    zoom_width = int(original_width / zoom_factor)

    _mask_cv = cv2.resize(_mask_cv, (zoom_width, zoom_height))  # (width, height) order
    t4 = time.time()
    img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape) == 3 else _mask_cv
    t5 = time.time()
    ret, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, 2)
    contour_info = []
    for c in contours:
        contour_info.append((c, cv2.isContourConvex(c), cv2.contourArea(c)))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    t6 = time.time()

    '''no water region: return no detections'''
    if contour_info == []:
        final_head_person_filterwater = []
        timeInfos = 0
        return final_head_person_filterwater, timeInfos
    max_contour = contour_info[0]
    max_contour = max_contour[0] * zoom_factor  # scale contour back to full-image coordinates
    t7 = time.time()

    '''2.1 split predictions into head/person vs boats'''
    init_head_person = []
    init_boat = []
    for i in range(len(preds)):
        if preds[i][4] == 'head' or preds[i][4] == 'person':
            init_head_person.append(preds[i])
        else:
            init_boat.append(preds[i])
    t8 = time.time()

    '''2.2 drop 'head' boxes that refer to the same individual as a 'person' box'''
    init_head = []
    init_person = []
    for det in init_head_person:
        (init_head if det[4] == 'head' else init_person).append(det)
    # person boxes become contours for the containment test
    person_contour = []
    for det in init_person:
        corners = fourcorner_coordinate([det[0], det[1], det[2], det[3]])
        person_contour.append(np.float32(np.array(corners)))
    # NOTE(review): a head outside one person box but inside another is
    # appended once per non-containing box — preserved as-is; confirm intent.
    list_head = []
    for i in range(len(init_head)):
        for j in range(len(person_contour)):
            center_x, center_y = center_coordinate(init_head[i])
            # measureDist=False: +1 inside, -1 outside, 0 on the contour
            flag = cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False)
            if flag == 1:
                pass
            else:
                list_head.append(init_head[i])
    init_head_person_temp = init_person + list_head

    '''3. keep head/person whose box center lies inside the water contour'''
    init_head_person_filterwater = init_head_person_temp
    final_head_person_filterwater = []
    for i in range(len(init_head_person_filterwater)):
        center_x, center_y = center_coordinate(init_head_person_filterwater[i])
        flag = cv2.pointPolygonTest(max_contour, (center_x, center_y), False)
        if flag == 1:
            final_head_person_filterwater.append(init_head_person_filterwater[i])
    t9 = time.time()

    '''4. then remove those whose center falls inside any boat box'''
    init_head_person_filterboat = final_head_person_filterwater
    boat_contour = []
    for i in range(len(init_boat)):
        boundbxs1 = [init_boat[i][0], init_boat[i][1], init_boat[i][2], init_boat[i][3]]
        contour_temp = fourcorner_coordinate(boundbxs1)
        boat_contour.append(np.float32(np.array(contour_temp)))
    t10 = time.time()
    list_headperson_inboat = []
    for i in range(len(init_head_person_filterboat)):
        for j in range(len(boat_contour)):
            center_x, center_y = center_coordinate(init_head_person_filterboat[i])
            flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False)
            if flag == 1:
                list_headperson_inboat.append(init_head_person_filterboat[i])
    if len(list_headperson_inboat) != 0:
        # duplicates arise when a center lies inside several boat boxes
        list_headperson_inboat = remove_sameeleme_inalist(list_headperson_inboat)
    final_head_person_filterboat = remove_simivalue(init_head_person_filterboat, list_headperson_inboat)
    final_output_luoshui = final_head_person_filterboat
    t11 = time.time()

    # FIX: placeholders now match the 7 timing values (was 9 '%s' for 7 args)
    timeInfos = ('存图:%s, 遍历船舶范围:%s, 水域过滤:%s, head+person、boat取出:%s, 新增如果水域为空:%s, 找contours:%s, 图像改变:%s'
                 % ((t11 - t10) * 1000, (t10 - t9) * 1000, (t9 - t8) * 1000, (t8 - t7) * 1000,
                    (t7 - t6) * 1000, (t6 - t5) * 1000, (t5 - t4) * 1000))

    return final_output_luoshui, timeInfos
class Colors:
    """Ultralytics color palette (https://ultralytics.com/).

    Cycles through 20 fixed colors; call an instance with an index to get
    an (r, g, b) tuple, or (b, g, r) when bgr=True.
    """

    def __init__(self):
        # FIX: renamed local from `hex`, which shadowed the builtin hex()
        hex_codes = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                     '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hex_codes]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        """Return color i (modulo palette size) as RGB, or BGR when bgr=True."""
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        """Convert '#RRGGBB' to an (r, g, b) tuple of ints."""
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


colors = Colors()  # create instance for 'from utils.plots import colors'


def plot_one_box(x, img, color=None, label=None, line_thickness=3):
    """Draw one xyxy bounding box (and optional label) on img in place.

    x: box as (x1, y1, x2, y2); color: BGR tuple/list (random if None);
    label: text drawn on a filled background above the top-left corner.
    """
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
        # black text was chosen over the earlier white variant (kept below for reference)
        # cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [0, 0, 0], thickness=tf, lineType=cv2.LINE_AA)
'weights/best_luoshui20230608.pt' + self.weights = dete_weights + + self.device = '0' if torch.cuda.is_available() else 'cpu' + self.device = select_device(self.device) + model = attempt_load(self.weights, map_location=self.device) + model.to(self.device).eval() + model.half() + # torch.save(model, 'test.pt') + self.m = model + self.names = model.module.names if hasattr( + model, 'module') else model.names + + def preprocess(self, img): + + img0 = img.copy() + img = letterbox(img, new_shape=self.img_size)[0] + img = img[:, :, ::-1].transpose(2, 0, 1) + img = np.ascontiguousarray(img) + img = torch.from_numpy(img).to(self.device) + img = img.half() # 半精度 + img /= 255.0 # 图像归一化 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + return img0, img + + + + def detect(self, im): + + im0, img = self.preprocess(im) + + pred = self.m(img, augment=False)[0] + pred = pred.float() + pred = non_max_suppression(pred, self.threshold, 0.4) + + pred_boxes = [] + for det in pred: + + if det is not None and len(det): + det[:, :4] = scale_coords( + img.shape[2:], det[:, :4], im0.shape).round() + + for *x, conf, cls_id in det: + lbl = self.names[int(cls_id)] + # if not lbl in ['person', 'car', 'truck']:#不在这个类别中,则继续 + # continue + # if not lbl in ['head', 'boat']: # 不在这个类别中,则继续 + # continue + x1, y1 = int(x[0]), int(x[1]) + x2, y2 = int(x[2]), int(x[3]) + pred_boxes.append((x1, y1, x2, y2, lbl, conf)) + + c = int(cls_id) # integer class + plot_one_box(x, im0, label=lbl, color=colors(c, True), line_thickness=3) + + cv2.imwrite('test_result_1.png', im0) + + return im, pred_boxes + diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/__pycache__/AIDetector_pytorch.cpython-37.pyc b/models/__pycache__/AIDetector_pytorch.cpython-37.pyc new file mode 100644 index 0000000..ee35371 Binary files /dev/null and b/models/__pycache__/AIDetector_pytorch.cpython-37.pyc differ diff --git 
a/models/__pycache__/AIDetector_pytorch.cpython-38.pyc b/models/__pycache__/AIDetector_pytorch.cpython-38.pyc new file mode 100644 index 0000000..0f60e51 Binary files /dev/null and b/models/__pycache__/AIDetector_pytorch.cpython-38.pyc differ diff --git a/models/__pycache__/__init__.cpython-37.pyc b/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..d8860b2 Binary files /dev/null and b/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/models/__pycache__/__init__.cpython-38.pyc b/models/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..569e016 Binary files /dev/null and b/models/__pycache__/__init__.cpython-38.pyc differ diff --git a/models/__pycache__/bisenet.cpython-38.pyc b/models/__pycache__/bisenet.cpython-38.pyc new file mode 100644 index 0000000..7b23b3c Binary files /dev/null and b/models/__pycache__/bisenet.cpython-38.pyc differ diff --git a/models/__pycache__/common.cpython-37.pyc b/models/__pycache__/common.cpython-37.pyc new file mode 100644 index 0000000..b0531ea Binary files /dev/null and b/models/__pycache__/common.cpython-37.pyc differ diff --git a/models/__pycache__/common.cpython-38.pyc b/models/__pycache__/common.cpython-38.pyc new file mode 100644 index 0000000..b704e31 Binary files /dev/null and b/models/__pycache__/common.cpython-38.pyc differ diff --git a/models/__pycache__/experimental.cpython-37.pyc b/models/__pycache__/experimental.cpython-37.pyc new file mode 100644 index 0000000..70c9a1e Binary files /dev/null and b/models/__pycache__/experimental.cpython-37.pyc differ diff --git a/models/__pycache__/experimental.cpython-38.pyc b/models/__pycache__/experimental.cpython-38.pyc new file mode 100644 index 0000000..d37368c Binary files /dev/null and b/models/__pycache__/experimental.cpython-38.pyc differ diff --git a/models/__pycache__/model_stages.cpython-37.pyc b/models/__pycache__/model_stages.cpython-37.pyc new file mode 100644 index 0000000..5da7b57 Binary files /dev/null and 
# YOLOv5 common modules (building blocks shared by the detection models)

def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for kernel size k unless p is given explicitly."""
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
    return p


def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise convolution: groups = gcd of in/out channel counts."""
    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)


class Conv(nn.Module):
    """Standard convolution: Conv2d (no bias) -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> used as given; anything else -> identity
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        # forward path once BN has been fused into the conv weights
        return self.act(self.conv(x))


class TransformerLayer(nn.Module):
    """Transformer layer (https://arxiv.org/abs/2010.11929);
    LayerNorm layers removed for better performance."""

    def __init__(self, c, num_heads):
        super().__init__()
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        # self-attention with residual, then a 2-layer MLP with residual
        x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
        x = self.fc2(self.fc1(x)) + x
        return x


class TransformerBlock(nn.Module):
    """Vision-transformer block: optional channel-matching conv, learnable
    position embedding, then num_layers stacked TransformerLayers."""

    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        self.conv = Conv(c1, c2) if c1 != c2 else None
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
        self.c2 = c2

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        # (b, c, w, h) -> (w*h, b, c) token sequence
        tokens = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3)
        tokens = tokens + self.linear(tokens)  # add position embedding
        tokens = self.tr(tokens)
        # back to (b, c2, w, h)
        return tokens.unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h)


class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, optional residual add."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y


class BottleneckCSP(nn.Module):
    """CSP bottleneck (https://github.com/WongKinYiu/CrossStagePartialNetworks)."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
        self.cv4 = Conv(2 * hidden, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * hidden)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))
        y2 = self.cv2(x)
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class C3(nn.Module):
    """CSP bottleneck with 3 convolutions (simplified BottleneckCSP)."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.cv3 = Conv(2 * hidden, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*[Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)])

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))


class C3TR(C3):
    """C3 variant whose inner stack is a TransformerBlock."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        hidden = int(c2 * e)
        self.m = TransformerBlock(hidden, hidden, 4, n)


class SPP(nn.Module):
    """Spatial pyramid pooling layer as used in YOLOv3-SPP."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class Focus(nn.Module):
    """Focus w/h information into channel space: 2x2 space-to-depth, then conv."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))


class Contract(nn.Module):
    """Contract width/height into channels, e.g. (1,64,80,80) -> (1,256,40,40)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        N, C, H, W = x.size()  # H and W are assumed divisible by gain
        s = self.gain
        x = x.view(N, C, H // s, s, W // s, s)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
        return x.view(N, C * s * s, H // s, W // s)


class Expand(nn.Module):
    """Expand channels into width/height, e.g. (1,64,80,80) -> (1,16,160,160)."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        N, C, H, W = x.size()  # C is assumed divisible by gain**2
        s = self.gain
        x = x.view(N, s, s, C // s ** 2, H, W)
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()
        return x.view(N, C // s ** 2, H * s, W * s)


class Concat(nn.Module):
    """Concatenate a list of tensors along one dimension."""

    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class NMS(nn.Module):
    """Non-Maximum Suppression module wrapping non_max_suppression()."""

    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class
    max_det = 1000  # maximum number of detections per image

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
For height=640, width=1280, RGB images example inputs are: + # filename: imgs = 'data/images/zidane.jpg' + # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images + + t = [time_synchronized()] + p = next(self.model.parameters()) # for device and type + if isinstance(imgs, torch.Tensor): # torch + with amp.autocast(enabled=p.device.type != 'cpu'): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + f = f'image{i}' # filename + if isinstance(im, str): # filename or uri + im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(im), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # 
stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + t.append(time_synchronized()) + + with amp.autocast(enabled=p.device.type != 'cpu'): + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) + + # Post-process + y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + t.append(time_synchronized()) + return Detections(imgs, y, files, t, self.names, x.shape) + + +class Detections: + # detections class for YOLOv5 inference results + def __init__(self, imgs, pred, files, times=None, names=None, shape=None): + super(Detections, self).__init__() + d = pred[0].device # device + gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape + + def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' + if pred is not None: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if 
show or save or render or crop: + for *box, conf, cls in pred: # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + if crop: + save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) + else: # all others + plot_one_box(box, im, label=label, color=colors(cls)) + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np + if pprint: + print(str.rstrip(', ')) + if show: + im.show(self.files[i]) # show + if save: + f = self.files[i] + im.save(save_dir / f) # save + print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') + if render: + self.imgs[i] = np.asarray(im) + + def print(self): + self.display(pprint=True) # print results + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + + def show(self): + self.display(show=True) # show results + + def save(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir + self.display(save=True, save_dir=save_dir) # save results + + def crop(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir + self.display(crop=True, save_dir=save_dir) # crop results + print(f'Saved results to {save_dir}\n') + + def render(self): + self.display(render=True) # render results + return self.imgs + + def pandas(self): + # return detections as pandas DataFrames, i.e. 
print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] + for d in x: + for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def __len__(self): + return self.n + + +class Classify(nn.Module): + # Classification head, i.e. x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super(Classify, self).__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/models/experimental.py b/models/experimental.py new file mode 100644 index 0000000..afa7879 --- /dev/null +++ b/models/experimental.py @@ -0,0 +1,137 @@ +# YOLOv5 experimental modules + +import numpy as np +import torch +import torch.nn as nn + +from models.common import Conv, DWConv +from utils.google_utils import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super(CrossConv, 
self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super(Sum, self).__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super(GhostConv, self).__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super(GhostBottleneck, self).__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class MixConv2d(nn.Module): + # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + def 
__init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + super(MixConv2d, self).__init__() + groups = len(k) + if equal_ch: # equal c_ per group + i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * groups + a = np.eye(groups + 1, groups, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.LeakyReLU(0.1, inplace=True) + + def forward(self, x): + return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super(Ensemble, self).__init__() + + def forward(self, x, augment=False): + y = [] + for module in self: + y.append(module(x, augment)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, map_location=None, inplace=True): + from models.yolo import Detect, Model + + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + attempt_download(w) + ckpt = torch.load(w, map_location=map_location) # load + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: + m.inplace = inplace # pytorch 1.7.0 compatibility + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + 
return model[-1] # return model + else: + print(f'Ensemble created with {weights}\n') + for k in ['names']: + setattr(model, k, getattr(model[-1], k)) + model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride + return model # return ensemble diff --git a/models/export.py b/models/export.py new file mode 100644 index 0000000..65721f6 --- /dev/null +++ b/models/export.py @@ -0,0 +1,143 @@ +"""Exports a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats + +Usage: + $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 +""" + +import argparse +import sys +import time +from pathlib import Path + +sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories + +import torch +import torch.nn as nn +from torch.utils.mobile_optimizer import optimize_for_mobile + +import models +from models.experimental import attempt_load +from utils.activations import Hardswish, SiLU +from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging +from utils.torch_utils import select_device + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--train', action='store_true', help='model.train() mode') + parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only + parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only + parser.add_argument('--opset-version', type=int, default=12, help='ONNX opset version') # ONNX-only + opt = parser.parse_args() + opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + opt.include = [x.lower() for x in opt.include] + print(opt) + set_logging() + t = time.time() + + # Load PyTorch model + device = select_device(opt.device) + model = attempt_load(opt.weights, map_location=device) # load FP32 model + labels = model.names + + # Checks + gs = int(max(model.stride)) # grid size (max stride) + opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples + assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. 
use --device 0' + + # Input + img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection + + # Update model + if opt.half: + img, model = img.half(), model.half() # to FP16 + if opt.train: + model.train() # training mode (no grid construction in Detect layer) + for k, m in model.named_modules(): + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m.act, nn.Hardswish): + m.act = Hardswish() + elif isinstance(m.act, nn.SiLU): + m.act = SiLU() + elif isinstance(m, models.yolo.Detect): + m.inplace = opt.inplace + m.onnx_dynamic = opt.dynamic + # m.forward = m.forward_export # assign forward (optional) + + for _ in range(2): + y = model(img) # dry runs + print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") + + # TorchScript export ----------------------------------------------------------------------------------------------- + if 'torchscript' in opt.include or 'coreml' in opt.include: + prefix = colorstr('TorchScript:') + try: + print(f'\n{prefix} starting export with torch {torch.__version__}...') + f = opt.weights.replace('.pt', '.torchscript.pt') # filename + ts = torch.jit.trace(model, img, strict=False) + (optimize_for_mobile(ts) if opt.optimize else ts).save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + # ONNX export ------------------------------------------------------------------------------------------------------ + if 'onnx' in opt.include: + prefix = colorstr('ONNX:') + try: + import onnx + + print(f'{prefix} starting export with onnx {onnx.__version__}...') + f = opt.weights.replace('.pt', '.onnx') # filename + torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 
3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # print(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + if opt.simplify: + try: + check_requirements(['onnx-simplifier']) + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify( + model_onnx, + dynamic_input_shape=opt.dynamic, + input_shapes={'images': list(img.shape)} if opt.dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + # CoreML export ---------------------------------------------------------------------------------------------------- + if 'coreml' in opt.include: + prefix = colorstr('CoreML:') + try: + import coremltools as ct + + print(f'{prefix} starting export with coremltools {ct.__version__}...') + assert opt.train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' + model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + f = opt.weights.replace('.pt', '.mlmodel') # filename + model.save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + # Finish + print(f'\nExport complete ({time.time() - t:.2f}s). 
Visualize with https://github.com/lutzroeder/netron.') diff --git a/models/model_stages.py b/models/model_stages.py new file mode 100644 index 0000000..bdf6755 --- /dev/null +++ b/models/model_stages.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- + + +import torch +import torch.nn as nn +import torch.nn.functional as F + +# from models.stdcnet import STDCNet1446, STDCNet813 +from models.stdcnet import STDCNet1446, STDCNet813 + + +# BatchNorm2d = nn.BatchNorm2d + +class ConvBNReLU(nn.Module): + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, + out_chan, + kernel_size = ks, + stride = stride, + padding = padding, + bias = False) + # self.bn = BatchNorm2d(out_chan) + # self.bn = BatchNorm2d(out_chan, activation='none') + self.bn = nn.BatchNorm2d(out_chan) + self.relu = nn.ReLU() + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class BiSeNetOutput(nn.Module): + def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs): + super(BiSeNetOutput, self).__init__() + self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1) + self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False) + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.conv_out(x) + return x + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias 
is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):######################1 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class AttentionRefinementModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(AttentionRefinementModule, self).__init__() + self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1) + self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size= 1, bias=False) + # self.bn_atten = nn.BatchNorm2d(out_chan) + # self.bn_atten = BatchNorm2d(out_chan, activation='none') + self.bn_atten = nn.BatchNorm2d(out_chan)########################2 + + self.sigmoid_atten = nn.Sigmoid() + self.init_weight() + + def forward(self, x): + feat = self.conv(x) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv_atten(atten) + atten = self.bn_atten(atten) + atten = self.sigmoid_atten(atten) + out = torch.mul(feat, atten) + return out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class ContextPath(nn.Module): + def __init__(self, backbone='CatNetSmall', pretrain_model='', use_conv_last=False, *args, **kwargs): + super(ContextPath, self).__init__() + + self.backbone_name = backbone + if backbone == 'STDCNet1446': + self.backbone = STDCNet1446(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + self.arm16 = AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + + elif backbone == 'STDCNet813': + self.backbone = STDCNet813(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + self.arm16 = 
AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + else: + print("backbone is not in backbone lists") + exit(0) + + self.init_weight() + + def forward(self, x): + H0, W0 = x.size()[2:] + + feat2, feat4, feat8, feat16, feat32 = self.backbone(x) + H8, W8 = feat8.size()[2:] + H16, W16 = feat16.size()[2:] + H32, W32 = feat32.size()[2:] + + avg = F.avg_pool2d(feat32, feat32.size()[2:]) + + avg = self.conv_avg(avg) + avg_up = F.interpolate(avg, (H32, W32), mode='nearest') + + feat32_arm = self.arm32(feat32) + feat32_sum = feat32_arm + avg_up + feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest') + feat32_up = self.conv_head32(feat32_up) + + feat16_arm = self.arm16(feat16) + feat16_sum = feat16_arm + feat32_up + feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest') + feat16_up = self.conv_head16(feat16_up) + + return feat2, feat4, feat8, feat16, feat16_up, feat32_up # x8, x16 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):#################3 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class FeatureFusionModule(nn.Module): + def __init__(self, in_chan, out_chan, *args, **kwargs): + super(FeatureFusionModule, self).__init__() + self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, 
padding=0) + self.conv1 = nn.Conv2d(out_chan, + out_chan//4, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.conv2 = nn.Conv2d(out_chan//4, + out_chan, + kernel_size = 1, + stride = 1, + padding = 0, + bias = False) + self.relu = nn.ReLU(inplace=True) + self.sigmoid = nn.Sigmoid() + self.init_weight() + + def forward(self, fsp, fcp): + fcat = torch.cat([fsp, fcp], dim=1) + feat = self.convblk(fcat) + atten = F.avg_pool2d(feat, feat.size()[2:]) + atten = self.conv1(atten) + atten = self.relu(atten) + atten = self.conv2(atten) + atten = self.sigmoid(atten) + feat_atten = torch.mul(feat, atten) + feat_out = feat_atten + feat + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params = [], [] + for name, module in self.named_modules(): + if isinstance(module, (nn.Linear, nn.Conv2d)): + wd_params.append(module.weight) + if not module.bias is None: + nowd_params.append(module.bias) + elif isinstance(module, nn.BatchNorm2d):##################4 + nowd_params += list(module.parameters()) + return wd_params, nowd_params + + +class BiSeNet(nn.Module): + def __init__(self, backbone, n_classes, pretrain_model='', use_boundary_2=False, use_boundary_4=False, use_boundary_8=False, use_boundary_16=False, use_conv_last=False, heat_map=False, *args, **kwargs): + super(BiSeNet, self).__init__() + + self.use_boundary_2 = use_boundary_2 + self.use_boundary_4 = use_boundary_4 + self.use_boundary_8 = use_boundary_8 + self.use_boundary_16 = use_boundary_16 + # self.heat_map = heat_map + self.cp = ContextPath(backbone, pretrain_model, use_conv_last=use_conv_last) + + if backbone == 'STDCNet1446': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + sp16_inplanes = 512 + inplane = sp8_inplanes + conv_out_inplanes + + elif backbone == 
'STDCNet813': + conv_out_inplanes = 128 + sp2_inplanes = 32 + sp4_inplanes = 64 + sp8_inplanes = 256 + sp16_inplanes = 512 + inplane = sp8_inplanes + conv_out_inplanes + + else: + print("backbone is not in backbone lists") + exit(0) + + self.ffm = FeatureFusionModule(inplane, 256) + self.conv_out = BiSeNetOutput(256, 256, n_classes) + self.conv_out16 = BiSeNetOutput(conv_out_inplanes, 64, n_classes) + self.conv_out32 = BiSeNetOutput(conv_out_inplanes, 64, n_classes) + + self.conv_out_sp16 = BiSeNetOutput(sp16_inplanes, 64, 1) + + self.conv_out_sp8 = BiSeNetOutput(sp8_inplanes, 64, 1) + self.conv_out_sp4 = BiSeNetOutput(sp4_inplanes, 64, 1) + self.conv_out_sp2 = BiSeNetOutput(sp2_inplanes, 64, 1) + self.init_weight() + + def forward(self, x): + H, W = x.size()[2:] + + feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x) + + feat_out_sp2 = self.conv_out_sp2(feat_res2) + + feat_out_sp4 = self.conv_out_sp4(feat_res4) + + feat_out_sp8 = self.conv_out_sp8(feat_res8) + + feat_out_sp16 = self.conv_out_sp16(feat_res16) + + feat_fuse = self.ffm(feat_res8, feat_cp8) + + feat_out = self.conv_out(feat_fuse) + feat_out16 = self.conv_out16(feat_cp8) + feat_out32 = self.conv_out32(feat_cp16) + + feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True) + feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True) + feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True) + + if self.use_boundary_2 and self.use_boundary_4 and self.use_boundary_8: + return feat_out, feat_out16, feat_out32, feat_out_sp2, feat_out_sp4, feat_out_sp8 + + if (not self.use_boundary_2) and self.use_boundary_4 and self.use_boundary_8: + return feat_out, feat_out16, feat_out32, feat_out_sp4, feat_out_sp8 + + if (not self.use_boundary_2) and (not self.use_boundary_4) and self.use_boundary_8: + return feat_out, feat_out16, feat_out32, feat_out_sp8 + + if (not self.use_boundary_2) and (not self.use_boundary_4) and 
(not self.use_boundary_8): + return feat_out, feat_out16, feat_out32 + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] + for name, child in self.named_children(): + child_wd_params, child_nowd_params = child.get_params() + if isinstance(child, (FeatureFusionModule, BiSeNetOutput)): + lr_mul_wd_params += child_wd_params + lr_mul_nowd_params += child_nowd_params + else: + wd_params += child_wd_params + nowd_params += child_nowd_params + return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params + + +if __name__ == "__main__": + + # net = BiSeNet('STDCNet813', 19) # 原始 + # net = BiSeNet('STDCNet813', 3) # 改动 + # net = BiSeNet('STDCNet813', 4) # 改动 + net = BiSeNet('STDCNet813', 2) # 改动 + + net.cuda() + net.eval() + in_ten = torch.randn(1, 3, 768, 1536).cuda() + out, out16, out32 = net(in_ten) + print(out.shape) + # torch.save(net.state_dict(), 'STDCNet813.pth')### + + diff --git a/models/model_stages_trt.py b/models/model_stages_trt.py new file mode 100644 index 0000000..69a7a77 --- /dev/null +++ b/models/model_stages_trt.py @@ -0,0 +1,407 @@ +#!/usr/bin/python +# -*- encoding: utf-8 -*- +import torch +import torch.nn as nn +import torch.nn.functional as F + +from models.stdcnet import STDCNet1446, STDCNet813 +BatchNorm2d = nn.BatchNorm2d + +class ConvBNReLU(nn.Module): + def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs): + super(ConvBNReLU, self).__init__() + self.conv = nn.Conv2d(in_chan, + out_chan, + kernel_size = ks, + stride = stride, + padding = padding, + bias = False) + self.bn = BatchNorm2d(out_chan) + # self.bn = BatchNorm2d(out_chan, activation='none') + self.relu = nn.ReLU() + self.init_weight() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + 
class BiSeNetOutput(nn.Module):
    """Segmentation head: 3x3 ConvBNReLU followed by a 1x1 projection to logits."""

    def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
        self.init_weight()

    def forward(self, x):
        hidden = self.conv(x)
        return self.conv_out(hidden)

    def init_weight(self):
        # Kaiming-initialize every direct Conv2d child; zero its bias if present.
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and non-decayed (biases, BN)."""
        wd_params, nowd_params = [], []
        for _, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if module.bias is not None:
                    nowd_params.append(module.bias)
            elif isinstance(module, BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + +class ContextPath(nn.Module): + def __init__(self, backbone='CatNetSmall', pretrain_model='', use_conv_last=False, input_size=512, *args, **kwargs): + super(ContextPath, self).__init__() + + self.backbone_name = backbone + self.input_size = input_size + print('backbone: ', backbone) + if backbone == 'STDCNet1446': + self.backbone = STDCNet1446(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + self.arm16 = AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + + elif backbone == 'STDCNet813': + self.backbone = STDCNet813(pretrain_model=pretrain_model, use_conv_last=use_conv_last) + self.arm16 = AttentionRefinementModule(512, 128) + inplanes = 1024 + if use_conv_last: + inplanes = 1024 + self.arm32 = AttentionRefinementModule(inplanes, 128) + self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1) + self.conv_avg = ConvBNReLU(inplanes, 128, ks=1, stride=1, padding=0) + else: + print("backbone is not in backbone lists") + exit(0) + + if self.input_size == 512: + self.H8 = torch.tensor(64) + self.W8 = torch.tensor(128) + + self.H16 = torch.tensor(32) + self.W16 = torch.tensor(64) + + self.H32 = torch.tensor(16) + self.W32 = torch.tensor(32) + elif self.input_size == 768: + self.H8 = torch.tensor(96) + self.W8 = torch.tensor(192) + + self.H16 = torch.tensor(48) + self.W16 = torch.tensor(96) + + self.H32 = torch.tensor(24) + self.W32 = torch.tensor(48) + elif self.input_size == 1024: + self.H8 = torch.tensor(128) + self.W8 = 
    def forward(self, x):
        """Context path with fixed (pre-registered) upsampling sizes for TRT export."""

        # Backbone features at strides 2/4/8/16/32.
        feat2, feat4, feat8, feat16, feat32 = self.backbone(x)
        # Global average pooling expressed as avg_pool2d with an explicit window
        # so the exported graph has no data-dependent shapes.
        size_array = [int(s) for s in feat32.size()[2:]]
        avg = torch.nn.functional.avg_pool2d(feat32, size_array)

        avg = self.conv_avg(avg)
        # Fixed target sizes (self.H32/W32 etc.) were chosen in __init__ from
        # input_size; 'nearest' keeps the op TRT-friendly.
        avg_up = F.interpolate(avg, (self.H32, self.W32), mode='nearest')

        # Refine stride-32 features with attention, add global context, upsample.
        feat32_arm = self.arm32(feat32)
        feat32_sum = feat32_arm + avg_up
        feat32_up = F.interpolate(feat32_sum, (self.H16, self.W16), mode='nearest')
        feat32_up = self.conv_head32(feat32_up)

        # Same refine-add-upsample pattern at stride 16.
        feat16_arm = self.arm16(feat16)
        feat16_sum = feat16_arm + feat32_up
        feat16_up = F.interpolate(feat16_sum, (self.H8, self.W8), mode='nearest')
        feat16_up = self.conv_head16(feat16_up)

        return feat2, feat4, feat8, feat16, feat16_up, feat32_up  # x8, x16

    def init_weight(self):
        # Kaiming-initialize direct Conv2d children; zero their biases if present.
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and non-decayed (biases, BN)."""
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
class FeatureFusionModule(nn.Module):
    """Fuse spatial-path and context-path features with a channel-attention gate."""

    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(FeatureFusionModule, self).__init__()
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        # Squeeze-and-excitation style bottleneck: out_chan -> out_chan/4 -> out_chan.
        self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.init_weight()

    def forward(self, fsp, fcp):
        # Concatenate the two paths channel-wise and project.
        fused = self.convblk(torch.cat([fsp, fcp], dim=1))
        # Global average pool via an explicit window size (no data-dependent shapes).
        pool_size = [int(s) for s in fused.size()[2:]]
        gate = torch.nn.functional.avg_pool2d(fused, pool_size)
        gate = self.relu(self.conv1(gate))
        gate = self.sigmoid(self.conv2(gate))
        # Residual attention: reweighted features added back to the fused features.
        return fused * gate + fused

    def init_weight(self):
        # Kaiming-initialize direct Conv2d children; zero their biases if present.
        for child in self.children():
            if isinstance(child, nn.Conv2d):
                nn.init.kaiming_normal_(child.weight, a=1)
                if child.bias is not None:
                    nn.init.constant_(child.bias, 0)

    def get_params(self):
        """Split parameters into weight-decayed (conv/linear weights) and non-decayed (biases, BN)."""
        wd_params, nowd_params = [], []
        for _, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if module.bias is not None:
                    nowd_params.append(module.bias)
            elif isinstance(module, BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params
self.W = torch.tensor(1024) + elif self.input_size == 768: + self.H = torch.tensor(768) + self.W = torch.tensor(1536) + elif self.input_size == 1024: + self.H = torch.tensor(1024) + self.W = torch.tensor(2048) + elif self.input_size == 720: + self.H = torch.tensor(720) + self.W = torch.tensor(960) + else: + print("input_size is not in input_size lists") + exit(0) + + self.init_weight() + + def forward(self, x): + # H, W = x.size()[2:] + + feat_res2, feat_res4, feat_res8, feat_res16, feat_cp8, feat_cp16 = self.cp(x) + # 16, 24, 40, 112, + + feat_out_sp8 = self.conv_out_sp8(feat_res8) + + feat_out_sp16 = self.conv_out_sp16(feat_res16) + + feat_fuse = self.ffm(feat_res8, feat_cp8) + + feat_out = self.conv_out(feat_fuse) + feat_out16 = self.conv_out16(feat_cp8) + feat_out32 = self.conv_out32(feat_cp16) + + feat_out = F.interpolate(feat_out, (self.H, self.W), mode='nearest') + feat_out16 = F.interpolate(feat_out16, (self.H, self.W), mode='nearest') + feat_out32 = F.interpolate(feat_out32, (self.H, self.W), mode='nearest') + + return feat_out + + def init_weight(self): + for ly in self.children(): + if isinstance(ly, nn.Conv2d): + nn.init.kaiming_normal_(ly.weight, a=1) + if not ly.bias is None: nn.init.constant_(ly.bias, 0) + + def get_params(self): + wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], [] + for name, child in self.named_children(): + child_wd_params, child_nowd_params = child.get_params() + if isinstance(child, (FeatureFusionModule, BiSeNetOutput)): + lr_mul_wd_params += child_wd_params + lr_mul_nowd_params += child_nowd_params + else: + wd_params += child_wd_params + nowd_params += child_nowd_params + return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params + + +if __name__ == "__main__": + + net = BiSeNet('STDCNet813', 19) + net.cuda() + net.eval() + in_ten = torch.randn(1, 3, 768, 1536).cuda() + out, out16, out32 = net(in_ten) + print(out.shape) + torch.save(net.state_dict(), 'STDCNet813.pth') + + diff --git 
class ConvX(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU block with 'same'-style padding (kernel // 2)."""

    def __init__(self, in_planes, out_planes, kernel=3, stride=1):
        super(ConvX, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel,
                              stride=stride, padding=kernel // 2, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return self.relu(y)
class CatBottleneck(nn.Module):
    """STDC concatenation bottleneck: a chain of ConvX blocks whose outputs are
    concatenated channel-wise (channels halve at each step: C/2, C/4, ...),
    with an average-pool skip on the first branch when stride == 2."""

    def __init__(self, in_planes, out_planes, block_num=3, stride=1):
        super(CatBottleneck, self).__init__()
        # NOTE(review): print(...) returns None, so this assert can never carry
        # a message; the print fires at definition-evaluation time instead.
        assert block_num > 1, print("block number should be larger than 1.")
        self.conv_list = nn.ModuleList()
        self.stride = stride
        if stride == 2:
            # Depthwise stride-2 conv applied after the first 1x1 ConvX.
            self.avd_layer = nn.Sequential(
                nn.Conv2d(out_planes//2, out_planes//2, kernel_size=3, stride=2, padding=1, groups=out_planes//2, bias=False),
                nn.BatchNorm2d(out_planes//2),
            )
            # Parameter-free downsample for the first branch's skip.
            self.skip = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
            stride = 1  # remaining ConvX blocks run at stride 1

        for idx in range(block_num):
            if idx == 0:
                # 1x1 conv producing the first half of the output channels.
                self.conv_list.append(ConvX(in_planes, out_planes//2, kernel=1))
            elif idx == 1 and block_num == 2:
                self.conv_list.append(ConvX(out_planes//2, out_planes//2, stride=stride))
            elif idx == 1 and block_num > 2:
                self.conv_list.append(ConvX(out_planes//2, out_planes//4, stride=stride))
            elif idx < block_num - 1:
                # Channels halve at each intermediate step: C/2^idx -> C/2^(idx+1).
                self.conv_list.append(ConvX(out_planes//int(math.pow(2, idx)), out_planes//int(math.pow(2, idx+1))))
            else:
                # Last block keeps its channel count so the concat sums to out_planes.
                self.conv_list.append(ConvX(out_planes//int(math.pow(2, idx)), out_planes//int(math.pow(2, idx))))

    def forward(self, x):
        out_list = []
        # First branch output; it is downsampled (avd_layer / skip) when stride == 2.
        out1 = self.conv_list[0](x)

        for idx, conv in enumerate(self.conv_list[1:]):
            if idx == 0:
                if self.stride == 2:
                    # Downsample before the second ConvX so all branches align spatially.
                    out = conv(self.avd_layer(out1))
                else:
                    out = conv(out1)
            else:
                out = conv(out)
            out_list.append(out)

        if self.stride == 2:
            # Align the first branch with the downsampled ones via avg-pool skip.
            out1 = self.skip(out1)
        out_list.insert(0, out1)

        # Channel-wise concat of all branches: C/2 + C/4 + ... == out_planes.
        out = torch.cat(out_list, dim=1)
        return out
self).__init__() + if type == "cat": + block = CatBottleneck + elif type == "add": + block = AddBottleneck + self.use_conv_last = use_conv_last + self.features = self._make_layers(base, layers, block_num, block) + self.conv_last = ConvX(base*16, max(1024, base*16), 1, 1) + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(max(1024, base*16), max(1024, base*16), bias=False) + self.bn = nn.BatchNorm1d(max(1024, base*16)) + self.relu = nn.ReLU(inplace=True) + self.dropout = nn.Dropout(p=dropout) + self.linear = nn.Linear(max(1024, base*16), num_classes, bias=False) + + self.x2 = nn.Sequential(self.features[:1]) + self.x4 = nn.Sequential(self.features[1:2]) + self.x8 = nn.Sequential(self.features[2:6]) + self.x16 = nn.Sequential(self.features[6:11]) + self.x32 = nn.Sequential(self.features[11:]) + + if pretrain_model: + print('use pretrain model {}'.format(pretrain_model)) + self.init_weight(pretrain_model) + else: + self.init_params() + + def init_weight(self, pretrain_model): + + state_dict = torch.load(pretrain_model)["state_dict"] + self_state_dict = self.state_dict() + for k, v in state_dict.items(): + self_state_dict.update({k: v}) + self.load_state_dict(self_state_dict) + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.001) + if m.bias is not None: + init.constant_(m.bias, 0) + + def _make_layers(self, base, layers, block_num, block): + features = [] + features += [ConvX(3, base//2, 3, 2)] + features += [ConvX(base//2, base, 3, 2)] + + for i, layer in enumerate(layers): + for j in range(layer): + if i == 0 and j == 0: + features.append(block(base, base*4, block_num, 2)) + elif j == 0: + features.append(block(base*int(math.pow(2,i+1)), base*int(math.pow(2,i+2)), 
block_num, 2)) + else: + features.append(block(base*int(math.pow(2,i+2)), base*int(math.pow(2,i+2)), block_num, 1)) + + return nn.Sequential(*features) + + def forward(self, x): + feat2 = self.x2(x) + feat4 = self.x4(feat2) + feat8 = self.x8(feat4) + feat16 = self.x16(feat8) + feat32 = self.x32(feat16) + if self.use_conv_last: + feat32 = self.conv_last(feat32) + + return feat2, feat4, feat8, feat16, feat32 + + def forward_impl(self, x): + out = self.features(x) + out = self.conv_last(out).pow(2) + out = self.gap(out).flatten(1) + out = self.fc(out) + # out = self.bn(out) + out = self.relu(out) + # out = self.relu(self.bn(self.fc(out))) + out = self.dropout(out) + out = self.linear(out) + return out + +# STDC1Net +class STDCNet813(nn.Module): + def __init__(self, base=64, layers=[2,2,2], block_num=4, type="cat", num_classes=1000, dropout=0.20, pretrain_model='', use_conv_last=False): + super(STDCNet813, self).__init__() + if type == "cat": + block = CatBottleneck + elif type == "add": + block = AddBottleneck + self.use_conv_last = use_conv_last + self.features = self._make_layers(base, layers, block_num, block) + self.conv_last = ConvX(base*16, max(1024, base*16), 1, 1) + self.gap = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(max(1024, base*16), max(1024, base*16), bias=False) + self.bn = nn.BatchNorm1d(max(1024, base*16)) + self.relu = nn.ReLU(inplace=True) + self.dropout = nn.Dropout(p=dropout) + self.linear = nn.Linear(max(1024, base*16), num_classes, bias=False) + + self.x2 = nn.Sequential(self.features[:1]) + self.x4 = nn.Sequential(self.features[1:2]) + self.x8 = nn.Sequential(self.features[2:4]) + self.x16 = nn.Sequential(self.features[4:6]) + self.x32 = nn.Sequential(self.features[6:]) + + if pretrain_model: + print('use pretrain model {}'.format(pretrain_model)) + self.init_weight(pretrain_model) + else: + self.init_params() + + def init_weight(self, pretrain_model): + + state_dict = torch.load(pretrain_model)["state_dict"] + self_state_dict = 
self.state_dict() + for k, v in state_dict.items(): + self_state_dict.update({k: v}) + self.load_state_dict(self_state_dict) + + def init_params(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + init.constant_(m.weight, 1) + init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + init.normal_(m.weight, std=0.001) + if m.bias is not None: + init.constant_(m.bias, 0) + + def _make_layers(self, base, layers, block_num, block): + features = [] + features += [ConvX(3, base//2, 3, 2)] + features += [ConvX(base//2, base, 3, 2)] + + for i, layer in enumerate(layers): + for j in range(layer): + if i == 0 and j == 0: + features.append(block(base, base*4, block_num, 2)) + elif j == 0: + features.append(block(base*int(math.pow(2,i+1)), base*int(math.pow(2,i+2)), block_num, 2)) + else: + features.append(block(base*int(math.pow(2,i+2)), base*int(math.pow(2,i+2)), block_num, 1)) + + return nn.Sequential(*features) + + def forward(self, x): + feat2 = self.x2(x) + feat4 = self.x4(feat2) + feat8 = self.x8(feat4) + feat16 = self.x16(feat8) + feat32 = self.x32(feat16) + if self.use_conv_last: + feat32 = self.conv_last(feat32) + + return feat2, feat4, feat8, feat16, feat32 + + def forward_impl(self, x): + out = self.features(x) + out = self.conv_last(out).pow(2) + out = self.gap(out).flatten(1) + out = self.fc(out) + # out = self.bn(out) + out = self.relu(out) + # out = self.relu(self.bn(self.fc(out))) + out = self.dropout(out) + out = self.linear(out) + return out + +if __name__ == "__main__": + model = STDCNet813(num_classes=1000, dropout=0.00, block_num=4) + model.eval() + x = torch.randn(1,3,224,224) + y = model(x) + torch.save(model.state_dict(), 'cat.pth') + print(y.size()) diff --git a/models/yolo.py b/models/yolo.py new file mode 100644 index 0000000..06b8003 --- /dev/null +++ b/models/yolo.py @@ -0,0 +1,304 @@ +# 
YOLOv5 YOLO-specific modules + +import argparse +import logging +import sys +from copy import deepcopy +from pathlib import Path + +sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + onnx_dynamic = False # ONNX export parameter + + def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer + super(Detect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.inplace = inplace # use in-place ops (e.g. 
slice assignment) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if self.inplace: + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 + xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super(Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.safe_load(f) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + 
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + self.inplace = self.yaml.get('inplace', True) + # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.inplace = self.inplace + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.anchors /= m.stride.view(-1, 1, 1) + check_anchor_order(m) + self.stride = m.stride + self._initialize_biases() # only run once + # logger.info('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + return self.forward_augment(x) # augmented inference, None + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_augment(self, x): + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if profile: + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + t = time_synchronized() + for _ in range(10): + _ = m(x) + dt.append((time_synchronized() - t) * 100) + if m == 
self.model[0]: + logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + + if profile: + logger.info('%.1fms total' % sum(dt)) + return x + + def _descale_pred(self, p, flips, scale, img_size): + # de-scale predictions following augmented inference (inverse operation) + if self.inplace: + p[..., :4] /= scale # de-scale + if flips == 2: + p[..., 1] = img_size[0] - p[..., 1] # de-flip ud + elif flips == 3: + p[..., 0] = img_size[1] - p[..., 0] # de-flip lr + else: + x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + p = torch.cat((x, y, wh, p[..., 4:]), -1) + return p + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
+ m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + logger.info( + ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + logger.info('Fusing layers... ') + for m in self.model.modules(): + if type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + logger.info('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + logger.info('Removing NMS... ') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add AutoShape module + logger.info('Adding AutoShape... 
    def info(self, verbose=False, img_size=640):  # print model information
        """Log a layer/parameter summary of the model via utils.torch_utils.model_info."""
        model_info(self, verbose, img_size)
m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) + # y = model(img, profile=True) + + # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter('.') + # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml new file mode 100644 index 0000000..71ebf86 --- /dev/null +++ b/models/yolov5l.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, 
C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml new file mode 100644 index 0000000..3c749c9 --- /dev/null +++ b/models/yolov5m.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 
3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml new file mode 100644 index 0000000..aca669d --- /dev/null +++ b/models/yolov5s.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml new file mode 100644 index 0000000..d3babdf --- /dev/null +++ b/models/yolov5x.yaml @@ 
-0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/modules/__init__.py b/modules/__init__.py new file mode 100644 index 0000000..8a098de --- /dev/null +++ b/modules/__init__.py @@ -0,0 +1,5 @@ +from .bn import ABN, InPlaceABN, InPlaceABNSync +from .functions import ACT_RELU, ACT_LEAKY_RELU, ACT_ELU, ACT_NONE +from .misc import GlobalAvgPool2d, SingleGPU +from .residual import IdentityResidualBlock +from .dense import DenseModule diff --git a/modules/__pycache__/__init__.cpython-35.pyc b/modules/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000..fed14a7 Binary files /dev/null and b/modules/__pycache__/__init__.cpython-35.pyc differ diff --git 
a/modules/__pycache__/__init__.cpython-37.pyc b/modules/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..8bce89b Binary files /dev/null and b/modules/__pycache__/__init__.cpython-37.pyc differ diff --git a/modules/__pycache__/__init__.cpython-38.pyc b/modules/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..ae24701 Binary files /dev/null and b/modules/__pycache__/__init__.cpython-38.pyc differ diff --git a/modules/__pycache__/bn.cpython-35.pyc b/modules/__pycache__/bn.cpython-35.pyc new file mode 100644 index 0000000..60913e8 Binary files /dev/null and b/modules/__pycache__/bn.cpython-35.pyc differ diff --git a/modules/__pycache__/bn.cpython-37.pyc b/modules/__pycache__/bn.cpython-37.pyc new file mode 100644 index 0000000..de5223c Binary files /dev/null and b/modules/__pycache__/bn.cpython-37.pyc differ diff --git a/modules/__pycache__/bn.cpython-38.pyc b/modules/__pycache__/bn.cpython-38.pyc new file mode 100644 index 0000000..a745ab2 Binary files /dev/null and b/modules/__pycache__/bn.cpython-38.pyc differ diff --git a/modules/__pycache__/dense.cpython-35.pyc b/modules/__pycache__/dense.cpython-35.pyc new file mode 100644 index 0000000..40528cc Binary files /dev/null and b/modules/__pycache__/dense.cpython-35.pyc differ diff --git a/modules/__pycache__/dense.cpython-37.pyc b/modules/__pycache__/dense.cpython-37.pyc new file mode 100644 index 0000000..92a3a5d Binary files /dev/null and b/modules/__pycache__/dense.cpython-37.pyc differ diff --git a/modules/__pycache__/dense.cpython-38.pyc b/modules/__pycache__/dense.cpython-38.pyc new file mode 100644 index 0000000..8df5a1d Binary files /dev/null and b/modules/__pycache__/dense.cpython-38.pyc differ diff --git a/modules/__pycache__/functions.cpython-35.pyc b/modules/__pycache__/functions.cpython-35.pyc new file mode 100644 index 0000000..73895b9 Binary files /dev/null and b/modules/__pycache__/functions.cpython-35.pyc differ diff --git 
a/modules/__pycache__/functions.cpython-37.pyc b/modules/__pycache__/functions.cpython-37.pyc new file mode 100644 index 0000000..4df73f1 Binary files /dev/null and b/modules/__pycache__/functions.cpython-37.pyc differ diff --git a/modules/__pycache__/functions.cpython-38.pyc b/modules/__pycache__/functions.cpython-38.pyc new file mode 100644 index 0000000..1776223 Binary files /dev/null and b/modules/__pycache__/functions.cpython-38.pyc differ diff --git a/modules/__pycache__/misc.cpython-35.pyc b/modules/__pycache__/misc.cpython-35.pyc new file mode 100644 index 0000000..4353bf8 Binary files /dev/null and b/modules/__pycache__/misc.cpython-35.pyc differ diff --git a/modules/__pycache__/misc.cpython-37.pyc b/modules/__pycache__/misc.cpython-37.pyc new file mode 100644 index 0000000..efbbf55 Binary files /dev/null and b/modules/__pycache__/misc.cpython-37.pyc differ diff --git a/modules/__pycache__/misc.cpython-38.pyc b/modules/__pycache__/misc.cpython-38.pyc new file mode 100644 index 0000000..2b64333 Binary files /dev/null and b/modules/__pycache__/misc.cpython-38.pyc differ diff --git a/modules/__pycache__/residual.cpython-35.pyc b/modules/__pycache__/residual.cpython-35.pyc new file mode 100644 index 0000000..28dd895 Binary files /dev/null and b/modules/__pycache__/residual.cpython-35.pyc differ diff --git a/modules/__pycache__/residual.cpython-37.pyc b/modules/__pycache__/residual.cpython-37.pyc new file mode 100644 index 0000000..24a7814 Binary files /dev/null and b/modules/__pycache__/residual.cpython-37.pyc differ diff --git a/modules/__pycache__/residual.cpython-38.pyc b/modules/__pycache__/residual.cpython-38.pyc new file mode 100644 index 0000000..54d156c Binary files /dev/null and b/modules/__pycache__/residual.cpython-38.pyc differ diff --git a/modules/bn.py b/modules/bn.py new file mode 100644 index 0000000..cd3928b --- /dev/null +++ b/modules/bn.py @@ -0,0 +1,130 @@ +import torch +import torch.nn as nn +import torch.nn.functional as functional + 
+try: + from queue import Queue +except ImportError: + from Queue import Queue + +from .functions import * + + +class ABN(nn.Module): + """Activated Batch Normalization + + This gathers a `BatchNorm2d` and an activation function in a single module + """ + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): + """Creates an Activated Batch Normalization module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics as. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + activation : str + Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + """ + super(ABN, self).__init__() + self.num_features = num_features + self.affine = affine + self.eps = eps + self.momentum = momentum + self.activation = activation + self.slope = slope + if self.affine: + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + else: + self.register_parameter('weight', None) + self.register_parameter('bias', None) + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.constant_(self.running_mean, 0) + nn.init.constant_(self.running_var, 1) + if self.affine: + nn.init.constant_(self.weight, 1) + nn.init.constant_(self.bias, 0) + + def forward(self, x): + x = functional.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, + self.training, self.momentum, self.eps) + + if self.activation == ACT_RELU: + return functional.relu(x, inplace=True) + elif self.activation == ACT_LEAKY_RELU: + return functional.leaky_relu(x, 
negative_slope=self.slope, inplace=True) + elif self.activation == ACT_ELU: + return functional.elu(x, inplace=True) + else: + return x + + def __repr__(self): + rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ + ' affine={affine}, activation={activation}' + if self.activation == "leaky_relu": + rep += ', slope={slope})' + else: + rep += ')' + return rep.format(name=self.__class__.__name__, **self.__dict__) + + +class InPlaceABN(ABN): + """InPlace Activated Batch Normalization""" + + def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): + """Creates an InPlace Activated Batch Normalization module + + Parameters + ---------- + num_features : int + Number of feature channels in the input and output. + eps : float + Small constant to prevent numerical issues. + momentum : float + Momentum factor applied to compute running statistics as. + affine : bool + If `True` apply learned scale and shift transformation after normalization. + activation : str + Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. + slope : float + Negative slope for the `leaky_relu` activation. + """ + super(InPlaceABN, self).__init__(num_features, eps, momentum, affine, activation, slope) + + def forward(self, x): + return inplace_abn(x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.activation, self.slope) + + +class InPlaceABNSync(ABN): + """InPlace Activated Batch Normalization with cross-GPU synchronization + This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DistributedDataParallel`. 
+ """ + + def forward(self, x): + return inplace_abn_sync(x, self.weight, self.bias, self.running_mean, self.running_var, + self.training, self.momentum, self.eps, self.activation, self.slope) + + def __repr__(self): + rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ + ' affine={affine}, activation={activation}' + if self.activation == "leaky_relu": + rep += ', slope={slope})' + else: + rep += ')' + return rep.format(name=self.__class__.__name__, **self.__dict__) + + diff --git a/modules/deeplab.py b/modules/deeplab.py new file mode 100644 index 0000000..fd25b78 --- /dev/null +++ b/modules/deeplab.py @@ -0,0 +1,84 @@ +import torch +import torch.nn as nn +import torch.nn.functional as functional + +from models._util import try_index +from .bn import ABN + + +class DeeplabV3(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels=256, + dilations=(12, 24, 36), + norm_act=ABN, + pooling_size=None): + super(DeeplabV3, self).__init__() + self.pooling_size = pooling_size + + self.map_convs = nn.ModuleList([ + nn.Conv2d(in_channels, hidden_channels, 1, bias=False), + nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[0], padding=dilations[0]), + nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[1], padding=dilations[1]), + nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[2], padding=dilations[2]) + ]) + self.map_bn = norm_act(hidden_channels * 4) + + self.global_pooling_conv = nn.Conv2d(in_channels, hidden_channels, 1, bias=False) + self.global_pooling_bn = norm_act(hidden_channels) + + self.red_conv = nn.Conv2d(hidden_channels * 4, out_channels, 1, bias=False) + self.pool_red_conv = nn.Conv2d(hidden_channels, out_channels, 1, bias=False) + self.red_bn = norm_act(out_channels) + + self.reset_parameters(self.map_bn.activation, self.map_bn.slope) + + def reset_parameters(self, activation, slope): + gain = nn.init.calculate_gain(activation, slope) + for m in 
self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_normal_(m.weight.data, gain) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, ABN): + if hasattr(m, "weight") and m.weight is not None: + nn.init.constant_(m.weight, 1) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + # Map convolutions + out = torch.cat([m(x) for m in self.map_convs], dim=1) + out = self.map_bn(out) + out = self.red_conv(out) + + # Global pooling + pool = self._global_pooling(x) + pool = self.global_pooling_conv(pool) + pool = self.global_pooling_bn(pool) + pool = self.pool_red_conv(pool) + if self.training or self.pooling_size is None: + pool = pool.repeat(1, 1, x.size(2), x.size(3)) + + out += pool + out = self.red_bn(out) + return out + + def _global_pooling(self, x): + if self.training or self.pooling_size is None: + pool = x.view(x.size(0), x.size(1), -1).mean(dim=-1) + pool = pool.view(x.size(0), x.size(1), 1, 1) + else: + pooling_size = (min(try_index(self.pooling_size, 0), x.shape[2]), + min(try_index(self.pooling_size, 1), x.shape[3])) + padding = ( + (pooling_size[1] - 1) // 2, + (pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1, + (pooling_size[0] - 1) // 2, + (pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1 + ) + + pool = functional.avg_pool2d(x, pooling_size, stride=1) + pool = functional.pad(pool, pad=padding, mode="replicate") + return pool diff --git a/modules/dense.py b/modules/dense.py new file mode 100644 index 0000000..9638d6e --- /dev/null +++ b/modules/dense.py @@ -0,0 +1,42 @@ +from collections import OrderedDict + +import torch +import torch.nn as nn + +from .bn import ABN + + +class DenseModule(nn.Module): + def __init__(self, in_channels, growth, layers, bottleneck_factor=4, norm_act=ABN, dilation=1): + super(DenseModule, self).__init__() + self.in_channels = in_channels 
+ self.growth = growth + self.layers = layers + + self.convs1 = nn.ModuleList() + self.convs3 = nn.ModuleList() + for i in range(self.layers): + self.convs1.append(nn.Sequential(OrderedDict([ + ("bn", norm_act(in_channels)), + ("conv", nn.Conv2d(in_channels, self.growth * bottleneck_factor, 1, bias=False)) + ]))) + self.convs3.append(nn.Sequential(OrderedDict([ + ("bn", norm_act(self.growth * bottleneck_factor)), + ("conv", nn.Conv2d(self.growth * bottleneck_factor, self.growth, 3, padding=dilation, bias=False, + dilation=dilation)) + ]))) + in_channels += self.growth + + @property + def out_channels(self): + return self.in_channels + self.growth * self.layers + + def forward(self, x): + inputs = [x] + for i in range(self.layers): + x = torch.cat(inputs, dim=1) + x = self.convs1[i](x) + x = self.convs3[i](x) + inputs += [x] + + return torch.cat(inputs, dim=1) diff --git a/modules/functions.py b/modules/functions.py new file mode 100644 index 0000000..093615f --- /dev/null +++ b/modules/functions.py @@ -0,0 +1,234 @@ +from os import path +import torch +import torch.distributed as dist +import torch.autograd as autograd +import torch.cuda.comm as comm +from torch.autograd.function import once_differentiable +from torch.utils.cpp_extension import load + +_src_path = path.join(path.dirname(path.abspath(__file__)), "src") +_backend = load(name="inplace_abn", + extra_cflags=["-O3"], + sources=[path.join(_src_path, f) for f in [ + "inplace_abn.cpp", + "inplace_abn_cpu.cpp", + "inplace_abn_cuda.cu", + "inplace_abn_cuda_half.cu" + ]], + extra_cuda_cflags=["--expt-extended-lambda"]) + +# Activation names +ACT_RELU = "relu" +ACT_LEAKY_RELU = "leaky_relu" +ACT_ELU = "elu" +ACT_NONE = "none" + + +def _check(fn, *args, **kwargs): + success = fn(*args, **kwargs) + if not success: + raise RuntimeError("CUDA Error encountered in {}".format(fn)) + + +def _broadcast_shape(x): + out_size = [] + for i, s in enumerate(x.size()): + if i != 1: + out_size.append(1) + else: + 
out_size.append(s) + return out_size + + +def _reduce(x): + if len(x.size()) == 2: + return x.sum(dim=0) + else: + n, c = x.size()[0:2] + return x.contiguous().view((n, c, -1)).sum(2).sum(0) + + +def _count_samples(x): + count = 1 + for i, s in enumerate(x.size()): + if i != 1: + count *= s + return count + + +def _act_forward(ctx, x): + if ctx.activation == ACT_LEAKY_RELU: + _backend.leaky_relu_forward(x, ctx.slope) + elif ctx.activation == ACT_ELU: + _backend.elu_forward(x) + elif ctx.activation == ACT_NONE: + pass + + +def _act_backward(ctx, x, dx): + if ctx.activation == ACT_LEAKY_RELU: + _backend.leaky_relu_backward(x, dx, ctx.slope) + elif ctx.activation == ACT_ELU: + _backend.elu_backward(x, dx) + elif ctx.activation == ACT_NONE: + pass + + +class InPlaceABN(autograd.Function): + @staticmethod + def forward(ctx, x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01): + # Save context + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + ctx.affine = weight is not None and bias is not None + + # Prepare inputs + count = _count_samples(x) + x = x.contiguous() + weight = weight.contiguous() if ctx.affine else x.new_empty(0) + bias = bias.contiguous() if ctx.affine else x.new_empty(0) + + if ctx.training: + mean, var = _backend.mean_var(x) + + # Update running stats + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean) + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * count / (count - 1)) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + mean, var = running_mean.contiguous(), running_var.contiguous() + ctx.mark_dirty(x) + + # BN forward + activation + _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps) + _act_forward(ctx, x) + + # Output + ctx.var = var + ctx.save_for_backward(x, var, weight, bias) + return x + + @staticmethod + 
@once_differentiable + def backward(ctx, dz): + z, var, weight, bias = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + if ctx.training: + edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps) + else: + # TODO: implement simplified CUDA backward for inference mode + edz = dz.new_zeros(dz.size(1)) + eydz = dz.new_zeros(dz.size(1)) + + dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps) + dweight = eydz * weight.sign() if ctx.affine else None + dbias = edz if ctx.affine else None + + return dx, dweight, dbias, None, None, None, None, None, None, None + +class InPlaceABNSync(autograd.Function): + @classmethod + def forward(cls, ctx, x, weight, bias, running_mean, running_var, + training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01, equal_batches=True): + # Save context + ctx.training = training + ctx.momentum = momentum + ctx.eps = eps + ctx.activation = activation + ctx.slope = slope + ctx.affine = weight is not None and bias is not None + + # Prepare inputs + ctx.world_size = dist.get_world_size() if dist.is_initialized() else 1 + + #count = _count_samples(x) + batch_size = x.new_tensor([x.shape[0]],dtype=torch.long) + + x = x.contiguous() + weight = weight.contiguous() if ctx.affine else x.new_empty(0) + bias = bias.contiguous() if ctx.affine else x.new_empty(0) + + if ctx.training: + mean, var = _backend.mean_var(x) + if ctx.world_size>1: + # get global batch size + if equal_batches: + batch_size *= ctx.world_size + else: + dist.all_reduce(batch_size, dist.ReduceOp.SUM) + + ctx.factor = x.shape[0]/float(batch_size.item()) + + mean_all = mean.clone() * ctx.factor + dist.all_reduce(mean_all, dist.ReduceOp.SUM) + + var_all = (var + (mean - mean_all) ** 2) * ctx.factor + dist.all_reduce(var_all, dist.ReduceOp.SUM) + + mean = mean_all + var = var_all + + # Update running stats + running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean) + count = 
batch_size.item() * x.view(x.shape[0],x.shape[1],-1).shape[-1] + running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * (float(count) / (count - 1))) + + # Mark in-place modified tensors + ctx.mark_dirty(x, running_mean, running_var) + else: + mean, var = running_mean.contiguous(), running_var.contiguous() + ctx.mark_dirty(x) + + # BN forward + activation + _backend.forward(x, mean, var, weight, bias, ctx.affine, ctx.eps) + _act_forward(ctx, x) + + # Output + ctx.var = var + ctx.save_for_backward(x, var, weight, bias) + return x + + @staticmethod + @once_differentiable + def backward(ctx, dz): + z, var, weight, bias = ctx.saved_tensors + dz = dz.contiguous() + + # Undo activation + _act_backward(ctx, z, dz) + + if ctx.training: + edz, eydz = _backend.edz_eydz(z, dz, weight, bias, ctx.affine, ctx.eps) + edz_local = edz.clone() + eydz_local = eydz.clone() + + if ctx.world_size>1: + edz *= ctx.factor + dist.all_reduce(edz, dist.ReduceOp.SUM) + + eydz *= ctx.factor + dist.all_reduce(eydz, dist.ReduceOp.SUM) + else: + edz_local = edz = dz.new_zeros(dz.size(1)) + eydz_local = eydz = dz.new_zeros(dz.size(1)) + + dx = _backend.backward(z, dz, var, weight, bias, edz, eydz, ctx.affine, ctx.eps) + dweight = eydz_local * weight.sign() if ctx.affine else None + dbias = edz_local if ctx.affine else None + + return dx, dweight, dbias, None, None, None, None, None, None, None + +inplace_abn = InPlaceABN.apply +inplace_abn_sync = InPlaceABNSync.apply + +__all__ = ["inplace_abn", "inplace_abn_sync", "ACT_RELU", "ACT_LEAKY_RELU", "ACT_ELU", "ACT_NONE"] diff --git a/modules/misc.py b/modules/misc.py new file mode 100644 index 0000000..3c50b69 --- /dev/null +++ b/modules/misc.py @@ -0,0 +1,21 @@ +import torch.nn as nn +import torch +import torch.distributed as dist + +class GlobalAvgPool2d(nn.Module): + def __init__(self): + """Global average pooling over the input's spatial dimensions""" + super(GlobalAvgPool2d, self).__init__() + + def forward(self, inputs): + in_size = 
inputs.size() + return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) + +class SingleGPU(nn.Module): + def __init__(self, module): + super(SingleGPU, self).__init__() + self.module=module + + def forward(self, input): + return self.module(input.cuda(non_blocking=True)) + diff --git a/modules/residual.py b/modules/residual.py new file mode 100644 index 0000000..b7d51ad --- /dev/null +++ b/modules/residual.py @@ -0,0 +1,88 @@ +from collections import OrderedDict + +import torch.nn as nn + +from .bn import ABN + + +class IdentityResidualBlock(nn.Module): + def __init__(self, + in_channels, + channels, + stride=1, + dilation=1, + groups=1, + norm_act=ABN, + dropout=None): + """Configurable identity-mapping residual block + + Parameters + ---------- + in_channels : int + Number of input channels. + channels : list of int + Number of channels in the internal feature maps. Can either have two or three elements: if three construct + a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then + `3 x 3` then `1 x 1` convolutions. + stride : int + Stride of the first `3 x 3` convolution + dilation : int + Dilation to apply to the `3 x 3` convolutions. + groups : int + Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with + bottleneck blocks. + norm_act : callable + Function to create normalization / activation Module. + dropout: callable + Function to create Dropout Module. 
+ """ + super(IdentityResidualBlock, self).__init__() + + # Check parameters for inconsistencies + if len(channels) != 2 and len(channels) != 3: + raise ValueError("channels must contain either two or three values") + if len(channels) == 2 and groups != 1: + raise ValueError("groups > 1 are only valid if len(channels) == 3") + + is_bottleneck = len(channels) == 3 + need_proj_conv = stride != 1 or in_channels != channels[-1] + + self.bn1 = norm_act(in_channels) + if not is_bottleneck: + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, + dilation=dilation)), + ("bn2", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + dilation=dilation)) + ] + if dropout is not None: + layers = layers[0:2] + [("dropout", dropout())] + layers[2:] + else: + layers = [ + ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)), + ("bn2", norm_act(channels[0])), + ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, + groups=groups, dilation=dilation)), + ("bn3", norm_act(channels[1])), + ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)) + ] + if dropout is not None: + layers = layers[0:4] + [("dropout", dropout())] + layers[4:] + self.convs = nn.Sequential(OrderedDict(layers)) + + if need_proj_conv: + self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False) + + def forward(self, x): + if hasattr(self, "proj_conv"): + bn1 = self.bn1(x) + shortcut = self.proj_conv(bn1) + else: + shortcut = x.clone() + bn1 = self.bn1(x) + + out = self.convs(bn1) + out.add_(shortcut) + + return out diff --git a/modules/src/checks.h b/modules/src/checks.h new file mode 100644 index 0000000..e761a6f --- /dev/null +++ b/modules/src/checks.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +// Define AT_CHECK for old version of ATen where the same function 
was called AT_ASSERT +#ifndef AT_CHECK +#define AT_CHECK AT_ASSERT +#endif + +#define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") + +#define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +#define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) \ No newline at end of file diff --git a/modules/src/inplace_abn.cpp b/modules/src/inplace_abn.cpp new file mode 100644 index 0000000..0a6b112 --- /dev/null +++ b/modules/src/inplace_abn.cpp @@ -0,0 +1,95 @@ +#include + +#include + +#include "inplace_abn.h" + +std::vector mean_var(at::Tensor x) { + if (x.is_cuda()) { + if (x.type().scalarType() == at::ScalarType::Half) { + return mean_var_cuda_h(x); + } else { + return mean_var_cuda(x); + } + } else { + return mean_var_cpu(x); + } +} + +at::Tensor forward(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + if (x.is_cuda()) { + if (x.type().scalarType() == at::ScalarType::Half) { + return forward_cuda_h(x, mean, var, weight, bias, affine, eps); + } else { + return forward_cuda(x, mean, var, weight, bias, affine, eps); + } + } else { + return forward_cpu(x, mean, var, weight, bias, affine, eps); + } +} + +std::vector edz_eydz(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return edz_eydz_cuda_h(z, dz, weight, bias, affine, eps); + } else { + return edz_eydz_cuda(z, dz, weight, bias, affine, eps); + } + } else { + return edz_eydz_cpu(z, dz, weight, bias, affine, eps); + } +} + +at::Tensor backward(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + if (z.is_cuda()) { + if (z.type().scalarType() == 
at::ScalarType::Half) { + return backward_cuda_h(z, dz, var, weight, bias, edz, eydz, affine, eps); + } else { + return backward_cuda(z, dz, var, weight, bias, edz, eydz, affine, eps); + } + } else { + return backward_cpu(z, dz, var, weight, bias, edz, eydz, affine, eps); + } +} + +void leaky_relu_forward(at::Tensor z, float slope) { + at::leaky_relu_(z, slope); +} + +void leaky_relu_backward(at::Tensor z, at::Tensor dz, float slope) { + if (z.is_cuda()) { + if (z.type().scalarType() == at::ScalarType::Half) { + return leaky_relu_backward_cuda_h(z, dz, slope); + } else { + return leaky_relu_backward_cuda(z, dz, slope); + } + } else { + return leaky_relu_backward_cpu(z, dz, slope); + } +} + +void elu_forward(at::Tensor z) { + at::elu_(z); +} + +void elu_backward(at::Tensor z, at::Tensor dz) { + if (z.is_cuda()) { + return elu_backward_cuda(z, dz); + } else { + return elu_backward_cpu(z, dz); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("mean_var", &mean_var, "Mean and variance computation"); + m.def("forward", &forward, "In-place forward computation"); + m.def("edz_eydz", &edz_eydz, "First part of backward computation"); + m.def("backward", &backward, "Second part of backward computation"); + m.def("leaky_relu_forward", &leaky_relu_forward, "Leaky relu forward computation"); + m.def("leaky_relu_backward", &leaky_relu_backward, "Leaky relu backward computation and inversion"); + m.def("elu_forward", &elu_forward, "Elu forward computation"); + m.def("elu_backward", &elu_backward, "Elu backward computation and inversion"); +} diff --git a/modules/src/inplace_abn.h b/modules/src/inplace_abn.h new file mode 100644 index 0000000..17afd11 --- /dev/null +++ b/modules/src/inplace_abn.h @@ -0,0 +1,88 @@ +#pragma once + +#include + +#include + +std::vector mean_var_cpu(at::Tensor x); +std::vector mean_var_cuda(at::Tensor x); +std::vector mean_var_cuda_h(at::Tensor x); + +at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, 
at::Tensor bias, + bool affine, float eps); +at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps); + +std::vector edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +std::vector edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); +std::vector edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps); + +at::Tensor backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); +at::Tensor backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); +at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps); + +void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope); +void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope); +void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope); + +void elu_backward_cpu(at::Tensor z, at::Tensor dz); +void elu_backward_cuda(at::Tensor z, at::Tensor dz); + +static void get_dims(at::Tensor x, int64_t& num, int64_t& chn, int64_t& sp) { + num = x.size(0); + chn = x.size(1); + sp = 1; + for (int64_t i = 2; i < x.ndimension(); ++i) + sp *= x.size(i); +} + +/* + * Specialized CUDA reduction functions for BN + */ +#ifdef __CUDACC__ + +#include "utils/cuda.cuh" + +template +__device__ T reduce(Op op, int plane, int N, int S) { + T sum = (T)0; + for (int batch = 0; batch < N; ++batch) { + for (int x = threadIdx.x; x < S; x += blockDim.x) { 
+ sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T)0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} +#endif diff --git a/modules/src/inplace_abn_cpu.cpp b/modules/src/inplace_abn_cpu.cpp new file mode 100644 index 0000000..ffc6d38 --- /dev/null +++ b/modules/src/inplace_abn_cpu.cpp @@ -0,0 +1,119 @@ +#include + +#include + +#include "utils/checks.h" +#include "inplace_abn.h" + +at::Tensor reduce_sum(at::Tensor x) { + if (x.ndimension() == 2) { + return x.sum(0); + } else { + auto x_view = x.view({x.size(0), x.size(1), -1}); + return x_view.sum(-1).sum(0); + } +} + +at::Tensor broadcast_to(at::Tensor v, at::Tensor x) { + if (x.ndimension() == 2) { + return v; + } else { + std::vector broadcast_size = {1, -1}; + for (int64_t i = 2; i < x.ndimension(); ++i) + broadcast_size.push_back(1); + + return v.view(broadcast_size); + } +} + +int64_t count(at::Tensor x) { + int64_t count = x.size(0); + for (int64_t i = 2; i < x.ndimension(); ++i) + count *= x.size(i); + + return count; +} + +at::Tensor invert_affine(at::Tensor z, at::Tensor weight, at::Tensor bias, bool affine, float eps) { + if (affine) { + return (z - broadcast_to(bias, z)) / broadcast_to(at::abs(weight) + eps, z); + } else { + return z; + } +} + +std::vector mean_var_cpu(at::Tensor x) { + auto num = count(x); + auto mean = reduce_sum(x) / num; + auto diff = x - broadcast_to(mean, x); + auto var = reduce_sum(diff.pow(2)) / num; 
+ + return {mean, var}; +} + +at::Tensor forward_cpu(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + auto gamma = affine ? at::abs(weight) + eps : at::ones_like(var); + auto mul = at::rsqrt(var + eps) * gamma; + + x.sub_(broadcast_to(mean, x)); + x.mul_(broadcast_to(mul, x)); + if (affine) x.add_(broadcast_to(bias, x)); + + return x; +} + +std::vector edz_eydz_cpu(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + auto edz = reduce_sum(dz); + auto y = invert_affine(z, weight, bias, affine, eps); + auto eydz = reduce_sum(y * dz); + + return {edz, eydz}; +} + +at::Tensor backward_cpu(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + auto y = invert_affine(z, weight, bias, affine, eps); + auto mul = affine ? at::rsqrt(var + eps) * (at::abs(weight) + eps) : at::rsqrt(var + eps); + + auto num = count(z); + auto dx = (dz - broadcast_to(edz / num, dz) - y * broadcast_to(eydz / num, dz)) * broadcast_to(mul, dz); + return dx; +} + +void leaky_relu_backward_cpu(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CPU_INPUT(z); + CHECK_CPU_INPUT(dz); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cpu", ([&] { + int64_t count = z.numel(); + auto *_z = z.data(); + auto *_dz = dz.data(); + + for (int64_t i = 0; i < count; ++i) { + if (_z[i] < 0) { + _z[i] *= 1 / slope; + _dz[i] *= slope; + } + } + })); +} + +void elu_backward_cpu(at::Tensor z, at::Tensor dz) { + CHECK_CPU_INPUT(z); + CHECK_CPU_INPUT(dz); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "elu_backward_cpu", ([&] { + int64_t count = z.numel(); + auto *_z = z.data(); + auto *_dz = dz.data(); + + for (int64_t i = 0; i < count; ++i) { + if (_z[i] < 0) { + _z[i] = log1p(_z[i]); + _dz[i] *= (_z[i] + 1.f); + } + } + })); +} diff --git a/modules/src/inplace_abn_cuda.cu b/modules/src/inplace_abn_cuda.cu new file mode 
100644 index 0000000..b157b06 --- /dev/null +++ b/modules/src/inplace_abn_cuda.cu @@ -0,0 +1,333 @@ +#include + +#include +#include + +#include + +#include "utils/checks.h" +#include "utils/cuda.cuh" +#include "inplace_abn.h" + +#include + +// Operations for reduce +template +struct SumOp { + __device__ SumOp(const T *t, int c, int s) + : tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ T operator()(int batch, int plane, int n) { + return tensor[(batch * chn + plane) * sp + n]; + } + const T *tensor; + const int chn; + const int sp; +}; + +template +struct VarOp { + __device__ VarOp(T m, const T *t, int c, int s) + : mean(m), tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ T operator()(int batch, int plane, int n) { + T val = tensor[(batch * chn + plane) * sp + n]; + return (val - mean) * (val - mean); + } + const T mean; + const T *tensor; + const int chn; + const int sp; +}; + +template +struct GradOp { + __device__ GradOp(T _weight, T _bias, const T *_z, const T *_dz, int c, int s) + : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} + __device__ __forceinline__ Pair operator()(int batch, int plane, int n) { + T _y = (z[(batch * chn + plane) * sp + n] - bias) / weight; + T _dz = dz[(batch * chn + plane) * sp + n]; + return Pair(_dz, _y * _dz); + } + const T weight; + const T bias; + const T *z; + const T *dz; + const int chn; + const int sp; +}; + +/*********** + * mean_var + ***********/ + +template +__global__ void mean_var_kernel(const T *x, T *mean, T *var, int num, int chn, int sp) { + int plane = blockIdx.x; + T norm = T(1) / T(num * sp); + + T _mean = reduce>(SumOp(x, chn, sp), plane, num, sp) * norm; + __syncthreads(); + T _var = reduce>(VarOp(_mean, x, chn, sp), plane, num, sp) * norm; + + if (threadIdx.x == 0) { + mean[plane] = _mean; + var[plane] = _var; + } +} + +std::vector mean_var_cuda(at::Tensor x) { + CHECK_CUDA_INPUT(x); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Prepare 
output tensors + auto mean = at::empty({chn}, x.options()); + auto var = at::empty({chn}, x.options()); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(x.type(), "mean_var_cuda", ([&] { + mean_var_kernel<<>>( + x.data(), + mean.data(), + var.data(), + num, chn, sp); + })); + + return {mean, var}; +} + +/********** + * forward + **********/ + +template +__global__ void forward_kernel(T *x, const T *mean, const T *var, const T *weight, const T *bias, + bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _mean = mean[plane]; + T _var = var[plane]; + T _weight = affine ? abs(weight[plane]) + eps : T(1); + T _bias = affine ? bias[plane] : T(0); + + T mul = rsqrt(_var + eps) * _weight; + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + T _x = x[(batch * chn + plane) * sp + n]; + T _y = (_x - _mean) * mul + _bias; + + x[(batch * chn + plane) * sp + n] = _y; + } + } +} + +at::Tensor forward_cuda(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(x); + CHECK_CUDA_INPUT(mean); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(x.type(), "forward_cuda", ([&] { + forward_kernel<<>>( + x.data(), + mean.data(), + var.data(), + weight.data(), + bias.data(), + affine, eps, num, chn, sp); + })); + + return x; +} + +/*********** + * edz_eydz + ***********/ + +template +__global__ void edz_eydz_kernel(const T *z, const T *dz, const T *weight, const T *bias, + T *edz, T *eydz, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _weight 
= affine ? abs(weight[plane]) + eps : 1.f; + T _bias = affine ? bias[plane] : 0.f; + + Pair res = reduce, GradOp>(GradOp(_weight, _bias, z, dz, chn, sp), plane, num, sp); + __syncthreads(); + + if (threadIdx.x == 0) { + edz[plane] = res.v1; + eydz[plane] = res.v2; + } +} + +std::vector edz_eydz_cuda(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto edz = at::empty({chn}, z.options()); + auto eydz = at::empty({chn}, z.options()); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(z.type(), "edz_eydz_cuda", ([&] { + edz_eydz_kernel<<>>( + z.data(), + dz.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + affine, eps, num, chn, sp); + })); + + return {edz, eydz}; +} + +/*********** + * backward + ***********/ + +template +__global__ void backward_kernel(const T *z, const T *dz, const T *var, const T *weight, const T *bias, const T *edz, + const T *eydz, T *dx, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + T _weight = affine ? abs(weight[plane]) + eps : 1.f; + T _bias = affine ? 
bias[plane] : 0.f; + T _var = var[plane]; + T _edz = edz[plane]; + T _eydz = eydz[plane]; + + T _mul = _weight * rsqrt(_var + eps); + T count = T(num * sp); + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + T _dz = dz[(batch * chn + plane) * sp + n]; + T _y = (z[(batch * chn + plane) * sp + n] - _bias) / _weight; + + dx[(batch * chn + plane) * sp + n] = (_dz - _edz / count - _y * _eydz / count) * _mul; + } + } +} + +at::Tensor backward_cuda(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + CHECK_CUDA_INPUT(edz); + CHECK_CUDA_INPUT(eydz); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto dx = at::zeros_like(z); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + AT_DISPATCH_FLOATING_TYPES(z.type(), "backward_cuda", ([&] { + backward_kernel<<>>( + z.data(), + dz.data(), + var.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + dx.data(), + affine, eps, num, chn, sp); + })); + + return dx; +} + +/************** + * activations + **************/ + +template +inline void leaky_relu_backward_impl(T *z, T *dz, float slope, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + auto stream = at::cuda::getCurrentCUDAStream(); + thrust::transform_if(thrust::cuda::par.on(stream), + th_dz, th_dz + count, th_z, th_dz, + [slope] __device__ (const T& dz) { return dz * slope; }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(thrust::cuda::par.on(stream), + th_z, th_z + count, th_z, + [slope] __device__ (const T& z) { return z / slope; }, + [] 
__device__ (const T& z) { return z < 0; }); +} + +void leaky_relu_backward_cuda(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { + leaky_relu_backward_impl(z.data(), dz.data(), slope, count); + })); +} + +template +inline void elu_backward_impl(T *z, T *dz, int64_t count) { + // Create thrust pointers + thrust::device_ptr th_z = thrust::device_pointer_cast(z); + thrust::device_ptr th_dz = thrust::device_pointer_cast(dz); + + auto stream = at::cuda::getCurrentCUDAStream(); + thrust::transform_if(thrust::cuda::par.on(stream), + th_dz, th_dz + count, th_z, th_z, th_dz, + [] __device__ (const T& dz, const T& z) { return dz * (z + 1.); }, + [] __device__ (const T& z) { return z < 0; }); + thrust::transform_if(thrust::cuda::par.on(stream), + th_z, th_z + count, th_z, + [] __device__ (const T& z) { return log1p(z); }, + [] __device__ (const T& z) { return z < 0; }); +} + +void elu_backward_cuda(at::Tensor z, at::Tensor dz) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + + AT_DISPATCH_FLOATING_TYPES(z.type(), "leaky_relu_backward_cuda", ([&] { + elu_backward_impl(z.data(), dz.data(), count); + })); +} diff --git a/modules/src/inplace_abn_cuda_half.cu b/modules/src/inplace_abn_cuda_half.cu new file mode 100644 index 0000000..bb63e73 --- /dev/null +++ b/modules/src/inplace_abn_cuda_half.cu @@ -0,0 +1,275 @@ +#include + +#include + +#include + +#include "utils/checks.h" +#include "utils/cuda.cuh" +#include "inplace_abn.h" + +#include + +// Operations for reduce +struct SumOpH { + __device__ SumOpH(const half *t, int c, int s) + : tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ float operator()(int batch, int plane, int n) { + return __half2float(tensor[(batch * chn + plane) * sp + n]); + } + const half *tensor; + const int chn; + const int sp; +}; + +struct VarOpH { + __device__ 
VarOpH(float m, const half *t, int c, int s) + : mean(m), tensor(t), chn(c), sp(s) {} + __device__ __forceinline__ float operator()(int batch, int plane, int n) { + const auto t = __half2float(tensor[(batch * chn + plane) * sp + n]); + return (t - mean) * (t - mean); + } + const float mean; + const half *tensor; + const int chn; + const int sp; +}; + +struct GradOpH { + __device__ GradOpH(float _weight, float _bias, const half *_z, const half *_dz, int c, int s) + : weight(_weight), bias(_bias), z(_z), dz(_dz), chn(c), sp(s) {} + __device__ __forceinline__ Pair operator()(int batch, int plane, int n) { + float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - bias) / weight; + float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); + return Pair(_dz, _y * _dz); + } + const float weight; + const float bias; + const half *z; + const half *dz; + const int chn; + const int sp; +}; + +/*********** + * mean_var + ***********/ + +__global__ void mean_var_kernel_h(const half *x, float *mean, float *var, int num, int chn, int sp) { + int plane = blockIdx.x; + float norm = 1.f / static_cast(num * sp); + + float _mean = reduce(SumOpH(x, chn, sp), plane, num, sp) * norm; + __syncthreads(); + float _var = reduce(VarOpH(_mean, x, chn, sp), plane, num, sp) * norm; + + if (threadIdx.x == 0) { + mean[plane] = _mean; + var[plane] = _var; + } +} + +std::vector mean_var_cuda_h(at::Tensor x) { + CHECK_CUDA_INPUT(x); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Prepare output tensors + auto mean = at::empty({chn},x.options().dtype(at::kFloat)); + auto var = at::empty({chn},x.options().dtype(at::kFloat)); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + mean_var_kernel_h<<>>( + reinterpret_cast(x.data()), + mean.data(), + var.data(), + num, chn, sp); + + return {mean, var}; +} + +/********** + * forward + **********/ + +__global__ void forward_kernel_h(half *x, 
const float *mean, const float *var, const float *weight, const float *bias, + bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + const float _mean = mean[plane]; + const float _var = var[plane]; + const float _weight = affine ? abs(weight[plane]) + eps : 1.f; + const float _bias = affine ? bias[plane] : 0.f; + + const float mul = rsqrt(_var + eps) * _weight; + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + half *x_ptr = x + (batch * chn + plane) * sp + n; + float _x = __half2float(*x_ptr); + float _y = (_x - _mean) * mul + _bias; + + *x_ptr = __float2half(_y); + } + } +} + +at::Tensor forward_cuda_h(at::Tensor x, at::Tensor mean, at::Tensor var, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(x); + CHECK_CUDA_INPUT(mean); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(x, num, chn, sp); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + forward_kernel_h<<>>( + reinterpret_cast(x.data()), + mean.data(), + var.data(), + weight.data(), + bias.data(), + affine, eps, num, chn, sp); + + return x; +} + +__global__ void edz_eydz_kernel_h(const half *z, const half *dz, const float *weight, const float *bias, + float *edz, float *eydz, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + float _weight = affine ? abs(weight[plane]) + eps : 1.f; + float _bias = affine ? 
bias[plane] : 0.f; + + Pair res = reduce, GradOpH>(GradOpH(_weight, _bias, z, dz, chn, sp), plane, num, sp); + __syncthreads(); + + if (threadIdx.x == 0) { + edz[plane] = res.v1; + eydz[plane] = res.v2; + } +} + +std::vector edz_eydz_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor weight, at::Tensor bias, + bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto edz = at::empty({chn},z.options().dtype(at::kFloat)); + auto eydz = at::empty({chn},z.options().dtype(at::kFloat)); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + edz_eydz_kernel_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + affine, eps, num, chn, sp); + + return {edz, eydz}; +} + +__global__ void backward_kernel_h(const half *z, const half *dz, const float *var, const float *weight, const float *bias, const float *edz, + const float *eydz, half *dx, bool affine, float eps, int num, int chn, int sp) { + int plane = blockIdx.x; + + float _weight = affine ? abs(weight[plane]) + eps : 1.f; + float _bias = affine ? 
bias[plane] : 0.f; + float _var = var[plane]; + float _edz = edz[plane]; + float _eydz = eydz[plane]; + + float _mul = _weight * rsqrt(_var + eps); + float count = float(num * sp); + + for (int batch = 0; batch < num; ++batch) { + for (int n = threadIdx.x; n < sp; n += blockDim.x) { + float _dz = __half2float(dz[(batch * chn + plane) * sp + n]); + float _y = (__half2float(z[(batch * chn + plane) * sp + n]) - _bias) / _weight; + + dx[(batch * chn + plane) * sp + n] = __float2half((_dz - _edz / count - _y * _eydz / count) * _mul); + } + } +} + +at::Tensor backward_cuda_h(at::Tensor z, at::Tensor dz, at::Tensor var, at::Tensor weight, at::Tensor bias, + at::Tensor edz, at::Tensor eydz, bool affine, float eps) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + CHECK_CUDA_INPUT(var); + CHECK_CUDA_INPUT(weight); + CHECK_CUDA_INPUT(bias); + CHECK_CUDA_INPUT(edz); + CHECK_CUDA_INPUT(eydz); + + // Extract dimensions + int64_t num, chn, sp; + get_dims(z, num, chn, sp); + + auto dx = at::zeros_like(z); + + // Run kernel + dim3 blocks(chn); + dim3 threads(getNumThreads(sp)); + auto stream = at::cuda::getCurrentCUDAStream(); + backward_kernel_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + var.data(), + weight.data(), + bias.data(), + edz.data(), + eydz.data(), + reinterpret_cast(dx.data()), + affine, eps, num, chn, sp); + + return dx; +} + +__global__ void leaky_relu_backward_impl_h(half *z, half *dz, float slope, int64_t count) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x){ + float _z = __half2float(z[i]); + if (_z < 0) { + dz[i] = __float2half(__half2float(dz[i]) * slope); + z[i] = __float2half(_z / slope); + } + } +} + +void leaky_relu_backward_cuda_h(at::Tensor z, at::Tensor dz, float slope) { + CHECK_CUDA_INPUT(z); + CHECK_CUDA_INPUT(dz); + + int64_t count = z.numel(); + dim3 threads(getNumThreads(count)); + dim3 blocks = (count + threads.x - 1) / threads.x; + auto stream = 
at::cuda::getCurrentCUDAStream(); + leaky_relu_backward_impl_h<<>>( + reinterpret_cast(z.data()), + reinterpret_cast(dz.data()), + slope, count); +} + diff --git a/modules/src/utils/checks.h b/modules/src/utils/checks.h new file mode 100644 index 0000000..e761a6f --- /dev/null +++ b/modules/src/utils/checks.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +// Define AT_CHECK for old version of ATen where the same function was called AT_ASSERT +#ifndef AT_CHECK +#define AT_CHECK AT_ASSERT +#endif + +#define CHECK_CUDA(x) AT_CHECK((x).type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CPU(x) AT_CHECK(!(x).type().is_cuda(), #x " must be a CPU tensor") +#define CHECK_CONTIGUOUS(x) AT_CHECK((x).is_contiguous(), #x " must be contiguous") + +#define CHECK_CUDA_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +#define CHECK_CPU_INPUT(x) CHECK_CPU(x); CHECK_CONTIGUOUS(x) \ No newline at end of file diff --git a/modules/src/utils/common.h b/modules/src/utils/common.h new file mode 100644 index 0000000..e8403ee --- /dev/null +++ b/modules/src/utils/common.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +/* + * Functions to share code between CPU and GPU + */ + +#ifdef __CUDACC__ +// CUDA versions + +#define HOST_DEVICE __host__ __device__ +#define INLINE_HOST_DEVICE __host__ __device__ inline +#define FLOOR(x) floor(x) + +#if __CUDA_ARCH__ >= 600 +// Recent compute capabilities have block-level atomicAdd for all data types, so we use that +#define ACCUM(x,y) atomicAdd_block(&(x),(y)) +#else +// Older architectures don't have block-level atomicAdd, nor atomicAdd for doubles, so we defer to atomicAdd for float +// and use the known atomicCAS-based implementation for double +template +__device__ inline data_t atomic_add(data_t *address, data_t val) { + return atomicAdd(address, val); +} + +template<> +__device__ inline double atomic_add(double *address, double val) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = 
*address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); +} + +#define ACCUM(x,y) atomic_add(&(x),(y)) +#endif // #if __CUDA_ARCH__ >= 600 + +#else +// CPU versions + +#define HOST_DEVICE +#define INLINE_HOST_DEVICE inline +#define FLOOR(x) std::floor(x) +#define ACCUM(x,y) (x) += (y) + +#endif // #ifdef __CUDACC__ \ No newline at end of file diff --git a/modules/src/utils/cuda.cuh b/modules/src/utils/cuda.cuh new file mode 100644 index 0000000..60c0023 --- /dev/null +++ b/modules/src/utils/cuda.cuh @@ -0,0 +1,71 @@ +#pragma once + +/* + * General settings and functions + */ +const int WARP_SIZE = 32; +const int MAX_BLOCK_SIZE = 1024; + +static int getNumThreads(int nElem) { + int threadSizes[6] = {32, 64, 128, 256, 512, MAX_BLOCK_SIZE}; + for (int i = 0; i < 6; ++i) { + if (nElem <= threadSizes[i]) { + return threadSizes[i]; + } + } + return MAX_BLOCK_SIZE; +} + +/* + * Reduction utilities + */ +template +__device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, + unsigned int mask = 0xffffffff) { +#if CUDART_VERSION >= 9000 + return __shfl_xor_sync(mask, value, laneMask, width); +#else + return __shfl_xor(value, laneMask, width); +#endif +} + +__device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } + +template +struct Pair { + T v1, v2; + __device__ Pair() {} + __device__ Pair(T _v1, T _v2) : v1(_v1), v2(_v2) {} + __device__ Pair(T v) : v1(v), v2(v) {} + __device__ Pair(int v) : v1(v), v2(v) {} + __device__ Pair &operator+=(const Pair &a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 + for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); + } +#else + __shared__ T values[MAX_BLOCK_SIZE]; + values[threadIdx.x] = val; + 
__threadfence_block(); + const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; + for (int i = 1; i < WARP_SIZE; i++) { + val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; + } +#endif + return val; +} + +template +static __device__ __forceinline__ Pair warpSum(Pair value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} \ No newline at end of file diff --git a/readme.txt b/readme.txt new file mode 100644 index 0000000..c87dbfe --- /dev/null +++ b/readme.txt @@ -0,0 +1,14 @@ +main + òģ͡ͼԡ +AI_process + '''ԭͼĿˮָ''' + '''룺ģָ͡ģ͡ò· + أĿԭͼ񡢷ָͼ + ''' +AI_postprocess + '''ԤͷָǷΪˮԱһ''' + '''룺ˮԱĽ+꣩ԭͼmaskͼ + ̣maskжԱǷڡ + ڣһƣڣ + أջƵĽͼˮԱꡢŶȣ + ''' diff --git a/test_fortest_batch.py b/test_fortest_batch.py new file mode 100644 index 0000000..74e866e --- /dev/null +++ b/test_fortest_batch.py @@ -0,0 +1,314 @@ +import os +import urllib +import traceback +import time +import sys +import numpy as np +import cv2 +from rknn.api import RKNN + +ONNX_MODEL = 'drp.onnx' +RKNN_MODEL = 'drp.rknn' +IMG_PATH = './images' +DATASET = './dataset.txt' + +QUANTIZE_ON = True + +OBJ_THRESH = 0.25 +NMS_THRESH = 0.45 +IMG_SIZE = 640 + +CLASSES = ("Crack","Cossorion","Exposedrebar","Spall","Efflorescence") + + +def sigmoid(x): + return 1 / (1 + np.exp(-x)) + + +def xywh2xyxy(x): + # Convert [x, y, w, h] to [x1, y1, x2, y2] + y = np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def process(input, mask, anchors): + + anchors = [anchors[i] for i in mask] + grid_h, grid_w = map(int, input.shape[0:2]) + + box_confidence = sigmoid(input[..., 4]) + box_confidence = np.expand_dims(box_confidence, axis=-1) + + box_class_probs = sigmoid(input[..., 5:]) + + box_xy = sigmoid(input[..., :2])*2 - 0.5 + + col = np.tile(np.arange(0, grid_w), grid_w).reshape(-1, grid_w) + row = np.tile(np.arange(0, 
grid_h).reshape(-1, 1), grid_h) + col = col.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2) + row = row.reshape(grid_h, grid_w, 1, 1).repeat(3, axis=-2) + grid = np.concatenate((col, row), axis=-1) + box_xy += grid + box_xy *= int(IMG_SIZE/grid_h) + + box_wh = pow(sigmoid(input[..., 2:4])*2, 2) + box_wh = box_wh * anchors + + box = np.concatenate((box_xy, box_wh), axis=-1) + + return box, box_confidence, box_class_probs + + +def filter_boxes(boxes, box_confidences, box_class_probs): + """Filter boxes with box threshold. It's a bit different with origin yolov5 post process! + + # Arguments + boxes: ndarray, boxes of objects. + box_confidences: ndarray, confidences of objects. + box_class_probs: ndarray, class_probs of objects. + + # Returns + boxes: ndarray, filtered boxes. + classes: ndarray, classes for boxes. + scores: ndarray, scores for boxes. + """ + boxes = boxes.reshape(-1, 4) + box_confidences = box_confidences.reshape(-1) + box_class_probs = box_class_probs.reshape(-1, box_class_probs.shape[-1]) + + _box_pos = np.where(box_confidences >= OBJ_THRESH) + boxes = boxes[_box_pos] + box_confidences = box_confidences[_box_pos] + box_class_probs = box_class_probs[_box_pos] + + class_max_score = np.max(box_class_probs, axis=-1) + classes = np.argmax(box_class_probs, axis=-1) + _class_pos = np.where(class_max_score >= OBJ_THRESH) + + boxes = boxes[_class_pos] + classes = classes[_class_pos] + scores = (class_max_score* box_confidences)[_class_pos] + + return boxes, classes, scores + + +def nms_boxes(boxes, scores): + """Suppress non-maximal boxes. + + # Arguments + boxes: ndarray, boxes of objects. + scores: ndarray, scores of objects. + + # Returns + keep: ndarray, index of effective boxes. 
+ """ + x = boxes[:, 0] + y = boxes[:, 1] + w = boxes[:, 2] - boxes[:, 0] + h = boxes[:, 3] - boxes[:, 1] + + areas = w * h + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + + xx1 = np.maximum(x[i], x[order[1:]]) + yy1 = np.maximum(y[i], y[order[1:]]) + xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]]) + yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]]) + + w1 = np.maximum(0.0, xx2 - xx1 + 0.00001) + h1 = np.maximum(0.0, yy2 - yy1 + 0.00001) + inter = w1 * h1 + + ovr = inter / (areas[i] + areas[order[1:]] - inter) + inds = np.where(ovr <= NMS_THRESH)[0] + order = order[inds + 1] + keep = np.array(keep) + return keep + + +def yolov5_post_process(input_data): + masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], + [59, 119], [116, 90], [156, 198], [373, 326]] + + boxes, classes, scores = [], [], [] + for input, mask in zip(input_data, masks): + b, c, s = process(input, mask, anchors) + b, c, s = filter_boxes(b, c, s) + boxes.append(b) + classes.append(c) + scores.append(s) + + boxes = np.concatenate(boxes) + boxes = xywh2xyxy(boxes) + classes = np.concatenate(classes) + scores = np.concatenate(scores) + + nboxes, nclasses, nscores = [], [], [] + for c in set(classes): + inds = np.where(classes == c) + b = boxes[inds] + c = classes[inds] + s = scores[inds] + + keep = nms_boxes(b, s) + + nboxes.append(b[keep]) + nclasses.append(c[keep]) + nscores.append(s[keep]) + + if not nclasses and not nscores: + return None, None, None + + boxes = np.concatenate(nboxes) + classes = np.concatenate(nclasses) + scores = np.concatenate(nscores) + + return boxes, classes, scores + + +def draw(image, boxes, scores, classes): + """Draw the boxes on the image. + + # Argument: + image: original image. + boxes: ndarray, boxes of objects. + classes: ndarray, classes of objects. + scores: ndarray, scores of objects. + all_classes: all classes name. 
+ """ + for box, score, cl in zip(boxes, scores, classes): + top, left, right, bottom = box + print('class: {}, score: {}'.format(CLASSES[cl], score)) + print('box coordinate left,top,right,down: [{}, {}, {}, {}]'.format(top, left, right, bottom)) + top = int(top) + left = int(left) + right = int(right) + bottom = int(bottom) + + cv2.rectangle(image, (top, left), (right, bottom), (255, 0, 0), 2) + cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score), + (top, left - 6), + cv2.FONT_HERSHEY_SIMPLEX, + 0.6, (0, 0, 255), 2) + + +def letterbox(im, new_shape=(640, 640), color=(0, 0, 0)): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +if __name__ == '__main__': + + # Create RKNN object + rknn = RKNN(verbose=True) + + # pre-process config + print('--> Config model') + rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]]) + print('done') + + # Load ONNX model + print('--> Loading model') + ret = rknn.load_onnx(model=ONNX_MODEL) + if ret != 0: + print('Load model failed!') + exit(ret) + print('done') + + # Build model + print('--> Building model') + ret = rknn.build(do_quantization=False, dataset=DATASET) + if ret != 
0: + print('Build model failed!') + exit(ret) + print('done') + + # Export RKNN model + print('--> Export rknn model') + ret = rknn.export_rknn(RKNN_MODEL) + if ret != 0: + print('Export rknn model failed!') + exit(ret) + print('done') + + # Init runtime environment + print('--> Init runtime environment') + ret = rknn.init_runtime() + # ret = rknn.init_runtime('rk3566') + if ret != 0: + print('Init runtime environment failed!') + exit(ret) + print('done') + + + folders = os.listdir(IMG_PATH) + for i in range(len(folders)): + path1 = IMG_PATH+ '/' + folders[i] + + + # Set inputs + img = cv2.imread(path1) + # img, ratio, (dw, dh) = letterbox(img, new_shape=(IMG_SIZE, IMG_SIZE)) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (IMG_SIZE, IMG_SIZE)) + + # Inference + print('--> Running model') + outputs = rknn.inference(inputs=[img]) + # np.save('./onnx_yolov5_0.npy', outputs[0]) + # np.save('./onnx_yolov5_1.npy', outputs[1]) + # np.save('./onnx_yolov5_2.npy', outputs[2]) + # print('done') + + # post process + input0_data = outputs[0] + input1_data = outputs[1] + input2_data = outputs[2] + + input0_data = input0_data.reshape([3, -1]+list(input0_data.shape[-2:])) + input1_data = input1_data.reshape([3, -1]+list(input1_data.shape[-2:])) + input2_data = input2_data.reshape([3, -1]+list(input2_data.shape[-2:])) + + input_data = list() + input_data.append(np.transpose(input0_data, (2, 3, 0, 1))) + input_data.append(np.transpose(input1_data, (2, 3, 0, 1))) + input_data.append(np.transpose(input2_data, (2, 3, 0, 1))) + + boxes, classes, scores = yolov5_post_process(input_data) + + img_1 = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + if boxes is not None: + draw(img_1, boxes, scores, classes) + cv2.imwrite('result'+str(i)+'.jpg', img_1) + + rknn.release() diff --git a/test_result_1.png b/test_result_1.png new file mode 100644 index 0000000..831b096 Binary files /dev/null and b/test_result_1.png differ diff --git a/utils/BaseDetector.py b/utils/BaseDetector.py new 
class baseDet(object):
    """Minimal base class for detector wrappers.

    Holds shared configuration (input size, score threshold, stride) plus
    per-stream tracking state; concrete detectors override init_model,
    preprocess and detect.
    """

    def __init__(self):
        # Shared defaults for every concrete detector.
        self.img_size = 640
        self.threshold = 0.3
        self.stride = 1
        # self.dete_weights=''

    def build_config(self):
        """Reset all per-stream tracking/bookkeeping state."""
        self.faceTracker = {}
        self.faceClasses = {}
        self.faceLocation1 = {}
        self.faceLocation2 = {}
        self.frameCounter = 0
        self.currentCarID = 0
        self.recorded = []
        self.font = cv2.FONT_HERSHEY_SIMPLEX

    def feedCap(self, im):
        """Advance the frame counter and wrap `im` in the standard result dict.

        Tracking is currently disabled (update_tracker commented out), so the
        frame passes through unchanged and face fields stay empty.
        """
        self.frameCounter += 1

        # im, faces, face_bboxes = update_tracker(self, im)

        retDict = {
            'frame': im,
            'faces': None,
            'list_of_ids': None,
            'face_bboxes': [],
        }
        # retDict['faces'] = faces
        # retDict['face_bboxes'] = face_bboxes
        return retDict

    # NOTE(review): EOFError is an odd choice for "not implemented";
    # NotImplementedError would be conventional, but the exception type is
    # part of the interface, so it is preserved here.
    def init_model(self):
        raise EOFError("Undefined model type.")

    def preprocess(self):
        raise EOFError("Undefined model type.")

    def detect(self):
        raise EOFError("Undefined model type.")
class Multi_SSIM_loss(Module):
    """Sum of SSIM losses evaluated at several Gaussian window sizes.

    Each window size gets its own SSIMLoss instance; the forward pass sums
    the per-scale losses into a single scalar.
    """

    def __init__(self, window_sizes=[3, 7, 11], sigma=1.5, as_loss=True):
        super(Multi_SSIM_loss, self).__init__()
        # Register the per-scale losses so buffers move with the module.
        self.losses = nn.ModuleList(
            [SSIMLoss(size, sigma, as_loss) for size in window_sizes]
        )

    def forward(self, img1, img2):
        # Evaluate every scale, then sum.
        per_scale = [loss_fn(img1, img2) for loss_fn in self.losses]
        total_loss = torch.stack(per_scale, 0).sum()

        return total_loss
a/utils/__pycache__/BaseDetector.cpython-38.pyc b/utils/__pycache__/BaseDetector.cpython-38.pyc new file mode 100644 index 0000000..29aadc5 Binary files /dev/null and b/utils/__pycache__/BaseDetector.cpython-38.pyc differ diff --git a/utils/__pycache__/SSIM_loss.cpython-37.pyc b/utils/__pycache__/SSIM_loss.cpython-37.pyc new file mode 100644 index 0000000..5b84d78 Binary files /dev/null and b/utils/__pycache__/SSIM_loss.cpython-37.pyc differ diff --git a/utils/__pycache__/SSIM_loss.cpython-38.pyc b/utils/__pycache__/SSIM_loss.cpython-38.pyc new file mode 100644 index 0000000..595b795 Binary files /dev/null and b/utils/__pycache__/SSIM_loss.cpython-38.pyc differ diff --git a/utils/__pycache__/__init__.cpython-37.pyc b/utils/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..1d4e11a Binary files /dev/null and b/utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..cc4e743 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/__pycache__/autoanchor.cpython-37.pyc b/utils/__pycache__/autoanchor.cpython-37.pyc new file mode 100644 index 0000000..54e9086 Binary files /dev/null and b/utils/__pycache__/autoanchor.cpython-37.pyc differ diff --git a/utils/__pycache__/autoanchor.cpython-38.pyc b/utils/__pycache__/autoanchor.cpython-38.pyc new file mode 100644 index 0000000..b506982 Binary files /dev/null and b/utils/__pycache__/autoanchor.cpython-38.pyc differ diff --git a/utils/__pycache__/bce_loss.cpython-37.pyc b/utils/__pycache__/bce_loss.cpython-37.pyc new file mode 100644 index 0000000..b734d20 Binary files /dev/null and b/utils/__pycache__/bce_loss.cpython-37.pyc differ diff --git a/utils/__pycache__/bce_loss.cpython-38.pyc b/utils/__pycache__/bce_loss.cpython-38.pyc new file mode 100644 index 0000000..756cafa Binary files /dev/null and 
b/utils/__pycache__/bce_loss.cpython-38.pyc differ diff --git a/utils/__pycache__/calculate_weights.cpython-36.pyc b/utils/__pycache__/calculate_weights.cpython-36.pyc new file mode 100644 index 0000000..277accd Binary files /dev/null and b/utils/__pycache__/calculate_weights.cpython-36.pyc differ diff --git a/utils/__pycache__/calculate_weights.cpython-37.pyc b/utils/__pycache__/calculate_weights.cpython-37.pyc new file mode 100644 index 0000000..57cf78f Binary files /dev/null and b/utils/__pycache__/calculate_weights.cpython-37.pyc differ diff --git a/utils/__pycache__/calculate_weights.cpython-38.pyc b/utils/__pycache__/calculate_weights.cpython-38.pyc new file mode 100644 index 0000000..cda2518 Binary files /dev/null and b/utils/__pycache__/calculate_weights.cpython-38.pyc differ diff --git a/utils/__pycache__/custom_transforms.cpython-37.pyc b/utils/__pycache__/custom_transforms.cpython-37.pyc new file mode 100644 index 0000000..2f7c9c0 Binary files /dev/null and b/utils/__pycache__/custom_transforms.cpython-37.pyc differ diff --git a/utils/__pycache__/custom_transforms.cpython-38.pyc b/utils/__pycache__/custom_transforms.cpython-38.pyc new file mode 100644 index 0000000..53a968c Binary files /dev/null and b/utils/__pycache__/custom_transforms.cpython-38.pyc differ diff --git a/utils/__pycache__/datasets.cpython-37.pyc b/utils/__pycache__/datasets.cpython-37.pyc new file mode 100644 index 0000000..afcd5a1 Binary files /dev/null and b/utils/__pycache__/datasets.cpython-37.pyc differ diff --git a/utils/__pycache__/datasets.cpython-38.pyc b/utils/__pycache__/datasets.cpython-38.pyc new file mode 100644 index 0000000..63debce Binary files /dev/null and b/utils/__pycache__/datasets.cpython-38.pyc differ diff --git a/utils/__pycache__/general.cpython-37.pyc b/utils/__pycache__/general.cpython-37.pyc new file mode 100644 index 0000000..db0c21d Binary files /dev/null and b/utils/__pycache__/general.cpython-37.pyc differ diff --git 
a/utils/__pycache__/general.cpython-38.pyc b/utils/__pycache__/general.cpython-38.pyc new file mode 100644 index 0000000..2a6944b Binary files /dev/null and b/utils/__pycache__/general.cpython-38.pyc differ diff --git a/utils/__pycache__/google_utils.cpython-37.pyc b/utils/__pycache__/google_utils.cpython-37.pyc new file mode 100644 index 0000000..d30eebb Binary files /dev/null and b/utils/__pycache__/google_utils.cpython-37.pyc differ diff --git a/utils/__pycache__/google_utils.cpython-38.pyc b/utils/__pycache__/google_utils.cpython-38.pyc new file mode 100644 index 0000000..e6856fd Binary files /dev/null and b/utils/__pycache__/google_utils.cpython-38.pyc differ diff --git a/utils/__pycache__/lr_scheduler.cpython-36.pyc b/utils/__pycache__/lr_scheduler.cpython-36.pyc new file mode 100644 index 0000000..1f52926 Binary files /dev/null and b/utils/__pycache__/lr_scheduler.cpython-36.pyc differ diff --git a/utils/__pycache__/lr_scheduler.cpython-37.pyc b/utils/__pycache__/lr_scheduler.cpython-37.pyc new file mode 100644 index 0000000..f537012 Binary files /dev/null and b/utils/__pycache__/lr_scheduler.cpython-37.pyc differ diff --git a/utils/__pycache__/lr_scheduler.cpython-38.pyc b/utils/__pycache__/lr_scheduler.cpython-38.pyc new file mode 100644 index 0000000..e227ac1 Binary files /dev/null and b/utils/__pycache__/lr_scheduler.cpython-38.pyc differ diff --git a/utils/__pycache__/metrics.cpython-36.pyc b/utils/__pycache__/metrics.cpython-36.pyc new file mode 100644 index 0000000..80dafaf Binary files /dev/null and b/utils/__pycache__/metrics.cpython-36.pyc differ diff --git a/utils/__pycache__/metrics.cpython-37.pyc b/utils/__pycache__/metrics.cpython-37.pyc new file mode 100644 index 0000000..b65018a Binary files /dev/null and b/utils/__pycache__/metrics.cpython-37.pyc differ diff --git a/utils/__pycache__/metrics.cpython-38.pyc b/utils/__pycache__/metrics.cpython-38.pyc new file mode 100644 index 0000000..870bc3f Binary files /dev/null and 
b/utils/__pycache__/metrics.cpython-38.pyc differ diff --git a/utils/__pycache__/mypath.cpython-37.pyc b/utils/__pycache__/mypath.cpython-37.pyc new file mode 100644 index 0000000..5cdabfd Binary files /dev/null and b/utils/__pycache__/mypath.cpython-37.pyc differ diff --git a/utils/__pycache__/plots.cpython-37.pyc b/utils/__pycache__/plots.cpython-37.pyc new file mode 100644 index 0000000..0eb0043 Binary files /dev/null and b/utils/__pycache__/plots.cpython-37.pyc differ diff --git a/utils/__pycache__/plots.cpython-38.pyc b/utils/__pycache__/plots.cpython-38.pyc new file mode 100644 index 0000000..9d06c93 Binary files /dev/null and b/utils/__pycache__/plots.cpython-38.pyc differ diff --git a/utils/__pycache__/postprocess_utils.cpython-37.pyc b/utils/__pycache__/postprocess_utils.cpython-37.pyc new file mode 100644 index 0000000..5582bbf Binary files /dev/null and b/utils/__pycache__/postprocess_utils.cpython-37.pyc differ diff --git a/utils/__pycache__/postprocess_utils.cpython-38.pyc b/utils/__pycache__/postprocess_utils.cpython-38.pyc new file mode 100644 index 0000000..cbba7fa Binary files /dev/null and b/utils/__pycache__/postprocess_utils.cpython-38.pyc differ diff --git a/utils/__pycache__/saver.cpython-36.pyc b/utils/__pycache__/saver.cpython-36.pyc new file mode 100644 index 0000000..a7a5d87 Binary files /dev/null and b/utils/__pycache__/saver.cpython-36.pyc differ diff --git a/utils/__pycache__/saver.cpython-38.pyc b/utils/__pycache__/saver.cpython-38.pyc new file mode 100644 index 0000000..472a77d Binary files /dev/null and b/utils/__pycache__/saver.cpython-38.pyc differ diff --git a/utils/__pycache__/segutils.cpython-37.pyc b/utils/__pycache__/segutils.cpython-37.pyc new file mode 100644 index 0000000..c49709c Binary files /dev/null and b/utils/__pycache__/segutils.cpython-37.pyc differ diff --git a/utils/__pycache__/segutils.cpython-38.pyc b/utils/__pycache__/segutils.cpython-38.pyc new file mode 100644 index 0000000..e5e177f Binary files /dev/null and 
b/utils/__pycache__/segutils.cpython-38.pyc differ diff --git a/utils/__pycache__/summaries.cpython-36.pyc b/utils/__pycache__/summaries.cpython-36.pyc new file mode 100644 index 0000000..b707c01 Binary files /dev/null and b/utils/__pycache__/summaries.cpython-36.pyc differ diff --git a/utils/__pycache__/summaries.cpython-37.pyc b/utils/__pycache__/summaries.cpython-37.pyc new file mode 100644 index 0000000..8a55645 Binary files /dev/null and b/utils/__pycache__/summaries.cpython-37.pyc differ diff --git a/utils/__pycache__/summaries.cpython-38.pyc b/utils/__pycache__/summaries.cpython-38.pyc new file mode 100644 index 0000000..87fa64e Binary files /dev/null and b/utils/__pycache__/summaries.cpython-38.pyc differ diff --git a/utils/__pycache__/torch_utils.cpython-37.pyc b/utils/__pycache__/torch_utils.cpython-37.pyc new file mode 100644 index 0000000..bb25e7b Binary files /dev/null and b/utils/__pycache__/torch_utils.cpython-37.pyc differ diff --git a/utils/__pycache__/torch_utils.cpython-38.pyc b/utils/__pycache__/torch_utils.cpython-38.pyc new file mode 100644 index 0000000..152769a Binary files /dev/null and b/utils/__pycache__/torch_utils.cpython-38.pyc differ diff --git a/utils/__pycache__/wj2_loss.cpython-37.pyc b/utils/__pycache__/wj2_loss.cpython-37.pyc new file mode 100644 index 0000000..0a3b287 Binary files /dev/null and b/utils/__pycache__/wj2_loss.cpython-37.pyc differ diff --git a/utils/__pycache__/wj2_loss.cpython-38.pyc b/utils/__pycache__/wj2_loss.cpython-38.pyc new file mode 100644 index 0000000..7b54bb2 Binary files /dev/null and b/utils/__pycache__/wj2_loss.cpython-38.pyc differ diff --git a/utils/__pycache__/wj_loss.cpython-37.pyc b/utils/__pycache__/wj_loss.cpython-37.pyc new file mode 100644 index 0000000..a6ce73d Binary files /dev/null and b/utils/__pycache__/wj_loss.cpython-37.pyc differ diff --git a/utils/__pycache__/wj_loss.cpython-38.pyc b/utils/__pycache__/wj_loss.cpython-38.pyc new file mode 100644 index 0000000..bcdb66f Binary files 
# SiLU https://arxiv.org/pdf/1606.08415.pdf ----------------------------------------------------------------------------
class SiLU(nn.Module):  # export-friendly version of nn.SiLU()
    """x * sigmoid(x), written without nn.SiLU so it exports cleanly."""

    @staticmethod
    def forward(x):
        return torch.sigmoid(x) * x


class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish()
    """Hard-swish via hardtanh, compatible with torchscript/CoreML/ONNX export."""

    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for torchscript and CoreML
        return F.hardtanh(x + 3, 0., 6.) * x / 6.  # for torchscript, CoreML and ONNX


# Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------
class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    @staticmethod
    def forward(x):
        return x * torch.tanh(F.softplus(x))


class MemoryEfficientMish(nn.Module):
    """Mish with a hand-written backward pass to avoid storing intermediates."""

    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            # x * tanh(ln(1 + exp(x)))
            return x * torch.tanh(F.softplus(x))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            # d/dx [x * fx] = fx + x * sigmoid(x) * (1 - fx^2)
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)


# FReLU https://arxiv.org/abs/2007.11824 -------------------------------------------------------------------------------
class FReLU(nn.Module):
    """Funnel ReLU: max(x, BN(depthwise_conv(x)))."""

    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        # Depthwise conv (groups=c1) provides the spatial condition branch.
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        return torch.max(x, self.bn(self.conv(x)))
class AconC(nn.Module):
    r""" ACON activation (activate or not).
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" .
    """

    def __init__(self, c1):
        super().__init__()
        # Per-channel learnable slopes and switching factor.
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        dpx = x * (self.p1 - self.p2)
        return self.p2 * x + dpx * torch.sigmoid(self.beta * dpx)


class MetaAconC(nn.Module):
    r""" ACON activation (activate or not).
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" .
    """

    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
        super().__init__()
        c2 = max(r, c1 // r)  # bottleneck width of the beta-generator
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
        # self.bn1 = nn.BatchNorm2d(c2)
        # self.bn2 = nn.BatchNorm2d(c1)

    def forward(self, x):
        # Global average pool kept 4-D so the 1x1 convs apply directly.
        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable
        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed
        dpx = x * (self.p1 - self.p2)
        return self.p2 * x + dpx * torch.sigmoid(beta * dpx)
def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    # Anchors must grow in the same direction as strides (small anchors on the
    # high-resolution head); if not, flip both anchor tensors in place.
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # same order
        print('Reversing anchor order')
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)


def check_anchors(dataset, model, thr=4.0, imgsz=640):
    # Check anchor fit to data, recompute if necessary
    # Mutates the Detect() module in place when evolved anchors beat the
    # current ones (best-possible-recall < 0.98 triggers the attempt).
    prefix = colorstr('autoanchor: ')
    print(f'\n{prefix}Analyzing anchors... ', end='')
    m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect()
    shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale
    wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh

    def metric(k):  # compute metric
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        best = x.max(1)[0]  # best_x
        aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold
        bpr = (best > 1. / thr).float().mean()  # best possible recall
        return bpr, aat

    anchors = m.anchor_grid.clone().cpu().view(-1, 2)  # current anchors
    bpr, aat = metric(anchors)
    print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')
    if bpr < 0.98:  # threshold to recompute
        print('. Attempting to improve anchors, please wait...')
        na = m.anchor_grid.numel() // 2  # number of anchors
        try:
            anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
        except Exception as e:
            print(f'{prefix}ERROR: {e}')
        new_bpr = metric(anchors)[0]
        if new_bpr > bpr:  # replace anchors
            anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
            m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid)  # for inference
            m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss
            check_anchor_order(m)
            print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')
        else:
            print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.')
    print('')  # newline


def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
    """ Creates kmeans-evolved anchors from training dataset

        Arguments:
            path: path to dataset *.yaml, or a loaded dataset
            n: number of anchors
            img_size: image size used for training
            thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
            gen: generations to evolve anchors using genetic algorithm
            verbose: print all results

        Return:
            k: kmeans evolved anchors

        Usage:
            from utils.autoanchor import *; _ = kmean_anchors()
    """
    from scipy.cluster.vq import kmeans

    thr = 1. / thr
    prefix = colorstr('autoanchor: ')

    def metric(k, wh):  # compute metrics
        r = wh[:, None] / k[None]
        x = torch.min(r, 1. / r).min(2)[0]  # ratio metric
        # x = wh_iou(wh, torch.tensor(k))  # iou metric
        return x, x.max(1)[0]  # x, best_x

    def anchor_fitness(k):  # mutation fitness
        _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
        return (best * (best > thr).float()).mean()  # fitness

    def print_results(k):
        k = k[np.argsort(k.prod(1))]  # sort small to large
        x, best = metric(k, wh0)
        bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr
        print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr')
        print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, '
              f'past_thr={x[x > thr].mean():.3f}-mean: ', end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    if isinstance(path, str):  # *.yaml file
        with open(path) as f:
            data_dict = yaml.safe_load(f)  # model dict
        from utils.datasets import LoadImagesAndLabels
        dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
    else:
        dataset = path  # dataset

    # Get label wh
    shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
    wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh

    # Filter
    i = (wh0 < 3.0).any(1).sum()
    if i:
        print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.')
    wh = wh0[(wh0 >= 2.0).any(1)]  # filter > 2 pixels
    # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1

    # Kmeans calculation
    print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...')
    s = wh.std(0)  # sigmas for whitening
    k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
    assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}')
    k *= s
    wh = torch.tensor(wh, dtype=torch.float32)  # filtered
    wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered
    k = print_results(k)

    # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0],400)
    # ax[1].hist(wh[wh[:, 1]<100, 1],400)
    # fig.savefig('wh.png', dpi=200)

    # Evolve
    # Genetic refinement: randomly perturb anchors, keep mutations that
    # improve the fitness (recall-weighted ratio metric).
    npr = np.random
    f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar
    for _ in pbar:
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
        kg = (k.copy() * v).clip(min=2.0)
        fg = anchor_fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
            if verbose:
                print_results(k)

    return print_results(k)
class dice_bce_loss(nn.Module):
    """Weighted combination of cross-entropy and soft-dice loss.

    NOTE(review): despite the name, `bce_loss` is nn.CrossEntropyLoss, and the
    dice term is currently weighted 0.0 — only cross-entropy contributes.
    """

    def __init__(self, batch=True):
        super(dice_bce_loss, self).__init__()
        self.batch = batch
        self.bce_loss = nn.CrossEntropyLoss()

    def soft_dice_coeff(self, y_pred, y_true):
        smooth = 0.0  # may change
        if self.batch:
            # One global dice over the whole batch.
            total_true = torch.sum(y_true)
            total_pred = torch.sum(y_pred)
            overlap = torch.sum(y_true * y_pred)
        else:
            # Per-sample dice, reduced by mean() below.
            total_true = y_true.sum(1).sum(1).sum(1)
            total_pred = y_pred.sum(1).sum(1).sum(1)
            overlap = (y_true * y_pred).sum(1).sum(1).sum(1)
        score = (2. * overlap + smooth) / (total_true + total_pred + smooth)
        # score = (intersection + smooth) / (i + j - intersection + smooth)  # iou
        return score.mean()

    def soft_dice_loss(self, y_pred, y_true):
        return 1 - self.soft_dice_coeff(y_true, y_pred)

    def __call__(self, y_pred, y_true):
        ce_term = self.bce_loss(y_pred, y_true)
        dice_term = self.soft_dice_loss(y_true, y_pred)
        # Dice weighted to zero on purpose — cross-entropy only.
        return 1.0 * ce_term + 0.0 * dice_term


class dice_loss(nn.Module):
    """Dice loss computed directly on index-encoded (argmax) masks."""

    def __init__(self, batch=True):
        super(dice_loss, self).__init__()
        self.batch = batch

    def soft_dice_coeff(self, y_pred, y_true):
        smooth = 0.0  # may change
        if self.batch:
            # Both y_true and y_pred are index-encoded label maps.
            # Step 1: count positions where BOTH are class 0 via
            # inclusion-exclusion: |true==0| + |pred==0| - |product==0|.
            true_zeros = torch.sum(y_true == 0)
            pred_zeros = torch.sum(y_pred == 0)
            either_zero = torch.sum((y_true * y_pred) == 0)
            zeros_cross = true_zeros + pred_zeros - either_zero

            # Step 2: agreements on non-background classes = all agreements
            # minus the class-0 agreements counted above.
            agree = torch.sum(y_pred == y_true)
            true_pos_count = torch.sum(y_true > 0)
            pred_pos_count = torch.sum(y_pred > 0)

            i = true_pos_count
            j = pred_pos_count
            intersection = agree - zeros_cross
        else:
            i = y_true.sum(1).sum(1).sum(1)
            j = y_pred.sum(1).sum(1).sum(1)
            # NOTE(review): only two .sum(1) here vs three in dice_bce_loss —
            # preserved as-is; confirm the intended input rank for this branch.
            intersection = (y_true * y_pred).sum(1).sum(1)

        score = (2 * intersection + smooth) / (i + j + smooth)
        # score = (intersection + smooth) / (i + j - intersection + smooth)  # iou
        return score.mean()

    def soft_dice_loss(self, y_pred, y_true):
        return 1 - self.soft_dice_coeff(y_true, y_pred)

    def __call__(self, y_pred, y_true):
        return self.soft_dice_loss(y_true, y_pred)


def calculate_weigths_labels(dataset, dataloader, num_classes):
    """Compute per-class balancing weights 1/log(1.02 + freq) over a dataloader
    and cache them to <db_root>/<dataset>_classes_weights.npy."""
    # Accumulate a global class histogram across every batch.
    counts = np.zeros((num_classes,))
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label'].detach().cpu().numpy()
        valid = (y >= 0) & (y < num_classes)  # ignore out-of-range labels
        counts += np.bincount(y[valid].astype(np.uint8), minlength=num_classes)
    tqdm_batch.close()

    total_frequency = np.sum(counts)
    # Rarer classes get larger weights; 1.02 keeps the log away from zero.
    ret = np.array([1 / (np.log(1.02 + (freq / total_frequency))) for freq in counts])
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset + '_classes_weights.npy')
    np.save(classes_weights_path, ret)

    return ret
class ToTensor(object):
    """Convert the ndarray image in *sample* to a float torch Tensor (CHW)."""

    def __call__(self, sample):
        # numpy images are H x W x C; torch convolutions expect C x H x W
        arr = np.array(sample['image']).astype(np.float32)
        chw = arr.transpose((2, 0, 1))
        return torch.from_numpy(chw).float()
def get_hash(files):
    """Return a single "hash" for a list of files: the sum of the sizes of
    those that exist (missing paths contribute nothing)."""
    total = 0
    for f in files:
        if os.path.isfile(f):
            total += os.path.getsize(f)
    return total
else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, img_size=640, stride=32): + p = str(Path(path).absolute()) # os-agnostic absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new 
video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. ' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def 
__next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640, stride=32): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources, 'r') as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + print(f'{i + 1}/{n}: {s}... ', end='') + if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video + check_requirements(('pafy', 'youtube_dl')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. 
s = '0' local webcam + cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True) + print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + print('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] + while cap.isOpened() and n < f: + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n % 4: # read every 4th frame + success, im = cap.retrieve() + self.imgs[i] = im if success else self.imgs[i] * 0 + time.sleep(1 / self.fps[i]) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img0 = self.imgs.copy() + img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None + + def __len__(self): + return 0 # 1E12 frames = 32 streams at 30 FPS for 
def img2label_paths(img_paths):
    """Map image paths to label paths: the first '/images/' path component
    becomes '/labels/' and the file extension becomes '.txt'."""
    img_dir = os.sep + 'images' + os.sep
    lbl_dir = os.sep + 'labels' + os.sep
    out = []
    for x in img_paths:
        ext = x.split('.')[-1]
        swapped = x.replace(img_dir, lbl_dir, 1)
        # replace only the last occurrence of the extension with 'txt'
        out.append('txt'.join(swapped.rsplit(ext, 1)))
    return out
Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache, exists = torch.load(cache_path), True # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + cache, exists = self.cache_labels(cache_path, prefix), False # re-cache + else: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + 
shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) 
+ l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + x[im_file] = [l, shape, segments] + except Exception as e: + nc += 1 + logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + pbar.close() + + if nf == 0: + logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version + try: + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + except Exception as e: + logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = load_mosaic(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + img2, labels2 = load_mosaic(self, random.randint(0, self.n 
- 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = 
zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def 
augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def load_mosaic(self, index): + # loads images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + 
elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + +def load_mosaic9(self, index): + # loads images in a 9-mosaic + + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + 
elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = 
boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # 
targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # 
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    """Filter augmented boxes.

    Keeps pairs where box2 (after augmentation) is still a plausible target
    relative to box1 (before augmentation): large enough in both dimensions,
    retaining enough area, and not degenerately thin.

    Args:
        box1, box2: arrays of shape (4, n) in x1y1x2y2 layout.
        wh_thr: minimum post-augment width/height in pixels.
        ar_thr: maximum allowed aspect ratio (either orientation).
        area_thr: minimum post/pre area ratio.
        eps: small constant to avoid division by zero.

    Returns:
        Boolean array of length n marking candidates to keep.
    """
    w1 = box1[2] - box1[0]
    h1 = box1[3] - box1[1]
    w2 = box2[2] - box2[0]
    h2 = box2[3] - box2[1]
    aspect = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # worst-case aspect ratio
    big_enough = (w2 > wh_thr) & (h2 > wh_thr)
    area_kept = w2 * h2 / (w1 * h1 + eps) > area_thr
    return big_enough & area_kept & (aspect < ar_thr)  # candidates
def create_folder(path='./new'):
    """(Re)create a directory: delete it if it already exists, then make it fresh.

    Args:
        path: directory to create; any previous contents are removed.
    """
    if os.path.exists(path):
        shutil.rmtree(path)  # delete previous output folder
    os.makedirs(path)  # make new output folder
images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.datasets import *; autosplit('../coco128') + Arguments + path: Path to images directory + weights: Train, val, test weights (list) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + n = len(files) # number of files + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + + print(f'Autosplitting images from {path}' 
+ ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path / txt[i], 'a') as f: + f.write(str(img) + '\n') # add image to txt file diff --git a/utils/general.py b/utils/general.py new file mode 100644 index 0000000..9a88271 --- /dev/null +++ b/utils/general.py @@ -0,0 +1,692 @@ +# YOLOv5 general utils + +import glob +import logging +import math +import os +import platform +import random +import re +import subprocess +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils.google_utils import gsutil_getsize +from utils.metrics import fitness +from utils.torch_utils import init_torch_seeds + +# Settings +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads + + +def set_logging(rank=-1, verbose=True): + logging.basicConfig( + format="%(message)s", + level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) + + +def init_seeds(seed=0): + # Initialize random number generator (RNG) seeds + random.seed(seed) + np.random.seed(seed) + init_torch_seeds(seed) + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. 
def emojis(str=''):
    """Return a platform-safe version of a string.

    Windows consoles may not render emoji, so non-ASCII characters are
    stripped there; on other platforms the string is returned unchanged.
    (Parameter name `str` kept for backward compatibility with keyword calls.)
    """
    if platform.system() == 'Windows':
        return str.encode().decode('ascii', 'ignore')  # drop non-ASCII
    return str
required python version + current = platform.python_version() + result = pkg.parse_version(current) >= pkg.parse_version(minimum) + if required: + assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed' + return result + + +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + n += 1 + print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") + try: + print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + except Exception as e: + print(f'{prefix} {e}') + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + return new_size + + +def check_imshow(): + # Check if environment supports 
def check_file(file):
    """Return a verified path for `file`, searching the tree if it is not found.

    Args:
        file: path (or bare filename) to locate; '' is passed through as-is.

    Returns:
        The input path if it exists (or is empty), otherwise the unique match
        found by a recursive glob search.

    Raises:
        AssertionError: if no match, or more than one match, is found.
    """
    if file == '' or Path(file).is_file():
        return file  # already valid (or empty-string sentinel)
    matches = glob.glob('./**/' + file, recursive=True)  # search whole tree
    assert len(matches), f'File Not Found: {file}'  # assert file was found
    files = matches  # keep original name for the interpolated error message
    assert len(matches) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
    return matches[0]
def make_divisible(x, divisor):
    """Round `x` up to the nearest multiple of `divisor`."""
    return math.ceil(x / divisor) * divisor


def clean_str(s):
    """Replace special characters in a string with underscores."""
    special = "[|@#!¡·$€%&()=?¿^*;:,¨´><+]"  # character class of disallowed symbols
    return re.sub(pattern=special, repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    """Return a function giving a sinusoidal ramp from `y1` to `y2` over `steps`.

    Used as a learning-rate schedule: f(0) == y1 and f(steps) == y2.
    """
    def ramp(x):
        return ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
    return ramp
def labels_to_class_weights(labels, nc=80):
    """Compute inverse-frequency class weights from training labels.

    Args:
        labels: list of per-image label arrays, each of shape (n, 5) laid out
            as [class, x, y, w, h] (labels.shape = (866643, 5) for COCO).
        nc: number of classes.

    Returns:
        torch.Tensor of shape (nc,) with weights normalized to sum to 1, or an
        empty tensor if no labels are loaded.
    """
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; builtin int is
    # the documented equivalent for this cast.
    classes = labels[:, 0].astype(int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1 so the inverse is finite
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    """Produce per-image sampling weights from class weights and image contents.

    Args:
        labels: list of per-image label arrays, each of shape (n, 5) with the
            class index in column 0.
        nc: number of classes.
        class_weights: array of shape (nc,) weighting each class.

    Returns:
        Array of shape (len(labels),) — one sampling weight per image.
    """
    # np.int removed in NumPy >= 1.24; builtin int is equivalent here.
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights
def xyxy2xywh(x):
    """Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h].

    xy1 is the top-left corner, xy2 the bottom-right; output is center + size.
    Accepts a torch.Tensor or numpy array and returns the same type (a copy).
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    out[:, 0] = (x1 + x2) / 2  # x center
    out[:, 1] = (y1 + y2) / 2  # y center
    out[:, 2] = x2 - x1  # width
    out[:, 3] = y2 - y1  # height
    return out


def xywh2xyxy(x):
    """Convert nx4 boxes from [x, y, w, h] (center + size) to [x1, y1, x2, y2].

    xy1 is the top-left corner, xy2 the bottom-right.
    Accepts a torch.Tensor or numpy array and returns the same type (a copy).
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # top left x
    out[:, 1] = x[:, 1] - half_h  # top left y
    out[:, 2] = x[:, 0] + half_w  # bottom right x
    out[:, 3] = x[:, 1] + half_h  # bottom right y
    return out
def resample_segments(segments, n=1000):
    """Up-sample each (m, 2) polygon segment to exactly `n` points.

    Uses linear interpolation along the point index; modifies `segments`
    in place and also returns it.
    """
    for idx, seg in enumerate(segments):
        t_new = np.linspace(0, len(seg) - 1, n)  # target parameter values
        t_old = np.arange(len(seg))  # original parameter values
        cols = [np.interp(t_new, t_old, seg[:, j]) for j in range(2)]  # x then y
        segments[idx] = np.concatenate(cols).reshape(2, -1).T  # back to (n, 2)
    return segments
shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU 
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def area(b):
        # b: Tensor[K, 4] in xyxy -> Tensor[K] of areas
        return (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])

    area1 = area(box1)
    area2 = area(box2)

    # Broadcast to (N, M, 2): top-left / bottom-right of each pairwise overlap
    lt = torch.max(box1[:, None, :2], box2[:, :2])
    rb = torch.min(box1[:, None, 2:], box2[:, 2:])
    inter = (rb - lt).clamp(0).prod(2)  # (N, M) intersection areas
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / union
def wh_iou(wh1, wh2):
    """Pairwise IoU for width-height pairs (boxes treated as co-anchored).

    Arguments:
        wh1 (Tensor[N, 2])
        wh2 (Tensor[M, 2])
    Returns:
        Tensor[N, M] of IoU values.
    """
    a = wh1[:, None]  # [N, 1, 2]
    b = wh2[None]  # [1, M, 2]
    inter = torch.min(a, b).prod(2)  # [N, M] overlap of aligned boxes
    union = a.prod(2) + b.prod(2) - inter
    return inter / union  # iou = inter / (area1 + area2 - inter)
= xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = 
-1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.safe_dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 
2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop) + return crop + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
def gsutil_getsize(url=''):
    """Return the size in bytes of a gs://bucket/file object.

    Shells out to `gsutil du`, whose output is "<bytes>  <url>".
    https://cloud.google.com/storage/docs/gsutil/commands/du

    Args:
        url: gs:// URL of the object.

    Returns:
        Size in bytes, or 0 if gsutil produced no output.
    """
    s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8')
    # int() replaces the original eval(): eval would execute arbitrary text
    # coming back from the subprocess, and the field is a plain integer anyway.
    return int(s.split(' ')[0]) if len(s) else 0  # bytes
'v1.0' + except: # fallback plan + assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', + 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except: + tag = 'v5.0' # current release + + name = file.name + if name in assets: + msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' + redundant = False # second download option + try: # GitHub + url = f'https://github.com/{repo}/releases/download/{tag}/{name}' + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert file.exists() and file.stat().st_size > 1E6 # check + except Exception as e: # GCP + print(f'Download error: {e}') + assert redundant, 'No secondary mirror' + url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' + print(f'Downloading {url} to {file}...') + os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < 1E6: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {msg}') + print('') + return + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.google_utils import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + os.system(f'unzip -q {file}') # unzip + file.unlink() # remove zip to free space + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to 
{}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/utils/lr_scheduler.py b/utils/lr_scheduler.py new file mode 100644 index 0000000..9bc90c9 --- /dev/null +++ b/utils/lr_scheduler.py @@ -0,0 +1,70 @@ +# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# Created by: Hang Zhang +# ECE Department, Rutgers University +# Email: zhang.hang@rutgers.edu +# Copyright (c) 2017 +# This source code is licensed under the MIT-style license found in the +# LICENSE file in the root directory of this source tree +# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +import math + + +class LR_Scheduler(object): + """Learning Rate Scheduler + + Step mode: ``lr = baselr * 0.1 ^ {floor(epoch-1 / lr_step)}`` + + Cosine mode: ``lr = baselr * 0.5 * (1 + cos(iter/maxiter))`` + + Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9`` + + Args: + args: + :attr:`args.lr_scheduler` lr scheduler mode (`cos`, `poly`), + :attr:`args.lr` base learning rate, :attr:`args.epochs` number of epochs, + :attr:`args.lr_step` + + iters_per_epoch: number of iterations per epoch + """ + def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0, + lr_step=0, warmup_epochs=0): + self.mode = mode + print('Using {} LR Scheduler!'.format(self.mode)) + self.lr = base_lr + if mode == 'step': + assert lr_step + self.lr_step = lr_step + self.iters_per_epoch = iters_per_epoch + self.N = num_epochs * iters_per_epoch + self.epoch = -1 + self.warmup_iters = warmup_epochs * iters_per_epoch + + def __call__(self, optimizer, i, epoch, best_pred): + T = epoch * self.iters_per_epoch + i + if self.mode == 'cos': + lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi)) + elif self.mode == 'poly': + lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9) + elif self.mode == 'step': + lr = self.lr * (0.1 ** (epoch // self.lr_step)) + else: + raise NotImplemented + # warm up lr schedule + if self.warmup_iters > 0 and T < self.warmup_iters: + lr = lr * 
1.0 * T / self.warmup_iters + if epoch > self.epoch: + print('\n=>Epoches %i, learning rate = %.4f, \ + previous best = %.4f' % (epoch, lr, best_pred)) + self.epoch = epoch + assert lr >= 0 + self._adjust_learning_rate(optimizer, lr) + + def _adjust_learning_rate(self, optimizer, lr): + if len(optimizer.param_groups) == 1: + optimizer.param_groups[0]['lr'] = lr + else: + # enlarge the lr at the head + optimizer.param_groups[0]['lr'] = lr + for i in range(1, len(optimizer.param_groups)): + optimizer.param_groups[i]['lr'] = lr * 10 diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..f99eda3 --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,293 @@ +import numpy as np + + +class Evaluator(object): + def __init__(self, num_class): + self.num_class = num_class + + # self.confusion_matrix = np.zeros((self.num_class,)*2) # 原始 + self.confusion_matrix = np.zeros((self.num_class, self.num_class)) # 改动 + + def Recall_Precision(self): + TP = np.diag(self.confusion_matrix) + TP_add_FN = self.confusion_matrix.sum(axis=1) # 每一行的和 + TP_add_FP = self.confusion_matrix.sum(axis=0) # 每一列的和 + Recall = TP / TP_add_FN # 模型正确识别出为正类的样本的数量占总的正类样本数量的比值。一般情况下,Recall越高,说明有更多的正类样本被模型预测正确,模型的效果越好。 + Precision = TP / TP_add_FP # 表示在模型识别为正类的样本中,真正为正类的样本所占的比例。 + F1 = 2*(Precision*Recall)/(Precision+Recall) + # recall= np.nanmean(Recall) + # precision = np.nanmean(Precision) + # f1= np.nanmean(F1) + + return Recall, Precision, F1 + + def Pixel_Accuracy(self): + Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum() + return Acc + + def Pixel_Accuracy_Class(self): + Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1) + Acc = np.nanmean(Acc) + return Acc + + def Mean_Intersection_over_Union(self): + class_IoU = np.diag(self.confusion_matrix) / ( + np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) - + np.diag(self.confusion_matrix)) + MIoU = np.nanmean(class_IoU) + return class_IoU, MIoU + + def 
Frequency_Weighted_Intersection_over_Union(self): + freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix) + iu = np.diag(self.confusion_matrix) / ( + np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) - + np.diag(self.confusion_matrix)) + + FWIoU = (freq[freq > 0] * iu[freq > 0]).sum() + return FWIoU + + def _generate_matrix(self, gt_image, pre_image): + mask = (gt_image >= 0) & (gt_image < self.num_class) + label = self.num_class * gt_image[mask].astype('int') + pre_image[mask] + count = np.bincount(label, minlength=self.num_class**2) + confusion_matrix = count.reshape(self.num_class, self.num_class) + return confusion_matrix + + def add_batch(self, gt_image, pre_image): + assert gt_image.shape == pre_image.shape + self.confusion_matrix += self._generate_matrix(gt_image, pre_image) + + def reset(self): + + # self.confusion_matrix = np.zeros((self.num_class,) * 2) # 原始 + self.confusion_matrix = np.zeros((self.num_class, self.num_class)) # 改动 + + + + +# Model validation metrics + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from . import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. 
+ """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average 
precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) + mpre = np.concatenate(([1.], precision, [0.])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = general.box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # background FN + + def matrix(self): + return self.matrix + + def plot(self, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size + labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + 
fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + except Exception as e: + pass + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + + + diff --git a/utils/mypath.py b/utils/mypath.py new file mode 100644 index 0000000..b2b97ee --- /dev/null +++ 
b/utils/mypath.py @@ -0,0 +1,14 @@ +class Path(object): + @staticmethod # staticmethod用于修饰类中的方法,使其可以在不创建类实例的情况下调用方法,这样做的好处是执行效率比较高。 + def db_root_dir(dataset): + if dataset == 'pascal': + return '/path/to/datasets/VOCdevkit/VOC2012/' # folder that contains VOCdevkit/. + elif dataset == 'sbd': + return '/path/to/datasets/benchmark_RELEASE/' # folder that contains dataset/. + elif dataset == 'cityscapes': + return '/path/to/datasets/cityscapes/' # foler that contains leftImg8bit/ + elif dataset == 'coco': + return '/path/to/datasets/coco/' + else: + print('Dataset {} not available.'.format(dataset)) + raise NotImplementedError diff --git a/utils/plots.py b/utils/plots.py new file mode 100644 index 0000000..8313ef2 --- /dev/null +++ b/utils/plots.py @@ -0,0 +1,446 @@ +# Plotting utils + +import glob +import math +import os +import random +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import torch +import yaml +from PIL import Image, ImageDraw, ImageFont + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import fitness + +# Settings +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb('#' + c) for c in hex] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create 
instance for 'from utils.plots import colors' + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): + # Plots one bounding box on image 'im' using OpenCV + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
+ tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + +def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None): + # Plots one bounding box on image 'im' using PIL + im = Image.fromarray(im) + draw = ImageDraw.Draw(im) + line_thickness = line_thickness or max(int(min(im.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=color) # plot + if label: + font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + return np.asarray(im) + + +def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() + # Compares the two methods for width-height anchor multiplication + # https://github.com/ultralytics/yolov3/issues/168 + x = np.arange(-4.0, 4.0, .1) + ya = np.exp(x) + yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 + + fig = plt.figure(figsize=(6, 3), tight_layout=True) + plt.plot(x, ya, '.-', label='YOLOv3') + plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') + plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') + plt.xlim(left=-4, right=4) + plt.ylim(bottom=0, top=6) + plt.xlabel('input') + plt.ylabel('output') + plt.grid() + plt.legend() + fig.savefig('comparison.png', dpi=200) + + +def output_to_target(output): + # Convert model output to target format [batch_id, 
class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): + # Plot image grid with labels + + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + # un-normalise + if np.max(images[0]) <= 1: + images *= 255 + + tl = 3 # line thickness + tf = max(tl - 1, 1) # font thickness + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Check if we should resize + scale_factor = max_size / max(h, w) + if scale_factor < 1: + h = math.ceil(scale_factor * h) + w = math.ceil(scale_factor * w) + + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, img in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + + block_x = int(w * (i // ns)) + block_y = int(h * (i % ns)) + + img = img.transpose(1, 2, 0) + if scale_factor < 1: + img = cv2.resize(img, (w, h)) + + mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + if len(targets) > 0: + image_targets = targets[targets[:, 0] == i] + boxes = xywh2xyxy(image_targets[:, 2:6]).T + classes = image_targets[:, 1].astype('int') + labels = image_targets.shape[1] == 6 # labels if no conf column + conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale_factor < 1: # absolute coords need scale if image scales + boxes *= scale_factor + boxes[[0, 2]] += block_x + boxes[[1, 3]] += block_y 
+ for j, box in enumerate(boxes.T): + cls = int(classes[j]) + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) + plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + + # Draw image filename labels + if paths: + label = Path(paths[i]).name[:40] # trim to 40 char + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, + lineType=cv2.LINE_AA) + + # Image border + cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) + + if fname: + r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size + mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) + # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save + Image.fromarray(mosaic).save(fname) # PIL save + return mosaic + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_test_txt(): # from utils.plots import *; plot_test() + # Plot test.txt histograms + x = np.loadtxt('test.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + 
ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() + # Plot study.txt generated by test.py + fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) + # ax = ax.ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(Path(path).glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] + # for i in range(7): + # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + # ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(30, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + plt.savefig(str(Path(path).name) + '.png', dpi=300) + + +def plot_labels(labels, 
names=(), save_dir=Path(''), loggers=None): + # plot dataset labels + print('Plotting labels... ') + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + # loggers + for k, v in loggers.items() or {}: + if k == 'wandb' and v: + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) + + +def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() + # Plot hyperparameter evolution results in evolve.txt + with 
open(yaml_file) as f: + hyp = yaml.safe_load(f) + x = np.loadtxt('evolve.txt', ndmin=2) + f = fitness(x) + # weights = (f - f.min()) ** 2 # for weighted results + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, (k, v) in enumerate(hyp.items()): + y = x[:, i + 7] + # mu = (y * weights).sum() / weights.sum() # best weighted result + mu = y[f.argmax()] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + plt.savefig('evolve.png', dpi=200) + print('\nPlot saved as evolve.png') + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + 
plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() + # Plot training 'results*.txt', overlaying train and val losses + s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends + t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles + for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) + ax = ax.ravel() + for i in range(5): + for j in [i, i + 5]: + y = results[j, x] + ax[i].plot(x, y, marker='.', label=s[j]) + # y_smooth = butter_lowpass_filtfilt(y) + # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) + + ax[i].set_title(t[i]) + ax[i].legend() + ax[i].set_ylabel(f) if i == 0 else None # add filename + fig.savefig(f.replace('.txt', '.png'), dpi=200) + + +def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): + # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', + 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] + if bucket: + # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] + files = ['results%g.txt' % x for x in id] + c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) + os.system(c) + else: + files = list(Path(save_dir).glob('results*.txt')) + assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + for i in range(10): + y = results[i, x] + if i in [0, 1, 2, 5, 6, 7]: + y[y == 0] = np.nan # don't show zero loss values + # y /= y[0] # normalize + label = labels[fi] if len(labels) else f.stem + ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) + ax[i].set_title(s[i]) + # if i in [5, 6, 7]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + fig.savefig(Path(save_dir) / 'results.png', dpi=200) diff --git a/utils/postprocess_utils.py b/utils/postprocess_utils.py new file mode 100644 index 0000000..3e82da4 --- /dev/null +++ b/utils/postprocess_utils.py @@ -0,0 +1,78 @@ +import numpy as np + +def center_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框重点坐标xy + ''' + boundbxs_x1=boundbxs[0] + boundbxs_y1=boundbxs[1] + boundbxs_x2=boundbxs[2] + boundbxs_y2=boundbxs[3] + center_x=0.5*(boundbxs_x1+boundbxs_x2) + center_y=0.5*(boundbxs_y1+boundbxs_y2) + return center_x,center_y + +def fourcorner_coordinate(boundbxs): + ''' + 输入:两个对角坐标xyxy + 输出:矩形框四个角点坐标,以contours顺序。 + ''' + boundbxs_x1=boundbxs[0] + boundbxs_y1=boundbxs[1] + boundbxs_x2=boundbxs[2] + boundbxs_y2=boundbxs[3] + wid=boundbxs_x2-boundbxs_x1 + hei=boundbxs_y2-boundbxs_y1 + boundbxs_x3=boundbxs_x1+wid + boundbxs_y3=boundbxs_y1 + boundbxs_x4=boundbxs_x1 + boundbxs_y4 = boundbxs_y1+hei + contours_rec=[[boundbxs_x1,boundbxs_y1],[boundbxs_x3,boundbxs_y3],[boundbxs_x2,boundbxs_y2],[boundbxs_x4,boundbxs_y4]] + return contours_rec + +def remove_simivalue(list1,list2): + ''' + 将list1中属于list2的元素都删除。 + 输入:两个嵌套列表 + 返回:嵌套列表 + ''' + list33=list1.copy() + for i in range(len(list1)): + for j in range(len(list2)): + if list2[j] == 
list1[i]: + # list33.pop(list1[i]) + list33.remove(list1[i]) + return list33 + +def remove_sameeleme_inalist(list3): + ''' + 将list3中重复嵌套列表元素删除。 + 输入:嵌套列表 + 返回:嵌套列表 + ''' + list3=list3 + list4=[] + list4.append(list3[0]) + for dict in list3: + k=0 + for item in list4: + if dict!=item: + k=k+1 + else: + break + if k==len(list4): + list4.append(dict) + return list4 + +def order_points(pts): + ''' sort rectangle points by clockwise ''' + sort_x = pts[np.argsort(pts[:, 0]), :] + + Left = sort_x[:2, :] + Right = sort_x[2:, :] + # Left sort + Left = Left[np.argsort(Left[:, 1])[::-1], :] + # Right sort + Right = Right[np.argsort(Right[:, 1]), :] + return np.concatenate((Left, Right), axis=0) diff --git a/utils/saver.py b/utils/saver.py new file mode 100644 index 0000000..d5e03b4 --- /dev/null +++ b/utils/saver.py @@ -0,0 +1,167 @@ +# import shutil +# from collections import OrderedDict # collections模块中的OrderedDict类 +# import glob +# import os +# import cv2 +# import numpy as np +# import torch +# import sys # 引入某一模块的方法 +# sys.path.append("../") # 为了导入上级目录的d2lzh_pytorch.py,添加一个新路径 +# import time +# +# +# class Saver(object): +# +# def __init__(self, args): +# self.args = args +# self.directory = os.path.join('runs', args.dataset, args.checkname) # 路径拼接:runs\pascal\deeplab-resnet +# self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*'))) # 搜索并排序(默认升序) +# run_id = int(self.runs[-1].split('_')[-1]) + 1 if self.runs else 0 # split() 通过指定分隔符对字符串进行切片,run_id=?,从0开始加1 +# +# # self.experiment_dir = os.path.join(self.directory, 'experiment_{}'.format(str(run_id))) # runs\pascal\deeplab-resnet\experiment_? 
+# self.experiment_dir = args.output_dir # 自行设置的输出目录 +# if not os.path.exists(self.experiment_dir): +# os.makedirs(self.experiment_dir) # 生成该路径下的目录 +# +# # def save_checkpoint(self, state, is_best, filename='checkpoint.pth'): +# # """Saves checkpoint to disk""" +# # filename = os.path.join(self.experiment_dir, filename) # runs\pascal\deeplab-resnet\experiment_?\checkpoint.pth.tar +# # torch.save(state, filename) # 生成checkpoint.pth +# # if is_best: +# # best_pred = state['best_pred'] +# # epoch = state['epoch'] +# # str_ = ("%15.5g;" * 2) % (epoch, best_pred) +# # with open(os.path.join(self.experiment_dir, 'best_pred.txt'), 'a') as f: +# # f.write(str_+'\n') +# # # if self.runs: +# # # previous_miou = [0.0] +# # # for run in self.runs: +# # # run_id = run.split('_')[-1] +# # # path = os.path.join(self.directory, 'experiment_{}'.format(str(run_id)), 'best_pred.txt') +# # # if os.path.exists(path): +# # # with open(path, 'r') as f: +# # # miou = float(f.readline()) +# # # previous_miou.append(miou) +# # # else: +# # # continue +# # # max_miou = max(previous_miou) +# # # if best_pred > max_miou: +# # # shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth'))#全局最佳模型 +# # # else: +# # # shutil.copyfile(filename, os.path.join(self.directory, 'model_best.pth')) +# +# def save_val_result(self, epoch, Acc, Acc_class, mIoU, class_IoU, FWIoU, recall, precision, f1): +# str_ = ("%15.5g;" * 13) % (epoch, Acc, Acc_class, mIoU, FWIoU, class_IoU[0], class_IoU[1], recall[0], recall[1], precision[0], precision[1], f1[0], f1[1]) # txt保存指标 +# with open(os.path.join(self.experiment_dir, 'val_result.txt'), 'a') as f: # 这句话自带文件关闭功能,所以和那些先open再write再close的方式来说,更加pythontic! 
+# f.write(str_ + '\n') +# +# # def save_experiment_config(self, num_pictures): +# # logfile = os.path.join(self.experiment_dir, 'parameters.txt') # runs\pascal\deeplab-resnet\experiment_?\parameters.txt +# # log_file = open(logfile, 'w') +# # p = OrderedDict() # 创建实例对象 +# # # 字典能够将信息关联起来,但它们不记录键值对的顺序。OrederedDict实例的行为与字典相同,区别在于记录了添加的键值对的顺序。 +# # p['datset'] = self.args.dataset +# # p['backbone'] = self.args.backbone +# # p['out_stride'] = self.args.out_stride +# # p['lr'] = self.args.lr +# # p['lr_scheduler'] = self.args.lr_scheduler +# # p['loss_type'] = self.args.loss_type +# # p['epoch'] = self.args.epochs +# # p['base_size'] = self.args.base_size +# # p['crop_size'] = self.args.crop_size +# # p['batch_size'] = self.args.batch_size +# # p['num_pictures'] = num_pictures +# # +# # for key, val in p.items(): +# # log_file.write(key + ':' + str(val) + '\n') +# # log_file.close() +# +# # def predict_save_images(self, model, args, epoch, label_info, test_loader, pathName): +# def predict_save_images(self, model, args, epoch, label_info, test_loader): +# # print('调用成功了') +# # if not args.dataset=='potsdam': +# # csv_path = os.path.join('path', args.dataset, 'class_dict.csv') +# +# # else: +# # csv_path = os.path.join('path/ISPRS', args.dataset, 'class_dict.csv') +# # label_info = get_label_info(csv_path) +# # print(test_loader) +# # Time_model = [] +# # Time_test = [] +# # time00 = time.time() +# # cnt_list = [] +# # for (sample, name, WH) in test_loader: +# for (sample, name, WH) in test_loader: #封装在CbySegmentation.__getitem__里,需调整 +# bs = len(name) #name里是图名,WH是高宽 +# # cnt_list.append(bs) +# # begin1 = time.time() +# # image = sample[0] #取第一个tensor是原图,第二个是mask +# image = sample #取第一个tensor是原图,第二个是mask 这里要送进去四维的,batch为1。将mask去掉了 +# # print('sample_shape',sample.shape) +# # print('image_shape',image.shape) +# # print('sample',sample) +# # print('name',name) +# # print('WH',WH) +# model.eval() +# if args.cuda: +# image = image.cuda() +# # begin2 = time.time() +# with 
torch.no_grad(): +# predict = model(image) +# # end2 = time.time() +# # time_model = end2 - begin2 +# # print('batchTime:%.3f ms, each:%.3f , bs:%d ' % (time_model*1000.0, time_model*1000.0/(bs * 1.0), bs)) +# +# # predict=torch.squeeze(predict) +# +# predict = predict.data.cpu().numpy() +# predict = np.argmax(predict, axis=1) +# +# label_values = [label_info[key] for key in label_info] +# colour_codes = np.array(label_values) +# +# predict = colour_codes[predict.astype(int)] +# +# # crop_size恢复到原图尺寸 +# for ii in range(bs): +# # print('line120:',WH) +# w, h = WH[0][ii], WH[1][ii] +# # w,h=WH +# predict_one = cv2.resize(predict[ii], (int(w), int(h)), interpolation=cv2.INTER_NEAREST) +# # save_path = os.path.join(self.experiment_dir, pathName) +# save_path = self.experiment_dir +# if not os.path.exists(save_path): +# os.makedirs(save_path) # 生成该路径下的目录 +# # save_path = os.path.join(save_path, '%d_' % epoch+name[ii]) +# save_path = os.path.join(save_path, name[ii]) +# # print('save_path',save_path) +# # print('epoch',epoch) +# # print('name[ii]',name[ii]) +# +# cv2.imwrite(save_path, cv2.cvtColor(np.uint8(predict_one), cv2.COLOR_RGB2BGR)) # 保存图片 +# # end1 = time.time() +# # time_test = end1 - begin1 +# +# # print('time test: batchTime:%.3f ms, one Time:%.3f, bs:%d '%(time_test*1000.0,time_test*1000.0/(bs*1.0),bs)) +# # Time_model.append(time_model) +# # Time_test.append(time_test) +# # time11 = time.time() +# +# # Max_model = max(Time_model) # 原始 +# # Min_model = min(Time_model) # 原始 +# # Max_test = max(Time_test) # 原始 +# # Min_test = min(Time_test) # 原始 +# # +# # cnt_sample = sum(cnt_list) +# # ave_model = np.mean(Time_model) +# # ave_test = np.mean(Time_test) +# # print() +# # print('each model: ave:%.3f ms bs:%d' % (sum(Time_model)*1000.0/cnt_sample, cnt_list[0])) +# # print('bacthc inference:max:%.3f ms ,min:%3f ms,ave:%3f ms bs:%d ' % (Max_model*1000.0, Min_model*1000.0, ave_model*1000.0, cnt_list[1])) # 原始 +# # print('All task total time:%.3f s' % 
(time11-time00)) +# # +# # print('ave_mo del:max:%.3f ms ,min:%.3f ms,ave:%.3f ms'%(Max_test*1000.0,Min_test*1000.0,ave_test*1000.0)) # 原始 +# return sample,predict_one +# +# +# diff --git a/utils/segutils.py b/utils/segutils.py new file mode 100644 index 0000000..a156586 --- /dev/null +++ b/utils/segutils.py @@ -0,0 +1,33 @@ +from torchvision import transforms +from utils import custom_transforms as tr +import numpy as np +import pandas as pd + + +def transform_ts(args,sample): + #将图像从cv读的格式转为归一化,并转为tensor + composed_transforms = transforms.Compose([ + tr.FixedResize(size=args['crop_size']), + tr.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)), + tr.ToTensor()]) + return composed_transforms(sample) + + + +def colour_code_segmentation(image, label_values): + label_values = [label_values[key] for key in label_values] + colour_codes = np.array(label_values) + x = colour_codes[image.astype(int)] + return x + + +def get_label_info(csv_path): + ann = pd.read_csv(csv_path) + label = {} + for iter, row in ann.iterrows(): + label_name = row['name'] + r = row['r'] + g = row['g'] + b = row['b'] + label[label_name] = [int(r), int(g), int(b)] + return label diff --git a/utils/summaries.py b/utils/summaries.py new file mode 100644 index 0000000..f980a20 --- /dev/null +++ b/utils/summaries.py @@ -0,0 +1,24 @@ +import os +import torch +from torchvision.utils import make_grid +from tensorboardX import SummaryWriter +from dataloaders.utils import decode_seg_map_sequence + + +class TensorboardSummary(object): + def __init__(self, directory): + self.directory = directory + + def create_summary(self): + writer = SummaryWriter(log_dir=os.path.join(self.directory)) + return writer + + def visualize_image(self, writer, dataset, image, target, output, global_step): + grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True) + writer.add_image('Image', grid_image, global_step) + grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 
1)[1].detach().cpu().numpy(), + dataset=dataset), 3, normalize=False, range=(0, 255)) + writer.add_image('Predicted label', grid_image, global_step) + grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(), + dataset=dataset), 3, normalize=False, range=(0, 255)) + writer.add_image('Groundtruth label', grid_image, global_step) \ No newline at end of file diff --git a/utils/torch_utils.py b/utils/torch_utils.py new file mode 100644 index 0000000..5074fa9 --- /dev/null +++ b/utils/torch_utils.py @@ -0,0 +1,304 @@ +# YOLOv5 PyTorch utils + +import datetime +import logging +import math +import os +import platform +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.backends.cudnn as cudnn +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +try: + import thop # for FLOPS computation +except ImportError: + thop = None +logger = logging.getLogger(__name__) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + torch.distributed.barrier() + yield + if local_rank == 0: + torch.distributed.barrier() + + +def init_torch_seeds(seed=0): + # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html + torch.manual_seed(seed) + if seed == 0: # slower, more reproducible + cudnn.benchmark, cudnn.deterministic = False, True + else: # faster, less reproducible + cudnn.benchmark, cudnn.deterministic = True, False + + +def date_modified(path=__file__): + # return human-readable file modification date, i.e. 
'2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory + # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + s = f'git -C {path} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] + except subprocess.CalledProcessError as e: + return '' # not a git repository + + +def select_device(device='', batch_size=None): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + cpu = device.lower() == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable + assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + + cuda = not cpu and torch.cuda.is_available() + if cuda: + devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size: # check batch_size is divisible by device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * len(s) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + else: + s += 'CPU\n' + + logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_synchronized(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(x, ops, n=100, device=None): + # profile a pytorch module or list of modules. Example usage: + # x = torch.randn(16, 3, 640, 640) # input + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + + device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + x = x.to(device) + x.requires_grad = True + print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') + print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type + dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + except: + flops = 0 + + for _ in range(n): + t[0] = time_synchronized() + y = m(x) + t[1] = time_synchronized() + try: + _ = y.sum().backward() + t[2] = time_synchronized() + except: # no backward method + t[2] = float('nan') + dtf += (t[1] - t[0]) * 1000 / n # ms per op forward + dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + + +def is_parallel(model): + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0., 0. + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... 
', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. 
img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPS + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + except (ImportError, Exception): + fs = '' + + logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def load_classifier(name='resnet101', n=2): + # Loads a pretrained model reshaped to n-class output + model = torchvision.models.__dict__[name](pretrained=True) + + # ResNet model properties + # input_size = [3, 224, 224] + # input_space = 'RGB' + # input_range = [0, 1] + # mean = [0.485, 0.456, 0.406] + # std = [0.229, 0.224, 0.225] + + # Reshape output to n classes + filters = model.fc.weight.shape[1] + model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) + model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) + model.fc.out_features = n + return model + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + 
return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1. 
- d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/utils/wj2_loss.py b/utils/wj2_loss.py new file mode 100644 index 0000000..65d5591 --- /dev/null +++ b/utils/wj2_loss.py @@ -0,0 +1,151 @@ +import numpy as np +import matplotlib.pyplot as plt +from torch.nn import Module +import torch +import torch.nn as nn +class wj2_bce_Loss(Module): + def __init__(self, kernel_size=11, sigma=2,weights=[0.2,1.0,1.0], as_loss=True): + super().__init__() + self.kernel_size = kernel_size + self.sigma = sigma + self.weights = torch.tensor(weights) + self.as_loss = as_loss + self.gaussian_kernel = self._create_gaussian_kernel(self.kernel_size, self.sigma) + self.sobel_kernel = torch.tensor([[[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]]]) + self.pad = int((self.kernel_size - 1)/2) + self.tmax = torch.tensor(1.0) + #self.criterion = nn.CrossEntropyLoss( reduction = 'none') + self.criterion1 = nn.CrossEntropyLoss( ) + self.criterion2 = nn.MSELoss() + def forward(self, x, y): + + if not self.gaussian_kernel.is_cuda: + self.gaussian_kernel = self.gaussian_kernel.to(x.device) + if not self.sobel_kernel.is_cuda: + self.sobel_kernel = self.sobel_kernel.to(x.device) + if not self.tmax.is_cuda: + self.tmax = self.tmax.to(x.device) + if not self.weights.is_cuda: + self.weights = self.weights.to(x.device) + + #get pred weight + preds_x = torch.argmax(x,axis=1) + preds_edge = self._get_weight(preds_x)##(bs,1,h,w) + #get label weight + labels_edge = self._get_weight(y) ##(bs,1,h,w) + + celoss = self.criterion1(x,y.long()) + edgeloss = self.criterion2(preds_edge, labels_edge) + + return self.weights[0]*edgeloss + self.weights[1]*celoss + + def _get_weight(self,mask): + ##preds 变成0,1图,(bs,h,w) + mask_map = (mask <= self.tmax).float() * mask + (mask > self.tmax).float() * self.tmax + mask_map = mask_map.unsqueeze(1) + padLayer = nn.ReflectionPad2d(1) + mask_pad = 
padLayer(mask_map) + + # 定义sobel算子参数 + mask_edge = torch.conv2d(mask_pad.float(), self.sobel_kernel.float(), padding=0) + mask_edge = torch.absolute(mask_edge) + + ##低通滤波膨胀边界 + smooth_edge = torch.conv2d(mask_edge.float(), self.gaussian_kernel.float(), padding=self.pad) + return smooth_edge + + def _create_gaussian_kernel(self, kernel_size, sigma): + start = (1 - kernel_size) / 2 + end = (1 + kernel_size) / 2 + kernel_1d = torch.arange(start, end, step=1, dtype=torch.float) + kernel_1d = torch.exp(-torch.pow(kernel_1d / sigma, 2) / 2) + kernel_1d = (kernel_1d / kernel_1d.sum()).unsqueeze(dim=0) + + kernel_2d = torch.matmul(kernel_1d.t(), kernel_1d) + kernel_2d = kernel_2d.expand(1, 1, kernel_size, kernel_size).contiguous() + return kernel_2d + + +def GaussLowPassFiltering(ksize,sigma): + kernel = np.zeros((ksize,ksize),dtype=np.float32) + cons = 1.0/(2.0*np.pi*sigma*sigma) + + for i in range(ksize): + for j in range(ksize): + x = i - (ksize-1)/2 + y = j - (ksize-1)/2 + kernel[j,i] = cons * np.exp((-1.0)*(x**2+y**2)/2.0/(sigma**2) ) + print(kernel) + plt.figure(0);plt.imshow(kernel);plt.show() + return kernel.reshape(1,1,ksize,ksize) + +def create_gaussian_kernel( kernel_size, sigma): + + start = (1 - kernel_size) / 2 + end = (1 + kernel_size) / 2 + kernel_1d = torch.arange(start, end, step=1, dtype=torch.float) + kernel_1d = torch.exp(-torch.pow(kernel_1d / sigma, 2) / 2) + kernel_1d = (kernel_1d / kernel_1d.sum()).unsqueeze(dim=0) + + kernel_2d = torch.matmul(kernel_1d.t(), kernel_1d) + kernel_2d = kernel_2d.expand(3, 1, kernel_size, kernel_size).contiguous() + return kernel_2d + +def main(): + import matplotlib.pyplot as plt + import numpy as np + import torch + import torch.nn as nn + #preds=torch.rand(8,5,10,10) + #preds=torch.argmax(preds,axis=1) + preds=torch.zeros(8,100,100) + preds[:,:,50:]=3.0 + t_max = torch.tensor(1.0) + + + ##preds 变成0,1图,(bs,h,w) + preds_map = (preds <= t_max).float() * preds + (preds > t_max).float() * t_max + + preds_map = 
preds_map.unsqueeze(1) + padLayer = nn.ReflectionPad2d(1) + preds_pad = padLayer(preds_map) + # 定义sobel算子参数 + sobel_kernel =torch.tensor([[[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]]]) + preds_edge_dilate = torch.conv2d(preds_pad.float(), sobel_kernel.float(), padding=0) + preds_edge_dilate = torch.absolute(preds_edge_dilate) + + ##低通滤波,平滑边界 + f_shift ,pad, sigma= 11, 5 , 2 + + kernel = torch.from_numpy(GaussLowPassFiltering(f_shift,sigma)) + smooth_edge = torch.conv2d(preds_edge_dilate.float(), kernel.float(), padding=pad) + print() + + show_result0 = preds_map.numpy() + show_result2=smooth_edge.numpy() + show_result3=preds.numpy() + #print(show_result2[0,0,:,5]) + #print(show_result2[0,0,5,:]) + #plt.figure(0);plt.imshow(show_result0[0,0]);plt.figure(1); + plt.imshow(show_result2[0,0]);plt.show(); + #plt.figure(3);plt.imshow(show_result3[0]);plt.show(); + print() +def test_loss_moule(): + preds=torch.rand(8,5,100,100) + #preds=torch.argmax(preds,axis=1) + + targets =torch.zeros(8,100,100) + targets[:,:,50:]=3.0 + + for weights in [[1.0,1.0,1.0],[ 0.0,0.0,1.0],[ 1.0,0.0,0.0],[ 0.0,1.0,0.0],[ 1.0,1.0,0.0] ]: + loss_layer = wj_bce_Loss(kernel_size=11, sigma=2,weights=weights, as_loss=True) + loss = loss_layer(preds,targets) + print(weights,' loss: ',loss) + + +if __name__=='__main__': + #main() + #kk = create_gaussian_kernel( kernel_size=11, sigma=2) + #print(kk.numpy().shape) + #plt.figure(0);plt.imshow(kk[0,0]);plt.show() + test_loss_moule() diff --git a/utils/wj_loss.py b/utils/wj_loss.py new file mode 100644 index 0000000..1171d44 --- /dev/null +++ b/utils/wj_loss.py @@ -0,0 +1,187 @@ +import numpy as np +import matplotlib.pyplot as plt +from torch.nn import Module +import torch +import torch.nn as nn +class wj_bce_Loss(Module): + def __init__(self, kernel_size=11, sigma=2,weights=[0.2,1.0,1.0], weight_fuse='add',classweight=None,as_loss=True): + super().__init__() + self.kernel_size = kernel_size + self.sigma = sigma + self.weights = torch.tensor(weights) + 
self.as_loss = as_loss + self.gaussian_kernel = self._create_gaussian_kernel(self.kernel_size, self.sigma) + self.sobel_kernel = torch.tensor([[[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]]]) + self.pad = int((self.kernel_size - 1)/2) + self.tmax = torch.tensor(1.0) + self.weight_fuse = weight_fuse + if classweight: + self.criterion = nn.CrossEntropyLoss( weight=torch.tensor(classweight),reduction = 'none') + else: + self.criterion = nn.CrossEntropyLoss( reduction = 'none') + def forward(self, x, y): + + if not self.gaussian_kernel.is_cuda: + self.gaussian_kernel = self.gaussian_kernel.to(x.device) + if not self.sobel_kernel.is_cuda: + self.sobel_kernel = self.sobel_kernel.to(x.device) + if not self.tmax.is_cuda: + self.tmax = self.tmax.to(x.device) + if not self.weights.is_cuda: + self.weights = self.weights.to(x.device) + + + mix_weights = self._get_mix_weight(x, y) + loss = self.criterion(x,y.long()) + return torch.mean(loss*mix_weights) + + def _get_weight(self,mask): + ##preds 变成0,1图,(bs,h,w) + mask_map = (mask <= self.tmax).float() * mask + (mask > self.tmax).float() * self.tmax + mask_map = mask_map.unsqueeze(1) + padLayer = nn.ReflectionPad2d(1) + mask_pad = padLayer(mask_map) + + # 定义sobel算子参数 + mask_edge = torch.conv2d(mask_pad.float(), self.sobel_kernel.float(), padding=0) + mask_edge = torch.absolute(mask_edge) + + ##低通滤波膨胀边界 + smooth_edge = torch.conv2d(mask_edge.float(), self.gaussian_kernel.float(), padding=self.pad) + return smooth_edge + def _get_mix_weight(self, x, y): + #get pred weight + preds_x = torch.argmax(x,axis=1) + preds_weights = self._get_weight(preds_x).squeeze(1) + #get label weight + labels_weights = self._get_weight(y).squeeze(1) + #normal weight + normal_weights = torch.ones(y.shape) + if not normal_weights.is_cuda: + normal_weights = normal_weights.to(x.device) + #print(self.weights) + if self.weight_fuse=='multify': + mix_weights = self.weights[0] * preds_weights * labels_weights + self.weights[2] *normal_weights + else: + 
mix_weights = self.weights[0] * preds_weights + self.weights[1] * labels_weights + self.weights[2] *normal_weights + return mix_weights + + def _create_gaussian_kernel(self, kernel_size, sigma): + start = (1 - kernel_size) / 2 + end = (1 + kernel_size) / 2 + kernel_1d = torch.arange(start, end, step=1, dtype=torch.float) + kernel_1d = torch.exp(-torch.pow(kernel_1d / sigma, 2) / 2) + kernel_1d = (kernel_1d / kernel_1d.sum()).unsqueeze(dim=0) + + kernel_2d = torch.matmul(kernel_1d.t(), kernel_1d) + kernel_2d = kernel_2d.expand(1, 1, kernel_size, kernel_size).contiguous() + return kernel_2d +class thFloater(Module): + def __init__(self, weights=[0.5,0.5]): + super().__init__() + + self.weights = torch.tensor(weights) + self.baseCriterion = nn.CrossEntropyLoss( reduction = 'none') + def forward(self, x, y): + if not self.weights.is_cuda: + self.weights = self.weights.to(x[0].device) + assert len(x) == 2 + loss_river = self.baseCriterion(x[0],y[0].long()) + #loss_floater = self.baseCriterion(x[1],y[1].long()) * y[0] + loss_floater = self.baseCriterion(x[1],y[1].long()) + return torch.mean(loss_river * self.weights[0] + loss_floater * self.weights[1] ) + + + +def GaussLowPassFiltering(ksize,sigma): + kernel = np.zeros((ksize,ksize),dtype=np.float32) + cons = 1.0/(2.0*np.pi*sigma*sigma) + + for i in range(ksize): + for j in range(ksize): + x = i - (ksize-1)/2 + y = j - (ksize-1)/2 + kernel[j,i] = cons * np.exp((-1.0)*(x**2+y**2)/2.0/(sigma**2) ) + + + return kernel.reshape(1,1,ksize,ksize) + +def create_gaussian_kernel( kernel_size, sigma): + + start = (1 - kernel_size) / 2 + end = (1 + kernel_size) / 2 + kernel_1d = torch.arange(start, end, step=1, dtype=torch.float) + kernel_1d = torch.exp(-torch.pow(kernel_1d / sigma, 2) / 2) + kernel_1d = (kernel_1d / kernel_1d.sum()).unsqueeze(dim=0) + + kernel_2d = torch.matmul(kernel_1d.t(), kernel_1d) + kernel_2d = kernel_2d.expand(3, 1, kernel_size, kernel_size).contiguous() + return kernel_2d + +def main(): + import 
matplotlib.pyplot as plt + import numpy as np + import torch + import torch.nn as nn + #preds=torch.rand(8,5,10,10) + #preds=torch.argmax(preds,axis=1) + preds=torch.zeros(8,100,100) + preds[:,:,50:]=3.0 + t_max = torch.tensor(1.0) + + + ##preds 变成0,1图,(bs,h,w) + preds_map = (preds <= t_max).float() * preds + (preds > t_max).float() * t_max + + preds_map = preds_map.unsqueeze(1) + padLayer = nn.ReflectionPad2d(1) + preds_pad = padLayer(preds_map) + # 定义sobel算子参数 + sobel_kernel =torch.tensor([[[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]]]) + preds_edge_dilate = torch.conv2d(preds_pad.float(), sobel_kernel.float(), padding=0) + preds_edge_dilate = torch.absolute(preds_edge_dilate) + + ##低通滤波,平滑边界 + f_shift ,pad, sigma= 11, 5 , 2 + + kernel = torch.from_numpy(GaussLowPassFiltering(f_shift,sigma)) + smooth_edge = torch.conv2d(preds_edge_dilate.float(), kernel.float(), padding=pad) + print('####line134') + cv2.imwrite('') + show_result0 = preds_map.numpy() + show_result2 = smooth_edge.numpy() + show_result3=preds.numpy() + #print(show_result2[0,0,:,5]) + #print(show_result2[0,0,5,:]) + #plt.figure(0);plt.imshow(show_result0[0,0]);plt.figure(1); + plt.imshow(show_result2[0,0]);plt.show(); + #plt.figure(3);plt.imshow(show_result3[0]);plt.show(); + print() +def test_loss_moule(): + preds=torch.rand(8,5,100,100) + #preds=torch.argmax(preds,axis=1) + + targets =torch.zeros(8,100,100) + targets[:,:,50:]=3.0 + + for weights in [[1.0,1.0,1.0],[ 0.0,0.0,1.0],[ 1.0,0.0,0.0],[ 0.0,1.0,0.0],[ 1.0,1.0,0.0] ]: + loss_layer = wj_bce_Loss(kernel_size=11, sigma=2,weights=weights, as_loss=True) + loss = loss_layer(preds,targets) + print(weights,' loss: ',loss) + +def test_multify_output(): + pred1=torch.rand(8,2,100,100) + pred2=torch.rand(8,5,100,100) + + target1 =torch.randint(0,2,(8,100,100)) + target2 =torch.randint(0,5,(8,100,100)) + loss_layer = thFloater(weights=[0.5,0.5]) + loss = loss_layer([pred1,pred2],[target1,target2]) + print(loss) +if __name__=='__main__': + #main() + #kk = 
create_gaussian_kernel( kernel_size=11, sigma=2) + #print(kk.numpy().shape) + #plt.figure(0);plt.imshow(kk[0,0]);plt.show() + #test_loss_moule() + test_multify_output() diff --git a/weights/best_luoshui20230608.pt b/weights/best_luoshui20230608.pt new file mode 100644 index 0000000..d4d3f1c Binary files /dev/null and b/weights/best_luoshui20230608.pt differ diff --git a/weights/model_final.pth b/weights/model_final.pth new file mode 100644 index 0000000..bfbd1eb Binary files /dev/null and b/weights/model_final.pth differ