# Latest illegal-parking detection code
from models.model_stages import BiSeNet
from predict_city.heliushuju import Heliushuju
from torch.utils.data import DataLoader
import numpy as np
import os
import argparse
import cv2
import torch
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

from complexIllegalParkingUtilsNewest import mixNoParking_road_postprocess

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# print("line15", torch.cuda.is_available())


class MscEvalV0(object):
    def __init__(self, scaleH=1 / 3, scaleW=1 / 3, ignore_label=255):
        self.ignore_label = ignore_label
        self.scaleH = scaleH
        self.scaleW = scaleW
        self.to_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

    # IllegalParkingTestData
    def __call__(self, net, dl, n_classes):
        # Evaluate: reads pre-computed segmentation masks and detection .txt files
        # from IllegalParkingTestData instead of running the network here.
        maskPath = '../IllegalParkingTestData/masks'
        testImagePath = '../IllegalParkingTestData/images'
        File1 = os.listdir(testImagePath)
        for file in File1:
            print('####beg to :', file)
            txtRowContent = []
            saveVehicleCoordinate = []
            singleTxtContent = []
            txtPath = '../IllegalParkingTestData/detections' + os.sep + file[:-4] + '.txt'
            testImage = testImagePath + os.sep + file
            testImageArray = cv2.imread(testImage)

            # Parse the detection file: each numeric row is a comma-separated list
            # whose last field is a class flag; only rows with flag 0 are kept.
            txtContent = open(txtPath, 'r', encoding='utf-8')
            content = txtContent.readlines()
            for line in content:
                if line[0].isnumeric():
                    e = line.splitlines(False)[0]
                    f = e.split(',', -1)
                    if float(f[-1]) == 0:
                        for i in range(len(f)):
                            txtRowContent.append(float(f[i]))
                        # Fields 1-4 are the box corners (x1, y1) and (x2, y2).
                        saveVehicleCoordinate.append((int(txtRowContent[1]), int(txtRowContent[2])))
                        saveVehicleCoordinate.append((int(txtRowContent[3]), int(txtRowContent[4])))
                        singleTxtContent.append(txtRowContent)
                        txtRowContent = []
            txtContent.close()
            # print("line57, singleTxtContent: ", singleTxtContent)

            # Load the segmentation mask that matches this image and keep one channel.
            mask = cv2.imread(maskPath + os.sep + file[:-4] + '_mask.png')
            imgName = file[:-4] + '.png'
            mask = mask[:, :, 0]

            # Post-processing parameters passed as a dict.
            traffic_dict = {'RoadArea': 16000, 'roundness': 0.5, 'laneArea': 2, 'modelSize': (1920, 1080),
                            'testImageName': file, 'fitOrder': 2}
            # print('####line63: det results ', singleTxtContent, mask.shape, np.max(mask), np.min(mask))
            save_path = './demo/' + file

            # targetList, time_infos, finalLane, lane_line, abc = mixNoParking_road_postprocess(singleTxtContent, mask, traffic_dict)
            # targetList, time_infos = mixNoParking_road_postprocess(singleTxtContent, mask, traffic_dict, imgName)
            targetList, time_infos = mixNoParking_road_postprocess(singleTxtContent, mask, traffic_dict)
            print('####line66:', time_infos)
            # print("line65", targetList)

            """Draw the detection boxes on the test image."""
            for i in range(len(targetList)):
                # Column 7 flags an illegal-parking target; columns 0-3 are the box
                # corners and column 6 is the confidence score drawn above the box.
                if targetList[i][7] != 0:
                    X1 = targetList[i][0]
                    Y1 = targetList[i][1]
                    X2 = targetList[i][2]
                    Y2 = targetList[i][3]
                    cv2.rectangle(testImageArray, (int(X1), int(Y1)), (int(X2), int(Y2)), (0, 0, 255),
                                  thickness=3, lineType=cv2.LINE_AA)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(testImageArray, str(format(targetList[i][6], ".2f")), (int(X1) + 4, int(Y1 - 1)),
                                font, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.imwrite(save_path, testImageArray)

            # """Connect and display the points of the leftmost and rightmost lane-line clusters."""
            # for k in range(len(finalLane)):
            #     for i in range(len(finalLane[k])):
            #         if i + 1 <= len(finalLane[k]) - 1:
            #             cv2.line(lane_line, (int(finalLane[k][i][0]), int(finalLane[k][i][1])),
            #                      (int(finalLane[k][i + 1][0]), int(finalLane[k][i + 1][1])), (0, 0, 255),
            #                      thickness=2, lineType=cv2.LINE_AA)
            #         else:
            #             break
            # cv2.imwrite('./demo/' + 'realLane_' + '{}'.format(file[:-4]) + '.png', lane_line)

            # """Draw the quadratic curves fitted to the leftmost and rightmost lane-line clusters."""
            # y = np.array(list(range(0, 1080)))
            # x1 = abc[0] * (y ** 2) + abc[1] * y + abc[2]
            # x2 = abc[3] * (y ** 2) + abc[4] * y + abc[5]
            # plt.plot(x1, y)
            # plt.plot(x2, y)
            # plt.imshow(lane_line)
            # plt.savefig('./demo/' + 'fitLane_' + '{}'.format(file[:-4]) + '.png')
            # plt.show()


def evaluatev0(respth='', dspth='', backbone='', scaleH=1 / 3, scaleW=1 / 3, use_boundary_2=False,
               use_boundary_4=False, use_boundary_8=False, use_boundary_16=False, use_conv_last=False):
    # Dataset
    batchsize = 1
    n_workers = 0
    dsval = Heliushuju(dspth, mode='test')
    dl = DataLoader(dsval, batch_size=batchsize, shuffle=False, num_workers=n_workers, drop_last=False)

    n_classes = 4
    # print("backbone:", backbone)
    net = BiSeNet(backbone=backbone, n_classes=n_classes, use_boundary_2=use_boundary_2,
                  use_boundary_4=use_boundary_4, use_boundary_8=use_boundary_8,
                  use_boundary_16=use_boundary_16, use_conv_last=use_conv_last)
    net.load_state_dict(torch.load(respth))
    net.cuda()
    net.eval()

    with torch.no_grad():
        single_scale = MscEvalV0(scaleH=scaleH, scaleW=scaleW)
        single_scale(net, dl, 4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='./model_save/pths/best.pt',
                        help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='./data/test/images',
                        help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    opt = parser.parse_args()

    evaluatev0(respth='./model_save/pths/stdc_360X640_highWayParking.pth',
               dspth='/home/thsw/WJ/zyy/IllegalParkingTestData/masks',
               backbone='STDCNet813', scaleH=1 / 3, scaleW=1 / 3,
               use_boundary_2=False, use_boundary_4=False, use_boundary_8=False,
               use_boundary_16=False, use_conv_last=False)
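# ---------------------------------------------------------------------------
# Reference sketch (assumption, kept commented out like the other illustrative
# blocks above): the parsing loop in MscEvalV0.__call__ implies that each row
# of a detections .txt file is a comma-separated list of numbers whose columns
# 1-4 are the box corners (x1, y1, x2, y2) and whose last column is a class
# flag that must equal 0 for the row to be used. The hypothetical helper below
# reproduces that reading in isolation; the exact row layout is inferred from
# the code above, not from a documented format.
#
# def parse_detection_row(row):
#     fields = [float(v) for v in row.strip().split(',')]
#     if fields[-1] != 0:            # only class-0 rows are kept by the loop above
#         return None
#     x1, y1, x2, y2 = fields[1:5]   # box corners as indexed by the loop above
#     return fields, (int(x1), int(y1)), (int(x2), int(y2))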