Person Overboard Detection (落水人员检测)

'''
This version adds boat filtering.
'''
import time
import sys
import os

os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # GPU selection

import cv2
import torch
import torch.nn.functional as F
import numpy as np
import torchvision.transforms as transforms
from PIL import Image

from models.AIDetector_pytorch import Detector
from models.AIDetector_pytorch import plot_one_box, Colors
from models.model_stages import BiSeNet  # STDC-backbone BiSeNet used in main()
from utils.postprocess_utils import center_coordinate, fourcorner_coordinate, remove_simivalue, remove_sameeleme_inalist
from utils.segutils import colour_code_segmentation
from utils.segutils import get_label_info

sys.path.append("../")  # add the parent directory to the import path

def AI_postprocess(preds, _mask_cv, pars, _img_cv):
    '''
    Post-process the detections against the segmented water region (people on board a boat are
    removed by the boat filter added in this version).
    Input:   detection results (class + coordinates), the segmentation mask, config parameters, the original image.
    Process: extract the water contour from the mask and check whether each person lies inside it;
             detections inside the water are kept and drawn, the rest are discarded.
    Return:  the final people in the water (coordinates, class, confidence) and timing info;
             boxes are drawn in place on _img_cv.
    '''
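    # Each entry of `preds` is assumed to be [x1, y1, x2, y2, class_name, confidence]
    # (see the list built in AI_process below), e.g. [105.0, 213.0, 158.0, 302.0, 'person', 0.87];
    # index 4 is the class label that the filters below key on.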
    '''1. Use the largest segmented water region as the reference area.'''
    zoom_factor = 4  # downscale factor; per-pixel x/y traversal in numpy at full resolution is too slow
    original_height = _mask_cv.shape[0]
    original_width = _mask_cv.shape[1]
    zoom_height = int(original_height / zoom_factor)
    zoom_width = int(original_width / zoom_factor)
    _mask_cv = cv2.resize(_mask_cv, (zoom_width, zoom_height))  # shrink the mask; cv2.resize takes (width, height)
    t4 = time.time()
    img_gray = cv2.cvtColor(_mask_cv, cv2.COLOR_BGR2GRAY) if len(_mask_cv.shape) == 3 else _mask_cv
    t5 = time.time()
    ret, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # find the contours (there may be several water boundaries)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contour_info = []
    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)  # largest area first
    t6 = time.time()

    '''New: if no water region was found, return an empty result.'''
    if not contour_info:
        final_head_person_filterwater = []
        timeInfos = 0
        return final_head_person_filterwater, timeInfos
    else:
        max_contour = contour_info[0]
        max_contour = max_contour[0] * zoom_factor  # scale the contour points back to the original image size
        print(max_contour)
    t7 = time.time()

    '''2.1 Split preds into head/person detections and boat detections.'''
    init_head_person = []
    init_boat = []
    for i in range(len(preds)):
        if preds[i][4] == 'head' or preds[i][4] == 'person':
            init_head_person.append(preds[i])
        else:
            init_boat.append(preds[i])
    t8 = time.time()

    '''New 2.2: when a head box and a person box point to the same person, keep only the person box.'''
    init_head = []
    init_person = []
    # separate the head and person labels
    for i in range(len(init_head_person)):
        if init_head_person[i][4] == 'head':
            init_head.append(init_head_person[i])
        else:
            init_person.append(init_head_person[i])
    # turn the person boxes into contours
    person_contour = []
    for i in range(len(init_person)):
        boundbxs_temp = [init_person[i][0], init_person[i][1], init_person[i][2], init_person[i][3]]
        contour_temp_person = fourcorner_coordinate(boundbxs_temp)  # ordered four-corner contour of the person box
        contour_temp_person = np.float32(np.array(contour_temp_person))
        person_contour.append(contour_temp_person)
    # a head whose centre lies inside any person contour belongs to the same person and is dropped;
    # pointPolygonTest with measureDist=False returns +1/-1/0 for inside/outside/on the contour
    # (see the small example after this block)
    list_head = []
    for i in range(len(init_head)):
        center_x, center_y = center_coordinate(init_head[i])
        inside_person = False
        for j in range(len(person_contour)):
            if cv2.pointPolygonTest(person_contour[j], (center_x, center_y), False) == 1:
                inside_person = True
                break
        if not inside_person:
            list_head.append(init_head[i])
    # merge the person boxes with the remaining head boxes
    init_head_person_temp = init_person + list_head
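    # Small illustration of the pointPolygonTest convention relied on above (not part of the pipeline):
    #   square = np.float32([[0, 0], [10, 0], [10, 10], [0, 10]])
    #   cv2.pointPolygonTest(square, (5, 5), False)   # ->  1.0  (inside)
    #   cv2.pointPolygonTest(square, (15, 5), False)  # -> -1.0  (outside)
    #   cv2.pointPolygonTest(square, (10, 5), False)  # ->  0.0  (on the edge)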

    '''3. Filter the head/person detections by the water region from step 1.'''
    init_head_person_filterwater = init_head_person_temp
    final_head_person_filterwater = []
    for i in range(len(init_head_person_filterwater)):
        center_x, center_y = center_coordinate(init_head_person_filterwater[i])
        flag = cv2.pointPolygonTest(max_contour, (center_x, center_y), False)  # +1/-1/0: inside/outside/on the water contour
        if flag == 1:
            final_head_person_filterwater.append(init_head_person_filterwater[i])
    t9 = time.time()

    '''4. Filter the water-filtered head/person detections again by the boat regions.'''
    init_head_person_filterboat = final_head_person_filterwater
    # build the boat-region contours
    boat_contour = []
    for i in range(len(init_boat)):
        boundbxs1 = [init_boat[i][0], init_boat[i][1], init_boat[i][2], init_boat[i][3]]
        contour_temp = fourcorner_coordinate(boundbxs1)  # ordered four-corner contour of the boat box
        contour_temp_ = np.float32(np.array(contour_temp))
        boat_contour.append(contour_temp_)
    t10 = time.time()
    # collect the head/person detections whose centres lie inside any boat region (may contain duplicates)
    list_headperson_inboat = []
    for i in range(len(init_head_person_filterboat)):
        for j in range(len(boat_contour)):
            center_x, center_y = center_coordinate(init_head_person_filterboat[i])
            flag = cv2.pointPolygonTest(boat_contour[j], (center_x, center_y), False)  # +1/-1/0: inside/outside/on the contour
            if flag == 1:
                list_headperson_inboat.append(init_head_person_filterboat[i])
    print('list_headperson_inboat', list_headperson_inboat)
    if len(list_headperson_inboat) == 0:
        final_head_person_filterboat = init_head_person_filterboat  # nobody is on a boat, keep everything
    else:
        list_headperson_inboat = remove_sameeleme_inalist(list_headperson_inboat)  # drop duplicated nested-list elements
        # drop the head/person detections that fall inside a boat region
        final_head_person_filterboat = remove_simivalue(init_head_person_filterboat, list_headperson_inboat)
    t11 = time.time()

    '''5. Output the final people in the water and draw them on the detection image.'''
    colors = Colors()
    for i in range(len(final_head_person_filterboat)):
        lbl = final_head_person_filterboat[i][4]
        xyxy = [final_head_person_filterboat[i][0], final_head_person_filterboat[i][1],
                final_head_person_filterboat[i][2], final_head_person_filterboat[i][3]]
        c = 5  # fixed colour index
        plot_one_box(xyxy, _img_cv, label=lbl, color=colors(c, True), line_thickness=3)
    final_img = _img_cv
    t12 = time.time()
    # cv2.imwrite('final_result.png', final_img)
    t13 = time.time()
    timeInfos = ('save image: %s, draw boxes: %s, boat filter: %s, build boat contours: %s, '
                 'dedup + water filter: %s, split head/person and boat: %s, empty-water check: %s, '
                 'find contours: %s, grayscale conversion: %s (ms)'
                 % ((t13 - t12) * 1000, (t12 - t11) * 1000, (t11 - t10) * 1000, (t10 - t9) * 1000,
                    (t9 - t8) * 1000, (t8 - t7) * 1000, (t7 - t6) * 1000, (t6 - t5) * 1000, (t5 - t4) * 1000))
    print(timeInfos)
    return final_head_person_filterboat, timeInfos  # boat-filtered people in the water (coordinates, class, confidence) and timing info
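
# The helpers imported from utils.postprocess_utils are not shown in this file; based on how they are
# called above, they are assumed to behave roughly like the hypothetical sketches below (the real
# implementations live in utils/postprocess_utils.py):
#   center_coordinate(det)       -> (cx, cy), the centre of the box det[0:4] = [x1, y1, x2, y2]
#   fourcorner_coordinate(box)   -> [[x1, y1], [x2, y1], [x2, y2], [x1, y2]], the ordered corners of the box
#   remove_sameeleme_inalist(l)  -> l with duplicated nested lists removed
#   remove_simivalue(a, b)       -> the elements of a that do not appear in b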

def AI_process(model, segmodel, args1, path1):
    '''
    Run object detection and water segmentation on the original image.
    Input:  detection model, segmentation model, config parameters, image path.
    Return: detection results, original image, segmentation mask.
    '''
    '''Detection'''
    t21 = time.time()
    _img_cv = cv2.imread(path1)  # this image is fed to the YOLOv5 detector
    t22 = time.time()
    pred = model.detect(_img_cv)  # detection result
    # flatten pred into nested lists of the form [x1, y1, x2, y2, class_name, confidence]
    pred = [[*x[0:4], x[4], x[5].cpu().tolist()] for x in pred[1]]
    print('pred', pred)
    t23 = time.time()

    '''Segmentation'''
    img = Image.open(path1).convert('RGB')
    t231 = time.time()
    transf1 = transforms.ToTensor()
    transf2 = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    imgs = transf1(img)
    imgs = transf2(imgs)
    print(path1)  # numpy images are laid out as (H, W, C)
    size = [360, 640]  # output size (H, W) of the segmentation logits
    imgs = imgs.unsqueeze(0)
    imgs = imgs.cuda()
    N, C, H, W = imgs.size()
    self_scale = 360 / H  # scale the input so that its height becomes 360
    new_hw = [int(H * self_scale), int(W * self_scale)]
    print('segmentation input size:', new_hw)
    imgs = F.interpolate(imgs, new_hw, mode='bilinear', align_corners=True)
    t24 = time.time()
    with torch.no_grad():
        logits = segmodel(imgs)[0]
    t241 = time.time()
    logits = F.interpolate(logits, size=size, mode='bilinear', align_corners=True)
    probs = torch.softmax(logits, dim=1)
    preds = torch.argmax(probs, dim=1)
    preds_squeeze = preds.squeeze(0)
    preds_squeeze_predict = colour_code_segmentation(np.array(preds_squeeze.cpu()), args1['label_info'])
    preds_squeeze_predict = cv2.resize(np.uint8(preds_squeeze_predict), (W, H))  # resize the mask back to the original resolution
    predict_mask = cv2.cvtColor(np.uint8(preds_squeeze_predict), cv2.COLOR_RGB2BGR)
    _mask_cv = predict_mask
    t25 = time.time()
    cv2.imwrite('seg_result.png', _mask_cv)
    t26 = time.time()
    print('save seg mask: %s, seg post-processing: %s, seg inference: %s, seg input resize: %s, '
          'seg image load: %s, detection inference: %s, image read: %s (ms)'
          % ((t26 - t25) * 1000, (t25 - t241) * 1000, (t241 - t24) * 1000, (t24 - t231) * 1000,
             (t231 - t23) * 1000, (t23 - t22) * 1000, (t22 - t21) * 1000))
    return pred, _img_cv, _mask_cv  # detection results, original image, segmentation mask
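
# Note on the mask convention (an assumption inferred from the code, not stated in this file):
# label_info from class_dict.csv is expected to colour-code the water class brighter than the
# background, because AI_postprocess() binarizes the mask with an Otsu threshold and treats the
# largest bright contour as the water region.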

def main():
    '''Config parameters'''
    label_info = get_label_info('utils/class_dict.csv')
    pars = {'cuda': '0', 'crop_size': 512, 'input_dir': 'input_dir', 'output_dir': 'output_dir',
            'workers': 16, 'label_info': label_info, 'dspth': './data/', 'backbone': 'STDCNet813',
            'use_boundary_2': False, 'use_boundary_4': False, 'use_boundary_8': True,
            'use_boundary_16': False, 'use_conv_last': False}
    dete_weights = 'weights/best_luoshui20230608.pt'
    '''Segmentation model weights path'''
    seg_weights = 'weights/model_final.pth'

    '''Initialize the detection model'''
    model = Detector(dete_weights)

    '''Initialize the segmentation model'''
    n_classes = 2
    segmodel = BiSeNet(backbone=pars['backbone'], n_classes=n_classes,
                       use_boundary_2=pars['use_boundary_2'], use_boundary_4=pars['use_boundary_4'],
                       use_boundary_8=pars['use_boundary_8'], use_boundary_16=pars['use_boundary_16'],
                       use_conv_last=pars['use_conv_last'])
    segmodel.load_state_dict(torch.load(seg_weights))
    segmodel.cuda()
    segmodel.eval()

    '''Run on every image in the input directory'''
    folders = os.listdir(pars['input_dir'])
    for i in range(len(folders)):
        path1 = pars['input_dir'] + '/' + folders[i]
        t1 = time.time()
        '''Object detection and water segmentation on the original image'''
        pred, _img_cv, _mask_cv = AI_process(model, segmodel, pars, path1)
        t2 = time.time()
        '''Post-processing: decide whether anyone is in the water'''
        final_people, time_infos = AI_postprocess(pred, _mask_cv, pars, _img_cv)
        t3 = time.time()
        print('Total time: pre-processing (t2-t1) %s ms, post-processing (t3-t2) %s ms'
              % ((t2 - t1) * 1000, (t3 - t2) * 1000))


if __name__ == "__main__":
    main()
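
# Usage note (based on main() above): place the test images in the folder referenced by
# pars['input_dir'] and make sure weights/best_luoshui20230608.pt (detection) and
# weights/model_final.pth (segmentation) exist, then run this script. Each image is detected,
# segmented and post-processed; the segmentation mask of the most recently processed image is
# written to seg_result.png, and the filtered people-in-water detections are printed together
# with per-stage timings.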