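# Single-image semantic segmentation demo: wraps a BiSeNet model (a DinkNet34
# alternative is left commented out), predicts a class mask for one image,
# keeps only the largest contour of the mask, draws it on the input image and
# saves/visualises the result, printing per-stage timings along the way.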
import torch
import sys, os
sys.path.extend(['segutils'])
from core.models.bisenet import BiSeNet
from torchvision import transforms
import cv2, glob
import numpy as np
from core.models.dinknet import DinkNet34
import matplotlib.pyplot as plt
import time


class SegModel(object):
    def __init__(self, nclass=2, weights=None, modelsize=512, device='cuda:0'):
        # self.args = args
        self.model = BiSeNet(nclass)
        # self.model = DinkNet34(nclass)
        checkpoint = torch.load(weights)
        self.modelsize = modelsize
        self.model.load_state_dict(checkpoint['model'])
        self.device = device
        self.model = self.model.to(self.device)
        '''self.composed_transforms = transforms.Compose([
            transforms.Normalize(mean=(0.335, 0.358, 0.332), std=(0.141, 0.138, 0.143)),
            transforms.ToTensor()])'''
        self.mean = (0.335, 0.358, 0.332)
        self.std = (0.141, 0.138, 0.143)
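
    # eval(): run one forward pass on an image array (the demo passes the
    # output of cv2.imread directly) and return the HxW class-index mask,
    # resized back to the input resolution, plus a per-stage timing string.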
    def eval(self, image):
        time0 = time.time()
        imageH, imageW, imageC = image.shape
        image = self.preprocess_image(image)
        time1 = time.time()
        self.model.eval()
        image = image.to(self.device)
        with torch.no_grad():
            output = self.model(image)
        time2 = time.time()
        pred = output.data.cpu().numpy()
        pred = np.argmax(pred, axis=1)[0]  # argmax over the class channel -> per-pixel label map
        time3 = time.time()
        pred = cv2.resize(pred.astype(np.uint8), (imageW, imageH))
        time4 = time.time()
        outstr = 'pre-process:%.1f ,infer:%.1f ,post-process:%.1f ,post-resize:%.1f, total:%.1f \n ' % (
            self.get_ms(time1, time0), self.get_ms(time2, time1), self.get_ms(time3, time2),
            self.get_ms(time4, time3), self.get_ms(time4, time0))
        # print('pre-process:%.1f ,infer:%.1f ,post-process:%.1f ,post-resize:%.1f, total:%.1f ' % (
        #     self.get_ms(time1, time0), self.get_ms(time2, time1), self.get_ms(time3, time2),
        #     self.get_ms(time4, time3), self.get_ms(time4, time0)))
        return pred, outstr

    def get_ms(self, t1, t0):
        return (t1 - t0) * 1000.0
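
    # preprocess_image(): resize to the model input size, scale to [0, 1],
    # normalise per channel with the dataset mean/std, swap the channel order,
    # convert HWC -> CHW and add a batch dimension.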
    def preprocess_image(self, image):
        time0 = time.time()
        image = cv2.resize(image, (self.modelsize, self.modelsize))
        image = image.astype(np.float32)
        image /= 255.0
        image[:, :, 0] -= self.mean[0]
        image[:, :, 1] -= self.mean[1]
        image[:, :, 2] -= self.mean[2]
        image[:, :, 0] /= self.std[0]
        image[:, :, 1] /= self.std[1]
        image[:, :, 2] /= self.std[2]
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # swap channel order after normalisation
        # image -= self.mean
        # image /= self.std
        image = np.transpose(image, (2, 0, 1))
        image = torch.from_numpy(image).float()
        image = image.unsqueeze(0)
        return image


def get_ms(t1, t0):
    return (t1 - t0) * 1000.0


def get_largest_contours(contours):
    # Return the index of the contour with the largest area.
    areas = [cv2.contourArea(x) for x in contours]
    max_area = max(areas)
    max_id = areas.index(max_area)
    return max_id
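

# Demo: load the trained weights, segment one validation image, keep only the
# largest contour of the predicted mask, draw it on the image and save/show
# the result (the paths below are the author's local paths).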
if __name__ == '__main__':
    image_url = '/home/thsw2/WJ/data/THexit/val/images/DJI_0645.JPG'
    nclass = 2
    weights = '../weights/segmentation/BiSeNet/checkpoint.pth'
    segmodel = SegModel(nclass=nclass, weights=weights)
    image_urls = glob.glob('/home/thsw2/WJ/data/THexit/val/images/*')
    out_dir = '../runs/detect/exp2-seg'; os.makedirs(out_dir, exist_ok=True)
    for image_url in image_urls[0:1]:
        image_url = '/home/thsw2/WJ/data/THexit/val/images/54(199).JPG'  # fixed test image overrides the glob result
        image_array0 = cv2.imread(image_url)
        pred, outstr = segmodel.eval(image_array0)  # eval() returns (mask, timing string)
        # plt.figure(1);plt.imshow(pred);
        # plt.show()
        binary0 = pred.copy()
        time0 = time.time()
        contours, hierarchy = cv2.findContours(binary0, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        max_id = -1
        if len(contours) > 0:
            max_id = get_largest_contours(contours)
            binary0[:, :] = 0
            # print(contours[0].shape, contours[1].shape, contours[0])  # debug; raises IndexError when only one contour is found
            cv2.fillPoly(binary0, [contours[max_id][:, 0, :]], 1)
        time1 = time.time()
        # num_labels, _, Areastats, centroids = cv2.connectedComponentsWithStats(binary0, connectivity=4)
        time2 = time.time()
        cv2.drawContours(image_array0, contours, max_id, (0, 255, 255), 3)
        time3 = time.time()
        out_url = '%s/%s' % (out_dir, os.path.basename(image_url))
        ret = cv2.imwrite(out_url, image_array0)
        time4 = time.time()
        print('image:%s findcontours:%.1f ms , connect:%.1f ms ,draw:%.1f save:%.1f' % (
            os.path.basename(image_url), get_ms(time1, time0), get_ms(time2, time1),
            get_ms(time3, time2), get_ms(time4, time3)))
        plt.figure(0); plt.imshow(pred)
        plt.figure(1); plt.imshow(image_array0)
        plt.figure(2); plt.imshow(binary0)
        plt.show()
        # print(out_url, ret)