from __future__ import print_function
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io
import glob
import time
import cv2
import argparse
from filterpy.kalman import KalmanFilter
from PIL import Image, ImageDraw, ImageFont
np.random.seed(0)
'''
def plot_one_box_ForTrack(x, im, color=None, label=None, line_thickness=3):
    # Plots one bounding box on image 'im' using OpenCV
    assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.'
    tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
'''
def plot_one_box_ForTrack(box, im, color=None, label=None, line_thickness=None):
    # Plots one bounding box on image 'im' using PIL
    im = Image.fromarray(im)
    draw = ImageDraw.Draw(im)
    line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
    draw.rectangle([(box[0], box[1]), (box[2], box[3])], width=line_thickness, outline=tuple(color))  # plot
    if label:
        tmax = min(round(max(im.size) / 40), 20)
        fontsize = max(tmax, 12)
        font = ImageFont.truetype("../AIlib2/conf/platech.ttf", fontsize, encoding='utf-8')
        txt_width, txt_height = font.getsize(label)  # note: font.getsize was removed in Pillow 10; this requires Pillow < 10
        draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
        draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
    im_array = np.asarray(im)
    return im_array
def drawBoxTraceSimplied(track_det_result, iiframe, img_draw, rainbows=None, boxFlag=True, traceFlag=True, names=[]):
    boxes_oneFrame = track_det_result[track_det_result[:, 6] == iiframe]
    if boxFlag:
        ### Draw the detection boxes on this frame
        for box in boxes_oneFrame:
            x0, y0, x1, y1, conf, cls = box[0:6]
            #cv2.rectangle(img_draw, (int(x0), int(y0)), (int(x1), int(y1)), (255,0,20), 2)
            if len(names) == 0:
                txtstring = '%d:%.2f' % (cls, conf)
            else:
                txtstring = '%s:%.2f' % (names[int(cls)], conf)
            img_draw = plot_one_box_ForTrack(box[0:4], img_draw, color=rainbows[int(cls)], label=txtstring, line_thickness=3)
    if traceFlag:
        ### Draw the trajectories up to this frame
        track_ids = boxes_oneFrame[:, 7].tolist()
        boxes_before_oneFrame = track_det_result[track_det_result[:, 6] <= iiframe]
        for trackId in track_ids:
            boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:, 7] == trackId]
            xcs = (boxes_before_oneFrame_oneId[:, 0] + boxes_before_oneFrame_oneId[:, 2]) // 2
            ycs = (boxes_before_oneFrame_oneId[:, 1] + boxes_before_oneFrame_oneId[:, 3]) // 2
            [cv2.line(img_draw, (int(xcs[i]), int(ycs[i])),
                      (int(xcs[i + 1]), int(ycs[i + 1])), (255, 0, 0), thickness=2)
             for i, _ in enumerate(xcs) if i < len(xcs) - 1]
    return img_draw
def moving_average_wang(interval, windowsize):
    # Moving average with shrinking centered windows at both edges; windowsize must be odd.
    outNum = interval.copy()
    if windowsize == 1:
        return outNum
    assert windowsize % 2 != 0
    window = np.ones(int(windowsize)) / float(windowsize)
    re = np.convolve(interval, window, 'valid')
    cnt = int((windowsize - 1) / 2 + 0.5)
    total = len(interval)
    outNum = np.zeros((total,), dtype=np.float32)
    outNum[0] = interval[0]
    outNum[-1] = interval[-1]
    for i in range(1, cnt):
        outNum[i] = np.mean(interval[0:2 * i + 1])  # centered window of size 2*i+1, mirroring the right edge
        outNum[-i - 1] = np.mean(interval[-2 * i - 1:])
    #print('###line113:', outNum.shape, re.shape, cnt, windowsize)
    outNum[cnt:-cnt] = re[:]
    return outNum
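# A quick illustration of the edge handling (values computed by hand, not from the repo):
#   moving_average_wang(np.array([1., 4., 2., 8., 5.]), 3)
# keeps the endpoints as-is and smooths the interior with a width-3 window:
#   [1.0, (1+4+2)/3, (4+2+8)/3, (2+8+5)/3, 5.0] = [1.0, 2.33, 4.67, 5.0, 5.0]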
def track_draw_trace(tracks, im0):
    # Draw each track's centroid history as a polyline
    for track in tracks:
        [cv2.line(im0, (int(track.centroidarr[i][0]),
                        int(track.centroidarr[i][1])),
                  (int(track.centroidarr[i + 1][0]),
                   int(track.centroidarr[i + 1][1])),
                  (255, 0, 0), thickness=2)
         for i, _ in enumerate(track.centroidarr)
         if i < len(track.centroidarr) - 1]
    return im0
  96. """Function to Draw Bounding boxes"""
  97. def track_draw_boxes(img, bbox, identities=None, categories=None, names=None ):
  98. for i, box in enumerate(bbox):
  99. #print('####line33 sort.py:',box)
  100. x1, y1, x2, y2 = [int(x) for x in box]
  101. cat = int(categories[i]) if categories is not None else 0
  102. id = int(identities[i]) if identities is not None else 0
  103. data = (int((box[0]+box[2])/2),(int((box[1]+box[3])/2)))
  104. label = str(id) + ":"+ names[cat]
  105. (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
  106. cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,20), 2)
  107. cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (255,144,30), -1)
  108. cv2.putText(img, label, (x1, y1 - 5),cv2.FONT_HERSHEY_SIMPLEX,
  109. 0.6, [255, 255, 255], 1)
  110. # cv2.circle(img, data, 6, color,-1) #centroid of box
  111. return img
def track_draw_all_boxes(tracked_dets, im0, names):
    if len(tracked_dets) > 0:
        bbox_xyxy = tracked_dets[:, :4]
        identities = tracked_dets[:, 8]
        categories = tracked_dets[:, 4]
        track_draw_boxes(im0, bbox_xyxy, identities, categories, names)
    return im0
#### Traces come from the track chain; boxes come from the result of track.update().
def track_draw_boxAndTrace(tracked_dets, tracks, im0, names):
    track_draw_all_boxes(tracked_dets, im0, names)
    track_draw_trace(tracks, im0)
    return im0
#### Both traces and boxes come from the track chain
def track_draw_trace_boxes(tracks, im0, names):
    for track in tracks:
        [cv2.line(im0, (int(track.centroidarr[i][0]),
                        int(track.centroidarr[i][1])),
                  (int(track.centroidarr[i + 1][0]),
                   int(track.centroidarr[i + 1][1])),
                  (255, 0, 0), thickness=2)
         for i, _ in enumerate(track.centroidarr)
         if i < len(track.centroidarr) - 1]
        bbox_xyxy = track.bbox_history[-1][0:4]
        identities, categories = track.id, track.detclass
        #print('####sort.py line74:', bbox_xyxy)
        track_draw_boxes(im0, [bbox_xyxy], [identities], [categories], names)
    return im0
def linear_assignment(cost_matrix):
    try:
        import lap  # linear assignment problem solver
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        x, y = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(x, y)))
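# Worked example (hand-computed): for a cost matrix
#   [[0.1, 0.9],
#    [0.8, 0.2]]
# the minimum-cost assignment pairs row 0 with column 0 and row 1 with column 1
# (total cost 0.3), so linear_assignment returns [[0, 0], [1, 1]].
# SORT below calls this with -iou_matrix, turning cost minimization into IOU maximization.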
  148. """From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]"""
  149. def iou_batch(bb_test, bb_gt):
  150. bb_gt = np.expand_dims(bb_gt, 0)
  151. bb_test = np.expand_dims(bb_test, 1)
  152. xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0])
  153. yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
  154. xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
  155. yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
  156. w = np.maximum(0., xx2 - xx1)
  157. h = np.maximum(0., yy2 - yy1)
  158. wh = w * h
  159. o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
  160. + (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
  161. return(o)
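# Worked example (hand-computed): boxes [0,0,10,10] and [5,5,15,15] overlap in a
# 5x5 patch, so IOU = 25 / (100 + 100 - 25) = 25/175 ≈ 0.143. Broadcasting makes
# this an (N, M) matrix of IOUs for N test boxes against M ground-truth boxes.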
  162. """Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the center of the box and s is the scale/area and r is the aspect ratio"""
  163. def convert_bbox_to_z(bbox):
  164. w = bbox[2] - bbox[0]
  165. h = bbox[3] - bbox[1]
  166. x = bbox[0] + w/2.
  167. y = bbox[1] + h/2.
  168. s = w * h
  169. #scale is just area
  170. r = w / float(h)
  171. return np.array([x, y, s, r]).reshape((4, 1))
  172. """Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
  173. [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right"""
  174. def convert_x_to_bbox(x, score=None):
  175. w = np.sqrt(x[2] * x[3])
  176. h = x[2] / w
  177. if(score==None):
  178. return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
  179. else:
  180. return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
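# The two conversions invert each other. Hand-checked round trip: the box
# [0, 0, 10, 20] maps to z = [x=5, y=10, s=200, r=0.5]; converting back,
# w = sqrt(s*r) = sqrt(100) = 10 and h = s/w = 20, recovering [0, 0, 10, 20].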
  181. """This class represents the internal state of individual tracked objects observed as bbox."""
  182. class KalmanBoxTracker(object):
  183. count = 0
  184. def __init__(self, bbox):
  185. """
  186. Initialize a tracker using initial bounding box
  187. Parameter 'bbox' must have 'detected class' int number at the -1 position.
  188. """
  189. self.kf = KalmanFilter(dim_x=7, dim_z=4)
  190. self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
  191. self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
  192. self.kf.R[2:,2:] *= 10. # R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes')
  193. self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
  194. self.kf.P *= 10.
  195. self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things)
  196. self.kf.Q[4:,4:] *= 0.5
  197. self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR
  198. self.time_since_update = 0
  199. self.id = KalmanBoxTracker.count
  200. KalmanBoxTracker.count += 1
  201. self.history = []
  202. self.hits = 0
  203. self.hit_streak = 0
  204. self.age = 0
  205. self.frames = []
  206. self.centroidarr = []
  207. CX = (bbox[0]+bbox[2])//2
  208. CY = (bbox[1]+bbox[3])//2
  209. self.centroidarr.append((CX,CY))
  210. #keep yolov5 detected class information
  211. self.detclass = bbox[5]
  212. self.frames.append( bbox[6] ) ###new added for interpolation
  213. # If we want to store bbox
  214. self.bbox_history = [bbox]
    def update(self, bbox):
        """
        Updates the state vector with observed bbox
        """
        self.time_since_update = 0
        self.history = []
        self.hits += 1
        self.hit_streak += 1
        self.kf.update(convert_bbox_to_z(bbox))
        self.detclass = bbox[5]
        CX = (bbox[0] + bbox[2]) // 2
        CY = (bbox[1] + bbox[3]) // 2
        self.centroidarr.append((CX, CY))
        self.frames.append(bbox[6])  ### new: frame index, kept for interpolation
        self.bbox_history.append(bbox)
    def predict(self):
        """
        Advances the state vector and returns the predicted bounding box estimate
        """
        if (self.kf.x[6] + self.kf.x[2]) <= 0:  # keep the predicted area non-negative
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        if self.time_since_update > 0:
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        # bbox = self.history[-1]
        # CX = (bbox[0]+bbox[2])/2
        # CY = (bbox[1]+bbox[3])/2
        # self.centroidarr.append((CX,CY))
        return self.history[-1]
    def get_state(self):
        """
        Returns the current bounding box estimate as
        [[x1, y1, x2, y2, detclass, u_dot, v_dot, s_dot]]
        """
        arr_detclass = np.expand_dims(np.array([self.detclass]), 0)
        arr_u_dot = np.expand_dims(self.kf.x[4], 0)
        arr_v_dot = np.expand_dims(self.kf.x[5], 0)
        arr_s_dot = np.expand_dims(self.kf.x[6], 0)
        return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1)
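# A minimal single-target sketch (hypothetical detections, following the
# 7-column layout [x1, y1, x2, y2, score, class, frame] used above):
#   trk = KalmanBoxTracker(np.array([100., 80., 150., 120., 0.9, 2, 0]))
#   pred = trk.predict()                                       # (1, 4) predicted box
#   trk.update(np.array([102., 82., 152., 122., 0.95, 2, 1]))  # correct with the new observation
#   state = trk.get_state()                                    # (1, 8): box, class, velocities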
def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
    """
    Assigns detections to tracked objects (both represented as bounding boxes)
    Returns 3 lists:
    1. matches,
    2. unmatched_detections
    3. unmatched_trackers
    """
    if len(trackers) == 0:
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)
    iou_matrix = iou_batch(detections, trackers)
    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            # Each detection overlaps at most one tracker and vice versa: the matching is unambiguous
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0, 2))
    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in matched_indices[:, 0]:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in matched_indices[:, 1]:
            unmatched_trackers.append(t)
    # filter out matches with low IOU
    matches = []
    for m in matched_indices:
        if iou_matrix[m[0], m[1]] < iou_threshold:
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1, 2))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)
    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
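# Hand-checked example: with detections [[0,0,10,10], [50,50,60,60]] and a single
# tracker box [1,1,11,11], the first detection matches the tracker (IOU = 81/119 ≈ 0.68 > 0.3),
# so the function returns matches=[[0, 0]], unmatched_detections=[1], unmatched_trackers=[].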
class Sort(object):
    # def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
    def __init__(self, max_age=1, min_hits=1000, iou_threshold=0.1):
        """
        Parameters for SORT
        """
        self.max_age = max_age  # max misses: a track is dropped after going undetected for this many frames
        self.min_hits = min_hits  # min hits: tracks with fewer hits than this are not returned
        self.iou_threshold = iou_threshold
        self.trackers = []
        self.frame_count = 0
    def getTrackers(self,):
        return self.trackers
    def update(self, dets=np.empty((0, 6))):
        """
        Parameters:
        'dets' - a numpy array of detections, one row per detection; this variant
        expects [x1, y1, x2, y2, score, class, frame] per row (class and frame
        index are read at positions 5 and 6 by KalmanBoxTracker).
        Call this method once per frame even if there are no detections
        (pass an empty array such as the np.empty((0, 6)) default).
        Returns a similar array, where the last column is the object ID.
        NOTE: The number of objects returned may differ from the number of objects provided.
        """
        self.frame_count += 1
        # Predict each track's position in the current frame and record the indices of trackers
        # whose state has gone invalid. Allocate one row per existing Kalman tracker
        # (i.e. per target tracked in the previous frame).
        # Get predicted locations from existing trackers
        trks = np.zeros((len(self.trackers), 6))  # holds the trackers' predictions
        to_del = []  # indices of trackers to delete
        ret = []  # tracked boxes to return
        # Iterate over the Kalman tracker list
        for t, trk in enumerate(trks):
            # Use Kalman tracker t to predict the corresponding target's box
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0, 0]
            # If the predicted box contains NaNs, mark this tracker for deletion
            if np.any(np.isnan(pos)):
                to_del.append(t)
        # np.ma.masked_invalid masks invalid entries (NaN or inf);
        # np.ma.compress_rows drops every row of a 2-D array that contains a masked value.
        # trks now holds the current-frame predictions for the targets tracked in the previous frame.
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        # Delete invalid trackers in reverse order so the remaining indices stay valid
        for t in reversed(to_del):
            self.trackers.pop(t)
        # Associate detections with the predicted boxes to get matched targets,
        # new targets, and targets that left the frame
        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
        # Update matched trackers with assigned detections
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :])
        # Create and initialize new trackers for unmatched detections
        for i in unmatched_dets:
            trk = KalmanBoxTracker(np.hstack(dets[i, :]))
            #trk = KalmanBoxTracker(np.hstack((dets[i,:], np.array([0]))))  ## initialization once appended an extra value, possibly marking the first appearance; the box then has 7 entries
            #print(' ###line271: ', np.hstack((dets[i,:], np.array([0]))).shape)
            self.trackers.append(trk)
        # Iterate back to front; only return tracks seen in the current frame whose hit streak
        # reaches self.min_hits (unless tracking has just started); drop a tracker once its
        # miss count exceeds self.max_age. The hit_streak test suppresses a target's first few frames.
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            # Current bounding box estimate
            d = trk.get_state()[0]
            # Append the box and id of each successfully tracked target to ret
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))  # +1'd because MOT benchmark requires positive IDs
            i -= 1
            # Remove dead tracklets: targets that were lost or left the frame
            if trk.time_since_update > self.max_age:
                self.trackers.pop(i)  # pop removes the element at the given index
        # Return the boxes and ids of all targets in the current frame as a 2-D matrix
        if len(ret) > 0:
            #print('####sort.py line282:', len(ret), ret[0].shape, (np.concatenate(ret)).shape)
            return np.concatenate(ret)
        return np.empty((0, 6))
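# A minimal usage sketch (hypothetical detections; one update() call per frame,
# rows laid out as [x1, y1, x2, y2, score, class, frame]):
#   mot = Sort(max_age=1, min_hits=3, iou_threshold=0.3)
#   for iframe, frame_dets in enumerate(all_frame_dets):   # frame_dets: (N, 7) array
#       tracks = mot.update(frame_dets)
#       # tracks[:, :4] -> boxes, tracks[:, 4] -> class, tracks[:, -1] -> track ID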
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='SORT demo')
    parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]', action='store_true')
    parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
    parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
    parser.add_argument("--max_age",
                        help="Maximum number of frames to keep a track alive without associated detections.",
                        type=int, default=1)
    parser.add_argument("--min_hits",
                        help="Minimum number of associated detections before a track is initialised.",
                        type=int, default=3)
    parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # all train
    args = parse_args()
    display = args.display
    phase = args.phase
    total_time = 0.0
    total_frames = 0
    colours = np.random.rand(32, 3)  # used only for display
    if display:
        if not os.path.exists('mot_benchmark'):
            print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
            exit()
        plt.ion()
        fig = plt.figure()
        ax1 = fig.add_subplot(111, aspect='equal')
    if not os.path.exists('output'):
        os.makedirs('output')
    pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
    for seq_dets_fn in glob.glob(pattern):
        mot_tracker = Sort(max_age=args.max_age,
                           min_hits=args.min_hits,
                           iou_threshold=args.iou_threshold)  # create instance of the SORT tracker
        seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
        seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
        with open(os.path.join('output', '%s.txt' % (seq)), 'w') as out_file:
            print("Processing %s." % (seq))
            for frame in range(int(seq_dets[:, 0].max())):
                frame += 1  # detection and frame numbers begin at 1
                dets = seq_dets[seq_dets[:, 0] == frame, 2:7]
                dets[:, 2:4] += dets[:, 0:2]  # convert from [x1,y1,w,h] to [x1,y1,x2,y2]
                total_frames += 1
                if display:
                    fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg' % (frame))
                    im = io.imread(fn)
                    ax1.imshow(im)
                    plt.title(seq + ' Tracked Targets')
                start_time = time.time()
                trackers = mot_tracker.update(dets)
                cycle_time = time.time() - start_time
                total_time += cycle_time
                for d in trackers:
                    # NOTE: upstream SORT returned the track ID at d[4]; this variant's update()
                    # appends the ID as the last column, with d[4] holding the detected class.
                    print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (frame, d[4], d[0], d[1], d[2] - d[0], d[3] - d[1]), file=out_file)
                    if display:
                        d = d.astype(np.int32)
                        ax1.add_patch(patches.Rectangle((d[0], d[1]), d[2] - d[0], d[3] - d[1], fill=False, lw=3, ec=colours[d[4] % 32, :]))
                if display:
                    fig.canvas.flush_events()
                    plt.draw()
                    ax1.cla()
    print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
    if display:
        print("Note: to get real runtime results run without the option: --display")