
add cityMangement model

master
wangjin0928 1 year ago
parent commit 3310d3555c
10 changed files with 686 additions and 11 deletions
  1. +114 -5   AI.py
  2. BIN       __pycache__/AI.cpython-38.pyc
  3. +3 -1     readme.md
  4. BIN       trackUtils/__pycache__/sort.cpython-38.pyc
  5. +483 -0   trackUtils/sort.py
  6. BIN       utilsK/__pycache__/queRiver.cpython-38.pyc
  7. +75 -5    utilsK/queRiver.py
  8. +4 -0     weights/conf/cityMangement/labelnames.json
  9. +7 -0     weights/conf/cityMangement/para.json
  10. BIN      weights/conf/cityMangement/yolov5.pt

+114 -5  AI.py

@@ -6,7 +6,8 @@ from segutils.trafficUtils import trafficPostProcessing,colour_code_segmentation
from utils.torch_utils import select_device
-from utilsK.queRiver import get_labelnames,get_label_arrays,post_process_,img_pad,draw_painting_joint
+from utilsK.queRiver import get_labelnames,get_label_arrays,post_process_,img_pad,draw_painting_joint,detectDraw,getDetections
+from trackUtils.sort import moving_average_wang
from utils.datasets import letterbox
import numpy as np
@@ -15,6 +16,8 @@ import math
from PIL import Image
import torch.nn.functional as F
from copy import deepcopy
from scipy import interpolate
def xywh2xyxy(box,iW=None,iH=None):
    xc,yc,w,h = box[0:4]
    x0 =max(0, xc-w/2.0)
@@ -246,7 +249,7 @@ def AI_process_v2(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,d
    return p_result,time_info
-def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,trtFlag_det=False):
+def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=True,device=' cuda:0',conf_thres=0.25, iou_thres=0.45,allowedList=[0,1,2,3], font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3} ,trtFlag_det=False,SecNms=None):
    #Input parameters
    # im0s---list of original images
    # model---detection model; segmodel---segmentation model (None if not used)
@@ -288,12 +291,119 @@ def AI_process_forest(im0s,model,segmodel,names,label_arraylist,rainbows,half=Tr
    datas = [[''], img, im0s, None,pred,seg_pred,10]
    ObjectPar={ 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 }
-    p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos)
+    p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos,ovlap_thres=SecNms)
    #p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,object_config=allowedList,segmodel=segFlag,font=font,padInfos=padInfos)
    time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
    return p_result,time_info+timeOut
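#A hedged call sketch for the new SecNms hook (image/model names hypothetical): a float
#is forwarded to post_process_ as ovlap_thres, enabling the cross-category
#overlap_box_suppression step; SecNms=None keeps the previous behavior.
#  p_result, tinfo = AI_process_forest([im0], det_model, None, names, label_arraylist,
#                                      rainbows, half=True, conf_thres=0.25,
#                                      iou_thres=0.45, allowedList=[0,1], SecNms=0.6)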
def AI_det_track( im0s_in,modelPar,processPar,sort_tracker, drawPar):
    im0s,iframe=im0s_in[0],im0s_in[1]
    model = modelPar['det_Model']
    half,device,conf_thres, iou_thres,trtFlag_det = processPar['half'], processPar['device'], processPar['conf_thres'], processPar['iou_thres'],processPar['trtFlag_det']
    names,label_arraylist,rainbows,font,allowedList = drawPar['names'], drawPar['label_arraylist'], drawPar['rainbows'], drawPar['font'],drawPar['allowedList']
    time0=time.time()
    if trtFlag_det:
        img, padInfos = img_pad(im0s[0], size=(640,640,3)) ;img = [img]
    else:
        img = [letterbox(x, 640, auto=True, stride=32)[0] for x in im0s];padInfos=None
    img = np.stack(img, 0)
    # Convert
    img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
    img = np.ascontiguousarray(img)
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    seg_pred = None;segFlag=False
    time1=time.time()
    pred = yolov5Trtforward(model,img) if trtFlag_det else model(img,augment=False)[0]
    time2=time.time()
    datas = [[''], img, im0s, None,pred,seg_pred,10]
    ObjectPar={ 'object_config':allowedList, 'slopeIndex':[] ,'segmodel':segFlag,'segRegionCnt':0 }
    p_result,timeOut = getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,10,ObjectPar=ObjectPar,font=font,padInfos=padInfos)
    #p_result[1]= detectDraw(p_result[1],p_result[2],label_arraylist,rainbows,font)
    time_info = 'letterbox:%.1f, infer:%.1f, '%( (time1-time0)*1000,(time2-time1)*1000 )
    #Add the control for how frequently the tracker is invoked here
    #..................USE TRACK FUNCTION....................
    #pass an empty array to sort
    dets_to_sort = np.empty((0,7), dtype=np.float32)
    # NOTE: We send in detected object class too
    for detclass,x1,y1,x2,y2,conf in p_result[2]:
        #print('#######line342:',x1,y1,x2,y2,img.shape,[x1, y1, x2, y2, conf, detclass,iframe])
        dets_to_sort = np.vstack((dets_to_sort,
                        np.array([x1, y1, x2, y2, conf, detclass,iframe],dtype=np.float32) ))
    # Run SORT
    tracked_dets = deepcopy(sort_tracker.update(dets_to_sort) )
    tracks = sort_tracker.getTrackers()
    p_result.append(tracked_dets)  ###index=4
    p_result.append(tracks)  ###index=5
    return p_result,time_info+timeOut
def AI_det_track_batch(imgarray_list, iframe_list ,modelPar,processPar,sort_tracker, drawPar,trackPar):
    #for im0s in im0s_batch:
    #    p_result,time_info = AI_det_track( im0s,modelPar,processPar,sort_tracker, drawPar)
    det_cnt,windowsize = trackPar['det_cnt'] ,trackPar['windowsize']
    trackers_dic={}
    frame_min = iframe_list[0];frame_max=iframe_list[-1];new_frames = np.linspace(frame_min,frame_max,frame_max-frame_min+1 )
    index_list = list(range( 0, len(iframe_list) ,det_cnt ));index_list.append( len(iframe_list) - 1 )
    for iframe_index, index_frame in enumerate(index_list):
        p_result,timeOut = AI_det_track( [ [imgarray_list[index_frame]] ,iframe_list[index_frame] ],modelPar,processPar,sort_tracker, drawPar )
        for tracker in p_result[5]:
            trackers_dic[tracker.id]=deepcopy(tracker)
    time_patch1 = time.time()
    track_det_result = np.empty((0,8))
    for trackId in trackers_dic.keys():
        tracker = trackers_dic[trackId]
        bbox_history = np.array(tracker.bbox_history)
        if len(bbox_history)<2: continue
        ###Convert (x0,y0,x1,y1) to (xc,yc,w,h)
        xcs_ycs = (bbox_history[:,0:2] + bbox_history[:,2:4] )/2
        whs = bbox_history[:,2:4] - bbox_history[:,0:2]
        bbox_history[:,0:2] = xcs_ycs;bbox_history[:,2:4] = whs;
        arrays_box = bbox_history[:,0:7].transpose();frames=bbox_history[:,6]
        #frame_min is the first frame of this batch: for batch [1,100] frame_min=1; for [101,200] frame_min=101.
        #frames[0] is the first frame in which this target appeared, e.g. for [1,11,21,31,41] frames[0]=1; frames[0] may precede frame_min, i.e. one track can span multiple batches.
        ##To restrict the interpolation range, take the intersection of [frame_min,frame_max] and [frames[0],frames[-1]]:
        #inter_frame_min = int(max(frame_min, frames[0])); inter_frame_max = int(min( frame_max, frames[-1] ))
        ##To recover the complete trajectory, interpolate from the frame where the target first appeared:
        inter_frame_min=int(frames[0]);inter_frame_max=int(frames[-1])
        new_frames= np.linspace(inter_frame_min,inter_frame_max,inter_frame_max-inter_frame_min+1 )
        f_linear = interpolate.interp1d(frames,arrays_box); interpolation_x0s = (f_linear(new_frames)).transpose()
        move_cnt_use =(len(interpolation_x0s)+1)//2*2-1 if len(interpolation_x0s)<windowsize else windowsize
        for im in range(4):
            interpolation_x0s[:,im] = moving_average_wang(interpolation_x0s[:,im],move_cnt_use )
        cnt = inter_frame_max-inter_frame_min+1; trackIds = np.zeros((cnt,1)) + trackId
        interpolation_x0s = np.hstack( (interpolation_x0s, trackIds ) )
        track_det_result = np.vstack(( track_det_result, interpolation_x0s) )
        #print('#####line116:',trackId,frame_min,frame_max,'----------',interpolation_x0s.shape,track_det_result.shape ,'-----')
    ##Convert [xc,yc,w,h] back to [x0,y0,x1,y1]
    x0s = track_det_result[:,0] - track_det_result[:,2]/2 ; x1s = track_det_result[:,0] + track_det_result[:,2]/2
    y0s = track_det_result[:,1] - track_det_result[:,3]/2 ; y1s = track_det_result[:,1] + track_det_result[:,3]/2
    track_det_result[:,0] = x0s; track_det_result[:,1] = y0s;
    track_det_result[:,2] = x1s; track_det_result[:,3] = y1s;
    return track_det_result
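#Sketch of the densification step above, on hypothetical numbers: detections run
#only every det_cnt frames, interp1d fills the per-frame gaps, and
#moving_average_wang smooths each of xc/yc/w/h before the corners are restored.
#  frames = np.array([0., 5., 10.])          # frames where the track was observed
#  xcs    = np.array([100., 120., 160.])     # sparse xc samples
#  f = interpolate.interp1d(frames, xcs)
#  dense = f(np.linspace(0, 10, 11))         # one xc value per frame 0..10
#  dense = moving_average_wang(dense, 5)     # odd window; endpoints stay pinned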
def ocr_process(pars):
    img_patch,engine,context,converter,AlignCollate_normal,device=pars[0:6]
@@ -345,8 +455,7 @@ def main():
    if half: model.half()
    segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)
    ##Image test
    #url='images/examples/20220624_响水河_12300_1621.jpg'
    impth = 'images/examples/'

BIN  __pycache__/AI.cpython-38.pyc


+3 -1  readme.md

@@ -29,4 +29,6 @@
1. Added "chanelEmerency" detection of people fallen into the river
2. Added the OCR2 model, switching to a new CRNN recognition model.
2023.6.9
-1. Updated Zhang Jianchuan's new river segmentation model, under business "river2"
+1. Updated Zhang Jianchuan's new river segmentation model, under business "river2"
2023.7.3
1. Added the city-management project: pure detection of "vehicles" and "garbage"; business name "cityMangement"

BIN  trackUtils/__pycache__/sort.cpython-38.pyc


+483 -0  trackUtils/sort.py

@@ -0,0 +1,483 @@
from __future__ import print_function
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io
import glob
import time,cv2
import argparse
from filterpy.kalman import KalmanFilter
np.random.seed(0)
def drawBoxTraceSimplied(track_det_result,iiframe, img_draw):
    boxes_oneFrame = track_det_result[ track_det_result[:,6]==iiframe ]
    ###Draw the detection boxes on this frame
    for box in boxes_oneFrame:
        x0,y0,x1,y1 = box[0:4]
        cv2.rectangle(img_draw, ( int(x0), int(y0) ), ( int(x1), int(y1) ), (255,0,20), 2)
    ###Draw the trajectories on this frame
    track_ids = boxes_oneFrame[:,7].tolist()
    boxes_before_oneFrame = track_det_result[ track_det_result[:,6]<=iiframe ]
    for trackId in track_ids:
        boxes_before_oneFrame_oneId = boxes_before_oneFrame[boxes_before_oneFrame[:,7]==trackId]
        xcs = (boxes_before_oneFrame_oneId[:,0]+boxes_before_oneFrame_oneId[:,2])//2
        ycs = (boxes_before_oneFrame_oneId[:,1]+boxes_before_oneFrame_oneId[:,3])//2
        [cv2.line(img_draw, ( int(xcs[i]) , int(ycs[i]) ),
                  ( int(xcs[i+1]),int(ycs[i+1]) ),(255,0,0), thickness=2)
                  for i,_ in enumerate(xcs) if i < len(xcs)-1 ]
    return img_draw
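#Usage sketch: track_det_result rows are [x0,y0,x1,y1,conf,cls,frame,trackId]
#(the array AI_det_track_batch returns), so drawing frame i of a decoded clip is
#(frames_bgr is a hypothetical list of decoded BGR frames):
#  img_draw = drawBoxTraceSimplied(track_det_result, i, frames_bgr[i].copy())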
def moving_average_wang(interval, windowsize):
    outNum = interval.copy()
    if windowsize==1:
        return outNum
    assert windowsize%2!=0
    window = np.ones(int(windowsize)) / float(windowsize)
    re = np.convolve(interval, window, 'valid')
    cnt = int((windowsize - 1)/2+0.5)
    total = len(interval)
    outNum = np.zeros( (total,),dtype=np.float32 )
    outNum[0]=interval[0]
    outNum[-1]=interval[-1]
    for i in range(1,cnt):
        outNum[i] = np.mean( interval[0:2*i-1] )
        outNum[-i-1] = np.mean( interval[-2*i-1:] )
    #print('###line113:',outNum.shape,re.shape,cnt,windowsize)
    outNum[cnt:-cnt]=re[:]
    return outNum
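#Example on hypothetical data: the first/last samples are kept as-is and the
#interior is the windowsize-point mean, so a jittery coordinate is smoothed
#without shifting the track's endpoints.
#  xs = np.array([10., 12., 9., 15., 11., 14., 13.], dtype=np.float32)
#  xs_smooth = moving_average_wang(xs, windowsize=5)
#  assert xs_smooth[0] == xs[0] and xs_smooth[-1] == xs[-1]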
def track_draw_trace(tracks,im0):
    for track in tracks:
        [cv2.line(im0, (int(track.centroidarr[i][0]),
                        int(track.centroidarr[i][1])),
                       (int(track.centroidarr[i+1][0]),
                        int(track.centroidarr[i+1][1])),
                       (255,0,0), thickness=2)
                       for i,_ in enumerate(track.centroidarr)
                       if i < len(track.centroidarr)-1 ]
    return im0
"""Function to Draw Bounding boxes"""
def track_draw_boxes(img, bbox, identities=None, categories=None, names=None ):
for i, box in enumerate(bbox):
#print('####line33 sort.py:',box)
x1, y1, x2, y2 = [int(x) for x in box]
cat = int(categories[i]) if categories is not None else 0
id = int(identities[i]) if identities is not None else 0
data = (int((box[0]+box[2])/2),(int((box[1]+box[3])/2)))
label = str(id) + ":"+ names[cat]
(w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,20), 2)
cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (255,144,30), -1)
cv2.putText(img, label, (x1, y1 - 5),cv2.FONT_HERSHEY_SIMPLEX,
0.6, [255, 255, 255], 1)
# cv2.circle(img, data, 6, color,-1) #centroid of box
return img
def track_draw_all_boxes(tracked_dets,im0,names):
    if len(tracked_dets)>0:
        bbox_xyxy = tracked_dets[:,:4]
        identities = tracked_dets[:, 8]
        categories = tracked_dets[:, 4]
        track_draw_boxes(im0, bbox_xyxy, identities, categories, names)
    return im0
####Trajectories come from the tracker chain; boxes come from the track.update results.
def track_draw_boxAndTrace(tracked_dets,tracks,im0,names):
    track_draw_all_boxes(tracked_dets,im0,names)
    track_draw_trace(tracks,im0)
    return im0
####Both trajectories and boxes come from the tracker chain
def track_draw_trace_boxes(tracks,im0,names):
    for track in tracks:
        [cv2.line(im0, (int(track.centroidarr[i][0]),
                        int(track.centroidarr[i][1])),
                       (int(track.centroidarr[i+1][0]),
                        int(track.centroidarr[i+1][1])),
                       (255,0,0), thickness=2)
                       for i,_ in enumerate(track.centroidarr)
                       if i < len(track.centroidarr)-1 ]
        bbox_xyxy = track.bbox_history[-1][0:4]
        identities,categories = track.id , track.detclass
        #print('####sort.py line74:',bbox_xyxy)
        track_draw_boxes(im0, [bbox_xyxy], [identities], [categories], names)
    return im0
def linear_assignment(cost_matrix):
    try:
        import lap  #linear assignment problem solver
        _, x, y = lap.lapjv(cost_matrix, extend_cost = True)
        return np.array([[y[i],i] for i in x if i>=0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        x,y = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(x,y)))
"""From SORT: Computes IOU between two boxes in the form [x1,y1,x2,y2]"""
def iou_batch(bb_test, bb_gt):
bb_gt = np.expand_dims(bb_gt, 0)
bb_test = np.expand_dims(bb_test, 1)
xx1 = np.maximum(bb_test[...,0], bb_gt[..., 0])
yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1])
xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2])
yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3])
w = np.maximum(0., xx2 - xx1)
h = np.maximum(0., yy2 - yy1)
wh = w * h
o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1])
+ (bb_gt[..., 2] - bb_gt[..., 0]) * (bb_gt[..., 3] - bb_gt[..., 1]) - wh)
return(o)
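#Usage sketch (hypothetical boxes): one vectorized call yields the full
#(num_detections, num_trackers) IoU matrix used by the association step.
#  dets = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])
#  trks = np.array([[1., 1., 11., 11.]])
#  iou_batch(dets, trks)   # shape (2, 1); ~0.68 overlap for the first box, 0.0 for the second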
"""Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the center of the box and s is the scale/area and r is the aspect ratio"""
def convert_bbox_to_z(bbox):
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w/2.
y = bbox[1] + h/2.
s = w * h
#scale is just area
r = w / float(h)
return np.array([x, y, s, r]).reshape((4, 1))
"""Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right"""
def convert_x_to_bbox(x, score=None):
w = np.sqrt(x[2] * x[3])
h = x[2] / w
if(score==None):
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.]).reshape((1,4))
else:
return np.array([x[0]-w/2.,x[1]-h/2.,x[0]+w/2.,x[1]+h/2.,score]).reshape((1,5))
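#Round-trip sketch: corners -> [x,y,s,r] -> corners, since w = sqrt(s*r) and h = s/w.
#  z = convert_bbox_to_z(np.array([0., 0., 10., 20.]))  # x=5, y=10, s=200, r=0.5
#  convert_x_to_bbox(z)                                  # array([[ 0.,  0., 10., 20.]])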
"""This class represents the internal state of individual tracked objects observed as bbox."""
class KalmanBoxTracker(object):
count = 0
def __init__(self, bbox):
"""
Initialize a tracker using initial bounding box
Parameter 'bbox' must have 'detected class' int number at the -1 position.
"""
self.kf = KalmanFilter(dim_x=7, dim_z=4)
self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],[0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
self.kf.R[2:,2:] *= 10. # R: Covariance matrix of measurement noise (set to high for noisy inputs -> more 'inertia' of boxes')
self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
self.kf.P *= 10.
self.kf.Q[-1,-1] *= 0.5 # Q: Covariance matrix of process noise (set to high for erratically moving things)
self.kf.Q[4:,4:] *= 0.5
self.kf.x[:4] = convert_bbox_to_z(bbox) # STATE VECTOR
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 0
self.hit_streak = 0
self.age = 0
self.frames = []
self.centroidarr = []
CX = (bbox[0]+bbox[2])//2
CY = (bbox[1]+bbox[3])//2
self.centroidarr.append((CX,CY))
#keep yolov5 detected class information
self.detclass = bbox[5]
self.frames.append( bbox[6] ) ###new added for interpolation
# If we want to store bbox
self.bbox_history = [bbox]
    def update(self, bbox):
        """
        Updates the state vector with observed bbox
        """
        self.time_since_update = 0
        self.history = []
        self.hits += 1
        self.hit_streak += 1
        self.kf.update(convert_bbox_to_z(bbox))
        self.detclass = bbox[5]
        CX = (bbox[0]+bbox[2])//2
        CY = (bbox[1]+bbox[3])//2
        self.centroidarr.append((CX,CY))
        self.frames.append( bbox[6] )  ###new: added for interpolation
        self.bbox_history.append(bbox)
    def predict(self):
        """
        Advances the state vector and returns the predicted bounding box estimate
        """
        if((self.kf.x[6]+self.kf.x[2])<=0):
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        if(self.time_since_update>0):
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        # bbox=self.history[-1]
        # CX = (bbox[0]+bbox[2])/2
        # CY = (bbox[1]+bbox[3])/2
        # self.centroidarr.append((CX,CY))
        return self.history[-1]
    def get_state(self):
        """
        Returns the current bounding box estimate
        # test
        arr1 = np.array([[1,2,3,4]])
        arr2 = np.array([0])
        arr3 = np.expand_dims(arr2, 0)
        np.concatenate((arr1,arr3), axis=1)
        """
        arr_detclass = np.expand_dims(np.array([self.detclass]), 0)
        arr_u_dot = np.expand_dims(self.kf.x[4],0)
        arr_v_dot = np.expand_dims(self.kf.x[5],0)
        arr_s_dot = np.expand_dims(self.kf.x[6],0)
        return np.concatenate((convert_x_to_bbox(self.kf.x), arr_detclass, arr_u_dot, arr_v_dot, arr_s_dot), axis=1)
def associate_detections_to_trackers(detections, trackers, iou_threshold = 0.3):
    """
    Assigns detections to tracked objects (both represented as bounding boxes)
    Returns 3 lists:
    1. matches,
    2. unmatched_detections
    3. unmatched_trackers
    """
    if(len(trackers)==0):
        return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
    iou_matrix = iou_batch(detections, trackers)
    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0,2))
    unmatched_detections = []
    for d, det in enumerate(detections):
        if(d not in matched_indices[:,0]):
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if(t not in matched_indices[:,1]):
            unmatched_trackers.append(t)
    #filter out matches with low IOU
    matches = []
    for m in matched_indices:
        if(iou_matrix[m[0], m[1]]<iou_threshold):
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1,2))
    if(len(matches)==0):
        matches = np.empty((0,2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)
    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
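#Sketch (hypothetical boxes): one tracker near the first detection gives one
#match; the far detection comes back unmatched and will seed a new tracker.
#  dets = np.array([[0., 0., 10., 10.], [50., 50., 60., 60.]])
#  trks = np.array([[1., 1., 11., 11.]])
#  m, ud, ut = associate_detections_to_trackers(dets, trks, iou_threshold=0.3)
#  # m == [[0, 0]], ud == [1], ut == []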
class Sort(object):
    # def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
    def __init__(self, max_age=1, min_hits=1000, iou_threshold=0.1):
        """
        Parameters for SORT
        """
        self.max_age = max_age  # max miss count: a track is deleted after going undetected for this many frames
        self.min_hits = min_hits  # minimum number of hits before a track is returned
        self.iou_threshold = iou_threshold
        self.trackers = []
        self.frame_count = 0
    def getTrackers(self,):
        return self.trackers
    def update(self, dets= np.empty((0,6))):
        """
        Parameters:
        'dets' - a numpy array of detections in the format [[x1, y1, x2, y2, score], [x1,y1,x2,y2,score],...]
        Ensure this method is called even if the frame has no detections (pass np.empty((0,5))).
        Returns a similar array, where the last column is the object ID (replacing the confidence score).
        NOTE: The number of objects returned may differ from the number of objects provided.
        """
        self.frame_count += 1
        # Predict each track's position in the current frame and record the indices of trackers in a bad state.
        # Build a 2-D array sized by the current number of Kalman trackers (i.e. the targets tracked
        # in the previous frame): each row holds one tracker's predicted box.
        # Get predicted locations from existing trackers
        trks = np.zeros((len(self.trackers), 6))  # predictions of the trackers
        to_del = []  # track boxes to delete
        ret = []  # track boxes to return
        # Iterate over the Kalman trackers
        for t, trk in enumerate(trks):
            # Use Kalman tracker t to predict the corresponding target's box
            pos = self.trackers[t].predict()[0]
            # After the loop, trks holds the current-frame predictions for last frame's targets
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0, 0]
            # If the predicted box contains NaNs, mark it for deletion
            if np.any(np.isnan(pos)):
                to_del.append(t)
        # numpy.ma.masked_invalid masks invalid entries (NaN or inf);
        # numpy.ma.compress_rows removes the whole rows of a 2-D array that contain masked values
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        # Delete bad trackers in reverse order so indices stay valid
        for t in reversed(to_del):
            self.trackers.pop(t)
        # Associate detections with the predicted track boxes to get tracked, newly appeared, and departed targets
        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets, trks, self.iou_threshold)
        # Update matched trackers with assigned detections
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :])
        # Create and initialize new trackers for unmatched detections
        for i in unmatched_dets:
            trk = KalmanBoxTracker(np.hstack(dets[i,:]))
            #trk = KalmanBoxTracker(np.hstack( (dets[i,:], np.array([0] )) ) )  ##one extra value at init, possibly a first-appearance flag, giving the box 7 values
            #print(' ###line271: ', np.hstack((dets[i,:], np.array([0])) ).shape)
            self.trackers.append(trk)
        # Traverse back-to-front; only return tracks present in the current frame with hit streak >= self.min_hits
        # (unless tracking just started); delete a tracker once its miss time exceeds self.max_age.
        # hit_streak skips a target's first few frames.
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            # Current bounding-box estimate
            d = trk.get_state()[0]
            # Collect the box and id of each successfully tracked target into ret
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append(np.concatenate((d, [trk.id+1])).reshape(1,-1))  #+1'd because MOT benchmark requires positive value
            i -= 1
            #remove dead tracklet
            # Trackers that lost their target or left the frame are removed
            if(trk.time_since_update > self.max_age):
                self.trackers.pop(i)  #pop removes the element at the given index
        # Return the boxes and ids of all targets in the current frame as a 2-D array
        if(len(ret) > 0):
            #print('####sort.py line282:',len(ret),ret[0].shape, (np.concatenate(ret)).shape)
            return np.concatenate(ret)
        return np.empty((0,6))
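#Minimal usage sketch (hypothetical detections). In this fork each detection row
#carries 7 values [x1, y1, x2, y2, conf, detclass, iframe], because
#KalmanBoxTracker reads the class at index 5 and the frame at index 6:
#  tracker = Sort(max_age=1, min_hits=1, iou_threshold=0.1)
#  for iframe, frame_dets in enumerate(per_frame_dets):  # list of (N,7) arrays
#      dets = frame_dets if len(frame_dets) else np.empty((0, 7))
#      tracked = tracker.update(dets)  # each row ends with the track id (trk.id+1)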
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='SORT demo')
    parser.add_argument('--display', dest='display', help='Display online tracker output (slow) [False]',action='store_true')
    parser.add_argument("--seq_path", help="Path to detections.", type=str, default='data')
    parser.add_argument("--phase", help="Subdirectory in seq_path.", type=str, default='train')
    parser.add_argument("--max_age",
                        help="Maximum number of frames to keep alive a track without associated detections.",
                        type=int, default=1)
    parser.add_argument("--min_hits",
                        help="Minimum number of associated detections before track is initialised.",
                        type=int, default=3)
    parser.add_argument("--iou_threshold", help="Minimum IOU for match.", type=float, default=0.3)
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # all train
    args = parse_args()
    display = args.display
    phase = args.phase
    total_time = 0.0
    total_frames = 0
    colours = np.random.rand(32, 3)  #used only for display
    if(display):
        if not os.path.exists('mot_benchmark'):
            print('\n\tERROR: mot_benchmark link not found!\n\n    Create a symbolic link to the MOT benchmark\n    (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n    $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
            exit()
        plt.ion()
        fig = plt.figure()
        ax1 = fig.add_subplot(111, aspect='equal')
    if not os.path.exists('output'):
        os.makedirs('output')
    pattern = os.path.join(args.seq_path, phase, '*', 'det', 'det.txt')
    for seq_dets_fn in glob.glob(pattern):
        mot_tracker = Sort(max_age=args.max_age,
                           min_hits=args.min_hits,
                           iou_threshold=args.iou_threshold)  #create instance of the SORT tracker
        seq_dets = np.loadtxt(seq_dets_fn, delimiter=',')
        seq = seq_dets_fn[pattern.find('*'):].split(os.path.sep)[0]
        with open(os.path.join('output', '%s.txt'%(seq)),'w') as out_file:
            print("Processing %s."%(seq))
            for frame in range(int(seq_dets[:,0].max())):
                frame += 1  #detection and frame numbers begin at 1
                dets = seq_dets[seq_dets[:, 0]==frame, 2:7]
                dets[:, 2:4] += dets[:, 0:2]  #convert [x1,y1,w,h] to [x1,y1,x2,y2]
                total_frames += 1
                if(display):
                    fn = os.path.join('mot_benchmark', phase, seq, 'img1', '%06d.jpg'%(frame))
                    im = io.imread(fn)
                    ax1.imshow(im)
                    plt.title(seq + ' Tracked Targets')
                start_time = time.time()
                trackers = mot_tracker.update(dets)
                cycle_time = time.time() - start_time
                total_time += cycle_time
                for d in trackers:
                    print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1'%(frame,d[4],d[0],d[1],d[2]-d[0],d[3]-d[1]),file=out_file)
                    if(display):
                        d = d.astype(np.int32)
                        ax1.add_patch(patches.Rectangle((d[0],d[1]),d[2]-d[0],d[3]-d[1],fill=False,lw=3,ec=colours[d[4]%32,:]))
                if(display):
                    fig.canvas.flush_events()
                    plt.draw()
                    ax1.cla()
    print("Total Tracking took: %.3f seconds for %d frames or %.1f FPS" % (total_time, total_frames, total_frames / total_time))
    if(display):
        print("Note: to get real runtime results run without the option: --display")

BIN  utilsK/__pycache__/queRiver.cpython-38.pyc


+75 -5  utilsK/queRiver.py

@@ -108,10 +108,7 @@ def post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,ifr
    #segmodel=True
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    if ovlap_thres:
-        try:
-            pred = overlap_box_suppression(pred, ovlap_thres)
-        except Exception as e:
-            print('post_process_ warning in overlap_box_suppression:%s'%(e))
+        pred = overlap_box_suppression(pred, ovlap_thres)
    time1=time.time()
    i=0;det=pred[0]  ###one image is detected per call
    time1_1 = time.time()
@@ -168,7 +165,80 @@ def post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,ifr
    time3=time.time()
    strout='nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2),get_ms(time1_1,time1_2),get_ms(time1_2,time1_3), get_ms(time2,time3) )
    return [im0s[0],im0,det_xywh,iframe],strout
def getDetections(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,ObjectPar={ 'object_config':[0,1,2,3,4], 'slopeIndex':[5,6,7] ,'segmodel':True,'segRegionCnt':1 },font={ 'line_thickness':None, 'fontSize':None,'boxLine_thickness':None,'waterLineColor':(0,255,255),'waterLineWidth':3},padInfos=None ,ovlap_thres=None):
    object_config,slopeIndex,segmodel,segRegionCnt=ObjectPar['object_config'],ObjectPar['slopeIndex'],ObjectPar['segmodel'],ObjectPar['segRegionCnt']
    ##Input: data produced by the dataset generator, the model predictions pred, and the NMS parameters
    ##Main steps: NMS ---> coordinate conversion ---> drawing
    ##Output: original image, AI-processed image, detection results
    time0=time.time()
    path, img, im0s, vid_cap ,pred,seg_pred= datas[0:6];
    #segmodel=True
    pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
    if ovlap_thres:
        pred = overlap_box_suppression(pred, ovlap_thres)
    time1=time.time()
    i=0;det=pred[0]  ###one image is detected per call
    time1_1 = time.time()
    #p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
    p, s, im0 = path[i], '%g: ' % i, im0s[i]
    time1_2 = time.time()
    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
    time1_3 = time.time()
    det_xywh=[];
    #im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR);
    if segmodel:
        if len(seg_pred)==2:
            im0,water = illBuildings(seg_pred,im0)
        else:
            river={ 'color':font['waterLineColor'],'line_width':font['waterLineWidth'],'segRegionCnt':segRegionCnt,'segLineShow':font['segLineShow'] }
            im0,water = drawWater(seg_pred,im0,river)
    time2=time.time()
    #plt.imshow(im0);plt.show()
    if len(det)>0:
        # Rescale boxes from img_size to im0 size
        if not padInfos:
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4],im0.shape).round()
        else:
            #print('####line131:',det[:, :])
            det[:, :4] = scale_back( det[:, :4],padInfos).round()
            #print('####line133:',det[:, :])
        #Use the seg model to validate the detection boxes and locate the river contour line
        if segmodel:
            cls_indexs = det[:, 5].clone().cpu().numpy().astype(np.int32)
            ##Decide which targets belong to the bank slope
            slope_flag = np.array([x in slopeIndex for x in cls_indexs ] )
            det_c = det.clone(); det_c=det_c.cpu().numpy()
            try:
                area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )*1.0/(1.0*(x[2]-x[0])*(x[3]-x[1])+0.00001) for x in det_c] )
            except:
                print('*****************************line143: error:',det_c)
            water_flag = np.array(area_factors>0.1)
            det = det[water_flag|slope_flag]  ##On-water targets must overlap the water region by more than 0.1; bank-slope targets are kept directly.
        #Collect the detection boxes
        for *xyxy, conf, cls in reversed(det):
            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
            cls_c = cls.cpu().numpy()
            conf_c = conf.cpu().numpy()
            tt=[ int(x.cpu()) for x in xyxy]
            line = [float(cls_c), *tt, float(conf_c)]  # label format
            det_xywh.append(line)
            label = f'{names[int(cls)]} {conf:.2f}'
            if int(cls_c) not in object_config:  ###skip targets that are not wanted
                continue
    time3=time.time()
    strout='nms:%s drawWater:%s,copy:%s,toTensor:%s,detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2),get_ms(time1_1,time1_2),get_ms(time1_2,time1_3), get_ms(time2,time3) )
    return [im0s[0],im0,det_xywh,iframe],strout
def detectDraw(im0,dets,label_arraylist,rainbows,font):
    for det in dets:
        xyxy = det[1:5]
        cls = det[0];
        conf = det[5]
        im0 = draw_painting_joint(xyxy,im0,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],font=font)
    return im0
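#Usage sketch: getDetections returns [raw_img, drawn_img, det_xywh, iframe] where each
#det_xywh row is [cls, x0, y0, x1, y1, conf] -- exactly what detectDraw consumes, so
#drawing can be deferred (e.g. until after tracking):
#  p_result, tinfo = getDetections(datas, 0.25, 0.45, names, label_arraylist, rainbows, 10)
#  img_drawn = detectDraw(p_result[1], p_result[2], label_arraylist, rainbows, font)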

def preprocess(par):

+4 -0  weights/conf/cityMangement/labelnames.json

@@ -0,0 +1,4 @@
{
    "labelnames":["车辆","垃圾"],
    "labelIndexs":["SL01","SL02"]
}

+7 -0  weights/conf/cityMangement/para.json

@@ -0,0 +1,7 @@
{


"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"ovlap_thres_crossCategory":0.6,"classes":2,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }


}
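
A hedged loading sketch (the path matches this commit; key names come from the JSON above): the post_process block maps directly onto the detector's post-processing arguments, with ovlap_thres_crossCategory feeding the new SecNms/ovlap_thres hook.

import json
with open('weights/conf/cityMangement/para.json') as f:
    para = json.load(f)['post_process']
conf_thres, iou_thres = para['conf_thres'], para['iou_thres']   # 0.25, 0.45
sec_nms = para.get('ovlap_thres_crossCategory')                 # 0.6 -> SecNms/ovlap_thres
rainbows = para['rainbows']                                     # per-class BGR draw colors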

BIN  weights/conf/cityMangement/yolov5.pt

