This commit is contained in:
wangjin0928 2025-04-26 14:13:15 +08:00
parent fc460a740b
commit 3686c38b89
195 changed files with 57864 additions and 65506 deletions

13
AI.py
View File

@ -19,8 +19,6 @@ from copy import deepcopy
from scipy import interpolate from scipy import interpolate
import glob import glob
from loguru import logger
def get_images_videos(impth, imageFixs=['.jpg','.JPG','.PNG','.png'],videoFixs=['.MP4','.mp4','.avi']): def get_images_videos(impth, imageFixs=['.jpg','.JPG','.PNG','.png'],videoFixs=['.MP4','.mp4','.avi']):
imgpaths=[];###获取文件里所有的图像 imgpaths=[];###获取文件里所有的图像
videopaths=[]###获取文件里所有的视频 videopaths=[]###获取文件里所有的视频
@ -137,15 +135,9 @@ def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar={ 'h
time2=time.time() time2=time.time()
p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos) p_result, timeOut = getDetectionsFromPreds(pred,img,im0s[0],conf_thres=conf_thres,iou_thres=iou_thres,ovlap_thres=ovlap_thres,padInfos=padInfos)
#logger.info("获取视频p_result :{}, requestId:{}, names:{}", '+++' * 10, p_result[2],names)
if score_byClass: if score_byClass:
p_result[2] = score_filter_byClass(p_result[2],score_byClass) p_result[2] = score_filter_byClass(p_result[2],score_byClass)
#print('-'*10,p_result[2])
#if mode=='highWay3.0': #if mode=='highWay3.0':
#if segmodel: #if segmodel:
if segPar and segPar['mixFunction']['function']: if segPar and segPar['mixFunction']['function']:
@ -153,7 +145,7 @@ def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar={ 'h
mixFunction = segPar['mixFunction']['function'];H,W = im0s[0].shape[0:2] mixFunction = segPar['mixFunction']['function'];H,W = im0s[0].shape[0:2]
parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2]) parMix = segPar['mixFunction']['pars'];#print('###line117:',parMix,p_result[2])
parMix['imgSize'] = (W,H) parMix['imgSize'] = (W,H)
#print(' -----------line149: ',p_result[2] ,'\n', seg_pred, parMix ,' sumpSeg:',np.sum(seg_pred)) #print(' -----------line110: ',p_result[2] ,'\n', seg_pred)
p_result[2] , timeMixPost= mixFunction(p_result[2], seg_pred, pars=parMix ) p_result[2] , timeMixPost= mixFunction(p_result[2], seg_pred, pars=parMix )
#print(' -----------line112: ',p_result[2] ) #print(' -----------line112: ',p_result[2] )
p_result.append(seg_pred) p_result.append(seg_pred)
@ -163,8 +155,8 @@ def AI_process(im0s,model,segmodel,names,label_arraylist,rainbows,objectPar={ 'h
#print('#### line121: segstr:%s timeMixPost:%s timeOut:%s'%( segstr.strip(), timeMixPost,timeOut )) #print('#### line121: segstr:%s timeMixPost:%s timeOut:%s'%( segstr.strip(), timeMixPost,timeOut ))
time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr.strip(),timeMixPost ) time_info = 'letterbox:%.1f, seg:%.1f , infer:%.1f,%s, seginfo:%s ,timeMixPost:%s '%( (time01-time0)*1000, (time1-time01)*1000 ,(time2-time1)*1000,timeOut , segstr.strip(),timeMixPost )
#if mode=='highWay3.0': #if mode=='highWay3.0':
#print('line159:','-'*10,p_result[2])
print('-'*10,p_result[2])
return p_result,time_info return p_result,time_info
def default_mix(predlist,par): def default_mix(predlist,par):
return predlist[0],'' return predlist[0],''
@ -194,7 +186,6 @@ def AI_process_N(im0s,modelList,postProcess):
#ret就是混合处理后的结果 #ret就是混合处理后的结果
ret = mixFunction( predsList, postProcess['pars']) ret = mixFunction( predsList, postProcess['pars'])
return ret[0],timeInfos+ret[1] return ret[0],timeInfos+ret[1]
def getMaxScoreWords(detRets0): def getMaxScoreWords(detRets0):

View File

@ -143,7 +143,6 @@ def detect_marking_points(detector, image, thresh, device,modelType='pth'):
image_preprocess = preprocess_image(image).to(device) image_preprocess = preprocess_image(image).to(device)
if modelType=='pth': if modelType=='pth':
prediction = detector(image_preprocess) prediction = detector(image_preprocess)
#print(prediction) #print(prediction)
elif modelType=='trt': elif modelType=='trt':
@ -197,16 +196,13 @@ def DMPR_process(img0, model, device, DMPRmodelPar):
img, ratio, (dw, dh) = letterbox(img0, DMPRmodelPar['dmprimg_size'], auto=False) img, ratio, (dw, dh) = letterbox(img0, DMPRmodelPar['dmprimg_size'], auto=False)
t1 = time.time() t1 = time.time()
#print('###line188:', height, width, img.shape)
det = detect_marking_points(model, img, DMPRmodelPar['dmpr_thresh'], device,modelType=DMPRmodelPar['modelType']) det = detect_marking_points(model, img, DMPRmodelPar['dmpr_thresh'], device,modelType=DMPRmodelPar['modelType'])
t2 = time.time() t2 = time.time()
if len(det): if len(det):
det[:, 1:3] = scale_coords2(img.shape[:2], det[:, 1:3], img0.shape) det[:, 1:3] = scale_coords2(img.shape[:2], det[:, 1:3], img0.shape)
t3 = time.time() t3 = time.time()
timeInfos = 'dmpr:%1.f (lettbox:%.1f dectect:%.1f scaleBack:%.1f) '%( (t3-t0)*1000,(t1-t0)*1000,(t2-t1)*1000,(t3-t2)*1000, ) timeInfos = 'dmpr:%1.f (lettbox:%.1f dectect:%.1f scaleBack:%.1f) '%( (t3-t0)*1000,(t1-t0)*1000,(t2-t1)*1000,(t3-t2)*1000, )
return det,timeInfos return det,timeInfos

View File

@ -1,142 +0,0 @@
import math
import numpy as np
import torch
import time
from loguru import logger
def dmpr_yolo( yolo_det, dmpr_det,pars):
    """Fuse YOLO vehicle boxes with DMPR parking-slot corner detections to
    flag illegally parked vehicles.

    Each car-class box (pars['carCls']) is dilated, then checked for a
    compatible slot corner (position inside the dilated box AND direction
    within 90 degrees of the corner->box-center bearing).  Cars with no
    compatible corner are re-labelled as pars['illCls'].

    Args:
        yolo_det: iterable of detections [x1, y1, x2, y2, conf, cls].
        dmpr_det: corner detections; from the indexing below, column 1-2 are
            the corner (x, y), column 3 its direction in radians and column 4
            a shape flag (<=0.5 T-shaped, >0.5 L-shaped)
            -- assumed layout, TODO confirm against DMPR_process.
        pars: dict with 'imgSize' (W, H), 'carCls', 'scaleRatio', 'illCls',
            'border'.

    Returns:
        (ndarray of [x1, y1, x2, y2, conf, cls], timing/info string).
    """
    #if len(yolo_det)==0 or len(dmpr_det)==0:
    #print('line11:\n',yolo_det, dmpr_det,pars)
    time1=time.time()
    if len(yolo_det)==0:
        return yolo_det,' No yolo detections'
    # img_shape is (H, W); pars['imgSize'] is (W, H).
    img_shape = (pars['imgSize'][1],pars['imgSize'][0])
    cls = pars['carCls']; scaleRatio = pars['scaleRatio']
    illParkCls = pars['illCls'];border = pars['border']
    yolo_det = np.array(yolo_det)
    yolo_det_0 = yolo_det.copy()
    #print('-'*10,'line17',yolo_det_0)
    # Drop boxes whose center lies within `border` px of the image edge
    # (avoids acting on partially visible vehicles).
    x_c = (yolo_det[:, 0] + yolo_det[:, 2]) / 2
    y_c = (yolo_det[:, 1] + yolo_det[:, 3]) / 2
    tmp = (x_c >= border) & (x_c <= (img_shape[1] - border)) & (y_c >= border) & (y_c <= (img_shape[0] - border))
    yolo_det = yolo_det[tmp]
    # yolo_det_clone columns: x1, y1, x2, y2, conf, cls, unlabel
    # (unlabel == 0 means "no compatible corner found yet").
    logger.info("标记 :{}, yolo_det:{}, dmpr_det:{}", '+++'* 10, yolo_det, dmpr_det)
    yolo_det_clone = yolo_det.copy()
    tmp_0_tensor = np.zeros([len(yolo_det), 1])
    yolo_det_clone = np.concatenate([yolo_det_clone, tmp_0_tensor], axis=1)
    # Keep only the car class for the corner-matching computation.
    yolo_det = yolo_det[yolo_det[:, -1] == cls]
    # new_yolo_det columns: x1, y1, x2, y2, flag, x_c, y_c -- the dilated box
    # plus the ORIGINAL box center (flag later counts matching corners).
    new_yolo_det = np.zeros([len(yolo_det), 7])
    x_length = yolo_det[:, 2] - yolo_det[:, 0] #x2-x1
    y_length = yolo_det[:, 3] - yolo_det[:, 1] #y2-y1
    # Dilate more along the longer side: the longer axis gets 2*scaleRatio
    # per side, the shorter axis scaleRatio per side (bool + 1 -> 1 or 2).
    x_dilate_coefficient = ((x_length > y_length) + 1)*scaleRatio
    y_dilate_coefficient = ((~(x_length > y_length)) + 1)*scaleRatio
    logger.info("膨胀前标记 :{}, yolo_det:{}", '+++'*10, new_yolo_det)
    # Center of the ORIGINAL (undilated) box.
    new_yolo_det[:, 5] = (yolo_det[:, 0] + yolo_det[:, 2]) / 2
    new_yolo_det[:, 6] = (yolo_det[:, 1] + yolo_det[:, 3]) / 2
    # Dilate, clipping to the image bounds.
    new_yolo_det[:, 0] = np.round(yolo_det[:, 0] - x_dilate_coefficient * x_length).clip(0, img_shape[1]) #x1
    new_yolo_det[:, 1] = np.round(yolo_det[:, 1] - y_dilate_coefficient * y_length).clip(0, img_shape[0]) #y1
    new_yolo_det[:, 2] = np.round(yolo_det[:, 2] + x_dilate_coefficient * x_length).clip(0, img_shape[1]) #x2
    new_yolo_det[:, 3] = np.round(yolo_det[:, 3] + y_dilate_coefficient * y_length).clip(0, img_shape[0]) #y2
    m, n = new_yolo_det.size, dmpr_det.size
    logger.info("膨胀后标记 :{}, yolo_det:{} m:{}, n:{}", '+++'*10, new_yolo_det,m,n)
    if not m or not n:
        # No cars or no corners: every surviving car box is illegal parking.
        #print('##line47 original yolo_det_clone:',yolo_det_clone)
        yolo_det_clone[np.logical_and( yolo_det_clone[:,-1]==0,yolo_det_clone[:,-2]==cls),-2] = illParkCls
        #yolo_det_clone[yolo_det_clone[:, -1] == 0 & yolo_det_clone[:, -2==cls] , -2] = illParkCls
        return yolo_det_clone[:,0:6], ' no cars or T/L corners'
    # Broadcast every (box, corner) pair into one (m, n, 12) tensor.
    new_yolo = new_yolo_det[:, np.newaxis, :].repeat(dmpr_det.shape[0], 1)  # (m, n, 7)
    dmpr_det = dmpr_det[np.newaxis, ...].repeat(new_yolo_det.shape[0], 0)
    yolo_dmpr = np.concatenate((new_yolo, dmpr_det), axis=2)  # (m, n, 7 + corner cols)
    logger.info("膨胀后标记 :{}, yolo_dmpr:{}", '+++' * 10, yolo_dmpr)
    x_p, y_p = yolo_dmpr[..., 8], yolo_dmpr[..., 9]  # corner position
    x1, y1, x2, y2 = yolo_dmpr[..., 0], yolo_dmpr[..., 1], yolo_dmpr[..., 2], yolo_dmpr[..., 3]
    x_c, y_c = yolo_dmpr[..., 5], yolo_dmpr[..., 6]
    # direction1: bearing from corner to box center, degrees in (-180, 180].
    direction1 = np.arctan2(y_c - y_p, x_c - x_p) / math.pi * 180
    direction2 = yolo_dmpr[..., 10] / math.pi * 180  # corner direction, degrees
    direction3 = direction2 + 90  # second direction of an L-shaped corner
    direction3[direction3 > 180] -= 360
    ang_diff = direction1 - direction2
    ang_diff2 = direction1 - direction3
    # A corner matches a box when it lies inside the dilated box AND the
    # bearing-vs-direction difference is within 90 degrees (wrapping the
    # difference past +/-180 via the supplementary angle).  T-shaped corners
    # (col 11 <= 0.5) check one direction; L-shaped check both.
    mask = (x_p >= x1) & (x_p <= x2) & (y_p >= y1) & (y_p <= y2) & \
           (((yolo_dmpr[..., 11] <= 0.5) &  # T-shaped corner
             (((ang_diff >= -90) & (ang_diff <= 90)) | ((ang_diff > 180) & ((360 - ang_diff) <= 90)) |
              (((ang_diff) < -180) & ((360 + ang_diff) <= 90)))) |
            ((yolo_dmpr[..., 11] > 0.5) &  # L-shaped corner
             (((ang_diff >= -90) & (ang_diff <= 90)) | ((ang_diff > 180) & ((360 - ang_diff) <= 90)) |
              (((ang_diff) < -180) & ((360 + ang_diff) <= 90))) &
             (((ang_diff2 >= -90) & (ang_diff2 <= 90)) | ((ang_diff2 > 180) & ((360 - ang_diff2) <= 90)) |
              (((ang_diff2) < -180) & ((360 + ang_diff2) <= 90)))))
    logger.info("省略 :{}, mask1:{}", '+++' * 10, mask)
    # res[i] = number of corners compatible with car box i.
    res = np.sum(mask, axis=1)
    logger.info("省略 :{}, mask2:{}", '+++' * 10, res)
    logger.info("省略 :{}, yolo_det_clone:{}", '+++' * 10, yolo_det_clone)
    yolo_det_clone[yolo_det_clone[:, -2] == cls, -1] = res
    #print('##line69 original yolo_det_clone:',yolo_det_clone)
    #yolo_det_clone[yolo_det_clone[:, -1] == 0, -2] = illParkCls
    logger.info("省略:{}, yolo_det_clone:{}", '+++' * 10, yolo_det_clone)
    #print('-'*20,'--line78',yolo_det_clone)
    # Car boxes with zero compatible corners are re-labelled as illegal parking.
    yolo_det_clone[ np.logical_and( yolo_det_clone[:,-1]==0,yolo_det_clone[:,-2]==cls) ,-2 ] = illParkCls
    #print('-'*20,'--line80:',yolo_det_clone)
    yolo_det_clone = yolo_det_clone[:,0:6]
    time2=time.time()
    return np.array(yolo_det_clone), 'dmpr_yolo:%.1f'%( (time2-time1)*1000 )
def stdc_yolo(stdc_det, yolo_det):
    """Keep only the YOLO boxes whose center pixel is background in the
    segmentation mask.

    Args:
        stdc_det: 2-D segmentation map; 0 marks background (kept region).
        yolo_det: ndarray of detections [x1, y1, x2, y2, conf, cls].

    Returns:
        The subset of yolo_det whose box-center pixel equals 0 in the mask.
    """
    seg_mask = np.uint8(stdc_det)
    # Integer box centers, indexed as mask[row, col] == mask[y, x].
    centers_x = ((yolo_det[:, 0] + yolo_det[:, 2]) // 2).astype(int)
    centers_y = ((yolo_det[:, 1] + yolo_det[:, 3]) // 2).astype(int)
    keep = seg_mask[centers_y, centers_x] == 0
    return yolo_det[keep]
def dmpr_yolo_stdc(predsList,pars):
    """Dispatch the mixed post-processing pipeline.

    With two predictions, fuse YOLO boxes with DMPR corners directly; with
    three, first filter YOLO boxes through the STDC segmentation mask.

    Args:
        predsList: [yolo_det, dmpr_det] or [yolo_det, dmpr_det, stdc_det].
        pars: parameter dict forwarded to dmpr_yolo.

    Returns:
        (detections, info string) from dmpr_yolo, or the empty yolo_det
        with a message when there is nothing to process.
    """
    stdc_det = None
    if len(predsList) == 2:
        yolo_det, dmpr_det = predsList[0:2]
    else:
        yolo_det, dmpr_det, stdc_det = predsList[0:3]
    # Nothing detected -> nothing to fuse.
    if len(yolo_det) == 0:
        return yolo_det,' No yolo detections'
    if isinstance(yolo_det, list):
        yolo_det = np.array(yolo_det)
    if stdc_det is not None:
        # Segmentation-based pre-filter before the corner fusion.
        yolo_det = stdc_yolo(stdc_det, yolo_det)
    return dmpr_yolo(yolo_det, dmpr_det, pars)

View File

@ -56,7 +56,8 @@ class Detect(nn.Module):
# y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh # y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
# z.append(y.view(bs, -1, self.no)) # z.append(y.view(bs, -1, self.no))
prediction = self.m[1](x[1]) prediction = self.m[1](x[1]) #40*40
#prediction = self.m[0](x[0]) #80*80
point_pred, angle_pred = torch.split(prediction, 4, dim=1) point_pred, angle_pred = torch.split(prediction, 4, dim=1)
point_pred = torch.sigmoid(point_pred) point_pred = torch.sigmoid(point_pred)
angle_pred = torch.tanh(angle_pred) angle_pred = torch.tanh(angle_pred)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,61 +0,0 @@
GPUID: 0
WORKERS: 1
PRINT_FREQ: 10
SAVE_FREQ: 10
PIN_MEMORY: False
OUTPUT_DIR: 'output'
CUDNN:
BENCHMARK: True
DETERMINISTIC: False
ENABLED: True
DATASET:
DATASET: 360CC
ROOT: "../textGenerator/dataset/dataset9/images"
CHAR_FILE: '../textGenerator/dataset/dataset9/chars.txt'
JSON_FILE: {'train': '../textGenerator/dataset/dataset9/train.txt', 'val': '../textGenerator/dataset/dataset9/val.txt'}
# JSON_FILE: {'train': 'H:/DL-DATASET/360M/train.txt', 'val': 'H:/DL-DATASET/360M/test.txt'}
SCALE_FACTOR: 0.25
ROT_FACTOR: 30
STD: 0.193
MEAN: 0.588
ALPHABETS: ''
TRAIN:
BATCH_SIZE_PER_GPU: 32
SHUFFLE: True
BEGIN_EPOCH: 0
END_EPOCH: 100
RESUME:
IS_RESUME: False
FILE: 'output/360CC/crnn/2023-04-27-13-01/checkpoints/checkpoint_99_acc_0.5030.pth'
OPTIMIZER: 'adam'
LR: 0.0001
WD: 0.0
LR_STEP: [60, 80]
LR_FACTOR: 0.1
MOMENTUM: 0.0
NESTEROV: False
RMSPROP_ALPHA:
RMSPROP_CENTERED:
FINETUNE:
IS_FINETUNE: False
FINETUNE_CHECKPOINIT: 'output/checkpoints/mixed_second_finetune_acc_97P7.pth'
FREEZE: true
TEST:
BATCH_SIZE_PER_GPU: 16
SHUFFLE: True # for random test rather than test on the whole validation set
NUM_TEST_BATCH: 1000
NUM_TEST_DISP: 10
MODEL:
NAME: 'crnn'
IMAGE_SIZE:
OW: 160 # origial width: 280
H: 32
W: 160 # resized width: 160
NUM_CLASSES: 0
NUM_HIDDEN: 256

File diff suppressed because it is too large Load Diff

View File

@ -1,92 +0,0 @@
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
A
B
C
D
E
F
G
H
I
J
K
L
M
N
O
P
Q
R
S
T
U
V
W
X
Y
Z
0
1
2
3
4
5
6
7
8
9
°
!
"
#
$
%
&
'
(
)
*
+
,
-
.
/
:
;
?
@
[
\
]
^
_
`
{
|
}
~

View File

@ -1,5 +1,5 @@
{ {
"labelnames_实际":["国旗","浮标","船名","船只","未挂国旗船只" ], "labelnames_实际":["国旗","浮标","船名","船只","未挂国旗船只","未封仓船只" ],
"labelnames":[ "国旗","浮标","船名","船只","未挂国旗船只" ], "labelnames":[ "国旗","浮标","船名","船只","未挂国旗船只","未封仓船只" ],
"labelIndexs":["SL040", "SL041","SL042","SL043","SL044"] "labelIndexs":["SL040", "SL041","SL042","SL043","SL044"]
} }

2
conf/cityMangement3/labelnames.json Normal file → Executable file
View File

@ -1,4 +1,4 @@
{ {
"labelnames":["车辆","垃圾","商贩","违停"], "labelnames":["车辆","垃圾","商贩","违停","占道经营","裸土"],
"labelIndexs":["SL01","SL02","SL03","SL04"] "labelIndexs":["SL01","SL02","SL03","SL04"]
} }

0
conf/cityMangement3/para.json Normal file → Executable file
View File

0
conf/crackMeasurement/labelnames.json Normal file → Executable file
View File

0
conf/crowdCounting/labelnames.json Normal file → Executable file
View File

3
conf/firework/labelnames.json Executable file
View File

@ -0,0 +1,3 @@
{
"labelnames":["烟花"]
}

7
conf/firework/para.json Normal file
View File

@ -0,0 +1,7 @@
{
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
}

View File

@ -0,0 +1,3 @@
{
"labelnames":["抛洒物","车辆"]
}

0
conf/pothole/labelnames.json Normal file → Executable file
View File

0
conf/pothole/para.json Normal file → Executable file
View File

5
conf/riverT/labelnames.json Executable file
View File

@ -0,0 +1,5 @@
{
"labelnames":[ "漂浮物","垃圾","排口","非法建筑","非法种植","水生植物","游泳人员","钓鱼人员","船只","蓝藻"] ,
"labelIndexs":[ "SL04","SL05","SL06","SL07","SL08","SL09","SL10","SL11","SL12","SL13" ],
"labelOrders":[0,1,2,3,4,5,6,7,8,9]
}

7
conf/riverT/para.json Executable file
View File

@ -0,0 +1,7 @@
{
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.3,"ovlap_thres_crossCategory":0.65,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
}

3
conf/rubbish/labelnames.json Executable file
View File

@ -0,0 +1,3 @@
{
"labelnames":["建筑垃圾","白色垃圾","其他垃圾"]
}

7
conf/rubbish/para.json Normal file
View File

@ -0,0 +1,7 @@
{
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
}

7
conf/smartSite/para.json Normal file
View File

@ -0,0 +1,7 @@
{
"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":13,"rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]] }
}

View File

@ -1,4 +0,0 @@
{
"labelnames":["烟雾","火焰"],
"labelIndexs":["SL01","SL02"]
}

0
crowdUtils/toTrt.py Normal file → Executable file
View File

View File

@ -42,10 +42,9 @@ class Detect(nn.Module):
z = [] # inference output z = [] # inference output
self.training |= self.export self.training |= self.export
for i in range(self.nl): for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv x[i] = self.m[i](x[i]) # convi
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]: if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device) self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

View File

@ -127,5 +127,3 @@
2.在yolov5模型中增加了“云朵”类别减少”烟雾“的误识别但”云朵“并未输出在后处理的时候就已经过滤了。 2.在yolov5模型中增加了“云朵”类别减少”烟雾“的误识别但”云朵“并未输出在后处理的时候就已经过滤了。
3.增加了后处理函数,在“行人”的基础上,判断他们之间的距离,群定是否是人群。主要有两个参数: 3.增加了后处理函数,在“行人”的基础上,判断他们之间的距离,群定是否是人群。主要有两个参数:
'crowdThreshold':判断是否是人群时人的数量,'distancePersonScale':人与人之间的距离/人的身高 'crowdThreshold':判断是否是人群时人的数量,'distancePersonScale':人与人之间的距离/人的身高
2025.04.25
1.这个仓库把权重移出去了放在pth分支下。

Some files were not shown because too many files have changed in this diff Show More