
DSP

master
thsw 2 years ago
commit b33d091cbd
100 changed files with 46862 additions and 0 deletions
  1. DSP_Send_tranfer_oss.py (+353, -0)
  2. DSP_master.py (+595, -0)
  3. GetUploadDetails.py (+77, -0)
  4. __pycache__/DSP_Send_tranfer_oss.cpython-38.pyc (BIN)
  5. __pycache__/Send_tranfer.cpython-38.pyc (BIN)
  6. __pycache__/Send_tranfer_oss.cpython-38.pyc (BIN)
  7. __pycache__/queRiver.cpython-38.pyc (BIN)
  8. __pycache__/subprocess.cpython-38.pyc (BIN)
  9. code_bak/Send_debug.py (+119, -0)
  10. code_bak/Send_debugF.py (+117, -0)
  11. code_bak/Send_debugF2Wrong.py (+77, -0)
  12. code_bak/client.py (+192, -0)
  13. code_bak/consumer.py (+59, -0)
  14. code_bak/consumer2.py (+67, -0)
  15. code_bak/consumer_sleep.py (+420, -0)
  16. code_bak/decode.py (+79, -0)
  17. code_bak/master_0508.py (+645, -0)
  18. code_bak/master_0509.py (+571, -0)
  19. code_bak/source.txt (+1, -0)
  20. code_bak/source_query.py (+76, -0)
  21. code_bak/test_multiprocess.py (+30, -0)
  22. conf/bak/model_5class.json (+16, -0)
  23. conf/bak/model_9class.json (+16, -0)
  24. conf/errorDic.json (+6, -0)
  25. conf/master.json (+14, -0)
  26. conf/model.json (+17, -0)
  27. conf/platech.ttf (BIN)
  28. conf/send_oss.json (+20, -0)
  29. consumer2.py (+68, -0)
  30. create.sh (+10, -0)
  31. debut.txt (+0, -0)
  32. detect.sh (+2, -0)
  33. logs/logChildProcess/offline/gpuprocess.log (+3019, -0)
  34. logs/logChildProcess/online/gpuprocess.log (+15798, -0)
  35. logs/master/detector.log (+6480, -0)
  36. logs/send/SendPics.log (+11133, -0)
  37. master.log (+2667, -0)
  38. models/__init__.py (+0, -0)
  39. models/__pycache__/__init__.cpython-37.pyc (BIN)
  40. models/__pycache__/__init__.cpython-38.pyc (BIN)
  41. models/__pycache__/common.cpython-37.pyc (BIN)
  42. models/__pycache__/common.cpython-38.pyc (BIN)
  43. models/__pycache__/experimental.cpython-37.pyc (BIN)
  44. models/__pycache__/experimental.cpython-38.pyc (BIN)
  45. models/__pycache__/yolo.cpython-38.pyc (BIN)
  46. models/common.py (+405, -0)
  47. models/experimental.py (+134, -0)
  48. models/export.py (+123, -0)
  49. models/hub/anchors.yaml (+58, -0)
  50. models/hub/yolov3-spp.yaml (+51, -0)
  51. models/hub/yolov3-tiny.yaml (+41, -0)
  52. models/hub/yolov3.yaml (+51, -0)
  53. models/hub/yolov5-fpn.yaml (+42, -0)
  54. models/hub/yolov5-p2.yaml (+54, -0)
  55. models/hub/yolov5-p6.yaml (+56, -0)
  56. models/hub/yolov5-p7.yaml (+67, -0)
  57. models/hub/yolov5-panet.yaml (+48, -0)
  58. models/hub/yolov5l6.yaml (+60, -0)
  59. models/hub/yolov5m6.yaml (+60, -0)
  60. models/hub/yolov5s-transformer.yaml (+48, -0)
  61. models/hub/yolov5s6.yaml (+60, -0)
  62. models/hub/yolov5x6.yaml (+60, -0)
  63. models/yolo.py (+277, -0)
  64. models/yolov5l.yaml (+48, -0)
  65. models/yolov5m.yaml (+48, -0)
  66. models/yolov5s.yaml (+48, -0)
  67. models/yolov5x.yaml (+48, -0)
  68. oss.py (+303, -0)
  69. producer.py (+94, -0)
  70. queRiver.py (+307, -0)
  71. readme.md (+1, -0)
  72. segutils/GPUtils.py (+501, -0)
  73. segutils/__pycache__/GPUtils.cpython-38.pyc (BIN)
  74. segutils/__pycache__/segWaterBuilding.cpython-38.pyc (BIN)
  75. segutils/__pycache__/segmodel.cpython-38.pyc (BIN)
  76. segutils/core/__init__.py (+1, -0)
  77. segutils/core/__pycache__/__init__.cpython-36.pyc (BIN)
  78. segutils/core/__pycache__/__init__.cpython-38.pyc (BIN)
  79. segutils/core/data/__init__.py (+0, -0)
  80. segutils/core/data/__pycache__/__init__.cpython-36.pyc (BIN)
  81. segutils/core/data/__pycache__/__init__.cpython-38.pyc (BIN)
  82. segutils/core/data/dataloader/__init__.py (+23, -0)
  83. segutils/core/data/dataloader/__pycache__/__init__.cpython-36.pyc (BIN)
  84. segutils/core/data/dataloader/__pycache__/ade.cpython-36.pyc (BIN)
  85. segutils/core/data/dataloader/__pycache__/cityscapes.cpython-36.pyc (BIN)
  86. segutils/core/data/dataloader/__pycache__/mscoco.cpython-36.pyc (BIN)
  87. segutils/core/data/dataloader/__pycache__/pascal_aug.cpython-36.pyc (BIN)
  88. segutils/core/data/dataloader/__pycache__/pascal_voc.cpython-36.pyc (BIN)
  89. segutils/core/data/dataloader/__pycache__/sbu_shadow.cpython-36.pyc (BIN)
  90. segutils/core/data/dataloader/__pycache__/segbase.cpython-36.pyc (BIN)
  91. segutils/core/data/dataloader/ade.py (+172, -0)
  92. segutils/core/data/dataloader/cityscapes.py (+137, -0)
  93. segutils/core/data/dataloader/lip_parsing.py (+90, -0)
  94. segutils/core/data/dataloader/mscoco.py (+136, -0)
  95. segutils/core/data/dataloader/pascal_aug.py (+104, -0)
  96. segutils/core/data/dataloader/pascal_voc.py (+112, -0)
  97. segutils/core/data/dataloader/sbu_shadow.py (+88, -0)
  98. segutils/core/data/dataloader/segbase.py (+93, -0)
  99. segutils/core/data/dataloader/utils.py (+69, -0)
  100. segutils/core/data/downloader/__init__.py (+0, -0)

DSP_Send_tranfer_oss.py (+353, -0)

@@ -0,0 +1,353 @@
from PIL import Image
import numpy as np
import cv2
import base64
import io,os
import requests
import time,json
import string,random
import glob,string,sys
from multiprocessing import Process,Queue
import oss2,copy
from kafka import KafkaProducer, KafkaConsumer
from utilsK.sendUtils import *

from utilsK.masterUtils import create_logFile,wrtiteLog,writeELK_log,send_kafka
from voduploadsdk.UploadVideoRequest import UploadVideoRequest
from voduploadsdk.AliyunVodUtils import *
from voduploadsdk.AliyunVodUploader import AliyunVodUploader
import hashlib
from kafka.errors import kafka_errors
##for CeKanYuan
#Oct 21: decided which platform an image came from by its file name. Not a good approach.
#Oct 22: changed to polling a fixed address every 2 minutes and reading the platform names and addresses from it.
#Mar 18: switched to Aliyun OSS buckets for storage
#platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo'
platform_query_url='SendLog/platformQuery.json'
api = 'http://121.40.249.52:9050/api/taskFile/submitUAVKHQuestion'
#api = 'http://47.98.157.120:9040/api/taskFile/submitUAVKHQuestion'

##This set of names is for China Unicom.
name_dic={
"排口":"入河、湖排口",
"排污口": "入河、湖排口",
"水生植被": "水生植物",
"漂浮物": "有大面积漂物",
"结束": "结束",
'其它' :'其它'
}
## for TH river
##This mapping is for the River Chief system.
'''
nameID_dic={
"排口":'00000',
"排污口": '8378',
"水生植被": '8380',
"漂浮物": '8368',
"结束":'9999',
"其它":'8888'
}
'''

msg_dict_off={
"request_id":"fflvgyntTsZCamqjuLArkiSYIbKXEeWx",#message ID
"status":"running",#task status
"type":str(2),#message type 1: realtime 2: offline
#"error":str(9999),#error info####
"error_code":"",#//error code
"error_msg":"",#//error description
"progress":"",
"results":[#problem results
{
"original_url":"",#original image URL
"sign_url":"",#AI-annotated image URL
"category_id":"",#category ID
"description":"",#problem description
"analyse_time":"",#timestamp
}
]
}

msg_dict_on={
"request_id":"nnlvgyntTsZCamqjuLArkiSYIbKXEeWx",#message ID
"status":"running",#task status
"type":str(1),#message type 1: realtime 2: offline
"error_code":"",#//error code
"error_msg":"",#//error description
"progressOn":"",
"results":[#problem results
{
"original_url":"",#original video URL (empty/omitted for offline analysis, required for realtime)
"sign_url":"",#URL of the annotated video
}
]
}
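
##A hedged usage sketch (illustrative values only, kept commented out so the
##module behaves as before): deep-copy a template, fill the per-result fields,
##then serialize with ensure_ascii=False so Chinese category names survive as UTF-8.
#example_msg = copy.deepcopy(msg_dict_off)
#example_msg['request_id'] = 'fflvgyntTsZCamqjuLArkiSYIbKXEeWx'
#example_msg['results'][0]['original_url'] = 'results_dir/img_OR.jpg' ##hypothetical OSS key
#example_msg['results'][0]['sign_url'] = 'results_dir/img_AI.jpg' ##hypothetical OSS key
#example_msg['results'][0]['analyse_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#example_payload = json.dumps(example_msg, ensure_ascii=False)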


def mintor_offline_ending(parIn):

indir,server,topic,fp_log = parIn['indir'],parIn['server'],parIn['topic'] ,parIn['fp_log']
par_kafka={};par_kafka['server']=server;par_kafka['topic']=topic;
logger = parIn['logger'];thread='Send-tranfer-oss:mintor-offline-ending'
time_interval = parIn['timeInterval']
###Poll the image_tmp folder every 10 s; once an offline end flag appears, keep sending a heartbeat every 30 s.
producer = KafkaProducer(
bootstrap_servers=par_kafka['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000,
)
outStrList={}
writeELK_log(msg='child process starts',fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
time_ss0=time.time()
while True:
filelist_AI = sorted(glob.glob('%s/*_AI.txt'%(indir)),key=os.path.getmtime)
filelist = filelist_AI
off_msgs=[]
for filename in filelist[0:]:
filename_base = os.path.basename(filename)
##parse the file name
typename,requestId,onLineType = parse_filename_for_oss(filename_base)
if (onLineType=='off') and (typename=='结束'):
off_msgs.append(requestId)

for requestId in off_msgs:
msg_heart = copy.deepcopy(msg_dict_off)
msg_heart['status']='running'
msg_heart["request_id"]=requestId
msg_heart = json.dumps(msg_heart, ensure_ascii=False)
outStrList['success']= '----- send heartBeat in Transfer success, msg:%s '%(requestId)
outStrList['failure']='----- kafka error when sending heartBeat in Transfer '
outStrList['Refailure']='----- kafka error when Re-sending heartBeat in Transfer '

send_kafka(producer,par_kafka,msg_heart,outStrList,fp_log,line=sys._getframe().f_lineno,logger=logger,thread=thread );
time.sleep(time_interval)
time_ss1=time.time()
if time_ss1 - time_ss0>120:
outstrs = 'child process sleeping:%f s '%(time_ss1-time_ss0)
writeELK_log(msg=outstrs,fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
time_ss0=time_ss1
def test5(par):
indir,outdir,logdir,jsonDir = par['indir'],par['outdir'],par['logdir'],par['jsonDir']
hearBeatTimeMs = par['hearBeatTimeMs']
videoBakDir,ossPar,vodPar,kafkaPar = par['videoBakDir'], par['ossPar'],par['vodPar'],par['kafkaPar']

time0_0 = time.time();logname='SendPics.log';thread='Send-tranfer-oss:main'
fp_log=create_logFile(logdir=logdir,name=logname)
logger=logdir.replace('/','.')+'.'+logname
writeELK_log(msg='Send_tranfer_oss process starts',fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
nameID_dic=getNamedic(par['labelnamesFile'].strip())
parIn={};####parameters for the heartbeat child process
parIn['indir'],parIn['server'],parIn['topic'] ,parIn['fp_log']=indir,kafkaPar['boostServer'],kafkaPar['topic'],fp_log
parIn['timeInterval'] = hearBeatTimeMs;parIn['logger']=logger
HeartProcess=Process(target=mintor_offline_ending,name='process-sendHeartOnly',args=(parIn,))
HeartProcess.start()

ifind=0
time0_0 = time.time()

producer = KafkaProducer(
bootstrap_servers=kafkaPar['boostServer'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000,
)
###log in and prepare the OSS bucket
auth = oss2.Auth(ossPar['AId'], ossPar['ASt'])
# The endpoint here uses Hangzhou as an example; fill in other regions as appropriate.
bucket = oss2.Bucket(auth, ossPar['Epoint'], ossPar['bucketName'])
##VOD
clt = init_vod_client(vodPar['AId'], vodPar['ASt'])
uploader = AliyunVodUploader(vodPar['AId'], vodPar['ASt'])
writeELK_log(msg='Load Parameter over',fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
par_heart={};outStrList={}
time_b0=time.time()
while True:
filelist_AI = sorted(glob.glob('%s/*_AI.txt'%(indir)),key=os.path.getmtime)
filelist=[]
for filename in filelist_AI:
filename_base = os.path.basename(filename)
typename,requestId,onLineType = parse_filename_for_oss(filename_base)
if typename in ["其它"]:
continue
filelist.append(filename)
if len(filelist)!=0:
time0 = time.time()
for filename in filelist[0:]:
filename_base = os.path.basename(filename)
##parse the file name
typename,requestId,onLineType = parse_filename_for_oss(filename_base)
##upload the files
filename_OR=filename.replace('_AI.','_OR.')
filename_AI_image = filename.replace('.txt','.jpg')
filename_OR_image = filename_OR.replace('.txt','.jpg')
taskInfos = lodaMsgInfos(jsonDir,requestId)
oss_dir = taskInfos['results_base_dir']
#if typename in ["排口","其它"]:
# continue
if typename not in ['结束','超时结束']:
time_s1 = time.time()
ObjectName_AI=os.path.join(oss_dir,os.path.basename(filename_AI_image))
ObjectName_OR=os.path.join(oss_dir,os.path.basename(filename_OR_image))
bucket.put_object_from_file(ObjectName_AI, filename_AI_image)
ret2=bucket.put_object_from_file(ObjectName_OR, filename_OR_image)

outstr=' oss bucket upload %s %s %s '%('***'*3,'Send:',filename)
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
msg = copy.deepcopy(msg_dict_off)
if onLineType!='off': msg['type']=str(1)
else: msg['type']=str(2)
msg['results'][0]['original_url']= ObjectName_OR
msg['results'][0]['sign_url']= ObjectName_AI
msg['results'][0]['category_id']= nameID_dic[typename]
msg['results'][0]['description']= typename
msg['results'][0]['analyse_time']= time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
msg = update_json(taskInfos,msg)
time_s2 = time.time()
else:
time_s1 = time.time()
if onLineType!='off':
msg = copy.deepcopy(msg_dict_on)
msg["request_id"]=requestId ;msg['type']=str(1)
msg['results'][0]['original_url']= "yourAddress"
msg['results'][0]['sign_url']= "yourAddress"###the latest video file
upCnt=1;upLoaded=False
while upCnt<4:
try:
videoUrl=os.path.join(videoBakDir,requestId+'_AI.MP4')
uploadVideoRequest = UploadVideoRequest(videoUrl, 'offLineVideo')
videoId = uploader.uploadLocalVideo(uploadVideoRequest)
VideoId_AI=str(videoId['VideoId'])
videoUrl=os.path.join(videoBakDir,requestId+'_OR.MP4')
uploadVideoRequest = UploadVideoRequest(videoUrl, 'offLineVideo')
videoId = uploader.uploadLocalVideo(uploadVideoRequest)
VideoId_OR=str(videoId['VideoId'])
outstr=VideoId_OR+','+VideoId_AI
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
msg['results'][0]['sign_url']=VideoId_AI
msg['results'][0]['original_url']=VideoId_OR
upCnt=4;upLoaded=True
except Exception as e:
writeELK_log(msg='video uploading error:%s, times:%d'%(e,upCnt),fp=fp_log,thread=thread,level='WARNING',line=sys._getframe().f_lineno,logger=logger);
upCnt+=1;upLoaded=False
if not upLoaded:
msg['error_msg']='video uploading failure' ; msg['error_code']='101' ;
else:
msg = copy.deepcopy(msg_dict_off)
msg['type']=str(2)
msg["request_id"]=requestId
msg['results'][0]['original_url']= taskInfos['original_url']
videoUrl=os.path.join(videoBakDir,requestId+'.MP4')
upCnt=1;upLoaded=False
while upCnt<4:
try:
uploadVideoRequest = UploadVideoRequest(videoUrl, 'offLineVideo')
videoId = uploader.uploadLocalVideo(uploadVideoRequest)
outstr=' oss upload video over %s '%(str(videoId['VideoId']))
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
msg['results'][0]['sign_url']= str(videoId['VideoId'])###the latest video file
upCnt=4;upLoaded=True
except Exception as e:
writeELK_log(msg='video uploading error:%s, times:%d'%(e,upCnt),fp=fp_log,thread=thread,level='WARNING',line=sys._getframe().f_lineno,logger=logger);
upCnt+=1;upLoaded=False
if not upLoaded:
msg['error_msg']='video uploading failure' ; msg['error_code']='101' ;
if upLoaded:
if typename=='结束': msg["status"]="success"
else: msg["status"]="timeout"
else:
msg["status"]='failed'
time_s2 = time.time()
msg = json.dumps(msg, ensure_ascii=False)
future = producer.send(
kafkaPar['topic'],
msg
)
try:
record_metadata = future.get()
outstr='kafka send:%s msg:%s producer status:%s'%(onLineType,msg,producer.bootstrap_connected())
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
except Exception as e:
outstr='kafka ERROR:%s'%(str(e))
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='WARNING',line=sys._getframe().f_lineno,logger=logger)
producer.close()
producer = KafkaProducer(
bootstrap_servers=kafkaPar['boostServer'],#tencent yun
value_serializer=lambda v: v.encode('utf-8')
)
try:
future = producer.send(kafkaPar['topic'], msg).get()
except Exception as e:
outstr='kafka resend ERROR:%s'%(str(e))
#poutstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='ERROR',line=sys._getframe().f_lineno,logger=logger)
time_s3 = time.time()
##move the uploaded images to another folder###
cmd = 'mv \'%s\' \'%s\' '%(filename,outdir); os.system(cmd)
cmd = 'mv \'%s\' \'%s\' '%(filename_OR,outdir); os.system(cmd)
time_s4 = time.time()
print('-'*50)
else:
time.sleep(1)
time_b1=time.time()
if time_b1-time_b0>120:
writeELK_log(msg='send main process sleeping',fp=fp_log,thread=thread,level='INFO',line=sys._getframe().f_lineno,logger=logger)
time_b0=time_b1
fp_log.close()
if __name__=='__main__':

masterFile="conf/send_oss.json"
assert os.path.exists(masterFile)
with open(masterFile,'r') as fp:
par=json.load(fp)

test5(par)
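
test5 above retries a failed Kafka send exactly once: it closes the producer, rebuilds it against the same bootstrap servers, and resends. A minimal sketch of that recovery pattern, pulled out for clarity (a sketch only; build_producer and send_with_rebuild are hypothetical helper names, not part of this repo):

from kafka import KafkaProducer

def build_producer(servers):
    # hypothetical helper: a producer for UTF-8 string payloads, as in test5
    return KafkaProducer(bootstrap_servers=servers,
                         value_serializer=lambda v: v.encode('utf-8'))

def send_with_rebuild(producer, servers, topic, msg):
    # try once; on failure, rebuild the producer and retry once
    try:
        producer.send(topic, msg).get()
        return producer, True
    except Exception:
        producer.close()
        producer = build_producer(servers)
        try:
            producer.send(topic, msg).get()
            return producer, True
        except Exception:
            return producer, False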

DSP_master.py (+595, -0)

@@ -0,0 +1,595 @@
import numpy as np
import time,ast,copy
#from flask import request, Flask,jsonify
import base64,cv2,os,sys,json
#sys.path.extend(['../yolov5'])
#from Send_tranfer import b64encode_function,JsonSend,name_dic,nameID_dic,getLogFileFp
from segutils.segmodel import SegModel,get_largest_contours
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.torch_utils import select_device, load_classifier, time_synchronized
from queRiver import get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str
import subprocess as sp
import matplotlib.pyplot as plt
import torch,random,string
import multiprocessing
from multiprocessing import Process,Queue
import traceback
from kafka import KafkaProducer, KafkaConsumer,TopicPartition
from kafka.errors import kafka_errors
#torch.multiprocessing.set_start_method('spawn')
import utilsK
from utilsK.GPUtils import *
from utilsK.masterUtils import *
from utilsK.sendUtils import create_status_msg,update_json
#from utilsK.modelEval import onlineModelProcsss
import random,string
from DSP_Send_tranfer_oss import msg_dict_on,msg_dict_off
process_id=0
def onlineModelProcess(parIn ):
DEBUG=False
streamName = parIn['streamName']
childCallback=parIn['callback']
outStrList={}
object_config=parIn['object_config']
allowedList,allowedList_string=get_needed_objectsIndex(object_config)
#try:
for wan in ['test']:
jsonfile=parIn['modelJson']
with open(jsonfile,'r') as fp:
parAll = json.load(fp)
Detweights=parAll['gpu_process']['det_weights']
seg_nclass = parAll['gpu_process']['seg_nclass']
Segweights = parAll['gpu_process']['seg_weights']
StreamRecoveringTime=int(parAll['StreamRecoveringTime'])
TaskStatusQueryUrl=parAll["TaskStatusQueryUrl"]
videoSave = parAll['AI_video_save']
imageTxtFile = parAll['imageTxtFile']
taskId,msgId = streamName.split('-')[1:3]
inSource,outSource=parIn['inSource'],parIn['outSource']
##create the log file
if outSource != 'NO':
logdir = parAll['logChildProcessOnline']
waitingTime=parAll['StreamWaitingTime']
else:
logdir = parAll['logChildProcessOffline']
waitingTime=5
logname='gpuprocess.log'
fp_log=create_logFile(logdir=logdir,name=logname)
logger=logdir.replace('/','.')+'.'+logname
kafka_par=parIn['kafka_par']
producer = KafkaProducer(bootstrap_servers=kafka_par['server'],value_serializer=lambda v: v.encode('utf-8'),metadata_max_age_ms=120000)
####check the validity of the video source first
###at startup, if the online task has no stream yet, send the heartbeat message msg_h
msg_h= copy.deepcopy(msg_dict_off);
msg_h['status']='waiting';msg_h['request_id']=msgId
thread='master:gpuprocess-%s'%(msgId)
if outSource == 'NO':
msg_h['type']=2
Stream_ok,_= get_fps_rtmp(inSource,video=True)
else:
msg_h['type']=1
msg_h_d = json.dumps(msg_h, ensure_ascii=False)
outStrList=get_infos(taskId, msgId,msg_h_d,key_str='waiting stream or video, send heartbeat')
Stream_ok=check_stream(inSource,producer,kafka_par,msg_h_d,outStrList,fp_log,logger,line=sys._getframe().f_lineno,thread=thread ,timeMs=waitingTime)
if Stream_ok:###send the start signal
msg_h['status']='running'
msg_h_d = json.dumps(msg_h, ensure_ascii=False)
outStrList= get_infos(taskId, msgId,msg_h_d,key_str='informing stream/video is ok')
send_kafka(producer,kafka_par,msg_h_d,outStrList,fp_log,line=sys._getframe().f_lineno,logger=logger,thread=thread );
else:
####check whether the offline video is valid; report an error if it is not
outstr='offline video or live stream Error:%s '%(inSource)
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,level='ERROR',line=sys._getframe().f_lineno,logger=logger)
msg_h['error_msg']='Stream or video ERROR';msg_h['error_code']='102' ;msg_h['status']='failed';
msg_h_d = json.dumps(msg_h, ensure_ascii=False);
outStrList= get_infos(taskId, msgId,msg_h_d,key_str='informing invalid video or stream success')
send_kafka(producer,kafka_par,msg_h_d,outStrList,fp_log ,line=sys._getframe().f_lineno,logger=logger,thread=thread );
childCallback.send(' offline video or live stream Error')
continue
allowedList_string='allow index are:'+ allowedList_string
writeELK_log(msg=allowedList_string,fp=fp_log,line=sys._getframe().f_lineno,logger=logger)
if (inSource.endswith('.MP4')) or (inSource.endswith('.mp4')):
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=True)[1][0:4];
else:
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=False)[1][0:4]
fps = int(fps+0.5)
if fps>30: fps=25 ###in offline tests the fps sometimes reads as 9000, which is clearly wrong, hence this guard.
if outSource != 'NO':
command=['/usr/bin/ffmpeg','-y','-f', 'rawvideo','-vcodec','rawvideo','-pix_fmt', 'bgr24',
'-s', "{}x{}".format(outW,outH),# frame resolution
'-r', str(fps),# video frame rate
'-i', '-','-c:v',
'libx264',
'-pix_fmt', 'yuv420p',
'-f', 'flv',outSource
]
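##The command above makes ffmpeg read raw BGR frames of size outW x outH from
##stdin ('-f rawvideo' with '-i -') at the source fps, encode them with libx264
##as yuv420p, and push an FLV stream to outSource; frames are fed in later via
##ppipe.stdin.write(image_array.tobytes()).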
video_flag = videoSave['onLine']
logdir = parAll['logChildProcessOnline']
waitingTime=parAll['StreamWaitingTime']
else:
video_flag = videoSave['offLine'] ;logdir = parAll['logChildProcessOffline']
waitingTime=5
device = select_device(parIn['device'])
half = device.type != 'cpu' # half precision only supported on CUDA
model = attempt_load(Detweights, map_location=device) # load FP32 model
if half: model.half()
segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)
##post-processing parameters
par=parAll['post_process']
conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
outImaDir = par['outImaDir']
outVideoDir = par['outVideoDir']
labelnames=par['labelnames']
rainbows=par['rainbows']
fpsample = par['fpsample']
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
#dataset = LoadStreams(inSource, img_size=640, stride=32)
childCallback.send('####model load success####')
print('#####line153:',outVideoDir,video_flag)
if (outVideoDir!='NO') : ####added 2022.06.27: online tasks also upload the AI and original videos
if video_flag:
request_id = streamName.split('-')[2]
save_path = os.path.join(outVideoDir,request_id+'.MP4')
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (outW,outH))
if vid_writer.isOpened(): outstr='touch video success:%s'%(save_path);level='INFO'
else:outstr='touch video failed:%s'%(save_path);level='ERROR'
writeELK_log(msg=outstr,fp=fp_log,level=level,line=sys._getframe().f_lineno,logger=logger)
else:
request_id = streamName.split('-')[2]
save_path_OR = os.path.join(outVideoDir,request_id+'_OR.MP4')
vid_writer_OR = cv2.VideoWriter(save_path_OR, cv2.VideoWriter_fourcc(*'mp4v'), fps, (outW,outH))
save_path_AI = os.path.join(outVideoDir,request_id+'_AI.MP4')
vid_writer_AI = cv2.VideoWriter(save_path_AI, cv2.VideoWriter_fourcc(*'mp4v'), fps, (outW,outH))
if vid_writer_AI.isOpened() and vid_writer_OR.isOpened() :outstr='touch video success:%s,%s'%(save_path_OR,save_path_AI);level='INFO'
else:outstr='touch video failed:%s,%s, fps:%d ,%d , %d'%(save_path_OR,save_path_AI,fps,outW,outH);level='ERROR'
writeELK_log(msg=outstr,fp=fp_log,level=level,line=sys._getframe().f_lineno,logger=logger)
iframe = 0;post_results=[];time_beg=time.time()
t00=time.time()
time_kafka0=time.time()
Pushed_Flag=False
while True:
try:
dataset = LoadStreams(inSource, img_size=640, stride=32)
# pipe setup for the ffmpeg push stream
if outSource !='NO' and (not Pushed_Flag):
ppipe = sp.Popen(command, stdin=sp.PIPE);Pushed_Flag = True
for path, img, im0s, vid_cap in dataset:
t0= time_synchronized()
if outSource == 'NO':###if not pushing a stream, show a progress bar; offline tasks do not push
view_bar(iframe,totalcnt,time_beg ,parIn['process_uid'] )
streamCheckCnt=0
###both live and offline tasks send a message once per minute
time_kafka1 = time.time()
if time_kafka1 - time_kafka0 >60:
time_kafka0 = time_kafka1
###send status info
msg = copy.deepcopy(msg_dict_off);
msg['request_id']= msgId;
if outSource == 'NO':
msg['progress']= '%.4f'%(iframe*1.0/totalcnt)
msg['type']=2
else:
msg['progressOn']= str(iframe)
msg['type']=1
msg = json.dumps(msg, ensure_ascii=False)
outStrList= get_infos(taskId, msgId,msg,key_str='processing send progressbar or online heartbeat')
send_kafka(producer,kafka_par,msg,outStrList,fp_log,line=sys._getframe().f_lineno,logger=logger,thread=thread );
time0=time.time()
iframe +=1
time1=time.time()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
timeseg0 = time.time()
seg_pred,segstr = segmodel.eval(im0s[0] )
timeseg1 = time.time()
t1= time_synchronized()
pred = model(img,augment=False)[0]
time4 = time.time()
datas = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
# "labelnames":["排口","排口","水生植被","漂浮物","其它"]
p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,object_config=allowedList)
t2= time_synchronized()
#print('###line138:',timeOut,outSource,outVideoDir)
##process once every fpsample frames; save images when problems are found
if (iframe % fpsample == 0) and (len(post_results)>0) :
parImage=save_problem_images(post_results,iframe,names,streamName=streamName,outImaDir='problems/images_tmp',imageTxtFile=imageTxtFile)
post_results=[]
if len(p_result[2] )>0: ##
post_results.append(p_result)
t3= time_synchronized()
image_array = p_result[1]
if outSource!='NO':
ppipe.stdin.write(image_array.tobytes())
if (outVideoDir!='NO'):
if video_flag: ret = vid_writer.write(image_array)
else:
time_w0=time.time()
ret = vid_writer_AI.write(image_array)
ret = vid_writer_OR.write(im0s[0])
time_w1=time.time()
#if not ret:
# print('\n write two videos time:%f ms'%(time_w1-time_w0)*1000,ret)
t4= time_synchronized()
timestr2 = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
if iframe%100==0:
outstr='%s,,read:%.1f ms,copy:%.1f, infer:%.1f ms, detinfer:%.1f ms,draw:%.1f ms, save:%.1f ms total:%.1f ms \n'%(timestr2,(t0 - t00)*1000,(timeseg0-t0)*1000, (t1 - timeseg0)*1000,(t2-t1)*1000, (t3 - t2)*1000,(t4-t3)*1000, (t4-t00)*1000)
#wrtiteLog(fp_log,outstr);
writeELK_log(msg=outstr,fp=fp_log,line=sys._getframe().f_lineno,logger=logger,printFlag=False)
#print(outstr)
t00 = t4;
except Exception as e:
#if outSource:###推流才有如下
streamCheckCnt+=1;taskEnd=False
if streamCheckCnt==1:timeBreak0=time.time();time_kafka0 = time.time()
timeBreak1=time.time();
if timeBreak1-timeBreak0 >5 and Pushed_Flag:###close the push pipe once the stream has been down for 5 s
ppipe.kill();Pushed_Flag=False
writeELK_log(msg='stream pip is killed ',fp=fp_log,line=sys._getframe().f_lineno,logger=logger)
###query the API to check whether the task has ended
query_url='%s/%s/status'%(TaskStatusQueryUrl,msgId)
requestInfos,taskEnd=query_request_status(query_url)
#requestInfos,taskEnd='this line 274 test',False #############
####taskEnd######################DEBUG
#taskEnd=False
if timeBreak1-timeBreak0 >StreamRecoveringTime : ##by default, disconnect if the stream has not recovered within 30 minutes.
taskEnd=True
outstr_channel='%s ,taskEnd:%s'%(requestInfos,taskEnd)
writeELK_log(msg=outstr_channel,fp=fp_log,line=sys._getframe().f_lineno,logger=logger)
if outSource == 'NO':#offline tasks have no push stream
taskEnd=True
if taskEnd:
if timeBreak1-timeBreak0 > 60:###timeout ending
writeTxtEndFlag(outImaDir,streamName,imageTxtFile,endFlag='超时结束')
else:
writeTxtEndFlag(outImaDir,streamName,imageTxtFile,endFlag='结束')
if (outVideoDir!='NO'):
if video_flag:vid_writer.release()
else:
vid_writer_OR.release();
vid_writer_AI.release();
outstr='Task ends:%.1f , msgid:%s,taskID:%s '%(timeBreak1-timeBreak0,taskId,msgId)
writeELK_log(msg=outstr,fp=fp_log,line=sys._getframe().f_lineno,logger=logger)
break
##only online tasks reach this point; send waiting heartbeats while the stream recovers
time_kafka1 = time.time()
if time_kafka1-time_kafka0>60:
msg_res = copy.deepcopy(msg_dict_off);
msg_res['request_id']= msgId; msg_res['type']=1
msg_res = json.dumps(msg_res, ensure_ascii=False)
outStrList= get_infos(taskId, msgId,msg_res,key_str='Waiting stream restoring heartbeat')
send_kafka(producer,kafka_par,msg_res,outStrList,fp_log,line=sys._getframe().f_lineno,logger=logger,thread=thread );
outstr='Waiting stream recovering:%.1f s'%(timeBreak1-timeBreak0)
writeELK_log(msg=outstr,fp=fp_log,line=sys._getframe().f_lineno,logger=logger)
writeELK_log(msg=outstr_channel,fp=fp_log,line=sys._getframe().f_lineno,logger=logger)
time_kafka0 = time_kafka1
#break###stream dropped or reached the end
time.sleep(5)
print('Waiting stream for ',e)
def lauch_process(gpuid,inSource,outSource,taskId,msgId,modelJson,kafka_par,object_config=[ { 'id':"0",'config':{}}, { 'id':"1",'config':{}} ]):
if outSource=='NO':
streamName='off-%s-%s'%(taskId,msgId)
else:
streamName='live-%s-%s'%(taskId,msgId)
dataPar ={
'imgData':'',
'imgName':'testW',
'streamName':streamName,
'taskId':taskId,
'msgId':msgId,
'object_config':object_config,
'device':str(gpuid),
'modelJson':modelJson,
'kafka_par':kafka_par,
}
#dataPar['inSource'] = 'http://images.5gai.taauav.com/video/8bc32984dd893930dabb2856eb92b4d1.mp4';dataPar['outSource'] = None
dataPar['inSource'] = inSource;dataPar['outSource'] = outSource
process_uid=''.join(random.sample(string.ascii_letters + string.digits, 16));dataPar['process_uid']=process_uid
parent_conn, child_conn = multiprocessing.Pipe();dataPar['callback']=child_conn
gpuProcess=Process(target=onlineModelProcess,name='process:%s'%( process_uid ),args=(dataPar,))
gpuProcess.start()
gpuProcess.join()
#print(dir(gpuProcess))
#child_return = parent_conn.recv()
#timestr2=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
#print(timestr2,'-'*20,'progress:%s ,msgId:%s , taskId:%s return:'%(process_uid,msgId,taskId),child_return)
return gpuProcess
msg_dict_offline = {
"biz_id":"hehuzhang",
"mod_id":"ai",
"request_id":'bb'+''.join(random.sample(string.ascii_letters ,30) ) ,
"offering_id":"http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4",
"offering_type":"mp4",
"results_base_dir": "XJRW202203171535"+str(random.randint(10,99)),
'outSource':'NO'
}
taskStatus={}
taskStatus['onLine'] = Queue(100)
taskStatus['offLine']= Queue(100)
taskStatus['pidInfos']= {}
def get_msg_from_kafka(par):
thread='master:readingKafka'
outStrList={}
fp_log = par['fp_log']
logger=par['logger']
consumer = KafkaConsumer(bootstrap_servers=par['server'],client_id='AI_server',group_id=par['group_id'],auto_offset_reset='latest')
consumer.subscribe( par['topic'][0:2])
outstr='reading kafka process starts'
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
kafka_par ={ 'server':par['server'],'topic':par['topic'][2] }
producer = KafkaProducer(
bootstrap_servers=par['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000)
for ii,msg in enumerate(consumer):
##read the message
try:
taskInfos = eval(msg.value.decode('utf-8') )
except:
outstr='%s msg format error,value:%s,offset:%d partition:%s topic:%s'%('#'*20,msg.value,msg.offset,msg.partition,msg.topic)
writeELK_log(msg=outstr,fp=fp_log,thread=thread,level='WARNING',line=sys._getframe().f_lineno,logger=logger)
continue
if msg.topic == par['topic'][0]: ##
taskInfos['inSource']= taskInfos['pull_url'];
taskInfos['outSource']= taskInfos['push_url'] ;
taskInfos['object_config']= taskInfos['models']
taskStatus['onLine'].put( taskInfos )
save_message(par['kafka'],taskInfos)
###send status info: waiting
msg = create_status_msg(msg_dict_on,taskInfos,sts='waiting')
outStrList=get_infos(taskInfos['results_base_dir'], taskInfos['request_id'],msg,key_str='read msgs from kafka online task and response to kafka')
send_kafka(producer,kafka_par,msg,outStrList,fp_log,line=sys._getframe().f_lineno,logger=logger,thread=thread);
else:
try:
taskInfos['inSource']= taskInfos['original_url'];
taskInfos['outSource']= 'NO'
taskInfos['object_config']= taskInfos['models']
taskStatus['offLine'].put( taskInfos )
save_message(par['kafka'],taskInfos)
###send status info: waiting
msg = create_status_msg(msg_dict_off,taskInfos,sts='waiting')
outStrList=get_infos(taskInfos['results_base_dir'], taskInfos['request_id'],msg,key_str='read msgs from kafka offline task and response to kafka')
send_kafka(producer,kafka_par,msg,outStrList,fp_log ,line=sys._getframe().f_lineno,logger=logger,thread=thread );
except Exception as e:
print('######msg Error######',msg,e)
def detector(par):
####initialize the info lists
kafka_par ={ 'server':par['server'],'topic':par['topic'][2] }
producer = KafkaProducer(
bootstrap_servers=par['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000)
time_interval=par['logPrintInterval']
logname='detector.log';thread='master:detector'
fp_log=create_logFile(logdir=par['logDir'],name=logname)
##prepare the parameters needed by the logging helper
logger=par['logDir'].replace('/','.')+'.'+logname
#wrtiteLog(fp_log,'########### detector process starts ######\n');
outstr='detector process starts'
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
###start the kafka consumer process##
parIn=copy.deepcopy(par);parIn['fp_log']=fp_log ;parIn['logger']=logger
HeartProcess=Process(target=get_msg_from_kafka,name='process-consumer-kafka',args=(parIn,))
HeartProcess.start()
timeSleep=1
time0=time.time()
time0_kafQuery=time.time()
time0_taskQuery=time.time()
time0_sleep=time.time()
outStrList={}
while True:###poll once every timeSleep seconds
time0_taskQuery,printFlag = check_time_interval(time0_taskQuery,time_interval)
outstr_task= ' task queue onLine cnt:%d offLine:%d'%(taskStatus['onLine'].qsize(), taskStatus['offLine'].qsize())
if (taskStatus['onLine'].qsize()>0) or (taskStatus['offLine'].qsize()>0):
#outstr_task=wrtiteLog(fp_log,outstr_task);print( outstr_task);
writeELK_log(msg=outstr_task,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
##2-refresh GPU info
gpuStatus = getGPUInfos()
##3-online tasks take priority
if not taskStatus['onLine'].empty():
###3.1-first check whether any GPU is free:
cuda = get_available_gpu(gpuStatus)
###fetch the online task info and launch its process
taskInfos = taskStatus['onLine'].get()
outstr='start to process onLine taskId:%s msgId:%s'%( taskInfos['results_base_dir'],taskInfos['request_id'] )
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
if cuda: ###3.1.1-a GPU is free
#lauch process
msg= copy.deepcopy(msg_dict_on);
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['request_id'],par['modelJson'],kafka_par,taskInfos['object_config'])
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}
else:###3.1.2-no free GPU
##check whether any GPU is running only offline processes
cuda_pid = get_potential_gpu(gpuStatus,taskStatus['pidInfos'])
if cuda_pid:#3.1.2.1 - ##there are processes that can be killed
cuda = cuda_pid['cuda']
pids = cuda_pid['pids']
##kill the offline processes and update the offline task queue
cnt_off_0 = taskStatus['offLine'].qsize()
for pid in pids:
##kill the offline process
taskStatus['pidInfos'][pid]['gpuProcess'].kill()
##requeue the offline task
taskStatus['offLine'].put( taskStatus['pidInfos'][pid]['taskInfos'] )
taskInfos_off=taskStatus['pidInfos'][pid]['taskInfos']
##send the offline task data, marking its status back to waiting
msg= msg_dict_off;
msg=update_json(taskInfos_off,msg,offkeys=["request_id","biz_id" ,"mod_id"] )
msg['results'][0]['original_url']=taskInfos_off['inSource']
msg['results'][0]['sign_url']=get_boradcast_address(taskInfos_off['outSource'])
msg['status']='waiting'
msg = json.dumps(msg, ensure_ascii=False)
outStrList=get_infos(taskInfos_off['results_base_dir'], taskInfos_off['request_id'],msg,key_str='start online task after kill offline tasks')
send_kafka(producer,kafka_par,msg,outStrList,fp_log ,line=sys._getframe().f_lineno,logger=logger,thread=thread );
cnt_off_1 = taskStatus['offLine'].qsize()
outstr='before killing process, offtask cnt:%d ,after killing, offtask cnt:%d '%(cnt_off_0,cnt_off_1)
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['request_id'],par['modelJson'],kafka_par,taskInfos['object_config'])
###update pidInfos
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}
else:
outstr='No available GPUs for onLine task'
#outstr=wrtiteLog(fp_log,outstr);print(outstr);
writeELK_log(msg=outstr,fp=fp_log,level='ERROR',thread=thread,line=sys._getframe().f_lineno,logger=logger)
##4-refresh GPU info
gpuStatus = getGPUInfos()
##5-handle offline tasks
if not taskStatus['offLine'].empty():
cudaArrange= arrange_offlineProcess(gpuStatus,taskStatus['pidInfos'],modelMemory=1500)
outstr='IN OFF LINE TASKS available cudas:%s'%(cudaArrange)
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
for cuda in cudaArrange:
if not taskStatus['offLine'].empty():
taskInfos = taskStatus['offLine'].get()
outstr='start to process offLine taskId:%s msgId:%s'%( taskInfos['results_base_dir'],taskInfos['request_id'] )
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['request_id'],par['modelJson'],kafka_par,taskInfos['object_config'])
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'offLine','taskInfos':taskInfos}
if get_whether_gpuProcess():
time0_sleep,printFlag = check_time_interval(time0_sleep,time_interval)
if printFlag:
outstr= '*'*20 +'sleep '+'*'*20;
#outstr=wrtiteLog(fp_log,outstr);print( outstr);
writeELK_log(msg=outstr,fp=fp_log,thread=thread,line=sys._getframe().f_lineno,logger=logger)
time.sleep(timeSleep)
####check whether GPU child processes have ended; join() them when they have, otherwise zombie processes accumulate###
#taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}
for key in list(taskStatus['pidInfos'].keys()):
if not taskStatus['pidInfos'][key]['gpuProcess'].is_alive():
taskStatus['pidInfos'][key]['gpuProcess'].join()
taskStatus['pidInfos'].pop(key)
print('########Program End#####')
if __name__ == '__main__':
par={};
###topic0--online, topic1--offline
#par['server']='212.129.223.66:9092';par['topic']=('thsw','thsw2','testReturn');par['group_id']='test';
#101.132.127.1:19092
'''
par['server']='101.132.127.1:19092 ';par['topic']=('alg-online-tasks','alg-offline-tasks','alg-task-results');par['group_id']='test';
par['kafka']='mintors/kafka'
par['modelJson']='conf/model.json'
'''
masterFile="conf/master.json"
assert os.path.exists(masterFile)
with open(masterFile,'r') as fp:
data=json.load(fp)
par=data['par']
print(par)
detector(par)
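
The scheduler in detector() above gives online tasks priority: when no GPU is free, it looks for a GPU occupied only by offline processes, kills those processes, requeues their tasks as waiting, and launches the online task there. A minimal sketch of that preemption step (a sketch only; the two helper callables stand in for the utilsK.masterUtils functions used above, and this is not the repo's exact control flow):

def preempt_for_online(gpu_status, pid_infos, offline_queue,
                       get_available_gpu, get_potential_gpu):
    # 1) prefer a free GPU
    cuda = get_available_gpu(gpu_status)
    if cuda:
        return cuda
    # 2) otherwise free a GPU that is running only offline processes
    cuda_pid = get_potential_gpu(gpu_status, pid_infos)
    if not cuda_pid:
        return None  # nothing can be preempted; the online task must wait
    for pid in cuda_pid['pids']:
        pid_infos[pid]['gpuProcess'].kill()             # stop the offline process
        offline_queue.put(pid_infos[pid]['taskInfos'])  # requeue its task
    return cuda_pid['cuda']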

GetUploadDetails.py (+77, -0)

@@ -0,0 +1,77 @@
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
import sys

from typing import List

from alibabacloud_vod20170321.client import Client as vod20170321Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_vod20170321 import models as vod_20170321_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_tea_util.client import Client as UtilClient


class Sample:
def __init__(self):
pass

@staticmethod
def create_client(
access_key_id: str,
access_key_secret: str,
) -> vod20170321Client:
"""
Initialize the account Client using an AccessKey ID & Secret
@param access_key_id:
@param access_key_secret:
@return: Client
@throws Exception
"""
config = open_api_models.Config(
# your AccessKey ID,
access_key_id=access_key_id,
# your AccessKey Secret,
access_key_secret=access_key_secret
)
# the endpoint domain
config.endpoint = f'vod.cn-shanghai.aliyuncs.com'
return vod20170321Client(config)

@staticmethod
def main(
args: List[str],
) -> None:
client = Sample.create_client('LTAI5tSJ62TLMUb4SZuf285A', 'MWYynm30filZ7x0HqSHlU3pdLVNeI7')
get_upload_details_request = vod_20170321_models.GetUploadDetailsRequest(
media_ids='6d040a5749f74b80afac476563250d6d',
media_type='video'
)
runtime = util_models.RuntimeOptions()
try:
# print the API return value yourself when running this code
ret=client.get_upload_details_with_options(get_upload_details_request, runtime)
print(ret)
except Exception as error:
# print the error if needed
UtilClient.assert_as_string(error.message)

@staticmethod
async def main_async(
args: List[str],
) -> None:
client = Sample.create_client('accessKeyId', 'accessKeySecret')
get_upload_details_request = vod_20170321_models.GetUploadDetailsRequest(
media_ids='6d040a5749f74b80afac476563250d6d',
media_type='video'
)
runtime = util_models.RuntimeOptions()
try:
# print the API return value yourself when running this code
await client.get_upload_details_with_options_async(get_upload_details_request, runtime)
except Exception as error:
# print the error if needed
UtilClient.assert_as_string(error.message)


if __name__ == '__main__':
Sample.main(sys.argv[1:])

__pycache__/DSP_Send_tranfer_oss.cpython-38.pyc (BIN)


__pycache__/Send_tranfer.cpython-38.pyc (BIN)


__pycache__/Send_tranfer_oss.cpython-38.pyc (BIN)


__pycache__/queRiver.cpython-38.pyc (BIN)


__pycache__/subprocess.cpython-38.pyc (BIN)


code_bak/Send_debug.py (+119, -0)

@@ -0,0 +1,119 @@

import numpy as np

import base64
import io,os
import requests
import time,json
import string,random
import glob,string,sys

import oss2,copy
from kafka import KafkaProducer, KafkaConsumer



from kafka.errors import kafka_errors

msg_dict_off={
"msg_id":"bblvgyntTsZCamqjuLArkiSYIbKXEeWx",#message ID
"biz_id":"hehuzhang",#business ID
"mod_id":"ai",#model ID
"status":"running",#task status
"type":str(1),#data type: 1 image 2 video
"error":str(9999),#error info
"results":[#problem results
{
"original_url":"",#original image URL
"sign_url":"",#AI-annotated image URL
"category_id":"",#category ID
"description":"",#problem description
"time":"",#timestamp
}
]
}

def test5(par):
indir,outdir,logdir,jsonDir = par['indir'],par['outdir'],par['logdir'],par['jsonDir']
videoBakDir,ossPar,vodPar,kafkaPar = par['videoBakDir'], par['ossPar'],par['vodPar'],par['kafkaPar']

'''producer = KafkaProducer(
bootstrap_servers=kafkaPar['boostServer'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
#request_timeout_ms=3,
#connections_max_idle_ms=60000
)'''
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
value_serializer=lambda v: json.dumps(v).encode())

#while True:
for i in range(5):
for j in range(10):
msg = copy.deepcopy(msg_dict_off)
biz_id = 'i-%d-j-%d'%(i,j)
print(biz_id,msg['biz_id'])
msg['biz_id'] = biz_id
msg = json.dumps(msg, ensure_ascii=False)
'''future = producer.send(
kafkaPar['topic'],
msg
)'''
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=biz_id) # send the message
try:
record_metadata = future.get(timeout=30)
print('-'*10,biz_id,' send')
except Exception as e:
print('#'*10,biz_id,' error:',str(e))
for ii in range(30):
time.sleep(10)
print('sleep %d s'%(ii*10))
for j in range(10,20):
msg = copy.deepcopy(msg_dict_off)
biz_id = 'i-%d-j%d'%(i,j)
msg['biz_id'] = biz_id
msg = json.dumps(msg, ensure_ascii=False)
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=biz_id) # send the message
try:
record_metadata = future.get(timeout=30)
print('-'*10,biz_id,' send')
except Exception as e:
print('#'*10,biz_id,' error:',str(e))
if __name__=='__main__':

masterFile="conf/send_oss_debug.json"
assert os.path.exists(masterFile)
with open(masterFile,'r') as fp:
par=json.load(fp)

test5(par)

code_bak/Send_debugF.py (+117, -0)

@@ -0,0 +1,117 @@
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import kafka_errors
import traceback
import json
import time

def producer_demo():
# assume the produced messages are key-value pairs (not required) serialized as JSON
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
value_serializer=lambda v: json.dumps(v).encode(),
)
# send three messages
for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get(timeout=10) # check whether the send succeeded
except Exception as e: # a failed send raises kafka_errors
print(e)
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
value_serializer=lambda v: json.dumps(v).encode(),
)
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
try:
future.get() # check whether the send succeeded
print("re send {}".format(str(i)))
except Exception as e:
print('resend error:',e)

for ii in range(30):
time.sleep(10)
print('sleep %d s'%(ii*10))

for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get() # check whether the send succeeded
except Exception as e: # a failed send raises kafka_errors
print(e)
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
value_serializer=lambda v: json.dumps(v).encode(),
)
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
try:
future.get() # check whether the send succeeded
print("re send {}".format(str(i)))
except Exception as e:
print('resend error:',e)

def prod():
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
metadata_max_age_ms=120000,
value_serializer=lambda v: json.dumps(v).encode()
)
# send three messages
for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get() # check whether the send succeeded
except kafka_errors: # a failed send raises kafka_errors
traceback.format_exc()

for ii in range(30):
time.sleep(10)
print('sleep %d s'%(ii*10))

for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get() # check whether the send succeeded
except Exception as e:
print('resend error:',e)
if __name__=='__main__':
print('########demo1 2nd############')
prod()
print('########demo2 1st############')
producer_demo()

code_bak/Send_debugF2Wrong.py (+77, -0)

@@ -0,0 +1,77 @@
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import kafka_errors
import traceback
import json
import time

def producer_demo():
# assume the produced messages are key-value pairs (not required) serialized as JSON
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
value_serializer=lambda v: json.dumps(v).encode())
# send three messages
for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get(timeout=10) # check whether the send succeeded
except kafka_errors: # a failed send raises kafka_errors
traceback.format_exc()
for ii in range(30):
time.sleep(10)
print('sleep %d s'%(ii*10))
for i in range(10, 20):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get(timeout=10) # check whether the send succeeded
except : # a failed send raises kafka_errors
traceback.format_exc()
def prod():
producer = KafkaProducer(
bootstrap_servers=['101.132.127.1:19092'],
key_serializer=lambda k: json.dumps(k).encode(),
value_serializer=lambda v: json.dumps(v).encode())
# send three messages
for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get(timeout=10) # check whether the send succeeded
except kafka_errors: # a failed send raises kafka_errors
traceback.format_exc()

for ii in range(30):
time.sleep(10)
print('sleep %d s'%(ii*10))

for i in range(0, 3):
future = producer.send(
'alg-task-results',
key='count_num', # the same key is routed to the same partition
value=str(i)) # send the message
print("send {}".format(str(i)))
try:
future.get(timeout=10) # check whether the send succeeded
except kafka_errors: # a failed send raises kafka_errors
traceback.format_exc()
if __name__=='__main__':
print('########demo2 1st############')
prod()
print('########demo1 2nd############')
prod()

code_bak/client.py (+192, -0)

@@ -0,0 +1,192 @@
from PIL import Image
import numpy as np
import cv2
import base64
import io,os
import requests
import time

def test():
image_path='test/P0017.png'
image_array=cv2.imread(image_path)
print(image_array.shape)

image_encode=base64.b64encode(image_array).decode('utf-8')
image_bytes=bytes(image_encode,encoding='utf-8')
image_decode=np.frombuffer(base64.decodebytes(image_bytes),dtype=np.uint8)
print(image_decode.shape)
request_url='http://192.168.109.49:5000/'
headers={'content-type':'application/json'}
data={'image':image_encode}
#response=requests.post(request_url,data=data,headers=headers)
response=requests.post(request_url,"POST",files=data)
print(response)
#image=open(image_path,'rb').read()
#image=Image.open(io.BytesIO(image))
#image=image.resize((224,224))
#image=np.asarray(image
def test2():
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
api = 'http://192.168.16.45:8000/detector'
img = cv2.imencode('.jpg', image_ori)[-1]
#image_encode=base64.b64encode(image_ori).decode('utf-8')
#img=bytes(image_encode,encoding='utf-8')
#h,w,c=image_ori.shape
files = {'file': img}
files = {'file': img,'name':'P0017'
}
res = requests.request("POST", api, files=files).json()

if res['msg'] == 'success':
bboxes = res['data']['bboxes']
print(bboxes)
#bboxes = np.asarray(bboxes, dtype=np.int32)
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
def test3():
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
api = 'http://192.168.16.45:8000/detector'
#img = cv2.imencode('.jpg', img_ori)[-1]
input_ ={
'img_data':'',
'img_width':512,
'img_height':512,
'img_chs':3
}
with open(image_path,'rb') as f:
input_['img_data']=base64.b64encode(f.read()).decode('utf-8')
image_encode=base64.b64encode(image_ori).decode('utf-8')
image_bytes=bytes(image_encode,encoding='utf-8')
#input_['img_data']=image_bytes
response=requests.post(api,json=input_).json()
print(response['msg'],response['data']['bboxes'])

###---Client: read the image: image_ori=cv2.imread(image_path) ---> encode: base64.b64encode(f.read()).decode('utf-8'), (C,H,W)
###---Server: to bytes: image_bytes=bytes(img_data,encoding='utf-8') --> decode: img_data=np.frombuffer(base64.decodebytes(image_bytes),dtype=np.uint8) --> reshape: img_data=img_data.reshape(h,w,3)
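##A small runnable sketch of the flow described above (assumes test/P0017.png
##exists; defined here but not called): encode the raw array, decode it back,
##reshape, and check that the shapes match.
def roundtrip_check():
    image_ori = cv2.imread('test/P0017.png')
    h, w, c = image_ori.shape
    code = base64.b64encode(image_ori).decode('utf-8')  # client side: array -> b64 string
    data = np.frombuffer(base64.decodebytes(bytes(code, encoding='utf-8')), dtype=np.uint8)
    restored = data.reshape(h, w, c)                    # server side: b64 string -> array
    print(restored.shape == image_ori.shape)            # expect True
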
def test4():
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
api = 'http://192.168.16.45:8000/detector'
#img = cv2.imencode('.jpg', img_ori)[-1]
h,w,c = image_ori.shape
input_ ={
'img_data':'',
'img_width':h,
'img_height':w,
'img_chs':c
}
print('input:',input_)
##decode('utf-8') seems optional; it works either way.
input_['img_data']=base64.b64encode(image_ori).decode('utf-8')
response=requests.post(api,json=input_).json()
print(response['msg'],response['data']['bboxes'])
def decode_encode():
##b64-encode the file, then b64-decode and save it back
image_path='test/P0017.png'
with open(image_path,'rb') as fp:
encode_img = base64.b64encode(fp.read())
with open('t2.png','wb') as f2:
f2.write(base64.b64decode(encode_img))
##input array --> opencv-encoded image byte stream --> save to an image file
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
image_bytes = cv2.imencode(".png",image_ori)[1].tobytes()
with open('bytes2image.png','wb') as fp:
fp.write(image_bytes)
###byte stream to array via cv2.imdecode(np.frombuffer(image_bytes,np.uint8),1)
with open(image_path,'rb') as fp:
image_bytes2 = fp.read()
image_array = cv2.imdecode(np.frombuffer(image_bytes2,np.uint8),1)
##input array --> opencv-encoded image byte stream --> save to an image file
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
image_bytes = cv2.imencode(".png",image_ori)[1].tobytes()
with open('bytes2image.png','wb') as fp:
fp.write(image_bytes)
##array --> b64 string --> bytes --> uint8 array
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
image_encode=base64.b64encode(image_ori).decode('utf-8')
image_bytes=bytes(image_encode,encoding='utf-8')
img_data=np.frombuffer(base64.decodebytes(image_bytes),dtype=np.uint8)
def test5():
import json
##输入时数组-->opencv编码jpg字节流-->存储图片文件
image_path='imgs/DJI_0445.JPG'
image_dir = '/home/thsw2/WJ/data/THexit/val/images/'
filelist = os.listdir(image_dir)
time0 = time.time()
for filename in filelist:
image_path = os.path.join(image_dir,filename)
image_ori=cv2.imread(image_path)
image_ori = cv2.resize(image_ori, (0, 0), fx=0.25, fy=0.25, interpolation=cv2.INTER_NEAREST)
image_pngcode = cv2.imencode(".jpg",image_ori)[-1]

api = 'http://192.168.10.10:8000/detector'
#api = 'http://47.98.157.120:9040/api/taskFile/submitUAVKHQuestion'
#api = 'http://192.168.0.100:9040'

h,w,c = image_ori.shape
input_ ={
'imgData':'',
#'img_width':h,
#'img_height':w,
#'img_chs':c,
'imgName':filename
}
#print('input:',input_ )
t1 = time.time()
image_code = str(base64.b64encode(image_pngcode))[2:-1]
#print( image_code)
input_['imgData']=image_code
t2 = time.time()
response=requests.post(api,json=input_).json()
t3 = time.time()
print('bs encodetime:%.5f request time:%.5f \n'%(t2-t1,t3-t2))
t1_bytes = bytes(image_code,encoding='utf-8')
t2_bs64decode = base64.b64decode(t1_bytes)
img_data = cv2.imdecode(np.frombuffer(base64.b64decode( bytes(image_code,encoding='utf-8')),dtype=np.uint8),1)
#print(response['code'],response['data']['bboxes'])
print('Return:',response['data']['bboxes'],' img data shape:',img_data.shape)

time2 = time.time()
print('average time:',(time2-time0)/len(filelist))
if __name__=='__main__':
test5()

code_bak/consumer.py (+59, -0)

@@ -0,0 +1,59 @@
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import kafka_errors
import traceback
import json,time,random,string
import utilsK
from utilsK.modelEval import onlineModelProcess
import multiprocessing
from multiprocessing import Process,Queue

def consumer_demo(par):

consumer = KafkaConsumer(
par['topic'],
bootstrap_servers=par['server'],
group_id=par['group_id'],
auto_offset_reset='latest',
enable_auto_commit=False
)
itest = 0
'''
for message in consumer:
itest+=1
if itest>1:break;
print("receive value: {}, partition:{} offset:{}".format(
json.loads(message.value.decode()), message.partition,message.offset
)
)
consumer.commit()
'''
dataPar ={
'imgData':'',
'imgName':'testW',
'streamName':'THSA_HD5M'
}
dataPar['inSource'] = 'http://images.5gai.taauav.com/video/8bc32984dd893930dabb2856eb92b4d1.mp4';dataPar['outSource'] = None
process_uid=''.join(random.sample(string.ascii_letters + string.digits, 16))
parent_conn, child_conn = multiprocessing.Pipe();
dataPar['callback']=child_conn
gpuProcess=Process(target=onlineModelProcess,name='process:%s'%( process_uid ),args=(dataPar,))
gpuProcess.start()
child_return = parent_conn.recv()
returnData={'bboxes': 9999};
returnData['gpu']=str(child_return)
returnData['pid']=gpuProcess.pid
returnData['pidName']=gpuProcess.name
print( '#####consumer main:',returnData )

if __name__=='__main__':
par={};
par['server']='212.129.223.66:9092';par['topic']='thsw';par['group_id']='test';
consumer_demo(par)


code_bak/consumer2.py (+67, -0)

@@ -0,0 +1,67 @@
import traceback
from kafka import KafkaProducer, KafkaConsumer,TopicPartition
from kafka.errors import kafka_errors
import json
def get_left_cnt(consumer,topic):
partitions = [TopicPartition(topic, p) for p in consumer.partitions_for_topic(topic)]

# total
toff = consumer.end_offsets(partitions)
toff = [(key.partition, toff[key]) for key in toff.keys()]
toff.sort()
# current
coff = [(x.partition, consumer.committed(x)) for x in partitions]
coff.sort()

# cal sum and left
toff_sum = sum([x[1] for x in toff])
cur_sum = sum([x[1] for x in coff if x[1] is not None])
left_sum = toff_sum - cur_sum

return left_sum
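##Note: get_left_cnt above computes the consumer lag: the sum of end offsets
##over all partitions minus the sum of committed offsets (partitions that were
##never committed report None and are skipped), i.e. how many records remain unread.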
def getAllRecords(consumer,topics):
leftCnt = 0
for topic in topics[0:2]:
leftCnt+=get_left_cnt(consumer,topic)
out = []
if leftCnt == 0:
return []
for ii,msg in enumerate(consumer):
consumer.commit()
out.append(msg)
if ii== (leftCnt-1):
break###stream dropped or reached the end
return out
def detector(par):
consumer = KafkaConsumer(
bootstrap_servers=par['server'],
group_id=par['group_id'],
#auto_offset_reset='earliest',
auto_offset_reset='latest',
#isolation_level = 'read_committed',
#enable_auto_commit=True
)
consumer.subscribe( par['topic'][0:2])
print( ' Start kafka ')
msgs = getAllRecords(consumer,par['topic'])
print( 'getover cnt',len(msgs))
for ii,msg in enumerate(msgs):
print(msg)
try:
print('##'*10,ii)
taskInfos = eval(msg.value.decode('utf-8'))
print(taskInfos )
except:
print('**'*10,'wrong',ii)
print(msg.value.decode('utf-8'))
if __name__ == '__main__':
par={};
###topic0--online, topic1--offline

#par['server']='212.129.223.66:9092';par['topic']=('thsw','thsw2','testReturn');par['group_id']='test';

par['server']='101.132.127.1:19092';par['topic']=('alg-online-tasks', 'alg-task-results','alg-offline-tasks');par['group_id']='testww';
#par['server']='101.132.127.1:19092';par['topic']=('alg-online-tasks','alg-task-results','alg-offline-tasks');par['group_id']='testW11';
par['kafka']='mintors/kafka'
detector(par)

code_bak/consumer_sleep.py (+420, -0)

@@ -0,0 +1,420 @@
import numpy as np
import time,ast,copy
from flask import request, Flask,jsonify
import base64,cv2,os,sys,json
sys.path.extend(['../yolov5'])
#from Send_tranfer import b64encode_function,JsonSend,name_dic,nameID_dic,getLogFileFp
from segutils.segmodel import SegModel,get_largest_contours
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.torch_utils import select_device, load_classifier, time_synchronized
from queRiver import get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str
import subprocess as sp
import matplotlib.pyplot as plt
import torch,random,string
import multiprocessing
from multiprocessing import Process,Queue
import traceback
from kafka import KafkaProducer, KafkaConsumer,TopicPartition
from kafka.errors import kafka_errors

#torch.multiprocessing.set_start_method('spawn')
import utilsK
from utilsK.GPUtils import *
from utilsK.masterUtils import *
from utilsK.sendUtils import create_status_msg,update_json

#from utilsK.modelEval import onlineModelProcess
import random,string
from Send_tranfer_oss import msg_dict_on,msg_dict_off
process_id=0

def onlineModelProcess(parIn ):
DEBUG=False
streamName = parIn['streamName']
childCallback=parIn['callback']
#try:
for wan in ['test']:
jsonfile=parIn['modelJson']
with open(jsonfile,'r') as fp:
parAll = json.load(fp)
Detweights=parAll['gpu_process']['det_weights']
seg_nclass = parAll['gpu_process']['seg_nclass']
Segweights = parAll['gpu_process']['seg_weights']
videoSave = parAll['AI_video_save']
imageTxtFile = parAll['imageTxtFile']
inSource,outSource=parIn['inSource'],parIn['outSource']
kafka_par=parIn['kafka_par']
producer = KafkaProducer(bootstrap_servers=kafka_par['server'],value_serializer=lambda v: v.encode('utf-8'),metadata_max_age_ms=120000)
device = select_device(parIn['device'])
half = device.type != 'cpu' # half precision only supported on CUDA
model = attempt_load(Detweights, map_location=device) # load FP32 model
if half: model.half()
#print('###line116:,',len(dataset),dataset)
if (inSource.endswith('.MP4')) or (inSource.endswith('.mp4')):
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=True)[0:4]
else:
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=False)[0:4]
fps = int(fps+0.5)
segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)

if outSource != 'NO':
command=['ffmpeg','-y','-f', 'rawvideo','-vcodec','rawvideo','-pix_fmt', 'bgr24',
'-s', "{}x{}".format(outW,outH),# 图片分辨率
'-r', str(fps),# 视频帧率
'-i', '-','-c:v', 'libx264','-pix_fmt', 'yuv420p',
'-f', 'flv',outSource
]
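# The list above is an ffmpeg push command: raw BGR frames written to ffmpeg's
# stdin ('-i -') are x264-encoded and pushed as flv to the rtmp address in
# outSource; '-s' and '-r' must match the frames actually written to the pipe below.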
video_flag = videoSave['onLine']
logdir = parAll['logChildProcessOnline']
#print('*'*20,'###line82',command)
else:
video_flag = videoSave['offLine'] ;logdir = parAll['logChildProcessOffline']

fp_log=create_logFile(logdir=logdir)
# pipe setup: raw frames are fed to ffmpeg through its stdin
if outSource !='NO' :
ppipe = sp.Popen(command, stdin=sp.PIPE)
##post-processing parameters
par=parAll['post_process']
conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
outImaDir = par['outImaDir']
outVideoDir = par['outVideoDir']
labelnames=par['labelnames']
rainbows=par['rainbows']
fpsample = par['fpsample']
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
dataset = LoadStreams(inSource, img_size=640, stride=32)
childCallback.send('####model load success####')
if (outVideoDir!='NO') and video_flag:
msg_id = streamName.split('-')[2]
save_path = os.path.join(outVideoDir,msg_id+'.MP4')
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (outW,outH))
iframe = 0;post_results=[];time_beg=time.time()
t00=time.time()
time_kafka0=time.time()
for path, img, im0s, vid_cap in dataset:
t0= time_synchronized()
if not path:
EndUrl='%s/%s_frame-9999-9999_type-结束_9999999999999999_s-%s_AI.jpg'%(outImaDir,time_str(),streamName)
EndUrl = EndUrl.replace(' ','-').replace(':','-')
img_end=np.zeros((100,100),dtype=np.uint8);cv2.imwrite(EndUrl,img_end)
if imageTxtFile:
EndUrl_txt = EndUrl.replace('.jpg','.txt')
fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
EndUrl='%s/%s_frame-9999-9999_type-结束_9999999999999999_s-%s_OR.jpg'%(outImaDir,time_str(),streamName)
EndUrl = EndUrl.replace(' ','-').replace(':','-')
ret = cv2.imwrite(EndUrl,img_end)
if imageTxtFile:
EndUrl_txt = EndUrl.replace('.jpg','.txt')
fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
#print(EndUrl,ret)
childCallback.send('####stream ends####')
if (outVideoDir!='NO') and video_flag:
vid_writer.release()
break###stream broken or reached the end
if outSource == 'NO':###if not pushing a stream, show a progress bar
view_bar(iframe,totalcnt,time_beg ,parIn['process_uid'] )
###both live and offline tasks send a status message once per minute
time_kafka1 = time.time()
if time_kafka1 - time_kafka0 >60:
time_kafka0 = time_kafka1
###send status message: waiting
msg = copy.deepcopy(msg_dict_off);taskId,msgId = streamName.split('-')[1:3]
msg['msg_id']= msgId
if outSource == 'NO':
msg['progressbar']= '%.4f'%(iframe*1.0/totalcnt)
msg['type']=1
else:
msg['progressbarOn']= str(iframe)
msg['type']=2
msg = json.dumps(msg, ensure_ascii=False)

try:
record_metadata = producer.send(kafka_par['topic'], msg).get()
outstr='%s processing send progressbar or heartBeat to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
wrtiteLog(fp_log,outstr);print( outstr);
except Exception as e:
outstr='#######kafka ERROR when processing sending progressbar or heartBeat:, error: %s'%(str(e))
wrtiteLog(fp_log,outstr);print( outstr);
try:
producer = KafkaProducer(bootstrap_servers=kafka_par['server'], value_serializer=lambda v: v.encode('utf-8'))
future = producer.send(kafka_par['topic'], msg).get()
except Exception as e:
outstr='%s re-send progressbar or heartBeat kafka,processing video or stream: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
wrtiteLog(fp_log,outstr);print( outstr);
time0=time.time()
iframe +=1
time1=time.time()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
timeseg0 = time.time()
seg_pred,segstr = segmodel.eval(im0s[0] )
timeseg1 = time.time()
t1= time_synchronized()
pred = model(img,augment=False)[0]
time4 = time.time()
datas = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
t2= time_synchronized()
#print('###line138:',timeOut,outSource,outVideoDir)
##every fpsample frames, flush the accumulated results; save images if problems were detected
if (iframe % fpsample == 0) and (len(post_results)>0) :
parImage=save_problem_images(post_results,iframe,names,streamName=streamName,outImaDir='problems/images_tmp',imageTxtFile=imageTxtFile)
post_results=[]

if len(p_result[2] )>0: ##
post_results.append(p_result)
t3= time_synchronized()
image_array = p_result[1]
if outSource!='NO':
ppipe.stdin.write(image_array.tobytes())
if (outVideoDir!='NO') and video_flag:
ret = vid_writer.write(image_array)
t4= time_synchronized()
timestr2 = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
if iframe%100==0:
outstr='%s, read:%.1f ms, copy:%.1f ms, seg-infer:%.1f ms, det-infer:%.1f ms, draw:%.1f ms, save:%.1f ms, total:%.1f ms \n'%(timestr2,(t0 - t00)*1000,(timeseg0-t0)*1000, (t1 - timeseg0)*1000,(t2-t1)*1000, (t3 - t2)*1000,(t4-t3)*1000, (t4-t00)*1000)
wrtiteLog(fp_log,outstr);
#print(outstr)
t00 = t4;
##errors such as model-loading failures
#except Exception as e:
# print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) ,'*'*20,'###line177 ERROR:',e)
# childCallback.send(e) #send the exception out through the pipe
def lauch_process(gpuid,inSource,outSource,taskId,msgId,modelJson,kafka_par):

if outSource=='NO':
streamName='off-%s-%s'%(taskId,msgId)
else:
streamName='live-%s-%s'%(taskId,msgId)
dataPar ={
'imgData':'',
'imgName':'testW',
'streamName':streamName,
'taskId':taskId,
'msgId':msgId,
'device':str(gpuid),
'modelJson':modelJson,
'kafka_par':kafka_par,
}
#dataPar['inSource'] = 'http://images.5gai.taauav.com/video/8bc32984dd893930dabb2856eb92b4d1.mp4';dataPar['outSource'] = None
dataPar['inSource'] = inSource;dataPar['outSource'] = outSource
process_uid=''.join(random.sample(string.ascii_letters + string.digits, 16));dataPar['process_uid']=process_uid
parent_conn, child_conn = multiprocessing.Pipe();dataPar['callback']=child_conn
gpuProcess=Process(target=onlineModelProcess,name='process:%s'%( process_uid ),args=(dataPar,))
gpuProcess.start()
#print(dir(gpuProcess))
child_return = parent_conn.recv()
timestr2=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
print(timestr2,'-'*20,'progress:%s ,msgId:%s , taskId:%s return:'%(process_uid,msgId,taskId),child_return)
return gpuProcess

msg_dict_offline = {
"biz_id":"hehuzhang",
"mod_id":"ai",
"msg_id":'bb'+''.join(random.sample(string.ascii_letters ,30) ) ,
"offering_id":"http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4",
"offering_type":"mp4",
"results_base_dir": "XJRW202203171535"+str(random.randint(10,99)),
'outSource':'NO'
}
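#Hedged usage sketch for publishing the test message above (server and topic follow the
#commented examples near the bottom of this file and are assumptions):
#producer = KafkaProducer(bootstrap_servers='101.132.127.1:19092',
#                         value_serializer=lambda v: v.encode('utf-8'))
#producer.send('alg-offline-tasks', json.dumps(msg_dict_offline, ensure_ascii=False)).get()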



def detector_0(par):
####初始化信息列表
consumer = KafkaConsumer(
bootstrap_servers=par['server'],
group_id=par['group_id'],
auto_offset_reset='earliest',
#max_poll_interval_ms = 1000*60*6,
#session_timeout_ms=1000*60*5,
request_timeout_ms=15000,
#enable_auto_commit=True
)
consumer.subscribe( par['topic'][0:2])
kafka_par ={ 'server':par['server'],'topic':par['topic'][2] }
producer = KafkaProducer(
bootstrap_servers=par['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000)
taskStatus={}
taskStatus['onLine'] = Queue(100)
taskStatus['offLine']= Queue(100)
taskStatus['pidInfos']= {}
fp_log=create_logFile(logdir=par['logDir'])
wrtiteLog(fp_log,'###########master starts in line222######\n')
timeSleep=1
#taskStatus['pidInfos'][31897]={'gpuProcess':'onlineProcess','type':'onLine'}
time0=time.time()
time0_kafQuery=time.time()
time0_taskQuery=time.time()
time0_sleep=time.time()
time_interval=10; outStrList={}
isleep=0
while True:###poll once every timeSleep seconds
#for isleep in range(1):

##1-read kafka and update the task queues
try:
#msgs = getAllRecords(consumer,par['topic'])
msgs=[]
for ii,msg in enumerate(consumer):
consumer.commit()
msgs.append(msg)
except Exception as e:
outstr='%s kafka connecting error:%s '%('#'*20,e)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
time.sleep(timeSleep)
continue
#if get_whether_gpuProcess():
for it in range(30):
timestr=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
print('%s i=%d sleep:%s '%(timestr,isleep,it*10))
time.sleep(10)
isleep+=1
print('########Program End#####')
def detector(par):
####初始化信息列表
consumer = KafkaConsumer(
bootstrap_servers=par['server'],
group_id=par['group_id'],
auto_offset_reset='earliest',
#max_poll_interval_ms = 1000*60*6,
#session_timeout_ms=1000*60*5,
#request_timeout_ms=11000,
#enable_auto_commit=True
)
consumer.subscribe( par['topic'][0:2])
kafka_par ={ 'server':par['server'],'topic':par['topic'][2] }
producer = KafkaProducer(
bootstrap_servers=par['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000)
taskStatus={}
taskStatus['onLine'] = Queue(100)
taskStatus['offLine']= Queue(100)
taskStatus['pidInfos']= {}
timeSleep=1
#taskStatus['pidInfos'][31897]={'gpuProcess':'onlineProcess','type':'onLine'}
time0=time.time()
time0_kafQuery=time.time()
time0_taskQuery=time.time()
time0_sleep=time.time()
time_interval=10; outStrList={}
isleep=0
for ii,msg in enumerate(consumer):
try:
taskInfos = eval(msg.value.decode('utf-8') )
except:
outstr='%s msg format error,value:%s,offset:%d partition:%s topic:%s'%('#'*20,msg.value,msg.offset,msg.partition,msg.topic)
print(outstr)
continue
outstr='%s value:%s,offset:%d partition:%s topic:%s'%('#'*20,msg.value,msg.offset,msg.partition,msg.topic)
print(outstr)
def get_file():
print("file name:",__file__,sys._getframe().f_lineno)
print("function name: ", sys._getframe().f_code.co_name)
print("caller name: ", sys._getframe().f_back.f_code.co_name)

if __name__ == '__main__':
par={};
###topic0 -- online tasks, topic1 -- offline tasks

#par['server']='212.129.223.66:9092';par['topic']=('thsw','thsw2','testReturn');par['group_id']='test';
#101.132.127.1:19092
'''
par['server']='101.132.127.1:19092 ';par['topic']=('alg-online-tasks','alg-offline-tasks','alg-task-results');par['group_id']='test';
par['kafka']='mintors/kafka'
par['modelJson']='conf/model.json'
'''
masterFile="conf/master_ten.json"
assert os.path.exists(masterFile)
with open(masterFile,'r') as fp:
data=json.load(fp)
get_file()
par=data['par']
print(par)
detector(par)




+ 79
- 0
code_bak/decode.py View File

@@ -0,0 +1,79 @@
from PIL import Image
import numpy as np
import cv2
import base64
import io
import requests


def test():
image_path='test/P0017.png'
image_array=cv2.imread(image_path)
print(image_array.shape)
image_encode=base64.b64encode(image_array).decode('utf-8')
image_bytes=bytes(image_encode,encoding='utf-8')
image_decode=np.frombuffer(base64.decodebytes(image_bytes),dtype=np.uint8)
print(image_decode.shape)
request_url='http://192.168.109.49:5000/'
headers={'content-type':'application/json'}
data={'image':image_encode}
#response=requests.post(request_url,data=data,headers=headers)
response=requests.post(request_url,json=data,headers=headers)
print(response)
#image=open(image_path,'rb').read()
#image=Image.open(io.BytesIO(image))
#image=image.resize((224,224))
#image=np.asarray(image)
def test2():
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
api = 'http://192.168.109.49:8000/detector'
#img = cv2.imencode('.jpg', img_ori)[-1]
image_encode=base64.b64encode(image_ori).decode('utf-8')
img=bytes(image_encode,encoding='utf-8')
h,w,c=image_ori.shape
files = {'file': img,'name':'P0017',
'img_width':w,
'img_height':h,
'img_chs':c
}
res = requests.request("POST", api, files=files).json()

if res['msg'] == 'success':
bboxes = res['data']['bboxes']
print(bboxes)
#bboxes = np.asarray(bboxes, dtype=np.int32)
def test3():
image_path='test/P0017.png'
image_ori=cv2.imread(image_path)
api = 'http://192.168.109.49:8000/detector'
#img = cv2.imencode('.jpg', img_ori)[-1]
input_ ={
'img_data':'',
'img_width':512,
'img_height':512,
'img_chs':3
}
with open(image_path,'rb') as f:
input_['img_data']=base64.b64encode(f.read()).decode()
image_encode=base64.b64encode(image_ori).decode('utf-8')
image_bytes=bytes(image_encode,encoding='utf-8')
input_['img_data']=image_encode  # must be a str: bytes are not JSON-serializable
response=requests.post(api,json=input_)
print(response.text)
if __name__=='__main__':
test2()
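Note on decode.py: base64-encoding a raw numpy array, as test() and test2() do, drops the array shape, which is why img_width/img_height/img_chs must travel alongside the payload. A minimal sketch (path is a placeholder) that sidesteps this by round-tripping through an encoded JPEG:

import base64, cv2
import numpy as np

img = cv2.imread('test/P0017.png')                        # BGR array
ok, buf = cv2.imencode('.jpg', img)                       # shape travels inside the JPEG
b64 = base64.b64encode(buf.tobytes()).decode('utf-8')     # JSON-safe string payload

raw = base64.b64decode(b64)
img2 = cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)
assert img2 is not None and img2.shape == img.shape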

+ 645
- 0
code_bak/master_0508.py View File

@@ -0,0 +1,645 @@
import numpy as np
import time,ast,copy
from flask import request, Flask,jsonify
import base64,cv2,os,sys,json
sys.path.extend(['../yolov5'])
#from Send_tranfer import b64encode_function,JsonSend,name_dic,nameID_dic,getLogFileFp
from segutils.segmodel import SegModel,get_largest_contours
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.torch_utils import select_device, load_classifier, time_synchronized
from queRiver import get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str
import subprocess as sp
import matplotlib.pyplot as plt
import torch,random,string
import multiprocessing
from multiprocessing import Process,Queue
import traceback
from kafka import KafkaProducer, KafkaConsumer,TopicPartition
from kafka.errors import kafka_errors

#torch.multiprocessing.set_start_method('spawn')
import utilsK
from utilsK.GPUtils import *
from utilsK.masterUtils import *
from utilsK.sendUtils import create_status_msg,update_json

#from utilsK.modelEval import onlineModelProcess
import random,string
from Send_tranfer_oss import msg_dict_on,msg_dict_off
import pykafka
from pykafka import KafkaClient
process_id=0

def onlineModelProcess(parIn ):
DEBUG=False
streamName = parIn['streamName']
childCallback=parIn['callback']
outStrList={}
#try:
for wan in ['test']:
jsonfile=parIn['modelJson']
with open(jsonfile,'r') as fp:
parAll = json.load(fp)
Detweights=parAll['gpu_process']['det_weights']
seg_nclass = parAll['gpu_process']['seg_nclass']
Segweights = parAll['gpu_process']['seg_weights']
videoSave = parAll['AI_video_save']
imageTxtFile = parAll['imageTxtFile']
taskId,msgId = streamName.split('-')[1:3]
inSource,outSource=parIn['inSource'],parIn['outSource']
##set up the log file
if outSource != 'NO':
logdir = parAll['logChildProcessOnline']
waitingTime=parAll['StreamWaitingTime']
else:
logdir = parAll['logChildProcessOffline']
waitingTime=5
fp_log=create_logFile(logdir=logdir)
kafka_par=parIn['kafka_par']
producer = KafkaProducer(bootstrap_servers=kafka_par['server'],value_serializer=lambda v: v.encode('utf-8'),metadata_max_age_ms=120000)

####the video/stream must be validated first
###at startup, if an online task has no stream yet, send the heartbeat message msg_h
msg_h= copy.deepcopy(msg_dict_off);
msg_h['status']='waiting';msg_h['msg_id']=msgId

if outSource == 'NO':
msg_h['type']=1
Stream_ok= get_fps_rtmp(inSource,video=True)
else:
msg_h['type']=2
msg_h_d = json.dumps(msg_h, ensure_ascii=False)
outStrList['success']= '%s waiting stream or video, send heartbeat: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg_h);
outStrList['failure']='#######kafka ERROR waiting stream or video, send heartbeat'
outStrList['Refailure']='##############kafka ERROR waiting stream or video, Re-send heartbeat'
Stream_ok=check_stream(inSource,producer,kafka_par,msg_h_d,outStrList,fp_log ,timeMs=waitingTime)
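# Note (assumption): check_stream/send_kafka appear to consume three log templates from
# outStrList -- 'success' when delivery works, 'failure' on the first send error, and
# 'Refailure' when the retry also fails (their definitions live in utilsK and are not shown here).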
if Stream_ok:###send the start signal
msg_h['status']='running'
msg_h_d = json.dumps(msg_h, ensure_ascii=False)
outStrList['success']= '%s informing stream/video is ok, taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg_h);
outStrList['failure']='#######kafka ERROR ,when informing stream/video is ok'
outStrList['Refailure']='##############kafka ERROR, when re-informing stream/video is ok'
send_kafka(producer,kafka_par,msg_h_d,outStrList,fp_log );
else:
####check whether the offline video is valid; report an error if it is not
outstr='############# offline video or live stream Error:%s #################'%(inSource)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
msg_h['error']=str(1001);msg_h['status']='failed';
msg_h_d = json.dumps(msg_h, ensure_ascii=False);
outStrList['success']= '%s informing invalid video or stream success : taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg_h);
outStrList['failure']='#######kafka ERROR, when informing invalid video or stream'
outStrList['Refailure']='##############kafka ERROR,when re-informing invalid video or stream'
send_kafka(producer,kafka_par,msg_h_d,outStrList,fp_log );
childCallback.send(' offline video or live stream Error')
continue
if (inSource.endswith('.MP4')) or (inSource.endswith('.mp4')):
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=True)[0:4]
else:
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=False)[0:4]
fps = int(fps+0.5)
if outSource != 'NO':
command=['ffmpeg','-y','-f', 'rawvideo','-vcodec','rawvideo','-pix_fmt', 'bgr24',
'-s', "{}x{}".format(outW,outH),# 图片分辨率
'-r', str(fps),# 视频帧率
'-i', '-','-c:v', 'libx264','-pix_fmt', 'yuv420p',
'-f', 'flv',outSource
]
video_flag = videoSave['onLine']
logdir = parAll['logChildProcessOnline']
waitingTime=parAll['StreamWaitingTime']
else:
video_flag = videoSave['offLine'] ;logdir = parAll['logChildProcessOffline']
waitingTime=5
fp_log=create_logFile(logdir=logdir)

device = select_device(parIn['device'])
half = device.type != 'cpu' # half precision only supported on CUDA
model = attempt_load(Detweights, map_location=device) # load FP32 model
if half: model.half()

segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)

# pipe setup: raw frames are fed to ffmpeg through its stdin
if outSource !='NO' :
ppipe = sp.Popen(command, stdin=sp.PIPE)
##post-processing parameters
par=parAll['post_process']
conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
outImaDir = par['outImaDir']
outVideoDir = par['outVideoDir']
labelnames=par['labelnames']
rainbows=par['rainbows']
fpsample = par['fpsample']
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
dataset = LoadStreams(inSource, img_size=640, stride=32)
childCallback.send('####model load success####')
if (outVideoDir!='NO') and video_flag:
msg_id = streamName.split('-')[2]
save_path = os.path.join(outVideoDir,msg_id+'.MP4')
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (outW,outH))
iframe = 0;post_results=[];time_beg=time.time()
t00=time.time()
time_kafka0=time.time()
for path, img, im0s, vid_cap in dataset:
t0= time_synchronized()
if not path:
EndUrl='%s/%s_frame-9999-9999_type-结束_9999999999999999_s-%s_AI.jpg'%(outImaDir,time_str(),streamName)
EndUrl = EndUrl.replace(' ','-').replace(':','-')
img_end=np.zeros((100,100),dtype=np.uint8);cv2.imwrite(EndUrl,img_end)
if imageTxtFile:
EndUrl_txt = EndUrl.replace('.jpg','.txt')
fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
EndUrl='%s/%s_frame-9999-9999_type-结束_9999999999999999_s-%s_OR.jpg'%(outImaDir,time_str(),streamName)
EndUrl = EndUrl.replace(' ','-').replace(':','-')
ret = cv2.imwrite(EndUrl,img_end)
if imageTxtFile:
EndUrl_txt = EndUrl.replace('.jpg','.txt')
fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
#print(EndUrl,ret)
childCallback.send('####stream ends####')
if (outVideoDir!='NO') and video_flag:
vid_writer.release()
break###stream broken or reached the end
if outSource == 'NO':###if not pushing a stream, show a progress bar
view_bar(iframe,totalcnt,time_beg ,parIn['process_uid'] )
###both live and offline tasks send a status message once per minute
time_kafka1 = time.time()
if time_kafka1 - time_kafka0 >60:
time_kafka0 = time_kafka1
###send status message: waiting
msg = copy.deepcopy(msg_dict_off);
msg['msg_id']= msgId
if outSource == 'NO':
msg['progressbar']= '%.4f'%(iframe*1.0/totalcnt)
msg['type']=1
else:
msg['progressbarOn']= str(iframe)
msg['type']=2
msg = json.dumps(msg, ensure_ascii=False)
'''
try:
record_metadata = producer.send(kafka_par['topic'], msg).get()
outstr='%s processing send progressbar or heartBeat to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
wrtiteLog(fp_log,outstr);print( outstr);
except Exception as e:
outstr='#######kafka ERROR when processing sending progressbar or heartBeat:, error: %s'%(str(e))
wrtiteLog(fp_log,outstr);print( outstr);
try:
producer = KafkaProducer(bootstrap_servers=par['server'], value_serializer=lambda v: v.encode('utf-8')).get()
future = producer.send(par['topic'][2], msg).get()
except Exception as e:
outstr='%s re-send progressbar or heartBeat kafka,processing video or stream: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
wrtiteLog(fp_log,outstr);print( outstr);
'''
###send status message: waiting
outStrList['success']= '%s processing send progressbar or heartBeat to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
outStrList['failure']='#######kafka ERROR when processing sending progressbar or heartBeat'
outStrList['Refailure']='%s re-send progressbar or heartBeat kafka,processing video or stream: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
time0=time.time()
iframe +=1
time1=time.time()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
timeseg0 = time.time()
seg_pred,segstr = segmodel.eval(im0s[0] )
timeseg1 = time.time()
t1= time_synchronized()
pred = model(img,augment=False)[0]
time4 = time.time()
datas = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
t2= time_synchronized()
#print('###line138:',timeOut,outSource,outVideoDir)
##every fpsample frames, flush the accumulated results; save images if problems were detected
if (iframe % fpsample == 0) and (len(post_results)>0) :
parImage=save_problem_images(post_results,iframe,names,streamName=streamName,outImaDir='problems/images_tmp',imageTxtFile=imageTxtFile)
post_results=[]

if len(p_result[2] )>0: ##
post_results.append(p_result)
t3= time_synchronized()
image_array = p_result[1]
if outSource!='NO':
ppipe.stdin.write(image_array.tobytes())
if (outVideoDir!='NO') and video_flag:
ret = vid_writer.write(image_array)
t4= time_synchronized()
timestr2 = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
if iframe%100==0:
outstr='%s, read:%.1f ms, copy:%.1f ms, seg-infer:%.1f ms, det-infer:%.1f ms, draw:%.1f ms, save:%.1f ms, total:%.1f ms \n'%(timestr2,(t0 - t00)*1000,(timeseg0-t0)*1000, (t1 - timeseg0)*1000,(t2-t1)*1000, (t3 - t2)*1000,(t4-t3)*1000, (t4-t00)*1000)
wrtiteLog(fp_log,outstr);
#print(outstr)
t00 = t4;
##errors such as model-loading failures
#except Exception as e:
# print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) ,'*'*20,'###line177 ERROR:',e)
# childCallback.send(e) #send the exception out through the pipe
def lauch_process(gpuid,inSource,outSource,taskId,msgId,modelJson,kafka_par):

if outSource=='NO':
streamName='off-%s-%s'%(taskId,msgId)
else:
streamName='live-%s-%s'%(taskId,msgId)
dataPar ={
'imgData':'',
'imgName':'testW',
'streamName':streamName,
'taskId':taskId,
'msgId':msgId,
'device':str(gpuid),
'modelJson':modelJson,
'kafka_par':kafka_par,
}
#dataPar['inSource'] = 'http://images.5gai.taauav.com/video/8bc32984dd893930dabb2856eb92b4d1.mp4';dataPar['outSource'] = None
dataPar['inSource'] = inSource;dataPar['outSource'] = outSource
process_uid=''.join(random.sample(string.ascii_letters + string.digits, 16));dataPar['process_uid']=process_uid
parent_conn, child_conn = multiprocessing.Pipe();dataPar['callback']=child_conn
gpuProcess=Process(target=onlineModelProcess,name='process:%s'%( process_uid ),args=(dataPar,))
gpuProcess.start()
#print(dir(gpuProcess))
#child_return = parent_conn.recv()
#timestr2=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
#print(timestr2,'-'*20,'progress:%s ,msgId:%s , taskId:%s return:'%(process_uid,msgId,taskId),child_return)
return gpuProcess

msg_dict_offline = {
"biz_id":"hehuzhang",
"mod_id":"ai",
"msg_id":'bb'+''.join(random.sample(string.ascii_letters ,30) ) ,
"offering_id":"http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4",
"offering_type":"mp4",
"results_base_dir": "XJRW202203171535"+str(random.randint(10,99)),
'outSource':'NO'
}



def detector(par):
####初始化信息列表
consumer = KafkaConsumer(bootstrap_servers=par['server'],client_id='AI_server',group_id=par['group_id'],auto_offset_reset='earliest')
consumer.subscribe( par['topic'][0:2])
'''
client = KafkaClient(hosts=par['server'])
consumer_pys=[]
for topic_name in par['topic'][0:2]:
consumer_pys.append(client.topics[ topic_name ].get_simple_consumer(consumer_group=par['group_id'],timeout=30))
'''
kafka_par ={ 'server':par['server'],'topic':par['topic'][2] }
producer = KafkaProducer(
bootstrap_servers=par['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000)
taskStatus={}
taskStatus['onLine'] = Queue(100)
taskStatus['offLine']= Queue(100)
taskStatus['pidInfos']= {}
fp_log=create_logFile(logdir=par['logDir'])
wrtiteLog(fp_log,'###########master starts in line222######\n')
timeSleep=1
#taskStatus['pidInfos'][31897]={'gpuProcess':'onlineProcess','type':'onLine'}
time0=time.time()
time0_kafQuery=time.time()
time0_taskQuery=time.time()
time0_sleep=time.time()
time_interval=10; outStrList={}
while True:###poll once every timeSleep seconds
#for isleep in range(1):

##1-read kafka and update the task queues
try:
'''
msgs=[]
for consumer in consumer_pys:
for msg in consumer:
if msg is None:break
else:msgs.append(msg)
'''
msgs = getAllRecords(consumer,par['topic'])
except Exception as e:
outstr='%s kafka connecting error:%s '%('#'*20,e)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
time.sleep(timeSleep)
continue
#if get_whether_gpuProcess():
time0_kafQuery,printFlag = check_time_interval(time0_kafQuery,time_interval)
if printFlag:
outstr_kafka=' ##### kafka Left %d records####'%(len(msgs));
outstr_kafka=wrtiteLog(fp_log,outstr_kafka)

for ii,msg in enumerate(msgs):

try:
#taskInfos = eval(json.loads(msg.value ))
taskInfos = eval(msg.value.decode('utf-8') )
except:
outstr='%s msg format error,value:%s,offset:%d partition:%s topic:%s'%('#'*20,msg.value,msg.offset,msg.partition,msg.topic)
print(outstr)
continue
if msg.topic == par['topic'][0]: ##
taskInfos['inSource']= taskInfos['pull_channel'];
taskInfos['outSource']= get_push_address(taskInfos['push_channel']) ;
taskStatus['onLine'].put( taskInfos )
save_message(par['kafka'],taskInfos)
###send status message: waiting
msg = create_status_msg(msg_dict_on,taskInfos,sts='waiting')
outStrList['success']= '%s read from kafka online task and back to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when read from kafka online task and back to kafka'
outStrList['Refailure']='##############kafka ERROR when read from kafka online task and resend back to kafka:'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
else:
taskInfos['inSource']= taskInfos['offering_id'];
taskInfos['outSource']= 'NO'
taskStatus['offLine'].put( taskInfos )
save_message(par['kafka'],taskInfos)

###send status message: waiting
msg = create_status_msg(msg_dict_off,taskInfos,sts='waiting')


outStrList['success']= '%s read from kafka offline task and back to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when read from kafka offline task and back to kafka'
outStrList['Refailure']='##############kafka ERROR when read from kafka offline task and resend back to kafka:'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
#if get_whether_gpuProcess():
time0_taskQuery,printFlag = check_time_interval(time0_taskQuery,time_interval)
outstr_task= ' task queue onLine cnt:%d offLine:%d'%(taskStatus['onLine'].qsize(), taskStatus['offLine'].qsize())
##2-refresh GPU status
gpuStatus = getGPUInfos()
##3-online tasks take priority
if not taskStatus['onLine'].empty():
###3.1-first check whether any GPU is free:
cuda = get_available_gpu(gpuStatus)
###fetch the online task info and launch its process
taskInfos = taskStatus['onLine'].get()
'''
#for an online task, probe the stream first: re-check every 10 s, and drop it if no stream appears within two minutes
msg_h= copy.deepcopy(msg_dict_on);
msg_h['status']='waiting';msg_h['msg_id']=taskInfos['msg_id'];msg_h = json.dumps(msg_h, ensure_ascii=False)
outStrList['success']= '%s waiting stream, send heartbeat, msgId:%s, taskID:%s ,%s'%('-'*20, taskInfos['msg_id'],taskInfos['results_base_dir'],msg_h)
outStrList['failure']='#######kafka ERROR waiting stream, send heartbeat'
outStrList['Refailure']='##############kafka ERROR waiting stream, Re-send heartbeat'
print('################line389')
Stream_ok=check_stream(taskInfos['inSource'],producer,par,msg_h,outStrList,fp_log ,timeMs=par['StreamWaitingTime'])
if not Stream_ok:
outstr='##############live Stream ERROR #################'
outstr=wrtiteLog(fp_log,outstr);print( outstr);

continue
'''
print('################396',cuda)
if cuda: ###3.1.1 - a GPU is free
#lauch process
msg= copy.deepcopy(msg_dict_on);

gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['msg_id'],par['modelJson'],kafka_par)
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}
'''
##send the status message back to kafka
msg=update_json(taskInfos,msg,offkeys=["msg_id","biz_id" ,"mod_id" ])
msg['results'][0]['original_url']=taskInfos['inSource']
msg['results'][0]['sign_url']=get_boradcast_address(taskInfos['outSource'])
msg['status']='running'
msg = json.dumps(msg, ensure_ascii=False)
outStrList['success']= '%s start online task from free gpu and back to kafka: pid:%d taskId:%s msgId:%s send:%s'%('-'*20,gpuProcess.pid,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when start online task from free gpu and back to kafka'
outStrList['Refailure']='##############kafka ERROR when start online task from free gpu and resend back to kafka'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
'''
else:###3.1.2-no free GPU
##check whether some GPU is occupied only by offline processes
cuda_pid = get_potential_gpu(gpuStatus,taskStatus['pidInfos'])
if cuda_pid:#3.1.2.1 - ##there are offline processes that can be killed
cuda = cuda_pid['cuda']
pids = cuda_pid['pids']
##kill the offline processes and update the offline task queue
cnt_off_0 = taskStatus['offLine'].qsize()
for pid in pids:
##kill the offline process
taskStatus['pidInfos'][pid]['gpuProcess'].kill()
##put the task back on the offline queue
taskStatus['offLine'].put( taskStatus['pidInfos'][pid]['taskInfos'] )
taskInfos_off=taskStatus['pidInfos'][pid]['taskInfos']
##send offline-task status update: back to waiting
msg= copy.deepcopy(msg_dict_off);  # deepcopy so the shared template dict is not mutated
msg=update_json(taskInfos_off,msg,offkeys=["msg_id","biz_id" ,"mod_id"] )
msg['results'][0]['original_url']=taskInfos_off['inSource']
msg['results'][0]['sign_url']=get_boradcast_address(taskInfos_off['outSource'])
msg['status']='waiting'
msg = json.dumps(msg, ensure_ascii=False)

outStrList['success']= '%s start online task after kill offline tasks and back to kafka: pid:%d taskId:%s msgId:%s send:%s'%('-'*20,pid,taskInfos_off['results_base_dir'], taskInfos_off['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when start online task after kill offline tasks and back to kafka'
outStrList['Refailure']='##############kafka ERROR when start online task after kill offline tasks and back to kafka'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
cnt_off_1 = taskStatus['offLine'].qsize()
outstr='%s before killing process, offtask cnt:%d ,after killing, offtask cnt:%d %s'%('-'*20 ,cnt_off_0,cnt_off_1,'*'*20)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['msg_id'],par['modelJson'],kafka_par)
###update pidInfos
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}
'''
##send the status message back to kafka
msg= copy.deepcopy(msg_dict_on);
msg=update_json(taskInfos,msg,offkeys=["msg_id","biz_id" ,"mod_id"] )
msg['results'][0]['original_url']=taskInfos['inSource']
msg['results'][0]['sign_url']=get_boradcast_address(taskInfos['outSource'])
msg['status']='running'
msg = json.dumps(msg, ensure_ascii=False)

outStrList['success']= '%s start online task after kill offline tasks and back to kafka: pid:%d taskId:%s msgId:%s send:%s'%('-'*20,gpuProcess.pid,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when start online task after kill offline tasks and back to kafka'
outStrList['Refailure']='##############kafka ERROR when start online task after kill offline tasks and back to kafka'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
'''


else:
outstr='######No available GPUs for onLine####'
outstr=wrtiteLog(fp_log,outstr);print( outstr);
##4-refresh GPU status
gpuStatus = getGPUInfos()
##5-then handle offline tasks
if not taskStatus['offLine'].empty():
cudaArrange= arrange_offlineProcess(gpuStatus,taskStatus['pidInfos'],modelMemory=1500)
outstr='###line342 IN OFF LINE TASKS available cudas:%s'%(cudaArrange)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
for cuda in cudaArrange:
if not taskStatus['offLine'].empty():
taskInfos = taskStatus['offLine'].get()
'''
####check whether the offline video is valid; report an error if it is not
Stream_ok= get_fps_rtmp(taskInfos['inSource'],video=True)
if not Stream_ok:
outstr='############# offline video Error:%s #################'%(taskInfos['inSource'])
outstr=wrtiteLog(fp_log,outstr);print( outstr);
msg_h= copy.deepcopy(msg_dict_off);msg_h['error']=str(1001)###
msg_h['status']='failed';msg_h['msg_id']=taskInfos['msg_id'];msg_h = json.dumps(msg_h, ensure_ascii=False);
outStrList['success']= '%s video invalid msg sending success , msgId:%s, taskID:%s ,%s'%('-'*20, taskInfos['msg_id'],taskInfos['results_base_dir'],msg_h)
outStrList['failure']='#######kafka ERROR when sending invalid msg'
outStrList['Refailure']='##############kafka ERROR when Re-sending invalid msg'
send_kafka(producer,kafka_par,msg_h,outStrList,fp_log );
continue
'''
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['msg_id'],par['modelJson'],kafka_par)
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'offLine','taskInfos':taskInfos}
'''
msg = create_status_msg(msg_dict_off,taskInfos,sts='running')
outStrList['success']= '---------start offline task and back to kafka: pid:%d taskId:%s msgId:%s send:%s'%(gpuProcess.pid,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when start offline task and back to kafka'
outStrList['Refailure']='##############kafka ERROR when start offline task and resend back to kafka'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
'''
if get_whether_gpuProcess():
time0_sleep,printFlag = check_time_interval(time0_sleep,time_interval)
if printFlag:
timestr2=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
outstr= timestr2 + '*'*20 +'sleep '+'*'*20;
outstr=wrtiteLog(fp_log,outstr);print( outstr);
outstr_task=wrtiteLog(fp_log,outstr_task);print( outstr_task);
time.sleep(timeSleep)
print('########Program End#####')

if __name__ == '__main__':
par={};
###topic0 -- online tasks, topic1 -- offline tasks

#par['server']='212.129.223.66:9092';par['topic']=('thsw','thsw2','testReturn');par['group_id']='test';
#101.132.127.1:19092
'''
par['server']='101.132.127.1:19092 ';par['topic']=('alg-online-tasks','alg-offline-tasks','alg-task-results');par['group_id']='test';
par['kafka']='mintors/kafka'
par['modelJson']='conf/model.json'
'''
masterFile="conf/master.json"
assert os.path.exists(masterFile)
with open(masterFile,'r') as fp:
data=json.load(fp)
par=data['par']
print(par)
detector(par)




+ 571
- 0
code_bak/master_0509.py View File

@@ -0,0 +1,571 @@
import numpy as np
import time,ast,copy
from flask import request, Flask,jsonify
import base64,cv2,os,sys,json
sys.path.extend(['../yolov5'])
#from Send_tranfer import b64encode_function,JsonSend,name_dic,nameID_dic,getLogFileFp
from segutils.segmodel import SegModel,get_largest_contours
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.torch_utils import select_device, load_classifier, time_synchronized
from queRiver import get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str
import subprocess as sp
import matplotlib.pyplot as plt
import torch,random,string
import multiprocessing
from multiprocessing import Process,Queue
import traceback
from kafka import KafkaProducer, KafkaConsumer,TopicPartition
from kafka.errors import kafka_errors

#torch.multiprocessing.set_start_method('spawn')
import utilsK
from utilsK.GPUtils import *
from utilsK.masterUtils import *
from utilsK.sendUtils import create_status_msg,update_json

#from utilsK.modelEval import onlineModelProcess
import random,string
from Send_tranfer_oss import msg_dict_on,msg_dict_off
import pykafka
from pykafka import KafkaClient
process_id=0

def onlineModelProcess(parIn ):
DEBUG=False
streamName = parIn['streamName']
childCallback=parIn['callback']
outStrList={}
#try:
for wan in ['test']:
jsonfile=parIn['modelJson']
with open(jsonfile,'r') as fp:
parAll = json.load(fp)
Detweights=parAll['gpu_process']['det_weights']
seg_nclass = parAll['gpu_process']['seg_nclass']
Segweights = parAll['gpu_process']['seg_weights']
videoSave = parAll['AI_video_save']
imageTxtFile = parAll['imageTxtFile']
taskId,msgId = streamName.split('-')[1:3]
inSource,outSource=parIn['inSource'],parIn['outSource']
##set up the log file
if outSource != 'NO':
logdir = parAll['logChildProcessOnline']
waitingTime=parAll['StreamWaitingTime']
else:
logdir = parAll['logChildProcessOffline']
waitingTime=5
fp_log=create_logFile(logdir=logdir)
kafka_par=parIn['kafka_par']
producer = KafkaProducer(bootstrap_servers=kafka_par['server'],value_serializer=lambda v: v.encode('utf-8'),metadata_max_age_ms=120000)

####the video/stream must be validated first
###at startup, if an online task has no stream yet, send the heartbeat message msg_h
msg_h= copy.deepcopy(msg_dict_off);
msg_h['status']='waiting';msg_h['msg_id']=msgId

if outSource == 'NO':
msg_h['type']=1
Stream_ok= get_fps_rtmp(inSource,video=True)
else:
msg_h['type']=2
msg_h_d = json.dumps(msg_h, ensure_ascii=False)
outStrList['success']= '%s waiting stream or video, send heartbeat: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg_h);
outStrList['failure']='#######kafka ERROR waiting stream or video, send heartbeat'
outStrList['Refailure']='##############kafka ERROR waiting stream or video, Re-send heartbeat'
Stream_ok=check_stream(inSource,producer,kafka_par,msg_h_d,outStrList,fp_log ,timeMs=waitingTime)
if Stream_ok:###send the start signal
msg_h['status']='running'
msg_h_d = json.dumps(msg_h, ensure_ascii=False)
outStrList['success']= '%s informing stream/video is ok, taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg_h);
outStrList['failure']='#######kafka ERROR ,when informing stream/video is ok'
outStrList['Refailure']='##############kafka ERROR, when re-informing stream/video is ok'
send_kafka(producer,kafka_par,msg_h_d,outStrList,fp_log );
else:
####check whether the offline video is valid; report an error if it is not
outstr='############# offline video or live stream Error:%s #################'%(inSource)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
msg_h['error']=str(1001);msg_h['status']='failed';
msg_h_d = json.dumps(msg_h, ensure_ascii=False);
outStrList['success']= '%s informing invalid video or stream success : taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg_h);
outStrList['failure']='#######kafka ERROR, when informing invalid video or stream'
outStrList['Refailure']='##############kafka ERROR,when re-informing invalid video or stream'
send_kafka(producer,kafka_par,msg_h_d,outStrList,fp_log );
childCallback.send(' offline video or live stream Error')
continue
if (inSource.endswith('.MP4')) or (inSource.endswith('.mp4')):
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=True)[0:4]
else:
fps,outW,outH,totalcnt=get_fps_rtmp(inSource,video=False)[0:4]
fps = int(fps+0.5)
if outSource != 'NO':
command=['ffmpeg','-y','-f', 'rawvideo','-vcodec','rawvideo','-pix_fmt', 'bgr24',
'-s', "{}x{}".format(outW,outH),# 图片分辨率
'-r', str(fps),# 视频帧率
'-i', '-','-c:v', 'libx264','-pix_fmt', 'yuv420p',
'-f', 'flv',outSource
]
video_flag = videoSave['onLine']
logdir = parAll['logChildProcessOnline']
waitingTime=parAll['StreamWaitingTime']
else:
video_flag = videoSave['offLine'] ;logdir = parAll['logChildProcessOffline']
waitingTime=5
fp_log=create_logFile(logdir=logdir)

device = select_device(parIn['device'])
half = device.type != 'cpu' # half precision only supported on CUDA
model = attempt_load(Detweights, map_location=device) # load FP32 model
if half: model.half()

segmodel = SegModel(nclass=seg_nclass,weights=Segweights,device=device)

# pipe setup: raw frames are fed to ffmpeg through its stdin
if outSource !='NO' :
ppipe = sp.Popen(command, stdin=sp.PIPE)
##post-processing parameters
par=parAll['post_process']
conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
outImaDir = par['outImaDir']
outVideoDir = par['outVideoDir']
labelnames=par['labelnames']
rainbows=par['rainbows']
fpsample = par['fpsample']
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
dataset = LoadStreams(inSource, img_size=640, stride=32)
childCallback.send('####model load success####')
if (outVideoDir!='NO') and video_flag:
msg_id = streamName.split('-')[2]
save_path = os.path.join(outVideoDir,msg_id+'.MP4')
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (outW,outH))
iframe = 0;post_results=[];time_beg=time.time()
t00=time.time()
time_kafka0=time.time()
for path, img, im0s, vid_cap in dataset:
t0= time_synchronized()
if not path:
EndUrl='%s/%s_frame-9999-9999_type-结束_9999999999999999_s-%s_AI.jpg'%(outImaDir,time_str(),streamName)
EndUrl = EndUrl.replace(' ','-').replace(':','-')
img_end=np.zeros((100,100),dtype=np.uint8);cv2.imwrite(EndUrl,img_end)
if imageTxtFile:
EndUrl_txt = EndUrl.replace('.jpg','.txt')
fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
EndUrl='%s/%s_frame-9999-9999_type-结束_9999999999999999_s-%s_OR.jpg'%(outImaDir,time_str(),streamName)
EndUrl = EndUrl.replace(' ','-').replace(':','-')
ret = cv2.imwrite(EndUrl,img_end)
if imageTxtFile:
EndUrl_txt = EndUrl.replace('.jpg','.txt')
fp_t=open(EndUrl_txt,'w');fp_t.write(EndUrl+'\n');fp_t.close()
#print(EndUrl,ret)
childCallback.send('####stream ends####')
if (outVideoDir!='NO') and video_flag:
vid_writer.release()
break###stream broken or reached the end
if outSource == 'NO':###if not pushing a stream, show a progress bar
view_bar(iframe,totalcnt,time_beg ,parIn['process_uid'] )
###both live and offline tasks send a status message once per minute
time_kafka1 = time.time()
if time_kafka1 - time_kafka0 >60:
time_kafka0 = time_kafka1
###send status message: waiting
msg = copy.deepcopy(msg_dict_off);
msg['msg_id']= msgId
if outSource == 'NO':
msg['progressbar']= '%.4f'%(iframe*1.0/totalcnt)
msg['type']=1
else:
msg['progressbarOn']= str(iframe)
msg['type']=2
msg = json.dumps(msg, ensure_ascii=False)
'''
try:
record_metadata = producer.send(kafka_par['topic'], msg).get()
outstr='%s processing send progressbar or heartBeat to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
wrtiteLog(fp_log,outstr);print( outstr);
except Exception as e:
outstr='#######kafka ERROR when processing sending progressbar or heartBeat:, error: %s'%(str(e))
wrtiteLog(fp_log,outstr);print( outstr);
try:
producer = KafkaProducer(bootstrap_servers=par['server'], value_serializer=lambda v: v.encode('utf-8')).get()
future = producer.send(par['topic'][2], msg).get()
except Exception as e:
outstr='%s re-send progressbar or heartBeat kafka,processing video or stream: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
wrtiteLog(fp_log,outstr);print( outstr);
'''
###send status message: waiting
outStrList['success']= '%s processing send progressbar or heartBeat to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
outStrList['failure']='#######kafka ERROR when processing sending progressbar or heartBeat'
outStrList['Refailure']='%s re-send progressbar or heartBeat kafka,processing video or stream: taskId:%s msgId:%s send:%s'%('-'*20,taskId, msgId,msg);
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
time0=time.time()
iframe +=1
time1=time.time()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
timeseg0 = time.time()
seg_pred,segstr = segmodel.eval(im0s[0] )
timeseg1 = time.time()
t1= time_synchronized()
pred = model(img,augment=False)[0]
time4 = time.time()
datas = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
t2= time_synchronized()
#print('###line138:',timeOut,outSource,outVideoDir)
##every fpsample frames, flush the accumulated results; save images if problems were detected
if (iframe % fpsample == 0) and (len(post_results)>0) :
parImage=save_problem_images(post_results,iframe,names,streamName=streamName,outImaDir='problems/images_tmp',imageTxtFile=imageTxtFile)
post_results=[]

if len(p_result[2] )>0: ##
post_results.append(p_result)
t3= time_synchronized()
image_array = p_result[1]
if outSource!='NO':
ppipe.stdin.write(image_array.tobytes())
if (outVideoDir!='NO') and video_flag:
ret = vid_writer.write(image_array)
t4= time_synchronized()
timestr2 = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
if iframe%100==0:
outstr='%s, read:%.1f ms, copy:%.1f ms, seg-infer:%.1f ms, det-infer:%.1f ms, draw:%.1f ms, save:%.1f ms, total:%.1f ms \n'%(timestr2,(t0 - t00)*1000,(timeseg0-t0)*1000, (t1 - timeseg0)*1000,(t2-t1)*1000, (t3 - t2)*1000,(t4-t3)*1000, (t4-t00)*1000)
wrtiteLog(fp_log,outstr);
#print(outstr)
t00 = t4;
##errors such as model-loading failures
#except Exception as e:
# print(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime()) ,'*'*20,'###line177 ERROR:',e)
# childCallback.send(e) #send the exception out through the pipe
def lauch_process(gpuid,inSource,outSource,taskId,msgId,modelJson,kafka_par):

if outSource=='NO':
streamName='off-%s-%s'%(taskId,msgId)
else:
streamName='live-%s-%s'%(taskId,msgId)
dataPar ={
'imgData':'',
'imgName':'testW',
'streamName':streamName,
'taskId':taskId,
'msgId':msgId,
'device':str(gpuid),
'modelJson':modelJson,
'kafka_par':kafka_par,
}
#dataPar['inSource'] = 'http://images.5gai.taauav.com/video/8bc32984dd893930dabb2856eb92b4d1.mp4';dataPar['outSource'] = None
dataPar['inSource'] = inSource;dataPar['outSource'] = outSource
process_uid=''.join(random.sample(string.ascii_letters + string.digits, 16));dataPar['process_uid']=process_uid
parent_conn, child_conn = multiprocessing.Pipe();dataPar['callback']=child_conn
gpuProcess=Process(target=onlineModelProcess,name='process:%s'%( process_uid ),args=(dataPar,))
gpuProcess.start()
#print(dir(gpuProcess))
#child_return = parent_conn.recv()
#timestr2=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
#print(timestr2,'-'*20,'progress:%s ,msgId:%s , taskId:%s return:'%(process_uid,msgId,taskId),child_return)
return gpuProcess

msg_dict_offline = {
"biz_id":"hehuzhang",
"mod_id":"ai",
"msg_id":'bb'+''.join(random.sample(string.ascii_letters ,30) ) ,
"offering_id":"http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4",
"offering_type":"mp4",
"results_base_dir": "XJRW202203171535"+str(random.randint(10,99)),
'outSource':'NO'
}



def detector(par):
####初始化信息列表
consumer = KafkaConsumer(bootstrap_servers=par['server'],client_id='AI_server',group_id=par['group_id'],auto_offset_reset='earliest')
consumer.subscribe( par['topic'][0:2])
'''
client = KafkaClient(hosts=par['server'])
consumer_pys=[]
for topic_name in par['topic'][0:2]:
consumer_pys.append(client.topics[ topic_name ].get_simple_consumer(consumer_group=par['group_id'],timeout=30))
'''
kafka_par ={ 'server':par['server'],'topic':par['topic'][2] }
producer = KafkaProducer(
bootstrap_servers=par['server'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'),
metadata_max_age_ms=120000)
taskStatus={}
taskStatus['onLine'] = Queue(100)
taskStatus['offLine']= Queue(100)
taskStatus['pidInfos']= {}
fp_log=create_logFile(logdir=par['logDir'])
wrtiteLog(fp_log,'###########master starts in line222######\n')
timeSleep=1
#taskStatus['pidInfos'][31897]={'gpuProcess':'onlineProcess','type':'onLine'}
time0=time.time()
time0_kafQuery=time.time()
time0_taskQuery=time.time()
time0_sleep=time.time()
time_interval=10; outStrList={}
while True:###poll once every timeSleep seconds
#for isleep in range(1):
'''
##1-read kafka and update the task queues
try:
msgs = getAllRecords(consumer,par['topic'])
except Exception as e:
outstr='%s kafka connecting error:%s '%('#'*20,e)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
time.sleep(timeSleep)
continue
#if get_whether_gpuProcess():
time0_kafQuery,printFlag = check_time_interval(time0_kafQuery,time_interval)
if printFlag:
outstr_kafka=' ##### kafka Left %d records####'%(len(msgs));
outstr_kafka=wrtiteLog(fp_log,outstr_kafka)
'''
for ii,msg in enumerate(consumer):
#for ii,msg in enumerate(msgs):
##read the message
try:
taskInfos = eval(msg.value.decode('utf-8') )
except:
outstr='%s msg format error,value:%s,offset:%d partition:%s topic:%s'%('#'*20,msg.value,msg.offset,msg.partition,msg.topic)
print(outstr)
continue
if msg.topic == par['topic'][0]: ##
taskInfos['inSource']= taskInfos['pull_channel'];
taskInfos['outSource']= get_push_address(taskInfos['push_channel']) ;
taskStatus['onLine'].put( taskInfos )
save_message(par['kafka'],taskInfos)
###send status message: waiting
msg = create_status_msg(msg_dict_on,taskInfos,sts='waiting')
outStrList['success']= '%s read from kafka online task and back to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when read from kafka online task and back to kafka'
outStrList['Refailure']='##############kafka ERROR when read from kafka online task and resend back to kafka:'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
else:
taskInfos['inSource']= taskInfos['offering_id'];
taskInfos['outSource']= 'NO'
taskStatus['offLine'].put( taskInfos )
save_message(par['kafka'],taskInfos)

###send status message: waiting
msg = create_status_msg(msg_dict_off,taskInfos,sts='waiting')


outStrList['success']= '%s read from kafka offline task and back to kafka: taskId:%s msgId:%s send:%s'%('-'*20,taskInfos['results_base_dir'], taskInfos['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when read from kafka offline task and back to kafka'
outStrList['Refailure']='##############kafka ERROR when read from kafka offline task and resend back to kafka:'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );

time0_taskQuery,printFlag = check_time_interval(time0_taskQuery,time_interval)
outstr_task= ' task queue onLine cnt:%d offLine:%d'%(taskStatus['onLine'].qsize(), taskStatus['offLine'].qsize())
##2-refresh GPU status
gpuStatus = getGPUInfos()
##3-online tasks take priority
if not taskStatus['onLine'].empty():
###3.1-first check whether any GPU is free:
cuda = get_available_gpu(gpuStatus)
###fetch the online task info and launch its process
taskInfos = taskStatus['onLine'].get()
print('################396',cuda)
if cuda: ###3.1.1 - a GPU is free
#lauch process
msg= copy.deepcopy(msg_dict_on);

gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['msg_id'],par['modelJson'],kafka_par)
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}
else:###3.1.2-no free GPU
##check whether some GPU is occupied only by offline processes
cuda_pid = get_potential_gpu(gpuStatus,taskStatus['pidInfos'])
if cuda_pid:#3.1.2.1 - ##there are offline processes that can be killed
cuda = cuda_pid['cuda']
pids = cuda_pid['pids']
##kill the offline processes and update the offline task queue
cnt_off_0 = taskStatus['offLine'].qsize()
for pid in pids:
##kill the offline process
taskStatus['pidInfos'][pid]['gpuProcess'].kill()
##put the task back on the offline queue
taskStatus['offLine'].put( taskStatus['pidInfos'][pid]['taskInfos'] )
taskInfos_off=taskStatus['pidInfos'][pid]['taskInfos']
##send offline-task status update: back to waiting
msg= copy.deepcopy(msg_dict_off);  # deepcopy so the shared template dict is not mutated
msg=update_json(taskInfos_off,msg,offkeys=["msg_id","biz_id" ,"mod_id"] )
msg['results'][0]['original_url']=taskInfos_off['inSource']
msg['results'][0]['sign_url']=get_boradcast_address(taskInfos_off['outSource'])
msg['status']='waiting'
msg = json.dumps(msg, ensure_ascii=False)

outStrList['success']= '%s start online task after kill offline tasks and back to kafka: pid:%d taskId:%s msgId:%s send:%s'%('-'*20,pid,taskInfos_off['results_base_dir'], taskInfos_off['msg_id'],msg)
outStrList['failure']='#######kafka ERROR when start online task after kill offline tasks and back to kafka'
outStrList['Refailure']='##############kafka ERROR when start online task after kill offline tasks and back to kafka'
send_kafka(producer,kafka_par,msg,outStrList,fp_log );
cnt_off_1 = taskStatus['offLine'].qsize()
outstr='%s before killing process, offtask cnt:%d ,after killing, offtask cnt:%d %s'%('-'*20 ,cnt_off_0,cnt_off_1,'*'*20)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['msg_id'],par['modelJson'],kafka_par)
###update pidInfos
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'onLine','taskInfos':taskInfos}

else:
outstr='######No available GPUs for onLine####'
outstr=wrtiteLog(fp_log,outstr);print( outstr);
##4-refresh GPU status
gpuStatus = getGPUInfos()
##5-then handle offline tasks
if not taskStatus['offLine'].empty():
cudaArrange= arrange_offlineProcess(gpuStatus,taskStatus['pidInfos'],modelMemory=1500)
outstr='###line342 IN OFF LINE TASKS available cudas:%s'%(cudaArrange)
outstr=wrtiteLog(fp_log,outstr);print( outstr);
for cuda in cudaArrange:
if not taskStatus['offLine'].empty():
taskInfos = taskStatus['offLine'].get()
gpuProcess=lauch_process(cuda,taskInfos['inSource'],taskInfos['outSource'],taskInfos['results_base_dir'],taskInfos['msg_id'],par['modelJson'],kafka_par)
taskStatus['pidInfos'][gpuProcess.pid] = {'gpuProcess':gpuProcess,'type':'offLine','taskInfos':taskInfos}
if get_whether_gpuProcess():
time0_sleep,printFlag = check_time_interval(time0_sleep,time_interval)
if printFlag:
timestr2=time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime())
outstr= timestr2 + '*'*20 +'sleep '+'*'*20;
outstr=wrtiteLog(fp_log,outstr);print( outstr);
outstr_task=wrtiteLog(fp_log,outstr_task);print( outstr_task);
time.sleep(timeSleep)
print('########sleep 1s #####')
print('########Program End#####')

if __name__ == '__main__':
par={};
###topic0 -- online tasks, topic1 -- offline tasks

#par['server']='212.129.223.66:9092';par['topic']=('thsw','thsw2','testReturn');par['group_id']='test';
#101.132.127.1:19092
'''
par['server']='101.132.127.1:19092 ';par['topic']=('alg-online-tasks','alg-offline-tasks','alg-task-results');par['group_id']='test';
par['kafka']='mintors/kafka'
par['modelJson']='conf/model.json'
'''
masterFile="conf/master.json"
assert os.path.exists(masterFile)
with open(masterFile,'r') as fp:
data=json.load(fp)
par=data['par']
print(par)
detector(par)




+ 1
- 0
code_bak/source.txt View File

@@ -0,0 +1 @@
rtmp://demoplay.yunhengzhizao.cn/live/THSA_HD5M 1935

+ 76
- 0
code_bak/source_query.py View File

@@ -0,0 +1,76 @@

import cv2
import base64
import io,os,sys
sys.path.append('/home/thsw2/WJ/src/yolov5/utils')
from get_offline_url import update_websource_offAndLive
import requests
import time,json
import string,random
def SendPost(txtSource):

api = 'http://212.129.223.66:8888/detector'
filename='THWangJin'
for info in txtSource:
input_ ={
'imgData':'',
'imgName':filename
}
if 'live' in info['name']:
input_['outSource'] = "rtmp://127.0.0.1:%s/live/test"%(info['port'])
else:
input_['outSource'] = None
input_['inSource'] = info['url']
input_['streamName'] = info['name']

time0=time.time()
response=requests.post(api,json=input_).json()
time1=time.time()
print('###source_query:',response,'time:%.3f s'%(time1-time0))
if 'live' in info['name']:
print('#####process online video stream######')
break
def getLogFp(pollLogFile):
#print(pollLogFile)
#print(os.path.pardir(pollLogFile))
if not os.path.exists(pollLogFile):
os.makedirs(os.path.dirname(pollLogFile),exist_ok=True)
fp_log = open(pollLogFile,'w')
else:
fp_log = open(pollLogFile,'a+')
return fp_log

if __name__=='__main__':

##set up the log file
pollLogFile='logs/poll/poll.txt'
fp_log = getLogFp(pollLogFile)

platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo'
sourceFile='/home/thsw2/WJ/src/yolov5/config/source.txt'
offlineFile='/home/thsw2/WJ/src/yolov5/mintors/offlines/doneCodes.txt'
txtSource=update_websource_offAndLive(platform_query_url,sourceFile,offlineFile)
txtSource=[
#{'url': 'rtmp://demoplay.yunhengzhizao.cn/live/THSA_HD5M', 'port': '1935', 'name': 'live-THSAHD5M'}]
{'url': 'http://images.5gai.taauav.com/video/DJI_20220111115516_0001_ZBJ.MP4', 'port': '1935', 'name': 'off-202202190001'},
{'url': 'http://images.5gai.taauav.com/video/8bc32984dd893930dabb2856eb92b4d1.mp4', 'port': '1935', 'name': 'off-202202190002'},
{'url': 'http://images.5gai.taauav.com/video/DJI_20220110145546_0003_W.MP4', 'port': '1935', 'name': 'off-202202190002'}
]
print(txtSource)
SendPost(txtSource)

+30 -0 code_bak/test_multiprocess.py

@@ -0,0 +1,30 @@
import multiprocessing

def heavy_load_func(N, child_conn):
'''function doing heavy computing'''
try:

#do_some_heavy_computing
return_value=99
fp=open('wangjin.txt','r') #deliberately opens a missing file so the except branch runs
fp.close()
child_conn.send(return_value) #return something
except Exception as e:
child_conn.send(e) #send the exception back through the pipe

if __name__=='__main__':
'''main function'''
try:
parent_conn, child_conn = multiprocessing.Pipe()
child_process = multiprocessing.Process(target=heavy_load_func, args=(10, child_conn))
child_process.start()
#child_process.join()
child_return = parent_conn.recv()

print('#####main try:',child_return,type(child_return),dir(child_return),child_return.args)
print(str( child_return ))
except Exception as e:
print('####main exception:',e)
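The script above demonstrates shipping a child-process exception back through a Pipe. A small extension (hypothetical helper, not part of this repo) re-raises it in the parent so failures are not merely printed:

import multiprocessing

def run_checked(target, *args):
    # run target(*args, child_conn) in a child process and re-raise its exception here
    parent_conn, child_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=target, args=(*args, child_conn))
    proc.start()
    result = parent_conn.recv()  # blocks until the child sends a value or an exception
    proc.join()
    if isinstance(result, Exception):
        raise result             # surface the child's failure in the parent
    return result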

+16 -0 conf/bak/model_5class.json

@@ -0,0 +1,16 @@
{

"gpu_process":{"det_weights":"../yolov5/weights/best_5classes.pt","seg_nclass":2,"seg_weights": "../yolov5/weights/segmentation/BiSeNet/checkpoint.pth" },

"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"labelnames":"../yolov5/config/labelnames.json","fpsample":240,"debug":false , "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]],"outImaDir":"problems/images_tmp","outVideoDir":"problems/videos_save" },

"push_process":{ "OutVideoW":1920, "OutVideoH":1080 },
"AI_video_save": {"onLine":false,"offLine":true },
"imageTxtFile":true,
"logChildProcessOffline":"logs/logChildProcess/offline",
"logChildProcessOnline":"logs/logChildProcess/online",
"StreamWaitingTime":240,
"StreamRecoveringTime":180


}

+16 -0 conf/bak/model_9class.json

@@ -0,0 +1,16 @@
{

"gpu_process":{"det_weights":"../weights/yolov5/class9/weights/best.pt","seg_nclass":2,"seg_weights": "../yolov5/weights/segmentation/BiSeNet/checkpoint.pth" },

"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"labelnames":"../weights/yolov5/class9/labelnames.json","fpsample":240,"debug":false , "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]],"outImaDir":"problems/images_tmp","outVideoDir":"problems/videos_save" },

"push_process":{ "OutVideoW":1920, "OutVideoH":1080 },
"AI_video_save": {"onLine":false,"offLine":true },
"imageTxtFile":true,
"logChildProcessOffline":"logs/logChildProcess/offline",
"logChildProcessOnline":"logs/logChildProcess/online",
"StreamWaitingTime":240,
"StreamRecoveringTime":180


}

+6 -0 conf/errorDic.json

@@ -0,0 +1,6 @@
{
"101":"video uploading failure",
"102":"Stream or video ERROR"
}
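For reference, a minimal sketch of resolving one of these codes at runtime:

import json

with open('conf/errorDic.json') as fp:
    errorDic = json.load(fp)
print(errorDic.get('102', 'unknown error code'))  # -> Stream or video ERROR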

+14 -0 conf/master.json

@@ -0,0 +1,14 @@
{
"par":{
"server1":"212.129.223.66:19092",
"server2":"101.132.127.1:19092",
"server":"192.168.11.242:9092",
"topic": ["dsp-alg-online-tasks","dsp-alg-offline-tasks","dsp-alg-task-results"],
"group_id":"testWw",
"kafka":"mintors/kafka",
"modelJson":"conf/model.json",
"logDir":"logs/master",
"StreamWaitingTime":240,
"logPrintInterval":60
}
}

+17 -0 conf/model.json

@@ -0,0 +1,17 @@
{

"gpu_process":{"det_weights":"weights/yolov5/class5/best_5classes.pt","seg_nclass":2,"seg_weights": "weights/BiSeNet/checkpoint.pth" },

"post_process":{ "name":"post_process","conf_thres":0.25,"iou_thres":0.45,"classes":5,"labelnames":"weights/yolov5/class5/labelnames.json","fpsample":240,"debug":false , "rainbows":[ [0,0,255],[0,255,0],[255,0,0],[255,0,255],[255,255,0],[255,129,0],[255,0,127],[127,255,0],[0,255,127],[0,127,255],[127,0,255],[255,127,255],[255,255,127],[127,255,255],[0,255,255],[255,127,255],[127,255,255], [0,127,0],[0,0,127],[0,255,255]],"outImaDir":"problems/images_tmp","outVideoDir":"problems/videos_save" },

"push_process":{ "OutVideoW":1920, "OutVideoH":1080 },
"AI_video_save": {"onLine":false,"offLine":true },
"imageTxtFile":true,
"logChildProcessOffline":"logs/logChildProcess/offline",
"logChildProcessOnline":"logs/logChildProcess/online",
"TaskStatusQueryUrl":"http://192.168.11.241:1011/api/web/serviceInst",
"StreamWaitingTime":240,
"StreamRecoveringTime":600


}

BIN conf/platech.ttf

+20 -0 conf/send_oss.json

@@ -0,0 +1,20 @@
{
"indir":"problems/images_tmp",
"outdir":"problems/images_save",
"jsonDir" : "mintors/kafka/",
"hearBeatTimeMs":30,
"logdir":"logs/send",
"videoBakDir":"problems/videos_save",
"ossPar":{"Epoint":"http://oss-cn-shanghai.aliyuncs.com",
"AId":"LTAI5tSJ62TLMUb4SZuf285A",
"ASt":"MWYynm30filZ7x0HqSHlU3pdLVNeI7",
"bucketName":"ta-tech-image"
},
"vodPar":{
"AId":"LTAI5tE7KWN9fsuGU7DyfYF4",
"ASt":"yPPCyfsqWgrTuoz5H4sisY0COclx8E"
},
"kafkaPar":{"boostServer":["192.168.11.242:9092"] ,"boostServer2":["101.132.127.1:19092"], "boostServer3":["212.129.223.66:19092"] ,"topic":"dsp-alg-task-results"},
"labelnamesFile":"weights/yolov5/class5/labelnames.json"

}
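The ossPar block is consumed by the image uploader; a minimal sketch of pushing one file with these settings, assuming the standard Aliyun oss2 SDK (object key and local path below are hypothetical):

import json
import oss2  # Aliyun OSS SDK

with open('conf/send_oss.json') as fp:
    ossPar = json.load(fp)['ossPar']
auth = oss2.Auth(ossPar['AId'], ossPar['ASt'])
bucket = oss2.Bucket(auth, ossPar['Epoint'], ossPar['bucketName'])
bucket.put_object_from_file('demo/result.jpg', 'problems/images_save/result.jpg')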

+68 -0 consumer2.py

@@ -0,0 +1,68 @@
from kafka import KafkaConsumer, TopicPartition
def get_left_cnt(consumer,topic):
partitions = [TopicPartition(topic, p) for p in consumer.partitions_for_topic(topic)]

# total
toff = consumer.end_offsets(partitions)
toff = [(key.partition, toff[key]) for key in toff.keys()]
toff.sort()
# current
coff = [(x.partition, consumer.committed(x)) for x in partitions]
coff.sort()

# cal sum and left
toff_sum = sum([x[1] for x in toff])
cur_sum = sum([x[1] for x in coff if x[1] is not None])
left_sum = toff_sum - cur_sum

return left_sum
def getAllRecords(consumer,topics):
leftCnt = 0
for topic in topics[0:2]:
leftCnt+=get_left_cnt(consumer,topic)
out = []
if leftCnt == 0:
return []
for ii,msg in enumerate(consumer):
consumer.commit()
out.append(msg)
if ii== (leftCnt-1):
break ###stream ended or reached the last queued record
return out
def detector(par):
consumer = KafkaConsumer(
bootstrap_servers=par['server'],
group_id=par['group_id'],
auto_offset_reset='earliest',
#auto_offset_reset='latest',
#isolation_level = 'read_committed',
#enable_auto_commit=True
)
consumer.subscribe( par['topic'][0:2])
print( ' Start kafka ')
#msgs = getAllRecords(consumer,par['topic'])
#print( 'getover cnt',len(msgs))
#for ii,msg in enumerate(msgs):
for ii,msg in enumerate(consumer):
print(msg)
try:
print('##'*10,ii)
taskInfos = eval(msg.value.decode('utf-8'))
print(taskInfos )
except:
print('**'*10,'wrong',ii)
print(msg.value.decode('utf-8'))
if __name__ == '__main__':
par={};
###topic0 -- online, topic1 -- offline

#par['server']='212.129.223.66:9092';par['topic']=('thsw','thsw2','testReturn');par['group_id']='test';

par['server']='192.168.11.242:9092';par['topic']=('alg-online-tasks', 'alg-task-results','alg-offline-tasks');par['group_id']='testww2';
#par['server']='101.132.127.1:19092';par['topic']=('alg-online-tasks','alg-task-results','alg-offline-tasks');par['group_id']='testW11';
par['kafka']='mintors/kafka'
detector(par)
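Parsing bus messages with eval() executes arbitrary expressions off the wire; if the producers emit JSON, a safer drop-in sketch (assumption: payloads really are JSON) is:

import json

def parse_task(raw):
    # safer replacement for eval(): accepts only JSON payloads
    try:
        return json.loads(raw.decode('utf-8'))
    except (UnicodeDecodeError, json.JSONDecodeError):
        return None  # caller logs and skips malformed messages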

+10 -0 create.sh

@@ -0,0 +1,10 @@
mkdir -p logs/send
mkdir -p logs/logChildProcess/offline logs/logChildProcess/online logs/master
touch logs/logChildProcess/offline/gpuprocess.log
touch logs/logChildProcess/online/gpuprocess.log
touch logs/master/detector.log
touch logs/send/SendPics.log
mkdir -p mintors/kafka
mkdir -p problems/images_save problems/images_tmp problems/videos_save

+0 -0 debut.txt

+2 -0 detect.sh

@@ -0,0 +1,2 @@
python master_temp.py&
python Send_tranfer_oss.py&

+3019 -0 logs/logChildProcess/offline/gpuprocess.log (file diff suppressed because it is too large)

+15798 -0 logs/logChildProcess/online/gpuprocess.log (file diff suppressed because it is too large)

+6480 -0 logs/master/detector.log (file diff suppressed because it is too large)

+11133 -0 logs/send/SendPics.log (file diff suppressed because it is too large)

+2667 -0 master.log (file diff suppressed because it is too large)


+0 -0 models/__init__.py

BIN models/__pycache__/__init__.cpython-37.pyc

BIN models/__pycache__/__init__.cpython-38.pyc

BIN models/__pycache__/common.cpython-37.pyc

BIN models/__pycache__/common.cpython-38.pyc

BIN models/__pycache__/experimental.cpython-37.pyc

BIN models/__pycache__/experimental.cpython-38.pyc

BIN models/__pycache__/yolo.cpython-38.pyc

+405 -0 models/common.py

@@ -0,0 +1,405 @@
# YOLOv5 common modules

import math
from copy import copy
from pathlib import Path

import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp

from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh
from utils.plots import color_list, plot_one_box
from utils.torch_utils import time_synchronized

import warnings

class SPPF(nn.Module):
# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
super().__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * 4, c2, 1, 1)
self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)

def forward(self, x):
x = self.cv1(x)
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
y1 = self.m(x)
y2 = self.m(y1)
return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))


def autopad(k, p=None): # kernel, padding
# Pad to 'same'
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p


def DWConv(c1, c2, k=1, s=1, act=True):
# Depthwise convolution
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)


class Conv(nn.Module):
# Standard convolution
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Conv, self).__init__()
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
self.bn = nn.BatchNorm2d(c2)
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())

def forward(self, x):
return self.act(self.bn(self.conv(x)))

def fuseforward(self, x):
return self.act(self.conv(x))


class TransformerLayer(nn.Module):
# Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
def __init__(self, c, num_heads):
super().__init__()
self.q = nn.Linear(c, c, bias=False)
self.k = nn.Linear(c, c, bias=False)
self.v = nn.Linear(c, c, bias=False)
self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
self.fc1 = nn.Linear(c, c, bias=False)
self.fc2 = nn.Linear(c, c, bias=False)

def forward(self, x):
x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
x = self.fc2(self.fc1(x)) + x
return x


class TransformerBlock(nn.Module):
# Vision Transformer https://arxiv.org/abs/2010.11929
def __init__(self, c1, c2, num_heads, num_layers):
super().__init__()
self.conv = None
if c1 != c2:
self.conv = Conv(c1, c2)
self.linear = nn.Linear(c2, c2) # learnable position embedding
self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
self.c2 = c2

def forward(self, x):
if self.conv is not None:
x = self.conv(x)
b, _, w, h = x.shape
p = x.flatten(2)
p = p.unsqueeze(0)
p = p.transpose(0, 3)
p = p.squeeze(3)
e = self.linear(p)
x = p + e

x = self.tr(x)
x = x.unsqueeze(3)
x = x.transpose(0, 3)
x = x.reshape(b, self.c2, w, h)
return x


class Bottleneck(nn.Module):
# Standard bottleneck
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
super(Bottleneck, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_, c2, 3, 1, g=g)
self.add = shortcut and c1 == c2

def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class BottleneckCSP(nn.Module):
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(BottleneckCSP, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
self.cv4 = Conv(2 * c_, c2, 1, 1)
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
self.act = nn.LeakyReLU(0.1, inplace=True)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])

def forward(self, x):
y1 = self.cv3(self.m(self.cv1(x)))
y2 = self.cv2(x)
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))


class C3(nn.Module):
# CSP Bottleneck with 3 convolutions
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
super(C3, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c1, c_, 1, 1)
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
# self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])

def forward(self, x):
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))


class C3TR(C3):
# C3 module with TransformerBlock()
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
super().__init__(c1, c2, n, shortcut, g, e)
c_ = int(c2 * e)
self.m = TransformerBlock(c_, c_, 4, n)


class SPP(nn.Module):
# Spatial pyramid pooling layer used in YOLOv3-SPP
def __init__(self, c1, c2, k=(5, 9, 13)):
super(SPP, self).__init__()
c_ = c1 // 2 # hidden channels
self.cv1 = Conv(c1, c_, 1, 1)
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

def forward(self, x):
x = self.cv1(x)
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super(Focus, self).__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
# self.contract = Contract(gain=2)

def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
# return self.conv(self.contract(x))


class Contract(nn.Module):
# Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
def __init__(self, gain=2):
super().__init__()
self.gain = gain

def forward(self, x):
N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain'
s = self.gain
x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40)


class Expand(nn.Module):
# Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
def __init__(self, gain=2):
super().__init__()
self.gain = gain

def forward(self, x):
N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain'
s = self.gain
x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160)


class Concat(nn.Module):
# Concatenate a list of tensors along dimension
def __init__(self, dimension=1):
super(Concat, self).__init__()
self.d = dimension

def forward(self, x):
return torch.cat(x, self.d)


class NMS(nn.Module):
# Non-Maximum Suppression (NMS) module
conf = 0.25 # confidence threshold
iou = 0.45 # IoU threshold
classes = None # (optional list) filter by class

def __init__(self):
super(NMS, self).__init__()

def forward(self, x):
return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)


class autoShape(nn.Module):
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
conf = 0.25 # NMS confidence threshold
iou = 0.45 # NMS IoU threshold
classes = None # (optional list) filter by class

def __init__(self, model):
super(autoShape, self).__init__()
self.model = model.eval()

def autoshape(self):
print('autoShape already enabled, skipping... ') # model already converted to model.autoshape()
return self

@torch.no_grad()
def forward(self, imgs, size=640, augment=False, profile=False):
# Inference from various sources. For height=640, width=1280, RGB images example inputs are:
# filename: imgs = 'data/images/zidane.jpg'
# URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
# PIL: = Image.open('image.jpg') # HWC x(640,1280,3)
# numpy: = np.zeros((640,1280,3)) # HWC
# torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images

t = [time_synchronized()]
p = next(self.model.parameters()) # for device and type
if isinstance(imgs, torch.Tensor): # torch
with amp.autocast(enabled=p.device.type != 'cpu'):
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference

# Pre-process
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
shape0, shape1, files = [], [], [] # image and inference shapes, filenames
for i, im in enumerate(imgs):
f = f'image{i}' # filename
if isinstance(im, str): # filename or uri
im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
elif isinstance(im, Image.Image): # PIL Image
im, f = np.asarray(im), getattr(im, 'filename', f) or f
files.append(Path(f).with_suffix('.jpg').name)
if im.shape[0] < 5: # image in CHW
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
s = im.shape[:2] # HWC
shape0.append(s) # image shape
g = (size / max(s)) # gain
shape1.append([y * g for y in s])
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32
t.append(time_synchronized())

with amp.autocast(enabled=p.device.type != 'cpu'):
# Inference
y = self.model(x, augment, profile)[0] # forward
t.append(time_synchronized())

# Post-process
y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
for i in range(n):
scale_coords(shape1, y[i][:, :4], shape0[i])

t.append(time_synchronized())
return Detections(imgs, y, files, t, self.names, x.shape)


class Detections:
# detections class for YOLOv5 inference results
def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
super(Detections, self).__init__()
d = pred[0].device # device
gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
self.imgs = imgs # list of images as numpy arrays
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
self.names = names # class names
self.files = files # image filenames
self.xyxy = pred # xyxy pixels
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
self.n = len(self.pred) # number of images (batch size)
self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times else (0., 0., 0.) # timestamps (ms); guard against times=None
self.s = shape # inference BCHW shape

def display(self, pprint=False, show=False, save=False, render=False, save_dir=''):
colors = color_list()
for i, (img, pred) in enumerate(zip(self.imgs, self.pred)):
str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} '
if pred is not None:
for c in pred[:, -1].unique():
n = (pred[:, -1] == c).sum() # detections per class
str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
if show or save or render:
for *box, conf, cls in pred: # xyxy, confidence, class
label = f'{self.names[int(cls)]} {conf:.2f}'
plot_one_box(box, img, label=label, color=colors[int(cls) % 10])
img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np
if pprint:
print(str.rstrip(', '))
if show:
img.show(self.files[i]) # show
if save:
f = self.files[i]
img.save(Path(save_dir) / f) # save
print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
if render:
self.imgs[i] = np.asarray(img)

def print(self):
self.display(pprint=True) # print results
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)

def show(self):
self.display(show=True) # show results

def save(self, save_dir='runs/hub/exp'):
save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir
Path(save_dir).mkdir(parents=True, exist_ok=True)
self.display(save=True, save_dir=save_dir) # save results

def render(self):
self.display(render=True) # render results
return self.imgs

def pandas(self):
# return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
new = copy(self) # return copy
ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
return new

def tolist(self):
# return a list of Detections objects, i.e. 'for result in results.tolist():'
x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], names=self.names, shape=self.s) for i in range(self.n)]
for d in x:
for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
setattr(d, k, getattr(d, k)[0]) # pop out of list
return x

def __len__(self):
return self.n


class Classify(nn.Module):
# Classification head, i.e. x(b,c1,20,20) to x(b,c2)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups
super(Classify, self).__init__()
self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)
self.flat = nn.Flatten()

def forward(self, x):
z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list
return self.flat(self.conv(z)) # flatten to x(b,c2)
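For orientation, autoShape/Detections are the layer torch.hub users interact with; a minimal usage sketch (network access and the upstream hub entry point assumed):

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')   # returns an autoShape-wrapped model
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()                # speed and per-class counts
df = results.pandas().xyxy[0]  # detections as a DataFrame, one row per box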

+134 -0 models/experimental.py

@@ -0,0 +1,134 @@
# YOLOv5 experimental modules

import numpy as np
import torch
import torch.nn as nn

from models.common import Conv, DWConv
from utils.google_utils import attempt_download


class CrossConv(nn.Module):
# Cross Convolution Downsample
def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
# ch_in, ch_out, kernel, stride, groups, expansion, shortcut
super(CrossConv, self).__init__()
c_ = int(c2 * e) # hidden channels
self.cv1 = Conv(c1, c_, (1, k), (1, s))
self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
self.add = shortcut and c1 == c2

def forward(self, x):
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class Sum(nn.Module):
# Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
def __init__(self, n, weight=False): # n: number of inputs
super(Sum, self).__init__()
self.weight = weight # apply weights boolean
self.iter = range(n - 1) # iter object
if weight:
self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights

def forward(self, x):
y = x[0] # no weight
if self.weight:
w = torch.sigmoid(self.w) * 2
for i in self.iter:
y = y + x[i + 1] * w[i]
else:
for i in self.iter:
y = y + x[i + 1]
return y


class GhostConv(nn.Module):
# Ghost Convolution https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
super(GhostConv, self).__init__()
c_ = c2 // 2 # hidden channels
self.cv1 = Conv(c1, c_, k, s, None, g, act)
self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)

def forward(self, x):
y = self.cv1(x)
return torch.cat([y, self.cv2(y)], 1)


class GhostBottleneck(nn.Module):
# Ghost Bottleneck https://github.com/huawei-noah/ghostnet
def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
super(GhostBottleneck, self).__init__()
c_ = c2 // 2
self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()

def forward(self, x):
return self.conv(x) + self.shortcut(x)


class MixConv2d(nn.Module):
# Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
super(MixConv2d, self).__init__()
groups = len(k)
if equal_ch: # equal c_ per group
i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
else: # equal weight.numel() per group
b = [c2] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b

self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
self.bn = nn.BatchNorm2d(c2)
self.act = nn.LeakyReLU(0.1, inplace=True)

def forward(self, x):
return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))


class Ensemble(nn.ModuleList):
# Ensemble of models
def __init__(self):
super(Ensemble, self).__init__()

def forward(self, x, augment=False):
y = []
for module in self:
y.append(module(x, augment)[0])
# y = torch.stack(y).max(0)[0] # max ensemble
# y = torch.stack(y).mean(0) # mean ensemble
y = torch.cat(y, 1) # nms ensemble
return y, None # inference, train output


def attempt_load(weights, map_location=None):
# Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
model = Ensemble()
for w in weights if isinstance(weights, list) else [weights]:
attempt_download(w)
ckpt = torch.load(w, map_location=map_location) # load
model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model

# Compatibility updates
for m in model.modules():
if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
m.inplace = True # pytorch 1.7.0 compatibility
elif type(m) is Conv:
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility

if len(model) == 1:
return model[-1] # return model
else:
print('Ensemble created with %s\n' % weights)
for k in ['names', 'stride']:
setattr(model, k, getattr(model[-1], k))
return model # return ensemble

+123 -0 models/export.py

@@ -0,0 +1,123 @@
"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats

Usage:
$ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
"""

import argparse
import sys
import time

sys.path.append('./') # to run '$ python *.py' files in subdirectories

import torch
import torch.nn as nn

import models
from models.experimental import attempt_load
from utils.activations import Hardswish, SiLU
from utils.general import colorstr, check_img_size, check_requirements, set_logging
from utils.torch_utils import select_device

if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width
parser.add_argument('--batch-size', type=int, default=1, help='batch size')
parser.add_argument('--grid', action='store_true', help='export Detect() layer grid')
parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only
parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only
opt = parser.parse_args()
opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand
print(opt)
set_logging()
t = time.time()

# Load PyTorch model
device = select_device(opt.device)
model = attempt_load(opt.weights, map_location=device) # load FP32 model
labels = model.names

# Checks
gs = int(max(model.stride)) # grid size (max stride)
opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples

# Input
img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection

# Update model
for k, m in model.named_modules():
m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
if isinstance(m, models.common.Conv): # assign export-friendly activations
if isinstance(m.act, nn.Hardswish):
m.act = Hardswish()
elif isinstance(m.act, nn.SiLU):
m.act = SiLU()
# elif isinstance(m, models.yolo.Detect):
# m.forward = m.forward_export # assign forward (optional)
model.model[-1].export = not opt.grid # set Detect() layer grid export
y = model(img) # dry run

# TorchScript export -----------------------------------------------------------------------------------------------
prefix = colorstr('TorchScript:')
try:
print(f'\n{prefix} starting export with torch {torch.__version__}...')
f = opt.weights.replace('.pt', '.torchscript.pt') # filename
ts = torch.jit.trace(model, img, strict=False)
ts.save(f)
print(f'{prefix} export success, saved as {f}')
except Exception as e:
print(f'{prefix} export failure: {e}')

# ONNX export ------------------------------------------------------------------------------------------------------
prefix = colorstr('ONNX:')
try:
import onnx

print(f'{prefix} starting export with onnx {onnx.__version__}...')
f = opt.weights.replace('.pt', '.onnx') # filename
torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
output_names=['classes', 'boxes'] if y is None else ['output'],
dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640)
'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)

# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# print(onnx.helper.printable_graph(model_onnx.graph)) # print

# Simplify
if opt.simplify:
try:
check_requirements(['onnx-simplifier'])
import onnxsim

print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
model_onnx, check = onnxsim.simplify(model_onnx,
dynamic_input_shape=opt.dynamic,
input_shapes={'images': list(img.shape)} if opt.dynamic else None)
assert check, 'assert check failed'
onnx.save(model_onnx, f)
except Exception as e:
print(f'{prefix} simplifier failure: {e}')
print(f'{prefix} export success, saved as {f}')
except Exception as e:
print(f'{prefix} export failure: {e}')

# CoreML export ----------------------------------------------------------------------------------------------------
prefix = colorstr('CoreML:')
try:
import coremltools as ct

print(f'{prefix} starting export with coremltools {ct.__version__}...')
# convert model from torchscript and apply pixel scaling as per detect.py
model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
f = opt.weights.replace('.pt', '.mlmodel') # filename
model.save(f)
print(f'{prefix} export success, saved as {f}')
except Exception as e:
print(f'{prefix} export failure: {e}')

# Finish
print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')

+58 -0 models/hub/anchors.yaml

@@ -0,0 +1,58 @@
# Default YOLOv5 anchors for COCO data


# P5 -------------------------------------------------------------------------------------------------------------------
# P5-640:
anchors_p5_640:
- [ 10,13, 16,30, 33,23 ] # P3/8
- [ 30,61, 62,45, 59,119 ] # P4/16
- [ 116,90, 156,198, 373,326 ] # P5/32


# P6 -------------------------------------------------------------------------------------------------------------------
# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
anchors_p6_640:
- [ 9,11, 21,19, 17,41 ] # P3/8
- [ 43,32, 39,70, 86,64 ] # P4/16
- [ 65,131, 134,130, 120,265 ] # P5/32
- [ 282,180, 247,354, 512,387 ] # P6/64

# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
anchors_p6_1280:
- [ 19,27, 44,40, 38,94 ] # P3/8
- [ 96,68, 86,152, 180,137 ] # P4/16
- [ 140,301, 303,264, 238,542 ] # P5/32
- [ 436,615, 739,380, 925,792 ] # P6/64

# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
anchors_p6_1920:
- [ 28,41, 67,59, 57,141 ] # P3/8
- [ 144,103, 129,227, 270,205 ] # P4/16
- [ 209,452, 455,396, 358,812 ] # P5/32
- [ 653,922, 1109,570, 1387,1187 ] # P6/64


# P7 -------------------------------------------------------------------------------------------------------------------
# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
anchors_p7_640:
- [ 11,11, 13,30, 29,20 ] # P3/8
- [ 30,46, 61,38, 39,92 ] # P4/16
- [ 78,80, 146,66, 79,163 ] # P5/32
- [ 149,150, 321,143, 157,303 ] # P6/64
- [ 257,402, 359,290, 524,372 ] # P7/128

# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
anchors_p7_1280:
- [ 19,22, 54,36, 32,77 ] # P3/8
- [ 70,83, 138,71, 75,173 ] # P4/16
- [ 165,159, 148,334, 375,151 ] # P5/32
- [ 334,317, 251,626, 499,474 ] # P6/64
- [ 750,326, 534,814, 1079,818 ] # P7/128

# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
anchors_p7_1920:
- [ 29,34, 81,55, 47,115 ] # P3/8
- [ 105,124, 207,107, 113,259 ] # P4/16
- [ 247,238, 222,500, 563,227 ] # P5/32
- [ 501,476, 376,939, 749,711 ] # P6/64
- [ 1126,489, 801,1222, 1618,1227 ] # P7/128
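The BPR figures quoted in the comments above measure how well a candidate anchor set covers the label boxes. A short sketch mirroring the ratio metric in utils/autoanchor.py (the function name here is illustrative):

import torch

def best_possible_recall(anchors, wh, thr=4.0):
    # anchors: (n,2) anchor sizes; wh: (m,2) label sizes at the training img_size
    r = wh[:, None] / anchors[None]          # per-pair width/height ratios
    x = torch.min(r, 1. / r).min(2)[0]       # worst-axis match for each pair
    best = x.max(1)[0]                       # best anchor for each label
    return (best > 1. / thr).float().mean()  # fraction of labels some anchor fits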

+51 -0 models/hub/yolov3-spp.yaml

@@ -0,0 +1,51 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# darknet53 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Conv, [32, 3, 1]], # 0
[-1, 1, Conv, [64, 3, 2]], # 1-P1/2
[-1, 1, Bottleneck, [64]],
[-1, 1, Conv, [128, 3, 2]], # 3-P2/4
[-1, 2, Bottleneck, [128]],
[-1, 1, Conv, [256, 3, 2]], # 5-P3/8
[-1, 8, Bottleneck, [256]],
[-1, 1, Conv, [512, 3, 2]], # 7-P4/16
[-1, 8, Bottleneck, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
[-1, 4, Bottleneck, [1024]], # 10
]

# YOLOv3-SPP head
head:
[[-1, 1, Bottleneck, [1024, False]],
[-1, 1, SPP, [512, [5, 9, 13]]],
[-1, 1, Conv, [1024, 3, 1]],
[-1, 1, Conv, [512, 1, 1]],
[-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)

[-2, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 8], 1, Concat, [1]], # cat backbone P4
[-1, 1, Bottleneck, [512, False]],
[-1, 1, Bottleneck, [512, False]],
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)

[-2, 1, Conv, [128, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P3
[-1, 1, Bottleneck, [256, False]],
[-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)

[[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+41 -0 models/hub/yolov3-tiny.yaml

@@ -0,0 +1,41 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [10,14, 23,27, 37,58] # P4/16
- [81,82, 135,169, 344,319] # P5/32

# YOLOv3-tiny backbone
backbone:
# [from, number, module, args]
[[-1, 1, Conv, [16, 3, 1]], # 0
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
[-1, 1, Conv, [32, 3, 1]],
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
[-1, 1, Conv, [64, 3, 1]],
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
[-1, 1, Conv, [128, 3, 1]],
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
[-1, 1, Conv, [256, 3, 1]],
[-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
[-1, 1, Conv, [512, 3, 1]],
[-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
[-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
]

# YOLOv3-tiny head
head:
[[-1, 1, Conv, [1024, 3, 1]],
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)

[-2, 1, Conv, [128, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 8], 1, Concat, [1]], # cat backbone P4
[-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)

[[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
]

+51 -0 models/hub/yolov3.yaml

@@ -0,0 +1,51 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# darknet53 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Conv, [32, 3, 1]], # 0
[-1, 1, Conv, [64, 3, 2]], # 1-P1/2
[-1, 1, Bottleneck, [64]],
[-1, 1, Conv, [128, 3, 2]], # 3-P2/4
[-1, 2, Bottleneck, [128]],
[-1, 1, Conv, [256, 3, 2]], # 5-P3/8
[-1, 8, Bottleneck, [256]],
[-1, 1, Conv, [512, 3, 2]], # 7-P4/16
[-1, 8, Bottleneck, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
[-1, 4, Bottleneck, [1024]], # 10
]

# YOLOv3 head
head:
[[-1, 1, Bottleneck, [1024, False]],
[-1, 1, Conv, [512, [1, 1]]],
[-1, 1, Conv, [1024, 3, 1]],
[-1, 1, Conv, [512, 1, 1]],
[-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)

[-2, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 8], 1, Concat, [1]], # cat backbone P4
[-1, 1, Bottleneck, [512, False]],
[-1, 1, Bottleneck, [512, False]],
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)

[-2, 1, Conv, [128, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P3
[-1, 1, Bottleneck, [256, False]],
[-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)

[[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+42 -0 models/hub/yolov5-fpn.yaml

@@ -0,0 +1,42 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, Bottleneck, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, BottleneckCSP, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, BottleneckCSP, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 6, BottleneckCSP, [1024]], # 9
]

# YOLOv5 FPN head
head:
[[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large)

[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 1, Conv, [512, 1, 1]],
[-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium)

[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 1, Conv, [256, 1, 1]],
[-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small)

[[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+54 -0 models/hub/yolov5-p2.yaml

@@ -0,0 +1,54 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors: 3

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
[ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 9
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 13

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)

[ -1, 1, Conv, [ 128, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2
[ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall)

[ -1, 1, Conv, [ 128, 3, 2 ] ],
[ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3
[ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large)

[ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5)
]

+56 -0 models/hub/yolov5-p6.yaml

@@ -0,0 +1,56 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors: 3

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
[ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 11
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 15

[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 19

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)

[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)

[ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
]

+67 -0 models/hub/yolov5-p7.yaml

@@ -0,0 +1,67 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors: 3

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
[ -1, 3, C3, [ 1024 ] ],
[ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128
[ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ],
[ -1, 3, C3, [ 1280, False ] ], # 13
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 1024, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6
[ -1, 3, C3, [ 1024, False ] ], # 17

[ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 21

[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 25

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large)

[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge)

[ -1, 1, Conv, [ 1024, 3, 2 ] ],
[ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7
[ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge)

[ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7)
]

+48 -0 models/hub/yolov5-panet.yaml

@@ -0,0 +1,48 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, BottleneckCSP, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, BottleneckCSP, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, BottleneckCSP, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, BottleneckCSP, [1024, False]], # 9
]

# YOLOv5 PANet head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, BottleneckCSP, [512, False]], # 13

[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small)

[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium)

[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large)

[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+60 -0 models/hub/yolov5l6.yaml

@@ -0,0 +1,60 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [ 19,27, 44,40, 38,94 ] # P3/8
- [ 96,68, 86,152, 180,137 ] # P4/16
- [ 140,301, 303,264, 238,542 ] # P5/32
- [ 436,615, 739,380, 925,792 ] # P6/64

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
[ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 11
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 15

[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 19

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)

[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)

[ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
]

+60 -0 models/hub/yolov5m6.yaml

@@ -0,0 +1,60 @@
# parameters
nc: 80 # number of classes
depth_multiple: 0.67 # model depth multiple
width_multiple: 0.75 # layer channel multiple

# anchors
anchors:
- [ 19,27, 44,40, 38,94 ] # P3/8
- [ 96,68, 86,152, 180,137 ] # P4/16
- [ 140,301, 303,264, 238,542 ] # P5/32
- [ 436,615, 739,380, 925,792 ] # P6/64

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
[ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 11
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 15

[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 19

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)

[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)

[ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
]

+48 -0 models/hub/yolov5s-transformer.yaml

@@ -0,0 +1,48 @@
# parameters
nc: 80 # number of classes
depth_multiple: 0.33 # model depth multiple
width_multiple: 0.50 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module
]

# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13

[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)

[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)

[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)

[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+60 -0 models/hub/yolov5s6.yaml

@@ -0,0 +1,60 @@
# parameters
nc: 80 # number of classes
depth_multiple: 0.33 # model depth multiple
width_multiple: 0.50 # layer channel multiple

# anchors
anchors:
- [ 19,27, 44,40, 38,94 ] # P3/8
- [ 96,68, 86,152, 180,137 ] # P4/16
- [ 140,301, 303,264, 238,542 ] # P5/32
- [ 436,615, 739,380, 925,792 ] # P6/64

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
[ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 11
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 15

[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 19

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)

[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)

[ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
]

+60 -0 models/hub/yolov5x6.yaml

@@ -0,0 +1,60 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.33 # model depth multiple
width_multiple: 1.25 # layer channel multiple

# anchors
anchors:
- [ 19,27, 44,40, 38,94 ] # P3/8
- [ 96,68, 86,152, 180,137 ] # P4/16
- [ 140,301, 303,264, 238,542 ] # P5/32
- [ 436,615, 739,380, 925,792 ] # P6/64

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
[ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
[ -1, 3, C3, [ 128 ] ],
[ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
[ -1, 9, C3, [ 256 ] ],
[ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
[ -1, 9, C3, [ 512 ] ],
[ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32
[ -1, 3, C3, [ 768 ] ],
[ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64
[ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ],
[ -1, 3, C3, [ 1024, False ] ], # 11
]

# YOLOv5 head
head:
[ [ -1, 1, Conv, [ 768, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5
[ -1, 3, C3, [ 768, False ] ], # 15

[ -1, 1, Conv, [ 512, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
[ -1, 3, C3, [ 512, False ] ], # 19

[ -1, 1, Conv, [ 256, 1, 1 ] ],
[ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
[ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
[ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small)

[ -1, 1, Conv, [ 256, 3, 2 ] ],
[ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4
[ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium)

[ -1, 1, Conv, [ 512, 3, 2 ] ],
[ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5
[ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large)

[ -1, 1, Conv, [ 768, 3, 2 ] ],
[ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6
[ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge)

[ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6)
]

+ 277
- 0
models/yolo.py View File

@@ -0,0 +1,277 @@
# YOLOv5 YOLO-specific modules

import argparse
import logging
import sys
from copy import deepcopy

sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)

from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr

try:
import thop # for FLOPS computation
except ImportError:
thop = None


class Detect(nn.Module):
stride = None # strides computed during build
export = False # onnx export

def __init__(self, nc=80, anchors=(), ch=()): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv

def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

y = x[i].sigmoid()
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
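# Worked example with illustrative numbers: at stride 8, grid cell (10, 12) and anchor (19, 27),
# raw xy sigmoids of (0.7, 0.7) decode to ((1.4 - 0.5 + 10) * 8, (1.4 - 0.5 + 12) * 8) = (87.2, 103.2) px,
# and raw wh sigmoids of (0.6, 0.6) decode to (1.2 ** 2 * 19, 1.2 ** 2 * 27) = (27.36, 38.88) px;
# the 2x/square form bounds wh to (0, 4 * anchor) and lets xy overshoot its cell by up to half a cell.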
z.append(y.view(bs, -1, self.no))

return x if self.training else (torch.cat(z, 1), x)

@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()


class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict

# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
# print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# print('Strides: %s' % m.stride.tolist())

# Init weights, biases
initialize_weights(self)
self.info()
logger.info('')

def forward(self, x, augment=False, profile=False):
if augment:
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi[..., :4] /= si # de-scale
if fi == 2:
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
elif fi == 3:
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
else:
return self.forward_once(x, profile) # single-scale inference, train

def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers

if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))

x = m(x) # run
y.append(x if m.i in self.save else None) # save output

if profile:
print('%.1fms total' % sum(dt))
return x

def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
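# e.g. at stride 8: 8 / (640 / 8) ** 2 = 0.00125 and log(0.00125) is about -6.68, an objectness prior of ~0.125%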
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

def _print_biases(self):
m = self.model[-1] # Detect() module
for mi in m.m: # from
b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)
print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights

def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
self.info()
return self

def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
print('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
print('Removing NMS... ')
self.model = self.model[:-1] # remove
return self

def autoshape(self): # add autoShape module
print('Adding autoShape... ')
m = autoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m

def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)


def parse_model(d, ch): # model_dict, input_channels(3)
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)

layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass

n = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
C3, C3TR]:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)

args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR]:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]

m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum([x.numel() for x in m_.parameters()]) # number params
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)


if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
set_logging()
device = select_device(opt.device)

# Create model
model = Model(opt.cfg).to(device)
model.train()

# Profile
# img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
# y = model(img, profile=True)

# Tensorboard
# from torch.utils.tensorboard import SummaryWriter
# tb_writer = SummaryWriter()
# print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
# tb_writer.add_graph(model.model, img) # add model to tensorboard
# tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard
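
For reference, a minimal usage sketch of the Model class above (standalone; assumes the repo root is on sys.path and runs on CPU):

import torch
from models.yolo import Model

model = Model('models/yolov5s.yaml', ch=3, nc=80)  # build from the YAML config
model.eval()                                       # in eval mode Detect returns (decoded, raw)
img = torch.zeros(1, 3, 640, 640)                  # dummy input; sides must be multiples of the max stride
with torch.no_grad():
    pred, raw = model(img)
print(pred.shape)                                  # (1, 25200, 85): boxes x (xywh, obj, 80 classes)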

+ 48
- 0
models/yolov5l.yaml View File

@@ -0,0 +1,48 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.0 # model depth multiple
width_multiple: 1.0 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
]

# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13

[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)

[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)

[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)

[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+ 48
- 0
models/yolov5m.yaml View File

@@ -0,0 +1,48 @@
# parameters
nc: 80 # number of classes
depth_multiple: 0.67 # model depth multiple
width_multiple: 0.75 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
]

# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13

[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)

[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)

[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)

[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+ 48
- 0
models/yolov5s.yaml View File

@@ -0,0 +1,48 @@
# parameters
nc: 80 # number of classes
depth_multiple: 0.33 # model depth multiple
width_multiple: 0.50 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
]

# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13

[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)

[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)

[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)

[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+ 48
- 0
models/yolov5x.yaml View File

@@ -0,0 +1,48 @@
# parameters
nc: 80 # number of classes
depth_multiple: 1.33 # model depth multiple
width_multiple: 1.25 # layer channel multiple

# anchors
anchors:
- [10,13, 16,30, 33,23] # P3/8
- [30,61, 62,45, 59,119] # P4/16
- [116,90, 156,198, 373,326] # P5/32

# YOLOv5 backbone
backbone:
# [from, number, module, args]
[[-1, 1, Focus, [64, 3]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 9, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 1, SPP, [1024, [5, 9, 13]]],
[-1, 3, C3, [1024, False]], # 9
]

# YOLOv5 head
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13

[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)

[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)

[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)

[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]

+ 303
- 0
oss.py View File

@@ -0,0 +1,303 @@
from PIL import Image
import numpy as np
import cv2
import base64
import io,os
import requests
import time,json
import string,random
import glob,string,sys
from multiprocessing import Process,Queue
import oss2
from kafka import KafkaProducer, KafkaConsumer
##for CeKanYuan
#Oct 21: inferred the platform from the image name; not a good approach.
#Oct 22: changed to polling a fixed URL that returns the platform names and addresses, once every 2 minutes.
#Mar 18: switched to Aliyun OSS buckets.
#platform_query_url='http://47.96.182.154:9051/api/suanfa/getPlatformInfo'
platform_query_url='SendLog/platformQuery.json'
api = 'http://121.40.249.52:9050/api/taskFile/submitUAVKHQuestion'
#api = 'http://47.98.157.120:9040/api/taskFile/submitUAVKHQuestion'

##This set of display names is the one used by the Unicom platform.
name_dic={
"排口":"入河、湖排口",
"疑似污口": "入河、湖排口",
"水生植被": "水生植物",
"漂浮物": "有大面积漂物",
"结束": "结束",
'其它' :'其它'
}
## for TH river
##These type IDs are for the river-chief (hezhangzhi) platform.
nameID_dic={
"排口":'00000',
"疑似污口": '8378',
"水生植被": '8380',
"漂浮物": '8368',
"结束":'9999',
'其它':'8888'
}

def get_time(filename):
#2021-10-09-11-44-51_frame-598-720_type-水生植被.jpg
sps=filename.strip().split('_')[0]
tsps=sps.split('-')
return '%s-%s-%s %s:%s:%s'%(tsps[0],tsps[1],tsps[2],tsps[3],tsps[4],tsps[5])
def get_ms(time0,time1):
str_time ='%.2f ms'%((time1-time0)*1000)
return str_time

def get_urls( platform_query_url,fp_log ):
try:
if os.path.exists(platform_query_url):
#print('###line49')
with open(platform_query_url,'r') as fp:
res = json.load(fp)
else:
res = requests.get(platform_query_url,timeout=10).json()
#print('###line54')
questionUrl = res['data']['questionUrl'] ###push URL for problem images from live streams
offlineUrl = res['data']['offlineUrl'] ###push URL for problem images from offline HTTP videos
except Exception as ee:
timestr=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print('###### %s: file:send_transfer: error %s ,url:%s #####'%(timestr,ee,platform_query_url))
outstr = '\n %s ###### get url platform error : update error:%s , url:%s'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ,ee,platform_query_url)
fp_log.write(outstr);fp_log.flush()
questionUrl="http://47.96.182.154:9040/api/taskFile/submitUAVKHQuestion"
offlineUrl ="http://47.96.182.154:9040/api/taskFile/submitUAVKHQuestion"
return questionUrl,offlineUrl
def parse_filename(filename_base):
#etc:2022-01-13-16-04-17_frame-823-1440_type-水生植被_hgYFEulc0dPIrG1S_s-off-XJRW20220113154959_AI.jpg
uid =filename_base.split('.')[0].split('_')[3].strip()
sourceType=filename_base.split('_')[4].split('-')[1]
sourceId=filename_base.split('_')[4].split('-')[2]
typename=filename_base.split('.')[0].split('_')[2].split('-')[1].strip()
return uid,sourceType,sourceId,typename
def b64encode_function(filename, filename_OR):
if os.path.exists(filename):
image_ori=cv2.imread(filename)
image_ori_OR=cv2.imread(filename_OR)
else: # the inputs are already image arrays rather than file paths
image_ori = filename.copy()
image_ori_OR = filename_OR.copy()
image_pngcode = cv2.imencode('.jpg',image_ori)[-1]
image_pngcode_OR = cv2.imencode('.jpg',image_ori_OR)[-1]
image_code = str(base64.b64encode(image_pngcode))[2:-1]
image_code_OR = str(base64.b64encode(image_pngcode_OR))[2:-1]
return image_code, image_code_OR
def JsonSend(parIn):
fp_log = parIn['fp_log']
try:
response=requests.post(parIn['api'],json=parIn['input_'],timeout=10).json()
t3 = time.time()
print('\n file:%s encodetime:%.5f request time:%.5f,send to %s ,return code:%s, size:%.2f M \n'%(parIn['filename_base'],parIn['t2']-parIn['t1'],t3-parIn['t2'],parIn['api'],response['code'],parIn['sizeImage']))
outstr = '%s file:%s encodetime:%.5f request time:%.5f,send to %s ,return code:%s,size:%.2f M ,%s\n'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),parIn['filename_base'],parIn['t2']-parIn['t1'],t3-parIn['t2'],parIn['api'],response['code'],parIn['sizeImage'],parIn['dic_str'])
fp_log.write(outstr);fp_log.flush()
except Exception as ee:
print('\n ######file:%s: upload error:%s,size:%.2f M'%(parIn['filename_base'],ee, parIn['sizeImage']))
outstr = '\n%s ###### file:%s: upload error:%s , size:%.2f M'%( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ,parIn['filename_base'],ee,parIn['sizeImage'])
fp_log.write(outstr);fp_log.flush()
def dic2str(dic):
st=''
for key in dic.keys():
st='%s %s:%s,'%(st,key,dic[key])
return st
def createJsonInput(filename,offlineUrl,questionUrl):
flag = True
filename_base = os.path.basename(filename)
filename_OR=filename.replace('_AI.','_OR.')
if not os.path.exists(filename_OR ):
return False
uid,sourceType, sourceId,typename = parse_filename(filename_base)
if (typename not in name_dic.keys()) or (typename == '排口'):
return False
api = questionUrl if sourceType=='live' else offlineUrl
time_str = get_time(filename_base)
input_ ={
'imgName':os.path.basename(filename),
'imgNameOriginal':os.path.basename(filename_OR),
'time':time_str,
'fid':uid, ###random 16-character string
'type':name_dic[typename],###currently uses the classes ["排口","污口","水生植被","漂浮物","其它"]
'typeId':nameID_dic[typename]
}
if sourceType!='live':
input_['code']=sourceId;###only offline videos need the code field
dic_str = dic2str(input_)
t1 = time.time()
image_code, image_code_OR = b64encode_function(filename, filename_OR)
input_['imgData']=image_code
input_['imgDataOriginal']=image_code_OR
sizeImage = (len(image_code) + len(image_code_OR) )/1000000.0
parOut={};parOut['flag']=True;parOut['input_']=input_;
parOut['sizeImage']=sizeImage;parOut['dic_str']=dic_str;
parOut['filename']=filename;parOut['filename_OR']=filename_OR;
parOut['api']=api ; parOut['t1']=t1 ; parOut['filename_base']= filename_base
return parOut
def getLogFileFp(streamName):
logname ='SendLog/'+ time.strftime("%Y-%m-%d", time.localtime())+'_%s.txt'%(streamName)
if os.path.exists(logname):
fp_log = open(logname,'a+')
else:
fp_log = open(logname,'w')
return fp_log

def lodaMsgInfos(jsonDir,msgId):
jsonUrl = os.path.join(jsonDir,msgId+'.json')
with open(jsonUrl,'r') as fp:
data=json.load(fp)
return data

def parse_filename_for_oss(name):
splts=name.split('_')
typename=splts[2].split('-')[1].strip()
msgId=splts[4].split('-')[3]
onLineType=splts[4].split('-')[1]
return typename,msgId,onLineType

msg_dict_off={
"msg_id":"bblvgyntTsZCamqjuLArkiSYIbKXEeWx",#message ID
"biz_id":"hehuzhang",#business ID
"mod_id":"ai",#model ID
"status":"running",#task status
"type":str(1),#data type: 1 image, 2 video
"error":9999,#error info
"results":[#problem results
{
"original_url":"",#URL of the original image
"sign_url":"",#URL of the AI-annotated image
"category_id":"",#category ID
"description":"",#problem description
"time":"",#timestamp
}
]
}

msg_dict_on={
"msg_id":"bblvgyntTsZCamqjuLArkiSYIbKXEeWx",#message ID
"biz_id":"hehuzhang",#business ID
"mod_id":"qi",#model ID
"status":"running",#task status
"type":str(2),#data type: 1 image, 2 video
"error":9999,#error info
"results":[#problem results
{
"original_url":"",#original video URL (left empty for offline recognition, required for live)
"sign_url":"",#URL of the annotated video
}
]
}

def update_json(jsonOri,jsonNew,offkeys=["msg_id","biz_id" ,"mod_id" ]):
#{'biz_id': 'hehuzhang', 'mod_id': 'ai', 'msg_id': 'bblvgyntTsZCamqjuLArkiSYIbKXEeWx', 'offering_id': 'http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4', 'offering_type': 'mp4', 'results_base_dir': 'XJRW20220317153547', 'inSource': 'http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4', 'outSource': 'NO'}
for key in offkeys:
jsonNew[key] = jsonOri[key]
return jsonNew
def test5(indir,outdir,jsonDir,videoBakDir,ossPar,kafkaPar):


time0_0 = time.time()
logname ='SendLog/'+ time.strftime("%Y-%m-%d.txt", time.localtime())
if os.path.exists(logname):
fp_log = open(logname,'a+')
else:
fp_log = open(logname,'w')
ifind=0
time0_0 = time.time()

producer = KafkaProducer(
bootstrap_servers=kafkaPar['boostServer'],#tencent yun
value_serializer=lambda v: v.encode('utf-8'))
###authenticate and prepare the OSS bucket
auth = oss2.Auth(ossPar['AId'], ossPar['ASt'])
# The Endpoint uses Hangzhou as an example; fill in the actual Region as appropriate.
bucket = oss2.Bucket(auth, ossPar['Epoint'], ossPar['bucketName'])

while True:
#filelist = os.listdir(indir)
filelist_AI = sorted(glob.glob('%s/*_AI.*'%(indir)),key=os.path.getmtime)
filelist = filelist_AI
if len(filelist)!=0:
time0 = time.time()
for filename in filelist[0:2]:
filename_base = os.path.basename(filename)
##parse the filename
typename,msgId,onLineType = parse_filename_for_oss(filename_base)
##upload the image files
filename_OR=filename.replace('_AI.','_OR.')
if typename!='结束':
ObjectName_AI=os.path.join(ossPar['bucketName'],os.path.basename(filename))
ObjectName_OR=os.path.join(ossPar['bucketName'],os.path.basename(filename_OR))
bucket.put_object_from_file(ObjectName_AI, filename)
bucket.put_object_from_file(ObjectName_OR, filename_OR)
taskInfos = lodaMsgInfos(jsonDir,msgId)
#print(taskInfos)
##send the result message back
#if onLineType=='off':
msg = msg_dict_off
msg['results'][0]['original_url']= ObjectName_OR
msg['results'][0]['sign_url']= ObjectName_AI
msg['results'][0]['category_id']= nameID_dic[typename]
msg['results'][0]['description']= typename
msg['results'][0]['time']= time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
msg = update_json(taskInfos,msg)
else:
msg = msg_dict_on
videoList = sorted(glob.glob('%s/*'%(videoBakDir)),key=os.path.getmtime)
videoName = os.path.basename(videoList[0])
msg["status"]="success";msg["msg_id"]=msgId
ObjectName_AI=os.path.join(ossPar['bucketName'],videoName)
bucket.put_object_from_file(ObjectName_AI, videoList[0])
msg['results'][0]['original_url']= ObjectName_AI
msg['results'][0]['sign_url']= ObjectName_AI###the latest video file
print('###'*3,'Send:',filename)
msg = json.dumps(msg, ensure_ascii=False)
future = producer.send(
kafkaPar['topic'],
msg
)
print('***'*20,' Send transfer ',onLineType,msg)
##move the uploaded images to another folder###
cmd = 'mv \'%s\' \'%s\' '%(filename,outdir); os.system(cmd)
cmd = 'mv \'%s\' \'%s\' '%(filename_OR,outdir); os.system(cmd)

else:
time.sleep(1)
fp_log.close()
if __name__=='__main__':
indir='problems/images_tmp'
outdir='problems/images_save'
jsonDir = 'mintors/kafka/'
videoBakDir='../../data/video_live_bak/1945'
ossPar={'Epoint':'http://oss-cn-shanghai.aliyuncs.com',
'AId':'LTAI5tSJ62TLMUb4SZuf285A',
'ASt':'MWYynm30filZ7x0HqSHlU3pdLVNeI7',
'bucketName':'ta-tech-image',
}
#kafkaPar={'boostServer':['212.129.223.66:9092'],'topic':'testReturn'}
kafkaPar={'boostServer':['101.132.127.1:19092'],'topic':'alg-task-results'}
test5(indir,outdir,jsonDir,videoBakDir,ossPar,kafkaPar)
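
For clarity, a tiny standalone example of update_json's merge behavior (values are illustrative):

taskInfos = {'msg_id': 'abc123', 'biz_id': 'hehuzhang', 'mod_id': 'ai', 'inSource': 'ignored'}
msg = dict(msg_dict_off)           # template defined above
msg = update_json(taskInfos, msg)  # copies only msg_id, biz_id and mod_id
assert msg['msg_id'] == 'abc123' and msg['status'] == 'running'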

+ 94
- 0
producer.py View File

@@ -0,0 +1,94 @@
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import KafkaError
import traceback
import json
import time
import random,string
def producer_demo():


cnt_online=1;cnt_offline=0
Tecent=False;
#topic_on='thsw';topic_off='thsw2';
#server=['212.129.223.66:19092'];
server=["192.168.11.242:9092"]
#server=['101.132.127.1:19092']

topic_on='dsp-alg-online-tasks';topic_off='dsp-alg-offline-tasks'
# messages are produced as key-value pairs (keys are optional) and serialized as JSON
producer = KafkaProducer(
bootstrap_servers=server,#tencent yun
value_serializer=lambda v: v.encode('utf-8'))
# send the task messages
if not Tecent:
#pull_channel = "rtmp://live.play.t-aaron.com/live/THSA"
#push_channel = 'rtmp://live.push.t-aaron.com/live/THSB'
#pull_channel = 'rtmp://live.play.t-aaron.com/live/THSAa_hd'
pull_channel = 'http://live.play.t-aaron.com/live/THSAc_hd.m3u8'
push_channel = "rtmp://live.push.t-aaron.com/live/THSBc"
else:
pull_channel = "rtmp://demoplay.yunhengzhizao.cn/live/THSA_HD5M"
push_channel = "rtmp://127.0.0.1:1935/live/test"
#pull_channel = 'rtmp://live.play.t-aaron.com/live/THSAa'
#push_channel = 'rtmp://127.0.0.1:1975/live/test'
for i in range(0, cnt_online):
time.sleep(0.0001)
#''.join(random.sample(string.ascii_letters ,16) )
msg_dict = {
"request_id":'nn'+''.join(random.sample(string.ascii_letters ,30) ) ,
"models":[
{
"id":"001",
"config":{"0":"0", "1":"1","2":"1","3":"1","4":"1"
}
}
],
"pull_url":pull_channel,
"push_url":push_channel,
"results_base_dir": "XJRW202111291703"+str(random.randint(10,99)),
}
msg = json.dumps(msg_dict)
#msg = msg_dict
future = producer.send(
topic_on,
#key='count_num', # messages with the same key go to the same partition
msg
)


print('online send {}'.format(str(i)))
try:
future.get(timeout=10) # block to confirm the send succeeded
except KafkaError: # raised when the send fails
print(traceback.format_exc())

for i in range(0, cnt_offline):
msg_dict = {
"request_id":'bb'+''.join(random.sample(string.ascii_letters ,30) ) ,
"models":[
{
"id":"001",
"config":{"0":"1", "1":"1","2":"1","3":"1","4":"1"}
}
],
"original_url":"http://vod.play.t-aaron.com/customerTrans/c49a2c620795d124f2ae4b10197b8d0e/303b7a58-17f3ef4494e-0004-f90c-f2c-7ec68.mp4",
"original_type":"mp4",
"results_base_dir": "XJRW202203171535"+str(random.randint(10,99)),
}
msg = json.dumps(msg_dict)
#msg = msg_dict
future = producer.send(topic_off,msg)
print('offline send {}'.format(str(i)))
try:
future.get(timeout=10)
except KafkaError:
print(traceback.format_exc())

if __name__=='__main__':
producer_demo()
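
For completeness, a minimal sketch of the matching consumer side (standard kafka-python API; broker and topic are copied from the producer above):

from kafka import KafkaConsumer
import json

consumer = KafkaConsumer(
    'dsp-alg-online-tasks',
    bootstrap_servers=["192.168.11.242:9092"],
    value_deserializer=lambda v: json.loads(v.decode('utf-8')),
    auto_offset_reset='earliest')
for record in consumer:
    task = record.value  # dict: request_id, models, pull_url, push_url, results_base_dir
    print(task['request_id'], task.get('pull_url'))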

+ 307
- 0
queRiver.py View File

@@ -0,0 +1,307 @@
from kafka import KafkaProducer, KafkaConsumer
from kafka.errors import kafka_errors
import traceback
import json, base64,os
import numpy as np
from multiprocessing import Process,Queue
import time,cv2,string,random
import subprocess as sp

import matplotlib.pyplot as plt
from utils.datasets import LoadStreams, LoadImages
from models.experimental import attempt_load
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
import torch,sys
#from segutils.segmodel import SegModel,get_largest_contours
#sys.path.extend(['../yolov5/segutils'])

from segutils.segWaterBuilding import SegModel,get_largest_contours,illBuildings

#from segutils.core.models.bisenet import BiSeNet
from segutils.core.models.bisenet import BiSeNet_MultiOutput

from utils.plots import plot_one_box,plot_one_box_PIL,draw_painting_joint,get_label_arrays,get_websource
from collections import Counter
#import matplotlib
import matplotlib.pyplot as plt
# get_labelnames,get_label_arrays,post_process_,save_problem_images,time_str
FP_DEBUG=open('debut.txt','w')
def bsJpgCode(image_ori):
jpgCode = cv2.imencode('.jpg',image_ori)[-1]###np.array, shape (4502009,1)
bsCode = str(base64.b64encode(jpgCode))[2:-1] ###str, length 6002680
return bsCode
def bsJpgDecode(bsCode):
bsDecode = base64.b64decode(bsCode)###bytes, length 4502009
npString = np.frombuffer(bsDecode,np.uint8)###np.array, shape (4502009,)
jpgDecode = cv2.imdecode(npString,cv2.IMREAD_COLOR)###np.array, shape (3000,4000,3)
return jpgDecode
def get_ms(time0,time1):
str_time ='%.2f ms'%((time1-time0)*1000)
return str_time
rainbows=[
(0,0,255),(0,255,0),(255,0,0),(255,0,255),(255,255,0),(255,127,0),(255,0,127),
(127,255,0),(0,255,127),(0,127,255),(127,0,255),(255,127,255),(255,255,127),
(127,255,255),(0,255,255),(255,127,255),(127,255,255),
(0,127,0),(0,0,127),(0,255,255)
]


def get_labelnames(labelnames):
with open(labelnames,'r') as fp:
namesjson=json.load(fp)
names_fromfile=namesjson['labelnames']
names = names_fromfile
return names

def check_stream(stream):
cap = cv2.VideoCapture(stream)
if cap.isOpened():
return True
else:
return False
#####
def drawWater(pred,image_array0):####pred: segmentation output of the water-only task
##draw the water region
contours, hierarchy = cv2.findContours(pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
water = pred.copy(); water[:,:] = 0
if len(contours)==0:
return image_array0,water
max_id = get_largest_contours(contours);
cv2.fillPoly(water, [contours[max_id][:,0,:]], 1)
cv2.drawContours(image_array0,contours,max_id,(0,255,255),3)
return image_array0,water


def post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe,object_config=[0,1,2,3,4]):
##inputs: data from the dataset generator, model predictions pred, and NMS parameters
##pipeline: NMS ---> coordinate rescaling ---> drawing
##outputs: original image, AI-annotated image, detection results
time0=time.time()
path, img, im0s, vid_cap ,pred,seg_pred= datas[0:6];
segmodel=True
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=None, agnostic=False)
time1=time.time()
i=0;det=pred[0]###one image per detection pass
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
det_xywh=[];
#im0_brg=cv2.cvtColor(im0,cv2.COLOR_RGB2BGR);
if len(seg_pred)==2:
im0,water = illBuildings(seg_pred,im0)
else:
im0,water = drawWater(seg_pred,im0)
time2=time.time()
#plt.imshow(im0);plt.show()
if len(det)>0:
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4],im0.shape).round()
#use the seg model to keep only valid detection boxes and draw the river contour
if segmodel:
'''contours, hierarchy = cv2.findContours(seg_pred,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if len(contours)>0:
max_id = get_largest_contours(contours)
seg_pred[:,:] = 0
cv2.fillPoly(seg_pred, [contours[max_id][:,0,:]], 1)
cv2.drawContours(im0,contours,max_id,(0,255,255),3)'''
det_c = det.clone(); det_c=det_c.cpu().numpy()
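#area_factors: for each box, the fraction of its area lying on the water mask; boxes with at most 10% on water are dropped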
area_factors = np.array([np.sum(water[int(x[1]):int(x[3]), int(x[0]):int(x[2])] )/((x[2]-x[0])*(x[3]-x[1])) for x in det_c] )
det = det[area_factors>0.1]
#draw the detection boxes
for *xyxy, conf, cls in reversed(det):
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
cls_c = cls.cpu().numpy()
if int(cls_c) not in object_config: ###如果不是所需要的目标,则不显示
continue
conf_c = conf.cpu().numpy()
line = [float(cls_c), *xywh, float(conf_c)] # label format
det_xywh.append(line)
label = f'{names[int(cls)]} {conf:.2f}'
im0 = draw_painting_joint(xyxy,im0,label_arraylist[int(cls)],score=conf,color=rainbows[int(cls)%20],line_thickness=None)
time3=time.time()
strout='nms:%s illBuilding:%s detDraw:%s '%(get_ms(time0,time1),get_ms(time1,time2), get_ms(time2,time3) )
return [im0s[0],im0,det_xywh,iframe],strout

def preprocess(par):
print('#####process:',par['name'])
##reads the video and produces the original frames plus detection-ready images, as numpy arrays
#source='rtmp://liveplay.yunhengzhizao.cn/live/demo_HD5M'
#img_size=640; stride=32
while True:
cap = cv2.VideoCapture(par['source'])
iframe = 0
if cap.isOpened():
print( '#### read %s success!'%(par['source']))
try:
dataset = LoadStreams(par['source'], img_size=640, stride=32)
for path, img, im0s, vid_cap in dataset:
datas=[path, img, im0s, vid_cap,iframe]
par['queOut'].put(datas)
iframe +=1
except Exception as e:
print('###read error:%s '%(par['source']))
time.sleep(10)
iframe = 0
else:
print('###read error:%s '%(par['source'] ))
time.sleep(10)
iframe = 0

def gpu_process(par):
print('#####process:',par['name'])
half=True
##GPU compute: detection model
weights = par['weights']
device = par['device']
print('###line127:',par['device'])
model = attempt_load(par['weights'], map_location=par['device']) # load FP32 model
if half:
model.half()

##GPU compute: segmentation model
seg_nclass = par['seg_nclass']
seg_weights = par['seg_weights']

#segmodel = SegModel(nclass=seg_nclass,weights=seg_weights,device=device)

nclass = [2,2]
Segmodel = BiSeNet_MultiOutput(nclass)
weights='weights/segmentation/WaterBuilding.pth'
segmodel = SegModel(model=Segmodel,nclass=nclass,weights=weights,device='cuda:0',multiOutput=True)
while True:
if not par['queIn'].empty():
time0=time.time()
datas = par['queIn'].get()
path, img, im0s, vid_cap,iframe = datas[0:5]
time1=time.time()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
time2 = time.time()
pred = model(img,augment=False)[0]
time3 = time.time()
seg_pred = segmodel.eval(im0s[0],outsize=None,smooth_kernel=20)
time4 = time.time()
fpStr= 'process:%s ,iframe:%d,getdata:%s,copygpu:%s,dettime:%s,segtime:%s , time:%s, queLen:%d '%( par['name'],iframe,get_ms(time0,time1) ,get_ms(time1,time2) ,get_ms(time2,time3) ,get_ms(time3,time4),get_ms(time0,time4) ,par['queIn'].qsize() )
FP_DEBUG.write( fpStr+'\n' )
datasOut = [path, img, im0s, vid_cap,pred,seg_pred,iframe]
par['queOut'].put(datasOut)
if par['debug']:
print('#####process:',par['name'],' line107')
else:
time.sleep(1/300)
def get_cls(array):
dcs = Counter(array)
keys = list(dcs.keys())
values = list(dcs.values())
max_index = values.index(max(values))
cls = int(keys[max_index])
return cls
def save_problem_images(post_results,iimage_cnt,names,streamName='live-THSAHD5M',outImaDir='problems/images_tmp',imageTxtFile=False):
## [cls, x,y,w,h, conf]
problem_image=[[] for i in range(6)]

dets_list = [x[2] for x in post_results]

mean_scores=[ np.array(x)[:,5].mean() for x in dets_list ] ###mean conf

best_index = mean_scores.index(max(mean_scores)) ##index of the best problem image in this batch
best_frame = post_results[ best_index][3] ##absolute frame number
img_send = post_results[best_index][1]##AI-annotated image
img_bak = post_results[best_index][0]##original image
cls_max = get_cls( x[0] for x in dets_list[best_index] )


time_str = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
uid=''.join(random.sample(string.ascii_letters + string.digits, 16))
#ori_name = '2022-01-20-15-57-36_frame-368-720_type-漂浮物_qVh4zI08ZlwJN9on_s-live-THSAHD5M_OR.jpg'
#2022-01-13-15-07-57_frame-9999-9999_type-结束_9999999999999999_s-off-XJRW20220110115904_AI.jpg
outnameAI= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_AI.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName)
outnameOR= '%s/%s_frame-%d-%d_type-%s_%s_s-%s_OR.jpg'%(outImaDir,time_str,best_frame,iimage_cnt,names[cls_max],uid,streamName)
cv2.imwrite(outnameAI,img_send)
cv2.imwrite(outnameOR,img_bak)
if imageTxtFile:
outnameAI_txt = outnameAI.replace('.jpg','.txt')
fp=open(outnameAI_txt,'w');fp.write(outnameAI+'\n');fp.close()
outnameOR_txt = outnameOR.replace('.jpg','.txt')
fp=open(outnameOR_txt,'w');fp.write(outnameOR+'\n');fp.close()
parOut = {}; parOut['imgOR'] = img_bak; parOut['imgAR'] = img_send; parOut['uid']=uid
parOut['imgORname']=os.path.basename(outnameOR);parOut['imgARname']=os.path.basename(outnameAI);
parOut['time_str'] = time_str;parOut['type'] = names[cls_max]
return parOut

def post_process(par):
print('#####process:',par['name'])
###post-process parameters
conf_thres,iou_thres,classes=par['conf_thres'],par['iou_thres'],par['classes']
labelnames=par['labelnames']
rainbows=par['rainbows']
fpsample = par['fpsample']
names=get_labelnames(labelnames)
label_arraylist = get_label_arrays(names,rainbows,outfontsize=40)
iimage_cnt = 0
post_results=[]
while True:
if not par['queIn'].empty():
time0=time.time()
datas = par['queIn'].get()
iframe = datas[6]
if par['debug']:
print('#####process:',par['name'],' line129')
p_result,timeOut = post_process_(datas,conf_thres, iou_thres,names,label_arraylist,rainbows,iframe)
par['queOut'].put(p_result)
##output results

##every fpsample frames: if any problems were collected, save the best image
if (iframe % fpsample == 0) and (len(post_results)>0) :
#print('####line204:',iframe,post_results)
save_problem_images(post_results,iframe,names)
post_results=[]

if len(p_result[2] )>0: ##
#post_list = p_result.append(iframe)
post_results.append(p_result)
#print('####line201:',type(p_result))
time1=time.time()
outstr='process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() )
FP_DEBUG.write(outstr +'\n')
#print( 'process:%s ,iframe:%d,%s , time:%s, queLen:%d '%( par['name'],iframe,timeOut,get_ms(time0,time1) ,par['queIn'].qsize() ) )
else:
time.sleep(1/300)

def save_logfile(name,txt):
if os.path.exists(name):
fp=open(name,'r+')
else:
fp=open(name,'w')
fp.write('%s %s \n'%(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),txt))
fp.close()
def time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

if __name__=='__main__':
jsonfile='config/queRiver.json'
#image_encode_decode()
work_stream(jsonfile)
#par={'name':'preprocess'}
#preprocess(par)
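
Note: work_stream is called in __main__ but not defined in this file. A minimal sketch of how the three stages defined above could be wired (queue sizes, thresholds and config paths are illustrative assumptions):

from multiprocessing import Process, Queue

def work_stream_sketch(source, det_weights, seg_weights):
    q_raw, q_pred, q_out = Queue(10), Queue(10), Queue(10)
    pre = {'name': 'preprocess', 'source': source, 'queOut': q_raw}
    gpu = {'name': 'gpu_process', 'device': 'cuda:0', 'weights': det_weights,
           'seg_weights': seg_weights, 'seg_nclass': 2, 'debug': False,
           'queIn': q_raw, 'queOut': q_pred}
    post = {'name': 'post_process', 'conf_thres': 0.25, 'iou_thres': 0.45, 'classes': None,
            'labelnames': 'conf/model.json', 'rainbows': rainbows, 'fpsample': 240,
            'debug': False, 'queIn': q_pred, 'queOut': q_out}
    for target, par in [(preprocess, pre), (gpu_process, gpu), (post_process, post)]:
        Process(target=target, args=(par,), daemon=True).start()
    return q_out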

+ 1
- 0
readme.md View File

@@ -0,0 +1 @@
thsw

+ 501
- 0
segutils/GPUtils.py View File

@@ -0,0 +1,501 @@
# GPUtil - GPU utilization
#
# A Python module for programmically getting the GPU utilization from NVIDA GPUs using nvidia-smi
#
# Author: Anders Krogh Mortensen (anderskm)
# Date: 16 January 2017
# Web: https://github.com/anderskm/gputil
#
# LICENSE
#
# MIT License
#
# Copyright (c) 2017 anderskm
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from subprocess import Popen, PIPE
from distutils import spawn
import os
import math
import random
import time
import sys
import platform
import subprocess
import numpy as np


__version__ = '1.4.0'
class GPU:
def __init__(self, ID, uuid, load, memoryTotal, memoryUsed, memoryFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu):
self.id = ID
self.uuid = uuid
self.load = load
self.memoryUtil = float(memoryUsed)/float(memoryTotal)
self.memoryTotal = memoryTotal
self.memoryUsed = memoryUsed
self.memoryFree = memoryFree
self.driver = driver
self.name = gpu_name
self.serial = serial
self.display_mode = display_mode
self.display_active = display_active
self.temperature = temp_gpu

def __str__(self):
return str(self.__dict__)


class GPUProcess:
def __init__(self, pid, processName, gpuId, gpuUuid, gpuName, usedMemory,
uid, uname):
self.pid = pid
self.processName = processName
self.gpuId = gpuId
self.gpuUuid = gpuUuid
self.gpuName = gpuName
self.usedMemory = usedMemory
self.uid = uid
self.uname = uname

def __str__(self):
return str(self.__dict__)

def safeFloatCast(strNumber):
try:
number = float(strNumber)
except ValueError:
number = float('nan')
return number

def getNvidiaSmiCmd():
if platform.system() == "Windows":
# If the platform is Windows and nvidia-smi
# could not be found from the environment path,
# try to find it from the system drive with the default installation path
nvidia_smi = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ['systemdrive']
else:
nvidia_smi = "nvidia-smi"
return nvidia_smi


def getGPUs():
# Get ID, processing and memory utilization for all GPUs
nvidia_smi = getNvidiaSmiCmd()
try:
p = subprocess.run([
nvidia_smi,
"--query-gpu=index,uuid,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode,temperature.gpu",
"--format=csv,noheader,nounits"
], stdout=subprocess.PIPE, encoding='utf8')
stdout, stderror = p.stdout, p.stderr
except:
return []
output = stdout
## Parse output
# Split on line break
lines = output.split(os.linesep)
#print(lines)
numDevices = len(lines)-1
GPUs = []
for g in range(numDevices):
line = lines[g]
#print(line)
vals = line.split(', ')
#print(vals)
deviceIds = int(vals[0])
uuid = vals[1]
gpuUtil = safeFloatCast(vals[2]) / 100
memTotal = safeFloatCast(vals[3])
memUsed = safeFloatCast(vals[4])
memFree = safeFloatCast(vals[5])
driver = vals[6]
gpu_name = vals[7]
serial = vals[8]
display_active = vals[9]
display_mode = vals[10]
temp_gpu = safeFloatCast(vals[11])
GPUs.append(GPU(deviceIds, uuid, gpuUtil, memTotal, memUsed, memFree, driver, gpu_name, serial, display_mode, display_active, temp_gpu))
return GPUs # (deviceIds, gpuUtil, memUtil)


def getGPUProcesses():
"""Get all gpu compute processes."""
global gpuUuidToIdMap
gpuUuidToIdMap = {}
try:
gpus = getGPUs()
for gpu in gpus:
gpuUuidToIdMap[gpu.uuid] = gpu.id
del gpus
except:
pass
nvidia_smi = getNvidiaSmiCmd()
try:
p = subprocess.run([
nvidia_smi,
"--query-compute-apps=pid,process_name,gpu_uuid,gpu_name,used_memory",
"--format=csv,noheader,nounits"
], stdout=subprocess.PIPE, encoding='utf8')
stdout, stderror = p.stdout, p.stderr
except:
return []
output = stdout
## Parse output
# Split on line break
lines = output.split(os.linesep)
numProcesses = len(lines) - 1
processes = []
for g in range(numProcesses):
line = lines[g]
#print(line)
vals = line.split(', ')
#print(vals)
pid = int(vals[0])
processName = vals[1]
gpuUuid = vals[2]
gpuName = vals[3]
usedMemory = safeFloatCast(vals[4])
gpuId = gpuUuidToIdMap.get(gpuUuid)
if gpuId is None:
gpuId = -1

# get uid and uname owner of the pid
try:
p = subprocess.run(['ps', f'-p{pid}', '-oruid=,ruser='],
stdout=subprocess.PIPE, encoding='utf8')
uid, uname = p.stdout.split()
uid = int(uid)
except:
uid, uname = -1, ''

processes.append(GPUProcess(pid, processName, gpuId, gpuUuid,
gpuName, usedMemory, uid, uname))
return processes


def getAvailable(order = 'first', limit=1, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]):
# order = first | last | random | load | memory
# first --> select the GPU with the lowest ID (DEFAULT)
# last --> select the GPU with the highest ID
# random --> select a random available GPU
# load --> select the GPU with the lowest load
# memory --> select the GPU with the most memory available
# limit = 1 (DEFAULT), 2, ..., Inf
# Limit sets the upper limit for the number of GPUs to return. E.g. if limit = 2, but only one is available, only one is returned.
# Get device IDs, load and memory usage
GPUs = getGPUs()
# Determine, which GPUs are available
GPUavailability = getAvailability(GPUs, maxLoad=maxLoad, maxMemory=maxMemory, memoryFree=memoryFree, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID)
availAbleGPUindex = [idx for idx in range(0,len(GPUavailability)) if (GPUavailability[idx] == 1)]
# Discard unavailable GPUs
GPUs = [GPUs[g] for g in availAbleGPUindex]
# Sort available GPUs according to the order argument
if (order == 'first'):
GPUs.sort(key=lambda x: float('inf') if math.isnan(x.id) else x.id, reverse=False)
elif (order == 'last'):
GPUs.sort(key=lambda x: float('-inf') if math.isnan(x.id) else x.id, reverse=True)
elif (order == 'random'):
GPUs = [GPUs[g] for g in random.sample(range(0,len(GPUs)),len(GPUs))]
elif (order == 'load'):
GPUs.sort(key=lambda x: float('inf') if math.isnan(x.load) else x.load, reverse=False)
elif (order == 'memory'):
GPUs.sort(key=lambda x: float('inf') if math.isnan(x.memoryUtil) else x.memoryUtil, reverse=False)
# Extract the number of desired GPUs, but limited to the total number of available GPUs
GPUs = GPUs[0:min(limit, len(GPUs))]
# Extract the device IDs from the GPUs and return them
deviceIds = [gpu.id for gpu in GPUs]
return deviceIds
def getAvailability(GPUs, maxLoad=0.5, maxMemory=0.5, memoryFree=0, includeNan=False, excludeID=[], excludeUUID=[]):
# Determine, which GPUs are available
GPUavailability = [1 if (gpu.memoryFree>=memoryFree) and (gpu.load < maxLoad or (includeNan and math.isnan(gpu.load))) and (gpu.memoryUtil < maxMemory or (includeNan and math.isnan(gpu.memoryUtil))) and ((gpu.id not in excludeID) and (gpu.uuid not in excludeUUID)) else 0 for gpu in GPUs]
return GPUavailability
def getFirstAvailable(order = 'first', maxLoad=0.5, maxMemory=0.5, attempts=1, interval=900, verbose=False, includeNan=False, excludeID=[], excludeUUID=[]):
#GPUs = getGPUs()
#firstAvailableGPU = np.NaN
#for i in range(len(GPUs)):
# if (GPUs[i].load < maxLoad) & (GPUs[i].memory < maxMemory):
# firstAvailableGPU = GPUs[i].id
# break
#return firstAvailableGPU
for i in range(attempts):
if (verbose):
print('Attempting (' + str(i+1) + '/' + str(attempts) + ') to locate available GPU.')
# Get first available GPU
available = getAvailable(order=order, limit=1, maxLoad=maxLoad, maxMemory=maxMemory, includeNan=includeNan, excludeID=excludeID, excludeUUID=excludeUUID)
# If an available GPU was found, break for loop.
if (available):
if (verbose):
print('GPU ' + str(available) + ' located!')
break
# If this is not the last attempt, sleep for 'interval' seconds
if (i != attempts-1):
time.sleep(interval)
# Check if a GPU was found, or if the attempts simply ran out. Throw an error if no GPU was found
if (not(available)):
raise RuntimeError('Could not find an available GPU after ' + str(attempts) + ' attempts with ' + str(interval) + ' seconds interval.')
# Return found GPU
return available
def showUtilization(all=False, attrList=None, useOldCode=False):
GPUs = getGPUs()
if (all):
if (useOldCode):
print(' ID | Name | Serial | UUID || GPU util. | Memory util. || Memory total | Memory used | Memory free || Display mode | Display active |')
print('------------------------------------------------------------------------------------------------------------------------------')
for gpu in GPUs:
print(' {0:2d} | {1:s} | {2:s} | {3:s} || {4:3.0f}% | {5:3.0f}% || {6:.0f}MB | {7:.0f}MB | {8:.0f}MB || {9:s} | {10:s}'.format(gpu.id,gpu.name,gpu.serial,gpu.uuid,gpu.load*100,gpu.memoryUtil*100,gpu.memoryTotal,gpu.memoryUsed,gpu.memoryFree,gpu.display_mode,gpu.display_active))
else:
attrList = [[{'attr':'id','name':'ID'},
{'attr':'name','name':'Name'},
{'attr':'serial','name':'Serial'},
{'attr':'uuid','name':'UUID'}],
[{'attr':'temperature','name':'GPU temp.','suffix':'C','transform': lambda x: x,'precision':0},
{'attr':'load','name':'GPU util.','suffix':'%','transform': lambda x: x*100,'precision':0},
{'attr':'memoryUtil','name':'Memory util.','suffix':'%','transform': lambda x: x*100,'precision':0}],
[{'attr':'memoryTotal','name':'Memory total','suffix':'MB','precision':0},
{'attr':'memoryUsed','name':'Memory used','suffix':'MB','precision':0},
{'attr':'memoryFree','name':'Memory free','suffix':'MB','precision':0}],
[{'attr':'display_mode','name':'Display mode'},
{'attr':'display_active','name':'Display active'}]]
else:
if (useOldCode):
print(' ID GPU MEM')
print('--------------')
for gpu in GPUs:
print(' {0:2d} {1:3.0f}% {2:3.0f}%'.format(gpu.id, gpu.load*100, gpu.memoryUtil*100))
else:
attrList = [[{'attr':'id','name':'ID'},
{'attr':'load','name':'GPU','suffix':'%','transform': lambda x: x*100,'precision':0},
{'attr':'memoryUtil','name':'MEM','suffix':'%','transform': lambda x: x*100,'precision':0}],
]
if (not useOldCode):
if (attrList is not None):
headerString = ''
GPUstrings = ['']*len(GPUs)
for attrGroup in attrList:
#print(attrGroup)
for attrDict in attrGroup:
headerString = headerString + '| ' + attrDict['name'] + ' '
headerWidth = len(attrDict['name'])
minWidth = len(attrDict['name'])
attrPrecision = '.' + str(attrDict['precision']) if ('precision' in attrDict.keys()) else ''
attrSuffix = str(attrDict['suffix']) if ('suffix' in attrDict.keys()) else ''
attrTransform = attrDict['transform'] if ('transform' in attrDict.keys()) else lambda x : x
for gpu in GPUs:
attr = getattr(gpu,attrDict['attr'])
attr = attrTransform(attr)
if (isinstance(attr,float)):
attrStr = ('{0:' + attrPrecision + 'f}').format(attr)
elif (isinstance(attr,int)):
attrStr = ('{0:d}').format(attr)
elif (isinstance(attr,str)):
attrStr = attr;
elif (sys.version_info[0] == 2):
if (isinstance(attr,unicode)):
attrStr = attr.encode('ascii','ignore')
else:
raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
attrStr += attrSuffix
minWidth = max(minWidth,len(attrStr))
headerString += ' '*max(0,minWidth-headerWidth)
minWidthStr = str(minWidth - len(attrSuffix))
for gpuIdx,gpu in enumerate(GPUs):
attr = getattr(gpu,attrDict['attr'])
attr = attrTransform(attr)
if (isinstance(attr,float)):
attrStr = ('{0:'+ minWidthStr + attrPrecision + 'f}').format(attr)
elif (isinstance(attr,int)):
attrStr = ('{0:' + minWidthStr + 'd}').format(attr)
elif (isinstance(attr,str)):
attrStr = ('{0:' + minWidthStr + 's}').format(attr);
elif (sys.version_info[0] == 2):
if (isinstance(attr,unicode)):
attrStr = ('{0:' + minWidthStr + 's}').format(attr.encode('ascii','ignore'))
else:
raise TypeError('Unhandled object type (' + str(type(attr)) + ') for attribute \'' + attrDict['name'] + '\'')
attrStr += attrSuffix
GPUstrings[gpuIdx] += '| ' + attrStr + ' '
headerString = headerString + '|'
for gpuIdx,gpu in enumerate(GPUs):
GPUstrings[gpuIdx] += '|'
headerSpacingString = '-' * len(headerString)
print(headerString)
print(headerSpacingString)
for GPUstring in GPUstrings:
print(GPUstring)


# Generate gpu uuid to id map
gpuUuidToIdMap = {}
try:
gpus = getGPUs()
for gpu in gpus:
gpuUuidToIdMap[gpu.uuid] = gpu.id
del gpus
except:
pass
def getGPUInfos():
###returns gpus: a list with one object per GPU
###########attributes: 'id','load','memoryFree',
###########'memoryTotal','memoryUsed','memoryUtil','name','serial','temperature','uuid',process
###where process is a list with one object per compute process
############attributes: 'gpuId','gpuName','gpuUuid',
############'gpuid','pid','processName','uid', 'uname','usedMemory'
gpus = getGPUs()
gpuUuidToIdMap={}
for gpu in gpus:
gpuUuidToIdMap[gpu.uuid] = gpu.id
gpu.process=[]
indexx = [x.id for x in gpus ]
process = getGPUProcesses()
for pre in process:
pre.gpuid = gpuUuidToIdMap[pre.gpuUuid]
gpuId = indexx.index(pre.gpuid )
gpus[gpuId].process.append(pre )
return gpus

def get_available_gpu(gpuStatus):
##check for an idle GPU: return its id if one exists, otherwise None
cuda=None
for gpus in gpuStatus:
if len(gpus.process) == 0:
cuda = gpus.id
return cuda
return cuda
def get_whether_gpuProcess():
##return True only if no GPU has any compute process running
gpuStatus=getGPUInfos()
gpuProcess=True
for gpus in gpuStatus:
if len(gpus.process) != 0:
gpuProcess = False
return gpuProcess
def get_offlineProcess_gpu(gpuStatus,pidInfos):
gpu_onLine = []
for gpu in gpuStatus:
for gpuProcess in gpu.process:
pid = gpuProcess.pid
if pid in pidInfos.keys():
pidType = pidInfos[pid]['type']
if pidType == 'onLine':
gpu_onLine.append(gpu)
gpu_offLine = set(gpuStatus) - set(gpu_onLine)
return list(gpu_offLine)
def arrange_offlineProcess(gpuStatus,pidInfos,modelMemory=1500):
cudaArrange=[]
gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos)
for gpu in gpu_offLine:
leftMemory = gpu.memoryTotal*0.9 - gpu.memoryUsed
modelCnt = int(leftMemory// modelMemory)

cudaArrange.extend( [gpu.id] * modelCnt )
return cudaArrange
def get_potential_gpu(gpuStatus,pidInfos):
###every GPU is busy; a card must be freed up for an 'online' task.
###step 1: find the GPUs that carry no 'online' tasks
gpu_offLine = get_offlineProcess_gpu(gpuStatus,pidInfos)
if len(gpu_offLine) == 0 :
return False
###step 2: count the offline processes on each of those GPUs
offLineCnt = [ len(gpu.process) for gpu in gpu_offLine ]
minCntIndex =offLineCnt.index( min(offLineCnt))
pids = [x.pid for x in gpu_offLine[minCntIndex].process]
return {'cuda':gpu_offLine[minCntIndex].id,'pids':pids }
if __name__=='__main__':
#pres = getGPUProcesses()
#print('###line404:',pres)
gpus = getGPUs()
for gpu in gpus:
gpuUuidToIdMap[gpu.uuid] = gpu.id
print(gpu)
print(gpuUuidToIdMap)
pres = getGPUProcesses()
print('###line404:',pres)
for pre in pres:
print('#'*20)
for ken in ['gpuName','gpuUuid','pid','processName','uid','uname','usedMemory' ]:
print(ken,' ',pre.__getattribute__(ken ))
print(' ')
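
A brief usage sketch for the scheduling helpers above (assumes nvidia-smi is on the PATH; pidInfos is the pid -> {'type': ...} map maintained by the master process):

gpuStatus = getGPUInfos()              # GPUs with their compute processes attached
idle = get_available_gpu(gpuStatus)    # id of a fully idle GPU, or None
pidInfos = {1234: {'type': 'onLine'}}  # illustrative entry
slots = arrange_offlineProcess(gpuStatus, pidInfos, modelMemory=1500)
print(idle, slots)                     # e.g. 1 [0, 0, 0] means three offline model slots fit on GPU 0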


BIN
segutils/__pycache__/GPUtils.cpython-38.pyc View File


BIN
segutils/__pycache__/segWaterBuilding.cpython-38.pyc View File


BIN
segutils/__pycache__/segmodel.cpython-38.pyc View File


+ 1
- 0
segutils/core/__init__.py View File

@@ -0,0 +1 @@
from . import nn, models, utils, data

BIN
segutils/core/__pycache__/__init__.cpython-36.pyc View File


BIN
segutils/core/__pycache__/__init__.cpython-38.pyc View File


+ 0
- 0
segutils/core/data/__init__.py View File


BIN
segutils/core/data/__pycache__/__init__.cpython-36.pyc View File


BIN
segutils/core/data/__pycache__/__init__.cpython-38.pyc View File


+ 23
- 0
segutils/core/data/dataloader/__init__.py View File

@@ -0,0 +1,23 @@
"""
This module provides data loaders and transformers for popular vision datasets.
"""
from .mscoco import COCOSegmentation
from .cityscapes import CitySegmentation
from .ade import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .sbu_shadow import SBUSegmentation

datasets = {
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation,
'coco': COCOSegmentation,
'citys': CitySegmentation,
'sbu': SBUSegmentation,
}


def get_segmentation_dataset(name, **kwargs):
"""Segmentation Datasets"""
return datasets[name.lower()](**kwargs)

BIN  segutils/core/data/dataloader/__pycache__/__init__.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/ade.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/cityscapes.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/mscoco.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/pascal_aug.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/pascal_voc.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/sbu_shadow.cpython-36.pyc
BIN  segutils/core/data/dataloader/__pycache__/segbase.cpython-36.pyc

+172 -0  segutils/core/data/dataloader/ade.py

@@ -0,0 +1,172 @@
"""Pascal ADE20K Semantic Segmentation Dataset."""
import os
import torch
import numpy as np

from PIL import Image
from .segbase import SegmentationDataset


class ADE20KSegmentation(SegmentationDataset):
"""ADE20K Semantic Segmentation Dataset.

Parameters
----------
root : string
Path to ADE20K folder. Default is '../datasets/ade'
split: string
'train', 'val' or 'test'
transform : callable, optional
A function that transforms the image
Examples
--------
>>> from torchvision import transforms
>>> import torch.utils.data as data
>>> # Transforms for Normalization
>>> input_transform = transforms.Compose([
>>> transforms.ToTensor(),
>>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
>>> ])
>>> # Create Dataset
>>> trainset = ADE20KSegmentation(split='train', transform=input_transform)
>>> # Create Training Loader
>>> train_data = data.DataLoader(
>>> trainset, 4, shuffle=True,
>>> num_workers=4)
"""
BASE_DIR = 'ADEChallengeData2016'
NUM_CLASS = 150

def __init__(self, root='../datasets/ade', split='test', mode=None, transform=None, **kwargs):
super(ADE20KSegmentation, self).__init__(root, split, mode, transform, **kwargs)
root = os.path.join(root, self.BASE_DIR)
assert os.path.exists(root), "Please setup the dataset using ../datasets/ade20k.py"
self.images, self.masks = _get_ade20k_pairs(root, split)
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of:" + root + "\n")
print('Found {} images in the folder {}'.format(len(self.images), root))

def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
img = self._img_transform(img)
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
        # synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
        # general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask, os.path.basename(self.images[index])

def _mask_transform(self, mask):
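        # ADE20K masks store labels 1..150 with 0 = 'unlabeled'; the -1 shift
        # below maps them to 0..149 for training, and pred_offset adds it back.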
return torch.LongTensor(np.array(mask).astype('int32') - 1)

def __len__(self):
return len(self.images)

@property
def pred_offset(self):
return 1

@property
def classes(self):
"""Category names."""
return ("wall", "building, edifice", "sky", "floor, flooring", "tree",
"ceiling", "road, route", "bed", "windowpane, window", "grass",
"cabinet", "sidewalk, pavement",
"person, individual, someone, somebody, mortal, soul",
"earth, ground", "door, double door", "table", "mountain, mount",
"plant, flora, plant life", "curtain, drape, drapery, mantle, pall",
"chair", "car, auto, automobile, machine, motorcar",
"water", "painting, picture", "sofa, couch, lounge", "shelf",
"house", "sea", "mirror", "rug, carpet, carpeting", "field", "armchair",
"seat", "fence, fencing", "desk", "rock, stone", "wardrobe, closet, press",
"lamp", "bathtub, bathing tub, bath, tub", "railing, rail", "cushion",
"base, pedestal, stand", "box", "column, pillar", "signboard, sign",
"chest of drawers, chest, bureau, dresser", "counter", "sand", "sink",
"skyscraper", "fireplace, hearth, open fireplace", "refrigerator, icebox",
"grandstand, covered stand", "path", "stairs, steps", "runway",
"case, display case, showcase, vitrine",
"pool table, billiard table, snooker table", "pillow",
"screen door, screen", "stairway, staircase", "river", "bridge, span",
"bookcase", "blind, screen", "coffee table, cocktail table",
"toilet, can, commode, crapper, pot, potty, stool, throne",
"flower", "book", "hill", "bench", "countertop",
"stove, kitchen stove, range, kitchen range, cooking stove",
"palm, palm tree", "kitchen island",
"computer, computing machine, computing device, data processor, "
"electronic computer, information processing system",
"swivel chair", "boat", "bar", "arcade machine",
"hovel, hut, hutch, shack, shanty",
"bus, autobus, coach, charabanc, double-decker, jitney, motorbus, "
"motorcoach, omnibus, passenger vehicle",
"towel", "light, light source", "truck, motortruck", "tower",
"chandelier, pendant, pendent", "awning, sunshade, sunblind",
"streetlight, street lamp", "booth, cubicle, stall, kiosk",
"television receiver, television, television set, tv, tv set, idiot "
"box, boob tube, telly, goggle box",
"airplane, aeroplane, plane", "dirt track",
"apparel, wearing apparel, dress, clothes",
"pole", "land, ground, soil",
"bannister, banister, balustrade, balusters, handrail",
"escalator, moving staircase, moving stairway",
"ottoman, pouf, pouffe, puff, hassock",
"bottle", "buffet, counter, sideboard",
"poster, posting, placard, notice, bill, card",
"stage", "van", "ship", "fountain",
"conveyer belt, conveyor belt, conveyer, conveyor, transporter",
"canopy", "washer, automatic washer, washing machine",
"plaything, toy", "swimming pool, swimming bath, natatorium",
"stool", "barrel, cask", "basket, handbasket", "waterfall, falls",
"tent, collapsible shelter", "bag", "minibike, motorbike", "cradle",
"oven", "ball", "food, solid food", "step, stair", "tank, storage tank",
"trade name, brand name, brand, marque", "microwave, microwave oven",
"pot, flowerpot", "animal, animate being, beast, brute, creature, fauna",
"bicycle, bike, wheel, cycle", "lake",
"dishwasher, dish washer, dishwashing machine",
"screen, silver screen, projection screen",
"blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase",
"traffic light, traffic signal, stoplight", "tray",
"ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, "
"dustbin, trash barrel, trash bin",
"fan", "pier, wharf, wharfage, dock", "crt screen",
"plate", "monitor, monitoring device", "bulletin board, notice board",
"shower", "radiator", "glass, drinking glass", "clock", "flag")


def _get_ade20k_pairs(folder, mode='train'):
img_paths = []
mask_paths = []
if mode == 'train':
img_folder = os.path.join(folder, 'images/training')
mask_folder = os.path.join(folder, 'annotations/training')
else:
img_folder = os.path.join(folder, 'images/validation')
mask_folder = os.path.join(folder, 'annotations/validation')
for filename in os.listdir(img_folder):
basename, _ = os.path.splitext(filename)
if filename.endswith(".jpg"):
imgpath = os.path.join(img_folder, filename)
maskname = basename + '.png'
maskpath = os.path.join(mask_folder, maskname)
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)

return img_paths, mask_paths


if __name__ == '__main__':
train_dataset = ADE20KSegmentation()

+137 -0  segutils/core/data/dataloader/cityscapes.py

@@ -0,0 +1,137 @@
"""Prepare Cityscapes dataset"""
import os
import torch
import numpy as np

from PIL import Image
from .segbase import SegmentationDataset


class CitySegmentation(SegmentationDataset):
"""Cityscapes Semantic Segmentation Dataset.

Parameters
----------
root : string
Path to Cityscapes folder. Default is '../datasets/citys'
split: string
'train', 'val' or 'test'
transform : callable, optional
A function that transforms the image
Examples
--------
>>> from torchvision import transforms
>>> import torch.utils.data as data
>>> # Transforms for Normalization
>>> input_transform = transforms.Compose([
>>> transforms.ToTensor(),
>>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
>>> ])
>>> # Create Dataset
>>> trainset = CitySegmentation(split='train', transform=input_transform)
>>> # Create Training Loader
>>> train_data = data.DataLoader(
>>> trainset, 4, shuffle=True,
>>> num_workers=4)
"""
BASE_DIR = 'cityscapes'
NUM_CLASS = 19

def __init__(self, root='../datasets/citys', split='train', mode=None, transform=None, **kwargs):
super(CitySegmentation, self).__init__(root, split, mode, transform, **kwargs)
# self.root = os.path.join(root, self.BASE_DIR)
assert os.path.exists(self.root), "Please setup the dataset using ../datasets/cityscapes.py"
self.images, self.mask_paths = _get_city_pairs(self.root, self.split)
assert (len(self.images) == len(self.mask_paths))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of:" + root + "\n")
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33]
self._key = np.array([-1, -1, -1, -1, -1, -1,
-1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1,
5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15,
-1, -1, 16, 17, 18])
self._mapping = np.array(range(-1, len(self._key) - 1)).astype('int32')
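        # _mapping enumerates the raw Cityscapes ids -1..33; _class_to_index
        # locates each pixel's raw id in _mapping (np.digitize) and reads the
        # train id off _key, e.g. raw id 7 ('road') sits at position 8 and
        # _key[8] == 0, so 'road' becomes train id 0.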

def _class_to_index(self, mask):
# assert the value
values = np.unique(mask)
for value in values:
assert (value in self._mapping)
index = np.digitize(mask.ravel(), self._mapping, right=True)
return self._key[index].reshape(mask.shape)

def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.mask_paths[index])
        # synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask, os.path.basename(self.images[index])

def _mask_transform(self, mask):
target = self._class_to_index(np.array(mask).astype('int32'))
return torch.LongTensor(np.array(target).astype('int32'))

def __len__(self):
return len(self.images)

@property
def pred_offset(self):
return 0


def _get_city_pairs(folder, split='train'):
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for root, _, files in os.walk(img_folder):
for filename in files:
if filename.endswith('.png'):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths, mask_paths

if split in ('train', 'val'):
img_folder = os.path.join(folder, 'leftImg8bit/' + split)
mask_folder = os.path.join(folder, 'gtFine/' + split)
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
return img_paths, mask_paths
else:
assert split == 'trainval'
print('trainval set')
train_img_folder = os.path.join(folder, 'leftImg8bit/train')
train_mask_folder = os.path.join(folder, 'gtFine/train')
val_img_folder = os.path.join(folder, 'leftImg8bit/val')
val_mask_folder = os.path.join(folder, 'gtFine/val')
train_img_paths, train_mask_paths = get_path_pairs(train_img_folder, train_mask_folder)
val_img_paths, val_mask_paths = get_path_pairs(val_img_folder, val_mask_folder)
img_paths = train_img_paths + val_img_paths
mask_paths = train_mask_paths + val_mask_paths
return img_paths, mask_paths


if __name__ == '__main__':
dataset = CitySegmentation()

+90 -0  segutils/core/data/dataloader/lip_parsing.py

@@ -0,0 +1,90 @@
"""Look into Person Dataset"""
import os
import torch
import numpy as np

from PIL import Image
from .segbase import SegmentationDataset


class LIPSegmentation(SegmentationDataset):
"""Look into person parsing dataset """

BASE_DIR = 'LIP'
NUM_CLASS = 20

def __init__(self, root='../datasets/LIP', split='train', mode=None, transform=None, **kwargs):
super(LIPSegmentation, self).__init__(root, split, mode, transform, **kwargs)
_trainval_image_dir = os.path.join(root, 'TrainVal_images')
_testing_image_dir = os.path.join(root, 'Testing_images')
_trainval_mask_dir = os.path.join(root, 'TrainVal_parsing_annotations')
if split == 'train':
_image_dir = os.path.join(_trainval_image_dir, 'train_images')
_mask_dir = os.path.join(_trainval_mask_dir, 'train_segmentations')
_split_f = os.path.join(_trainval_image_dir, 'train_id.txt')
elif split == 'val':
_image_dir = os.path.join(_trainval_image_dir, 'val_images')
_mask_dir = os.path.join(_trainval_mask_dir, 'val_segmentations')
_split_f = os.path.join(_trainval_image_dir, 'val_id.txt')
elif split == 'test':
_image_dir = os.path.join(_testing_image_dir, 'testing_images')
_split_f = os.path.join(_testing_image_dir, 'test_id.txt')
else:
raise RuntimeError('Unknown dataset split.')

self.images = []
self.masks = []
with open(os.path.join(_split_f), 'r') as lines:
for line in lines:
_image = os.path.join(_image_dir, line.rstrip('\n') + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
if split != 'test':
_mask = os.path.join(_mask_dir, line.rstrip('\n') + '.png')
assert os.path.isfile(_mask)
self.masks.append(_mask)

if split != 'test':
assert (len(self.images) == len(self.masks))
print('Found {} {} images in the folder {}'.format(len(self.images), split, root))

def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
img = self._img_transform(img)
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)

return img, mask, os.path.basename(self.images[index])

def __len__(self):
return len(self.images)

def _mask_transform(self, mask):
target = np.array(mask).astype('int32')
return torch.from_numpy(target).long()

@property
def classes(self):
"""Category name."""
return ('background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe',
'rightShoe')


if __name__ == '__main__':
dataset = LIPSegmentation(base_size=280, crop_size=256)

+136 -0  segutils/core/data/dataloader/mscoco.py

@@ -0,0 +1,136 @@
"""MSCOCO Semantic Segmentation pretraining for VOC."""
import os
import pickle
import torch
import numpy as np

from tqdm import trange
from PIL import Image
from .segbase import SegmentationDataset


class COCOSegmentation(SegmentationDataset):
"""COCO Semantic Segmentation Dataset for VOC Pre-training.

Parameters
----------
root : string
Path to COCO folder. Default is '../datasets/coco'
split: string
'train', 'val' or 'test'
transform : callable, optional
A function that transforms the image
Examples
--------
>>> from torchvision import transforms
>>> import torch.utils.data as data
>>> # Transforms for Normalization
>>> input_transform = transforms.Compose([
>>> transforms.ToTensor(),
>>> transforms.Normalize((.485, .456, .406), (.229, .224, .225)),
>>> ])
>>> # Create Dataset
>>> trainset = COCOSegmentation(split='train', transform=input_transform)
>>> # Create Training Loader
>>> train_data = data.DataLoader(
>>> trainset, 4, shuffle=True,
>>> num_workers=4)
"""
CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4,
1, 64, 20, 63, 7, 72]
NUM_CLASS = 21

def __init__(self, root='../datasets/coco', split='train', mode=None, transform=None, **kwargs):
super(COCOSegmentation, self).__init__(root, split, mode, transform, **kwargs)
# lazy import pycocotools
from pycocotools.coco import COCO
from pycocotools import mask
if split == 'train':
print('train set')
ann_file = os.path.join(root, 'annotations/instances_train2017.json')
ids_file = os.path.join(root, 'annotations/train_ids.mx')
self.root = os.path.join(root, 'train2017')
else:
print('val set')
ann_file = os.path.join(root, 'annotations/instances_val2017.json')
ids_file = os.path.join(root, 'annotations/val_ids.mx')
self.root = os.path.join(root, 'val2017')
self.coco = COCO(ann_file)
self.coco_mask = mask
if os.path.exists(ids_file):
with open(ids_file, 'rb') as f:
self.ids = pickle.load(f)
else:
ids = list(self.coco.imgs.keys())
self.ids = self._preprocess(ids, ids_file)
self.transform = transform

def __getitem__(self, index):
coco = self.coco
img_id = self.ids[index]
img_metadata = coco.loadImgs(img_id)[0]
path = img_metadata['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
cocotarget = coco.loadAnns(coco.getAnnIds(imgIds=img_id))
mask = Image.fromarray(self._gen_seg_mask(
cocotarget, img_metadata['height'], img_metadata['width']))
        # synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
        return img, mask, os.path.basename(path)  # the file name; self.ids[index] is an int image id

def _mask_transform(self, mask):
return torch.LongTensor(np.array(mask).astype('int32'))

def _gen_seg_mask(self, target, h, w):
mask = np.zeros((h, w), dtype=np.uint8)
coco_mask = self.coco_mask
for instance in target:
            rle = coco_mask.frPyObjects(instance['segmentation'], h, w)
m = coco_mask.decode(rle)
cat = instance['category_id']
if cat in self.CAT_LIST:
c = self.CAT_LIST.index(cat)
else:
continue
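            # paint class c only where the mask is still 0, so instances
            # drawn earlier keep precedence over later, overlapping ones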
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask

def _preprocess(self, ids, ids_file):
print("Preprocessing mask, this will take a while." + \
"But don't worry, it only run once for each split.")
tbar = trange(len(ids))
new_ids = []
for i in tbar:
img_id = ids[i]
cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(cocotarget, img_metadata['height'], img_metadata['width'])
# more than 1k pixels
if (mask > 0).sum() > 1000:
new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'.format(
                i, len(ids), len(new_ids)))
print('Found number of qualified images: ', len(new_ids))
with open(ids_file, 'wb') as f:
pickle.dump(new_ids, f)
return new_ids

@property
def classes(self):
"""Category names."""
return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
'tv')

+104 -0  segutils/core/data/dataloader/pascal_aug.py

@@ -0,0 +1,104 @@
"""Pascal Augmented VOC Semantic Segmentation Dataset."""
import os
import torch
import scipy.io as sio
import numpy as np

from PIL import Image
from .segbase import SegmentationDataset


class VOCAugSegmentation(SegmentationDataset):
"""Pascal VOC Augmented Semantic Segmentation Dataset.

Parameters
----------
root : string
Path to VOCaug folder. Default is '../datasets/voc'
split: string
'train', 'val' or 'test'
transform : callable, optional
A function that transforms the image
Examples
--------
>>> from torchvision import transforms
>>> import torch.utils.data as data
>>> # Transforms for Normalization
>>> input_transform = transforms.Compose([
>>> transforms.ToTensor(),
>>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
>>> ])
>>> # Create Dataset
>>> trainset = VOCAugSegmentation(split='train', transform=input_transform)
>>> # Create Training Loader
>>> train_data = data.DataLoader(
>>> trainset, 4, shuffle=True,
>>> num_workers=4)
"""
BASE_DIR = 'VOCaug/dataset/'
NUM_CLASS = 21

def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs):
super(VOCAugSegmentation, self).__init__(root, split, mode, transform, **kwargs)
# train/val/test splits are pre-cut
_voc_root = os.path.join(root, self.BASE_DIR)
_mask_dir = os.path.join(_voc_root, 'cls')
_image_dir = os.path.join(_voc_root, 'img')
if split == 'train':
_split_f = os.path.join(_voc_root, 'trainval.txt')
elif split == 'val':
_split_f = os.path.join(_voc_root, 'val.txt')
else:
raise RuntimeError('Unknown dataset split: {}'.format(split))

self.images = []
self.masks = []
with open(os.path.join(_split_f), "r") as lines:
for line in lines:
_image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
_mask = os.path.join(_mask_dir, line.rstrip('\n') + ".mat")
assert os.path.isfile(_mask)
self.masks.append(_mask)

assert (len(self.images) == len(self.masks))
print('Found {} images in the folder {}'.format(len(self.images), _voc_root))

def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
target = self._load_mat(self.masks[index])
        # synchronized transform
if self.mode == 'train':
img, target = self._sync_transform(img, target)
elif self.mode == 'val':
img, target = self._val_sync_transform(img, target)
else:
raise RuntimeError('unknown mode for dataloader: {}'.format(self.mode))
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, target, os.path.basename(self.images[index])

def _mask_transform(self, mask):
return torch.LongTensor(np.array(mask).astype('int32'))

def _load_mat(self, filename):
mat = sio.loadmat(filename, mat_dtype=True, squeeze_me=True, struct_as_record=False)
mask = mat['GTcls'].Segmentation
return Image.fromarray(mask)

def __len__(self):
return len(self.images)

@property
def classes(self):
"""Category names."""
return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
'tv')


if __name__ == '__main__':
dataset = VOCAugSegmentation()

+112 -0  segutils/core/data/dataloader/pascal_voc.py

@@ -0,0 +1,112 @@
"""Pascal VOC Semantic Segmentation Dataset."""
import os
import torch
import numpy as np

from PIL import Image
from .segbase import SegmentationDataset


class VOCSegmentation(SegmentationDataset):
"""Pascal VOC Semantic Segmentation Dataset.

Parameters
----------
root : string
Path to VOCdevkit folder. Default is '../datasets/voc'
split: string
'train', 'val' or 'test'
transform : callable, optional
A function that transforms the image
Examples
--------
>>> from torchvision import transforms
>>> import torch.utils.data as data
>>> # Transforms for Normalization
>>> input_transform = transforms.Compose([
>>> transforms.ToTensor(),
>>> transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
>>> ])
>>> # Create Dataset
>>> trainset = VOCSegmentation(split='train', transform=input_transform)
>>> # Create Training Loader
>>> train_data = data.DataLoader(
>>> trainset, 4, shuffle=True,
>>> num_workers=4)
"""
BASE_DIR = 'VOC2012'
NUM_CLASS = 21

def __init__(self, root='../datasets/voc', split='train', mode=None, transform=None, **kwargs):
super(VOCSegmentation, self).__init__(root, split, mode, transform, **kwargs)
_voc_root = os.path.join(root, self.BASE_DIR)
_mask_dir = os.path.join(_voc_root, 'SegmentationClass')
_image_dir = os.path.join(_voc_root, 'JPEGImages')
# train/val/test splits are pre-cut
_splits_dir = os.path.join(_voc_root, 'ImageSets/Segmentation')
if split == 'train':
_split_f = os.path.join(_splits_dir, 'train.txt')
elif split == 'val':
_split_f = os.path.join(_splits_dir, 'val.txt')
elif split == 'test':
_split_f = os.path.join(_splits_dir, 'test.txt')
else:
raise RuntimeError('Unknown dataset split.')

self.images = []
self.masks = []
with open(os.path.join(_split_f), "r") as lines:
for line in lines:
_image = os.path.join(_image_dir, line.rstrip('\n') + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
if split != 'test':
_mask = os.path.join(_mask_dir, line.rstrip('\n') + ".png")
assert os.path.isfile(_mask)
self.masks.append(_mask)

if split != 'test':
assert (len(self.images) == len(self.masks))
print('Found {} images in the folder {}'.format(len(self.images), _voc_root))

def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
img = self._img_transform(img)
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)

return img, mask, os.path.basename(self.images[index])

def __len__(self):
return len(self.images)

def _mask_transform(self, mask):
target = np.array(mask).astype('int32')
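        # 255 marks 'void' border pixels in VOC; remap them to -1 so the
        # loss function can ignore them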
target[target == 255] = -1
return torch.from_numpy(target).long()

@property
def classes(self):
"""Category names."""
return ('background', 'airplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorcycle', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
'tv')


if __name__ == '__main__':
dataset = VOCSegmentation()

+88 -0  segutils/core/data/dataloader/sbu_shadow.py

@@ -0,0 +1,88 @@
"""SBU Shadow Segmentation Dataset."""
import os
import torch
import numpy as np

from PIL import Image
from .segbase import SegmentationDataset


class SBUSegmentation(SegmentationDataset):
"""SBU Shadow Segmentation Dataset
"""
NUM_CLASS = 2

def __init__(self, root='../datasets/sbu', split='train', mode=None, transform=None, **kwargs):
super(SBUSegmentation, self).__init__(root, split, mode, transform, **kwargs)
assert os.path.exists(self.root)
self.images, self.masks = _get_sbu_pairs(self.root, self.split)
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of:" + root + "\n")

def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
if self.mode == 'test':
if self.transform is not None:
img = self.transform(img)
return img, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
        # synchronized transform
if self.mode == 'train':
img, mask = self._sync_transform(img, mask)
elif self.mode == 'val':
img, mask = self._val_sync_transform(img, mask)
else:
assert self.mode == 'testval'
img, mask = self._img_transform(img), self._mask_transform(mask)
# general resize, normalize and toTensor
if self.transform is not None:
img = self.transform(img)
return img, mask, os.path.basename(self.images[index])

def _mask_transform(self, mask):
target = np.array(mask).astype('int32')
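        # any nonzero pixel counts as shadow: collapse the mask to binary {0, 1}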
target[target > 0] = 1
return torch.from_numpy(target).long()

def __len__(self):
return len(self.images)

@property
def pred_offset(self):
return 0


def _get_sbu_pairs(folder, split='train'):
def get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for root, _, files in os.walk(img_folder):
print(root)
for filename in files:
if filename.endswith('.jpg'):
imgpath = os.path.join(root, filename)
maskname = filename.replace('.jpg', '.png')
maskpath = os.path.join(mask_folder, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths, mask_paths

if split == 'train':
img_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowImages')
mask_folder = os.path.join(folder, 'SBUTrain4KRecoveredSmall/ShadowMasks')
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
else:
assert split in ('val', 'test')
img_folder = os.path.join(folder, 'SBU-Test/ShadowImages')
mask_folder = os.path.join(folder, 'SBU-Test/ShadowMasks')
img_paths, mask_paths = get_path_pairs(img_folder, mask_folder)
return img_paths, mask_paths


if __name__ == '__main__':
dataset = SBUSegmentation(base_size=280, crop_size=256)

+93 -0  segutils/core/data/dataloader/segbase.py

@@ -0,0 +1,93 @@
"""Base segmentation dataset"""
import random
import numpy as np

from PIL import Image, ImageOps, ImageFilter

__all__ = ['SegmentationDataset']


class SegmentationDataset(object):
"""Segmentation Base Dataset"""

def __init__(self, root, split, mode, transform, base_size=520, crop_size=480):
super(SegmentationDataset, self).__init__()
self.root = root
self.transform = transform
self.split = split
self.mode = mode if mode is not None else split
self.base_size = base_size
self.crop_size = crop_size

def _val_sync_transform(self, img, mask):
outsize = self.crop_size
short_size = outsize
w, h = img.size
if w > h:
oh = short_size
ow = int(1.0 * w * oh / h)
else:
ow = short_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = img.size
x1 = int(round((w - outsize) / 2.))
y1 = int(round((h - outsize) / 2.))
img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
# final transform
img, mask = self._img_transform(img), self._mask_transform(mask)
return img, mask

def _sync_transform(self, img, mask):
# random mirror
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = img.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# gaussian blur as in PSP
if random.random() < 0.5:
img = img.filter(ImageFilter.GaussianBlur(radius=random.random()))
# final transform
img, mask = self._img_transform(img), self._mask_transform(mask)
return img, mask

def _img_transform(self, img):
return np.array(img)

def _mask_transform(self, mask):
return np.array(mask).astype('int32')

@property
def num_class(self):
"""Number of categories."""
return self.NUM_CLASS

@property
def pred_offset(self):
return 0

+69 -0  segutils/core/data/dataloader/utils.py

@@ -0,0 +1,69 @@
import os
import hashlib
import errno
import tarfile
from six.moves import urllib
from torch.utils.model_zoo import tqdm

def gen_bar_updater():
pbar = tqdm(total=None)

def bar_update(count, block_size, total_size):
if pbar.total is None and total_size:
pbar.total = total_size
progress_bytes = count * block_size
pbar.update(progress_bytes - pbar.n)

return bar_update

def check_integrity(fpath, md5=None):
if md5 is None:
return True
if not os.path.isfile(fpath):
return False
md5o = hashlib.md5()
with open(fpath, 'rb') as f:
# read in 1MB chunks
for chunk in iter(lambda: f.read(1024 * 1024), b''):
md5o.update(chunk)
md5c = md5o.hexdigest()
if md5c != md5:
return False
return True

def makedir_exist_ok(dirpath):
    try:
        os.makedirs(dirpath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise

def download_url(url, root, filename=None, md5=None):
"""Download a file from a url and place it in root."""
root = os.path.expanduser(root)
if not filename:
filename = os.path.basename(url)
fpath = os.path.join(root, filename)

makedir_exist_ok(root)

# downloads file
if os.path.isfile(fpath) and check_integrity(fpath, md5):
print('Using downloaded and verified file: ' + fpath)
else:
try:
print('Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())
except OSError:
if url[:5] == 'https':
url = url.replace('https:', 'http:')
print('Failed download. Trying https -> http instead.'
' Downloading ' + url + ' to ' + fpath)
urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())

def download_extract(url, root, filename, md5):
download_url(url, root, filename, md5)
with tarfile.open(os.path.join(root, filename), "r") as tar:
tar.extractall(path=root)
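
A hedged usage sketch of the two helpers above (the URL and filename are placeholders, not a real artifact; passing md5=None makes check_integrity() succeed without hashing):

import os
download_extract('http://example.com/toy_dataset.tar',
                 root=os.path.expanduser('~/.torch/datasets'),
                 filename='toy_dataset.tar',
                 md5=None)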

+0 -0  segutils/core/data/downloader/__init__.py


Some files were not shown because too many files changed in this diff
