- import asyncio
- import copy
- import json
- import os
- import time
- from concurrent.futures import ThreadPoolExecutor
- from multiprocessing import Queue, Process
-
- from loguru import logger
- import subprocess as sp
-
- import cv2
- import numpy as np
- from aip import AipImageClassify
- import sys
- from enums.BaiduSdkEnum import BAIDUERRORDATA, VehicleEnumVALUE
- from enums.ExceptionEnum import ExceptionType
- from enums.ModelTypeEnum import ModelType
- from exception.CustomerException import ServiceException
- from util.ModelUtils import Model
-
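- # Dev/test harness (a sketch of the intended flow): probe a local video with ffprobe,
- # pull raw BGR frames from it through an ffmpeg pipe, run the forest-farm and vehicle
- # models (and optionally Baidu vehicle detection) on each frame, and write the original
- # and processed frames side by side into an output video for visual comparison.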
-
- def get_recording_video_info(url):
-     """Probe a video with ffprobe and return (width, height, nb_frames, fps)."""
-     try:
-         video_info = 'ffprobe -show_format -show_streams -of json %s' % url
-         p = sp.Popen(video_info, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
-         out, err = p.communicate(timeout=17)
-         if p.returncode != 0:
-             raise Exception("Failed to retrieve video info!")
-         probe = json.loads(out.decode('utf-8'))
-         if probe is None or probe.get("streams") is None:
-             raise Exception("Failed to retrieve video info!")
-         video_stream = next((stream for stream in probe['streams'] if stream.get('codec_type') == 'video'), None)
-         if video_stream is None:
-             raise Exception("Failed to retrieve video info!")
-         width = video_stream.get('width')
-         height = video_stream.get('height')
-         # ffprobe reports nb_frames as a string; convert it so it can be compared numerically later
-         nb_frames = int(video_stream.get('nb_frames', 0))
-         # r_frame_rate is a fraction such as "25/1"
-         up, down = str(video_stream.get('r_frame_rate')).split('/')
-         fps = int(int(up) / int(down))
-         return width, height, nb_frames, fps
-     except Exception as e:
-         raise e
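- # For example (hypothetical values), get_recording_video_info("/path/to/video.mp4")
- # might return (1920, 1080, 1500, 25) for a 1080p clip with 1500 frames at 25 fps.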
-
- client = AipImageClassify(str(31096670), 'Dam3O4tgPRN3qh4OYE82dbg7', '1PGZ9LAXRR5zcT5MN9rHcW8kLBIS5DAa')
-
-
- def vehicleDetect(client, image, options=None):
-     """Call the Baidu vehicle-detection API, retrying on errors according to BAIDUERRORDATA."""
-     if options is None:
-         options = {}
-     reply_num = 0
-     reply_value = 10  # maximum number of retries
-     while True:
-         try:
-             options["show"] = "true"
-             res_image = client.vehicleDetect(image, options)
-             error_code = res_image.get("error_code")
-             if error_code:
-                 enum = BAIDUERRORDATA.get(error_code)
-                 # If the error code is unknown, return None
-                 if enum is None:
-                     logger.error("Baidu Cloud vehicle detection error! error_code:{}", error_code)
-                     return None
-                 # If still failing after the allowed retries, raise the unified internal exception
-                 if enum.value[3] == 0:
-                     logger.error("Baidu Cloud vehicle detection error! error_code:{}, error_msg:{}, reply_num:{}", enum.value[0], enum.value[2], reply_num)
-                     raise Exception()
-                 # If still failing after the allowed retries, raise the corresponding exception
-                 if enum.value[3] == 1:
-                     raise ServiceException(str(enum.value[0]), enum.value[2])
-                 # If still failing after the allowed retries, return None
-                 if enum.value[3] == 2:
-                     if reply_num >= reply_value:
-                         return None
-                     raise Exception()
-             return res_image
-         except ServiceException as s:
-             time.sleep(0.2)
-             reply_num += 1
-             if reply_num > reply_value:
-                 logger.exception("Vehicle detection failed: {}", s.msg)
-                 raise ServiceException(s.code, s.msg)
-         except Exception as e:
-             logger.exception("Vehicle detection failed: {}, current retry count: {}", e, reply_num)
-             time.sleep(0.2)
-             reply_num += 1
-             if reply_num > reply_value:
-                 logger.exception("Vehicle detection failed: {}", e)
-                 raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
-                                        ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
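- # Typical usage (sketch, mirroring baidu_handler below):
- #   ok, buf = cv2.imencode(".jpg", frame)
- #   res = vehicleDetect(client, buf)
- # `res` is the Baidu SDK response dict, or None when the error policy says to give up.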
-
- def mark(content, info, img, color):
-     score = info.get("probability")
-     if score is None:
-         score = info.get("location").get("score")
-     text = "%s: %.2f" % (content, score)
-     text_xy = (info.get("location").get("left"), info.get("location").get("top") - 25)
-     img_lu = (info.get("location").get("left"), info.get("location").get("top"))
-     img_rd = (info.get("location").get("left") + info.get("location").get("width"),
-               info.get("location").get("top") + info.get("location").get("height"))
-     cv2.putText(img, text, text_xy, cv2.FONT_HERSHEY_SIMPLEX, 1.0, color, 2, cv2.LINE_AA)
-     count = 1
-     if img.shape[1] > 1600:
-         count = 2
-     cv2.rectangle(img, img_lu, img_rd, color, count)
-     return img
-
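- # The async wrappers below appear to exist so that model inference and the Baidu call can
- # be awaited together via asyncio.gather (see the commented-out run_until_complete calls
- # in the main loop); each simply wraps a blocking call.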
- async def mode_handler(img, width):
-     return senlin_mod.process(copy.deepcopy(img), width)
-
- async def modprocess(img, width):
-     p_result, timeOut = await mode_handler(img, width)
-     return p_result, timeOut
-
-
- async def car_handler(img, width):
-     return car_mod.process(copy.deepcopy(img), width)
-
- async def carprocess(img, width):
-     p_result, timeOut = await car_handler(img, width)
-     return p_result, timeOut
-
-
- async def baidu_handler(img, client):
-     or_result, or_image = cv2.imencode(".jpg", img)
-     return vehicleDetect(client, or_image)
-
- async def baiduprocess(img, client):
-     result = await baidu_handler(img, client)
-     return result
-
-
-
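- # Main test flow: probe the input video, load the forest-farm and vehicle models on an
- # available GPU, stream raw BGR frames out of ffmpeg, run inference per frame, and write
- # the original and processed frames side by side to aaa2.mp4.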
- url = '/home/th/tuo_heng/dev/11.mp4'
- width, height, nb_frames, fps = get_recording_video_info(url)
-
- current_path = os.path.abspath(os.path.dirname(__file__))
- import GPUtil
- senlin_mod = Model(str(GPUtil.getAvailable()[0]), [2, 3, 4], logger, "11112", ModelType.FOREST_FARM_MODEL)
- car_mod = Model(str(GPUtil.getAvailable()[0]), [0], logger, "11112", ModelType.VEHICLE_MODEL)
- # The output video is twice the source width so the original and processed frames fit side by side
- or_video_file = cv2.VideoWriter("aaa2.mp4", cv2.VideoWriter_fourcc(*'mp4v'), fps,
-                                 (int(width) * 2, int(height)))
-
- # Decode the source in real time into raw BGR frames on stdout
- command = 'ffmpeg -re -y -i ' + url + ' -f rawvideo -pix_fmt bgr24 -an -'
- pull_p = sp.Popen(command, stdout=sp.PIPE, shell=True)
- num = 0
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- try:
-     while True:
-         print(num, nb_frames)
-         in_bytes = pull_p.stdout.read(width * height * 3)
-         if in_bytes is not None and len(in_bytes) > 0:
-             img = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
-             # r = loop.run_until_complete(asyncio.gather(modprocess(img, width), carprocess(img, width)))
-             p_result, timeOut = senlin_mod.process(copy.deepcopy(img), width)
-             p_result1, timeOut1 = car_mod.process(copy.deepcopy(p_result[1]), width)
-             # r = loop.run_until_complete(asyncio.gather(modprocess(img, width), baiduprocess(img, client)))
-             # p_result, timeOut = r[0]
-             # result = r[1]
-             # p_result, timeOut = senlin_mod.process(copy.deepcopy(img), width)
-
-             # if result is not None:
-             #     vehicleInfo = result.get("vehicle_info")
-             #     if vehicleInfo is not None and len(vehicleInfo) > 0:
-             #         for i, info in enumerate(vehicleInfo):
-             #             value = VehicleEnumVALUE.get(info.get("type"))
-             #             if value is None:
-             #                 logger.error("Vehicle recognition returned an unsupported target type! type:{}", info.get("type"))
-             #                 raise ServiceException(ExceptionType.SERVICE_INNER_EXCEPTION.value[0],
-             #                                        ExceptionType.SERVICE_INNER_EXCEPTION.value[1])
-             #             p_result[1] = mark(value.value[1], info, p_result[1], (255, 0, 255))
-             frame_merge = np.hstack((img, p_result1[1]))
-             or_video_file.write(frame_merge)
-             num += 1
-         else:
-             # An empty read means the ffmpeg pipe is exhausted; stop instead of spinning forever
-             break
- finally:
-     or_video_file.release()
-     pull_p.terminate()
-     pull_p.wait()
-
-
-
-