
Algorithm interaction: update the image-similarity logic

release_back
chenyukun, 2 years ago
commit 86e6e50d93
27 changed files with 548 additions and 124 deletions
  1. +1 -1      concurrency/FileUpdateThread.py
  2. +56 -17    concurrency/IntelligentRecognitionProcess.py
  3. +6 -0      dsp_application.yml
  4. BIN        test/AI.jpg
  5. +44 -0     test/ORB算法.py
  6. BIN        test/a.jpg
  7. +106 -106  test/cv2test.py
  8. +19 -0     test/cv2test1.py
  9. BIN        test/d.jpg
  10. +50 -0    test/ffmpeg2.py
  11. BIN       test/image/AI.jpg
  12. BIN       test/image/AI1.jpg
  13. BIN       test/image/AI2.jpg
  14. BIN       test/image/AI3.jpg
  15. BIN       test/image/AI4.jpg
  16. BIN       test/image/AI5.jpg
  17. BIN       test/image/AI6.jpg
  18. BIN       test/image/AI7.jpg
  19. BIN       test/image/AI8.jpg
  20. +46 -0    test/same1.py
  21. +21 -0    test/same2.py
  22. +18 -0    test/same3.py
  23. +47 -0    test/test1.py
  24. +8 -0     test/互信息.py
  25. +97 -0    test/余弦相似度计算.py
  26. +2 -0     util/Cv2Utils.py
  27. +27 -0    util/ImageUtils.py

+1 -1  concurrency/FileUpdateThread.py

@@ -3,7 +3,7 @@ from threading import Thread
 from loguru import logger
 import cv2
 from util.AliyunSdk import AliyunOssSdk
-from util import TimeUtils
+from util import TimeUtils, ImageUtils
 from entity import FeedBack
 from enums.AnalysisStatusEnum import AnalysisStatus
 import numpy as np

+56 -17  concurrency/IntelligentRecognitionProcess.py

@@ -10,7 +10,7 @@ from enums.AnalysisStatusEnum import AnalysisStatus
 from enums.AnalysisTypeEnum import AnalysisType
 from enums.ExceptionEnum import ExceptionType
 from enums.ModelTypeEnum import ModelType
-from util import LogUtils, TimeUtils, ModelUtils
+from util import LogUtils, TimeUtils, ModelUtils, ImageUtils
 from util.Cv2Utils import Cv2Util
 from entity.FeedBack import message_feedback
 from util import AliyunSdk
@@ -177,6 +177,10 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                 cmdStr = eBody.get("command")
                 # Received a stop command
                 if 'stop' == cmdStr:
+                    if high_score_image is not None and len(high_score_image) > 0:
+                        for key in list(high_score_image.keys()):
+                            self.imageQueue.put({"image": high_score_image[key]})
+                            del high_score_image[key]
                     logger.info("Live task starting to stop, requestId: {}", self.msg.get("request_id"))
                     self.stop_task(cv2tool, orFilePath, aiFilePath, AnalysisStatus.SUCCESS.value)
                     break
@@ -187,6 +191,10 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
             # Defaults to 1 hour
             pull_stream_timeout = time.time() - pull_start_time
             if pull_stream_timeout > int(self.content["service"]["cv2_read_stream_timeout"]):
+                if high_score_image is not None and len(high_score_image) > 0:
+                    for key in list(high_score_image.keys()):
+                        self.imageQueue.put({"image": high_score_image[key]})
+                        del high_score_image[key]
                 logger.info("Stream pull timed out, elapsed: {}, requestId: {}", pull_stream_timeout, self.msg.get("request_id"))
                 self.stop_task(cv2tool, orFilePath, aiFilePath, AnalysisStatus.TIMEOUT.value)
                 break
@@ -202,6 +210,10 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                 read_start_time = time.time()
             read_stream_timeout = time.time() - read_start_time
             if read_stream_timeout > int(self.content["service"]["cv2_read_stream_timeout"]):
+                if high_score_image is not None and len(high_score_image) > 0:
+                    for key in list(high_score_image.keys()):
+                        self.imageQueue.put({"image": high_score_image[key]})
+                        del high_score_image[key]
                 logger.info("Stream read timed out while running, elapsed: {}, requestId: {}", read_stream_timeout,
                             self.msg.get("request_id"))
                 self.stop_task(cv2tool, orFilePath, aiFilePath, AnalysisStatus.TIMEOUT.value)
@@ -224,8 +236,8 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
# logger.info("算法模型调度时间:{}s, requestId:{}", int(time11-time00), self.msg.get("request_id"))
# AI推流
if self.content["video"]["video_add_water"]:
frame = self.pic.common_water(frame, self.logo)
p_result[1] = self.pic.common_water(p_result[1], self.logo)
frame = self.pic.common_water_1(frame, self.logo)
p_result[1] = self.pic.common_water_1(p_result[1], self.logo)
frame_merge = cv2tool.video_merge(copy.deepcopy(frame), copy.deepcopy(p_result[1]))
try:
cv2tool.getOrVideoFile().write(frame)
@@ -254,7 +266,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                     order = str(int(ai_analyse_result[0]))
                     high_result = high_score_image.get(order)
                     conf_c = ai_analyse_result[5]
-                    if high_result is None:
+                    if high_result is None and conf_c >= float(self.content["service"]["frame_score"]):
                         high_score_image[order] = {
                             "or_frame": frame,
                             "ai_frame": p_result[1],
@@ -267,7 +279,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                             "socre": conf_c
                         }
                     else:
-                        if conf_c > high_result.get("socre"):
+                        if conf_c >= float(self.content["service"]["frame_score"]) and conf_c > high_result.get("socre"):
                             high_score_image[order] = {
                                 "or_frame": frame,
                                 "ai_frame": p_result[1],
@@ -280,9 +292,19 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                                 "socre": conf_c
                             }
             if cf % step == 0 and len(high_score_image) > 0:
-                for value in high_score_image.values():
-                    self.imageQueue.put({"image": value})
-                high_score_image.clear()
+                if self.content["service"]["filter"]["picture_similarity"]:
+                    for key in list(high_score_image.keys()):
+                        hash1 = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
+                        hash2 = ImageUtils.dHash(p_result[1])
+                        dist = ImageUtils.Hamming_distance(hash1, hash2)
+                        similarity = 1 - dist * 1.0 / 64
+                        if similarity < self.content["service"]["filter"]["similarity"]:
+                            self.imageQueue.put({"image": high_score_image[key]})
+                            del high_score_image[key]
+                else:
+                    for value in high_score_image.values():
+                        self.imageQueue.put({"image": value})
+                    high_score_image.clear()
         logger.info("Live process task finished, requestId: {}", self.msg.get("request_id"))
     except ServiceException as s:
         self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
@@ -397,6 +419,10 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                 if eBody is not None and len(eBody) > 0:
                     cmdStr = eBody.get("command")
                     if 'stop' == cmdStr:
+                        if high_score_image is not None and len(high_score_image) > 0:
+                            for key in list(high_score_image.keys()):
+                                self.imageQueue.put({"image": high_score_image[key]})
+                                del high_score_image[key]
                         logger.info("Offline task stopping analysis, requestId: {}", self.msg.get("request_id"))
                         self.stop_task(cv2tool, aiFilePath, AnalysisStatus.SUCCESS.value)
                         break
@@ -415,9 +441,13 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
             is_opened, frame = cv2tool.cap.read()
             cf = int(cv2tool.cap.get(1))
             if is_opened is None or not is_opened:
-                logger.info("Total frames: {}, current frame: {}, requestId: {}, is_opened: {}", cv2tool.cap.get(7),
+                logger.info("Total frames: {}, current frame: {}, requestId: {}, is_opened: {}", float(cv2tool.cap.get(7)),
                             cv2tool.cap.get(1), self.msg.get("request_id"), is_opened)
                 logger.info("Offline stream read finished, read time: {}", time.time() - start_read_time)
+                if high_score_image is not None and len(high_score_image) > 0:
+                    for key in list(high_score_image.keys()):
+                        self.imageQueue.put({"image": high_score_image[key]})
+                        del high_score_image[key]
                 if float(cf) < float(all_f):
                     logger.info("Offline run ended abnormally, requestId: {}", self.msg.get("request_id"))
                     self.stop_task(cv2tool, aiFilePath, AnalysisStatus.TIMEOUT.value)
@@ -465,7 +495,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                     order = str(int(ai_analyse_result[0]))
                     high_result = high_score_image.get(order)
                     conf_c = ai_analyse_result[5]
-                    if high_result is None:
+                    if high_result is None and conf_c >= float(self.content["service"]["frame_score"]):
                         high_score_image[order] = {
                             "or_frame": frame,
                             "ai_frame": p_result[1],
@@ -478,7 +508,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                             "socre": conf_c
                         }
                     else:
-                        if conf_c > high_result.get("socre"):
+                        if conf_c >= float(self.content["service"]["frame_score"]) and conf_c > high_result.get("socre"):
                             high_score_image[order] = {
                                 "or_frame": frame,
                                 "ai_frame": p_result[1],
@@ -491,10 +521,20 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
                                 "socre": conf_c
                             }
             if cf % step == 0 and len(high_score_image) > 0:
-                for value in high_score_image.values():
-                    self.imageQueue.put({"image": value})
-                hbQueue.put({"cf": cf, "af": all_f})
-                high_score_image.clear()
+                if self.content["service"]["filter"]["picture_similarity"]:
+                    for key in list(high_score_image.keys()):
+                        hash1 = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
+                        hash2 = ImageUtils.dHash(p_result[1])
+                        dist = ImageUtils.Hamming_distance(hash1, hash2)
+                        similarity = 1 - dist * 1.0 / 64
+                        if similarity < self.content["service"]["filter"]["similarity"]:
+                            self.imageQueue.put({"image": high_score_image[key]})
+                            del high_score_image[key]
+                else:
+                    for value in high_score_image.values():
+                        self.imageQueue.put({"image": value})
+                    high_score_image.clear()
+                hbQueue.put({"cf": cf, "af": all_f})
         logger.info("Offline process task finished, requestId: {}", self.msg.get("request_id"))
     except ServiceException as s:
         self.sendResult({"command": "stop_heartbeat_imageFileUpdate"})
@@ -551,8 +591,7 @@ def get_model(args):
     logger.info("code: {}, detection targets: {}, gpuId: {}", code, needed_objectsIndex, args[1])
     if code == ModelType.WATER_SURFACE_MODEL.value[1]:
         logger.info("###################### Loading the river model ######################")
-        mod, model_type_code, modelConfig = ModelUtils.SZModel(args[1], needed_objectsIndex), code, args[0].get("sz")
-        return mod, model_type_code, modelConfig
+        return ModelUtils.SZModel(args[1], needed_objectsIndex), code, args[0].get("sz")
     elif code == ModelType.FOREST_FARM_MODEL.value[1]:
         logger.info("###################### Loading the forest-farm model ######################")
         return ModelUtils.LCModel(args[1], needed_objectsIndex), code, args[0].get("lc")

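Taken together, the flush logic added above keeps one best-scoring frame per detection class and, every frame_step frames, emits only those cached frames whose dHash similarity to the current AI frame has dropped below the configured threshold; frames still similar to the current one stay cached until a later pass or until the task stops. A minimal standalone sketch of that gate, assuming dHash and Hamming_distance behave as defined in util/ImageUtils.py below (flush_dissimilar is a hypothetical name, not part of the commit):

from util import ImageUtils

def flush_dissimilar(high_score_image, current_ai_frame, image_queue, threshold):
    # Hash the current frame once; the committed loop recomputes it for every
    # cached key, so hoisting it out is a cheap follow-up optimization.
    current_hash = ImageUtils.dHash(current_ai_frame)
    for key in list(high_score_image.keys()):
        cached_hash = ImageUtils.dHash(high_score_image[key].get("ai_frame"))
        dist = ImageUtils.Hamming_distance(cached_hash, current_hash)
        similarity = 1 - dist / 64  # the difference hash has 8x8 = 64 bits
        if similarity < threshold:
            # Dissimilar enough: emit the cached frame and drop it from the cache
            image_queue.put({"image": high_score_image[key]})
            del high_score_image[key]
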
+6 -0  dsp_application.yml

@@ -100,6 +100,12 @@ aliyun:
   ecsRegionId: "cn-shanghai"
 service:
   frame_step: 300 # number of frames between two captured analysis images
+  frame_score: 0.4 # keep only images at or above this minimum score
+  filter:
+    # whether similarity-based image filtering is enabled
+    picture_similarity: True
+    # similarity threshold
+    similarity: 0.65
   timeout: 21600 # timeout of one recognition task, in seconds; default 6 hours
   cv2_pull_stream_timeout: 3600 # timeout when a live stream has not started pushing
   cv2_read_stream_timeout: 1800 # timeout while reading a live stream

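For reference, a minimal sketch of reading the new keys, assuming the file is loaded with PyYAML into the same content dict that the process code indexes:

import yaml

with open("dsp_application.yml", encoding="utf-8") as f:
    content = yaml.safe_load(f)

service = content["service"]
min_score = float(service["frame_score"])               # 0.4: keep detections at or above this score
if service["filter"]["picture_similarity"]:             # similarity filtering switch
    threshold = float(service["filter"]["similarity"])  # 0.65: emit frames less similar than this
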
BIN  test/AI.jpg
Width: 1920  |  Height: 1080  |  Size: 477KB

+44 -0  test/ORB算法.py

@@ -0,0 +1,44 @@

# Custom function to compute the similarity of two images
import cv2


def img_similarity(img1_path, img2_path):
    """
    :param img1_path: path to image 1
    :param img2_path: path to image 2
    :return: image similarity
    """
    try:
        # Read both images in grayscale
        img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
        img2 = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)

        # Initialize the ORB detector and compute keypoints/descriptors
        orb = cv2.ORB_create()
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)

        # Brute-force matcher with Hamming distance
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)

        # kNN matching
        matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)

        # Keep the matches that pass Lowe's ratio test
        good = [m for (m, n) in matches if m.distance < 0.75 * n.distance]
        similary = float(len(good)) / len(matches)
        print("(ORB) similarity of the two images: %s" % similary)
        return similary

    except Exception:
        print('Could not compute the similarity of the two images')
        return 0


if __name__ == '__main__':
    name1 = 'image/AI.jpg'
    name2 = 'image/AI3.jpg'
    # similary lies in [0, 1]; 1 means identical
    similary = img_similarity(name1, name2)
    print(similary)

BIN  test/a.jpg
Width: 640  |  Height: 640  |  Size: 48KB

+106 -106  test/cv2test.py

@@ -1,107 +1,107 @@
(This commit comments out the whole scratch script: every line is removed and re-added with a leading "# ". The previously active content was:)

import cv2, os
import time
import subprocess as sp

# Image merging
# def readImage():
#     p1 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/a.jpg")
#     p2 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/b.jpg")
#     ret = cv2.add(p1, p2)
#     cv2.imwrite(r"C:\Users\chenyukun\Pictures\Camera Roll\aa.jpg", ret)
#
# readImage()
# https://opencv.apachecn.org/#/docs/4.0.0/2.1-tutorial_py_image_display
if __name__ == "__main__":
    # print(cv2.__version__)
    # # Read images
    # p1 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/a.jpg", 0)  # load as grayscale
    # p2 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/b.jpg", 1)  # cv.IMREAD_COLOR: load a color image, ignore any transparency; the default flag
    # p3 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/b.jpg", -1)  # load the image including the alpha channel
    # p4 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/a.jpg")
    # p5 = cv2.imread("C:/Users/chenyukun/Pictures/Camera Roll/c.jpg")
    # print(type(p5))
    # ret = cv2.add(p4, p5)
    # # Show the images
    # cv2.imshow('frame', p1)
    # cv2.imshow('frame1', p2)
    # cv2.imshow('frame2', p3)
    # # Window sizing: WINDOW_AUTOSIZE is fixed, WINDOW_NORMAL is resizable
    # cv2.namedWindow('frame3', cv2.WINDOW_AUTOSIZE)
    # cv2.imshow('frame3', ret)
    # # Wait for a key press
    # k = cv2.waitKey(1) & 0xFF
    # if k == 27:  # ESC quits
    #     cv2.destroyAllWindows()
    # elif k == ord('s'):  # 's' saves and quits
    #     # Save the image
    #     cv2.imwrite("C:/Users/chenyukun/Pictures/Camera Roll/d.jpg", ret)
    #     cv2.destroyAllWindows()

    # Video basics
    try:
        cap = cv2.VideoCapture("rtmp://live.play.t-aaron.com/live/THSAf_hd")  # 0 would connect a camera
        print(cap.isOpened())
        print(cap)
    except Exception as e:
        print(e)
        raise e
    print("aaaa")
    # Sometimes cap fails to initialize the capture and this code errors out.
    # Check initialization with cap.isOpened(); if it is False, call cap.open() before use.
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print(fps, width, height)
    # # Set the width
    # print(cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320))
    # # Set the height
    # print(cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240))

    # Declare the codec and create the VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter('/home/DATA/dsp/ai/video/eee.mp4', fourcc, fps, (width, height))
    command = ['/usr/bin/ffmpeg',
               '-y',  # overwrite output files
               '-f', 'rawvideo',  # force the input format
               '-vcodec', 'rawvideo',
               '-pix_fmt', 'bgr24',
               '-s', "{}x{}".format(640, 480),  # frame resolution
               '-r', str(25.0),  # video frame rate
               '-i', '-',
               '-c:v', 'libx264',
               '-pix_fmt', 'yuv420p',
               '-preset', 'ultrafast',
               '-f', 'flv',
               "rtmp://live.push.t-aaron.com/live/THSAb"]
    # p = sp.Popen(command, stdin=sp.PIPE)
    start = time.time()
    while True:

        try:
            if not cap.isOpened():
                cap = cv2.VideoCapture("rtmp://live.play.t-aaron.com/live/THSAf_hd")
                continue
            # Capture frame by frame
            ret, frame = cap.read()  # returns a bool; True when the frame loaded successfully
            if not ret:
                cap = cv2.VideoCapture("rtmp://live.play.t-aaron.com/live/THSAf_hd")
                continue
            # print(ret)  # True
            # # Frame processing would go here
            # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            end = time.time()
            print("bbbbbbbbb")
            # p.stdin.write(frame.tostring())
            out.write(frame)
        except Exception as e:
            raise e
    # # Show each returned frame
    # cv2.imshow('frame', gray)
    # if cv2.waitKey(1) & 0xFF == ord('q'):
    #     break
    # When everything is done, release the VideoCapture object
    cap.release()
    out.release()
    cv2.destroyAllWindows()

+19 -0  test/cv2test1.py

@@ -0,0 +1,19 @@
import cv2

if __name__ == "__main__":
    # Convert the images to grayscale first.
    img = cv2.imread('image/AI6.jpg')
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    template = cv2.imread('image/AI.jpg')
    template_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    h, w = template.shape[:2]

    # Template matching
    result = cv2.matchTemplate(img_gray, template_gray, cv2.TM_CCOEFF_NORMED)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    print(min_val, max_val, min_loc, max_loc)




BIN  test/d.jpg
Width: 640  |  Height: 640  |  Size: 69KB

+50 -0  test/ffmpeg2.py

@@ -0,0 +1,50 @@
import subprocess as sp
from PIL import Image
import time
import cv2
import oss2
import numpy as np

# Stream pushing
if __name__ == "__main__":

    cap = cv2.VideoCapture(r"https://vod.play.t-aaron.com/customerTrans/14d44756fa6d37db17008d98bdee3558/18ac4fa7-18369b0e703-0004-f90c-f2c-7ec68.mp4")

    # Get video information
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    print(fps)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    print(width)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print(height)
    # ffmpeg command
    # command = ['D:/百度下载/ffmpeg-20200831-4a11a6f-win64-static/bin/ffmpeg.exe',
    #            '-y',  # overwrite any same-named output file without confirmation
    #            '-f', 'rawvideo',
    #            '-vcodec', 'rawvideo',
    #            '-pix_fmt', 'bgr24',
    #            '-s', "{}x{}".format(width, height),
    #            # '-s', "{}x{}".format(1280, 720),
    #            '-i', '-',  # the input (here stdin)
    #            '-c:v', 'libx264',  # video encoder
    #            '-pix_fmt', 'yuv420p',
    #            '-r', '15',
    #            '-g', '15',
    #            "-an",
    #            '-b:v', '3000k',
    #            '-preset', 'ultrafast',  # output quality preset; affects encoding speed; available values:
    #            # ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow
    #            '-f', 'flv',
    #            "rtmp://live.push.t-aaron.com/live/THSAk"]

    # # Pipe setup
    # p = sp.Popen(command, stdin=sp.PIPE, shell=False)
    while cap.isOpened():
        start = time.time()
        # ret, frame = cap.read()
        cap.grab()
        print(time.time() - start)
        # if not ret:
        #     print("Opening camera is failed")
        #     break
        # p.stdin.write(frame.tostring())


BIN  test/image/AI.jpg
Width: 1920  |  Height: 1080  |  Size: 477KB

BIN  test/image/AI1.jpg
Width: 1920  |  Height: 1080  |  Size: 462KB

BIN  test/image/AI2.jpg
Width: 1920  |  Height: 1080  |  Size: 436KB

BIN  test/image/AI3.jpg
Width: 1920  |  Height: 1080  |  Size: 417KB

BIN  test/image/AI4.jpg
Width: 1920  |  Height: 1080  |  Size: 422KB

BIN  test/image/AI5.jpg
Width: 1920  |  Height: 1080  |  Size: 423KB

BIN  test/image/AI6.jpg
Width: 1920  |  Height: 1080  |  Size: 414KB

BIN  test/image/AI7.jpg
Width: 1920  |  Height: 1080  |  Size: 411KB

BIN  test/image/AI8.jpg
Width: 1920  |  Height: 1080  |  Size: 430KB

+46 -0  test/same1.py

@@ -0,0 +1,46 @@
import cv2
import os
from matplotlib import pyplot as plt

def FLANN():

targetPath = 'AI.jpg'
trainingImage = cv2.imread(targetPath, flags=0)

templatePath = 'image/'
icons = os.listdir(templatePath)
iconMatch= dict({'name': '未识别', 'value': 0})
for icon in icons:
queryImage = cv2.imread(templatePath + icon, 0)

sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)

indexParams = dict(algorithm=0, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams,searchParams)
matches = flann.knnMatch(des1,des2,k=2)

matchesMask = [[0,0] for i in range(len(matches))]
matchNumber = 0

for i,(m,n) in enumerate(matches):
if m.distance < 0.7 * n.distance:
matchesMask[i] = [1,0]
matchNumber = matchNumber+1

drawParams = dict(matchColor = (0,255,0), matchesMask = matchesMask[:50], flags = 0)
resultImage = cv2.drawMatchesKnn(queryImage,kp1,trainingImage,kp2,matches[:50],None,**drawParams)

if matchNumber > iconMatch['value']:
iconMatch['name'] = icon.split('_')[0]
iconMatch['value'] = matchNumber

return resultImage, iconMatch

if __name__ == '__main__':
resultImage, res = FLANN()
# plt.imshow(resultImage)
# plt.show()
print(resultImage, res)

+21 -0  test/same2.py

@@ -0,0 +1,21 @@
import cv2

target = cv2.imread("image/AI.jpg")

template = cv2.imread("image/AI1.jpg")

theight, twidth = template.shape[:2]

result = cv2.matchTemplate(target,template,cv2.TM_CCOEFF_NORMED)
print(result)
cv2.normalize( result, result, 0, 1, cv2.NORM_MINMAX, -1 )

min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print(min_val, max_val, max_loc)
strmin_val = str(min_val)

cv2.rectangle(target,min_loc,(min_loc[0]+twidth,min_loc[1]+theight),(0,0,225),2)

# cv2.imshow("MatchResult----MatchingValue="+strmin_val,target)
# cv2.waitKey()
# cv2.destroyAllWindows()

+18 -0  test/same3.py

@@ -0,0 +1,18 @@
import os
import cv2
# from skimage.measure import compare_ssim
# from skimage.metrics import _structural_similarity
from skimage.metrics import structural_similarity as ssim


if __name__ == '__main__':
img00 = cv2.imread('image/AI.jpg')
img10 = cv2.imread('image/AI3.jpg')
img0 = cv2.cvtColor(img00,cv2.COLOR_BGR2GRAY) # 将图像转换为灰度图
img1 = cv2.cvtColor(img10,cv2.COLOR_BGR2GRAY) # 将图像转换为灰度图
#进行结构性相似度判断
# ssim_value = _structural_similarity.structural_similarity(img,img1,multichannel=True)
score= ssim(img0, img1)
print(score)



+47 -0  test/test1.py

@@ -0,0 +1,47 @@
import subprocess as sp
from PIL import Image
import time
import cv2
import oss2
import numpy as np
# 推流
if __name__== "__main__":

cap = cv2.VideoCapture("/home/DATA/chenyukun/DJI_20211229100908_0001_S.mp4")

# Get video information
fps = int(cap.get(cv2.CAP_PROP_FPS))
print(fps)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(width)
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(height)
# ffmpeg command
command = ['/usr/bin/ffmpeg',
'-y', # 不经过确认,输出时直接覆盖同名文件。
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'bgr24',
# '-s', "{}x{}".format(self.width * 2, self.height),
'-s', "{}x{}".format(width, height),
'-r', str(15),
'-i', '-', # 指定输入文件
'-g', '25',
'-b:v', '3000k',
'-c:v', 'libx264', # 指定视频编码器
'-pix_fmt', 'yuv420p',
'-preset', 'ultrafast', # 指定输出的视频质量,会影响文件的生成速度,有以下几个可用的值 ultrafast,
# superfast, veryfast, faster, fast, medium, slow, slower, veryslow。
'-f', 'flv',
"rtmp://live.push.t-aaron.com/live/THSAe"]

# 管道配置
p = sp.Popen(command, stdin=sp.PIPE, shell=False)
while(cap.isOpened()):
ret, frame = cap.read()
if not ret:
print("Opening camera is failed")
break
time.sleep(0.03)
p.stdin.write(frame.tostring())


+8 -0  test/互信息.py

@@ -0,0 +1,8 @@
from PIL import Image
from pixelmatch.contrib.PIL import pixelmatch
img_a = Image.open("image/AI.jpg")
img_b = Image.open("image/AI3.jpg")
img_diff = Image.new("RGBA", img_a.size)
# note how there is no need to specify dimensions
mismatch = pixelmatch(img_a, img_b, img_diff, includeAA=True)
print(mismatch)

+97 -0  test/余弦相似度计算.py

@@ -0,0 +1,97 @@
## -*- coding: utf-8 -*-
# !/usr/bin/env python
# 余弦相似度计算
import time

import cv2
from PIL import Image
from numpy import average, dot, linalg
# 对图片进行统一化处理
def get_thum(image, size=(64, 64), greyscale=False):
# 利用image对图像大小重新设置, Image.ANTIALIAS为高质量的
image = image.resize(size, Image.ANTIALIAS)
if greyscale:
# 将图片转换为L模式,其为灰度图,其每个像素用8个bit表示
image = image.convert('L')
return image
# 计算图片的余弦距离
def image_similarity_vectors_via_numpy(image1, image2):
image1 = get_thum(image1)
image2 = get_thum(image2)
images = [image1, image2]
vectors = []
norms = []
for image in images:
vector = []
for pixel_tuple in image.getdata():
vector.append(average(pixel_tuple))
vectors.append(vector)
# linalg=linear(线性)+algebra(代数),norm则表示范数
# 求图片的范数
norms.append(linalg.norm(vector, 2))
a, b = vectors
a_norm, b_norm = norms
# dot返回的是点积,对二维数组(矩阵)进行计算
res = dot(a / a_norm, b / b_norm)
return res


#差值感知算法
def dHash(image):
#缩放9*8
image=cv2.resize(image,(9,8),interpolation=cv2.INTER_CUBIC)
#转换灰度图
image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# print(image.shape)
hash=[]
#每行前一个像素大于后一个像素为1,相反为0,生成哈希
for i in range(8):
for j in range(8):
if image[i,j]>image[i,j+1]:
hash.append(1)
else:
hash.append(0)
return hash

#计算汉明距离
def Hamming_distance(hash1,hash2):
num = 0
for index in range(len(hash1)):
if hash1[index] != hash2[index]:
num += 1
return num

if __name__ == '__main__':
# 把图片表示成一个向量,通过计算向量之间的余弦距离来表征两张图片的相似度 0.9760
image1 = cv2.imread('image/AI.jpg')
image2 = cv2.imread('image/AI1.jpg')
image3 = cv2.imread('image/AI2.jpg')
image4 = cv2.imread('image/AI3.jpg')
image5 = cv2.imread('image/AI4.jpg')
image6 = cv2.imread('a.jpg')
image7 = cv2.imread('AI.jpg')
hash1 = dHash(image1)
hash2 = dHash(image2)
hash3 = dHash(image3)
hash4 = dHash(image4)
hash5 = dHash(image5)
hash6 = dHash(image6)
hash7 = dHash(image7)
start = time.time()
dist = Hamming_distance(hash6, hash7)
#将距离转化为相似度
similarity = 1 - dist * 1.0 / 64
print(dist)
print(similarity, time.time() - start)
# cosin = image_similarity_vectors_via_numpy(image1, image2)
# print('图片余弦相似度', cosin)
# cosin1 = image_similarity_vectors_via_numpy(image1, image3)
# print('图片余弦相似度', cosin1)
# cosin2 = image_similarity_vectors_via_numpy(image1, image4)
# print('图片余弦相似度', cosin2)
# cosin3 = image_similarity_vectors_via_numpy(image1, image5)
# print('图片余弦相似度', cosin3)
# cosin4 = image_similarity_vectors_via_numpy(image5, image6)
# print('图片余弦相似度', cosin4)
# cosin5 = image_similarity_vectors_via_numpy(image1, image6)
# print('图片余弦相似度', cosin5)

+2 -0  util/Cv2Utils.py

@@ -79,6 +79,7 @@ class Cv2Util():
                    '-b:v', '3000k',
                    '-c:v', 'libx264',  # video encoder
                    '-pix_fmt', 'yuv420p',
+                   '-an',  # drop audio from the pushed stream
                    '-preset', 'ultrafast',  # output quality preset; affects encoding speed; available values:
                    # ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow
                    '-f', 'flv',
@@ -118,6 +119,7 @@ class Cv2Util():
                    '-b:v', '3000k',
                    '-c:v', 'libx264',  # video encoder
                    '-pix_fmt', 'yuv420p',
+                   "-an",  # drop audio from the pushed stream
                    '-preset', 'ultrafast',  # output quality preset; affects encoding speed; available values:
                    # ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow
                    '-f', 'flv',

+27 -0  util/ImageUtils.py

@@ -152,6 +152,33 @@ class PictureWaterMark():
         dst_img = cv2.merge(dst_channels)
         return dst_img

+
+# Difference-hash (dHash) perceptual algorithm
+def dHash(image):
+    # Scale down to 9x8
+    image = cv2.resize(image, (9, 8), interpolation=cv2.INTER_CUBIC)
+    # Convert to grayscale
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    hash = []
+    # Per row, append 1 when a pixel is brighter than its right neighbour, else 0
+    for i in range(8):
+        for j in range(8):
+            if image[i, j] > image[i, j + 1]:
+                hash.append(1)
+            else:
+                hash.append(0)
+    return hash
+
+
+# Hamming distance between two hashes
+def Hamming_distance(hash1, hash2):
+    num = 0
+    for index in range(len(hash1)):
+        if hash1[index] != hash2[index]:
+            num += 1
+    return num


 if __name__ == '__main__':
     # img = cv2.imread("../test/a.jpg", -1)
     # fontcolor = 'yellow'

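A short usage sketch of the two new module-level helpers (the image paths are placeholders; 64 is the bit length of the 8x8 difference hash):

import cv2
from util import ImageUtils

img_a = cv2.imread("test/image/AI.jpg")  # placeholder inputs
img_b = cv2.imread("test/image/AI3.jpg")
dist = ImageUtils.Hamming_distance(ImageUtils.dHash(img_a), ImageUtils.dHash(img_b))
similarity = 1 - dist / 64  # e.g. 16 differing bits -> similarity 0.75
print(dist, similarity)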