* YouTube livestream detection * dependency update to auto install pafy * Remove print * include youtube_dl in deps * PEP8 reformat * YouTube URL check fix * reduce lines * add comment * update check_requirements * stream framerate fix * Update README.md * cleanup * PEP8 * remove cap.retrieve() failure code Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>5.0
@@ -92,9 +92,8 @@ $ python detect.py --source 0 # webcam | |||
file.mp4 # video | |||
path/ # directory | |||
path/*.jpg # glob | |||
rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa # rtsp stream | |||
rtmp://192.168.1.105/live/test # rtmp stream | |||
http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8 # http stream | |||
'https://youtu.be/NUsoVlDFqZg' # YouTube video | |||
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream | |||
``` | |||
To run inference on example images in `data/images`: |
@@ -19,7 +19,7 @@ def detect(save_img=False): | |||
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size | |||
save_img = not opt.nosave and not source.endswith('.txt') # save inference images | |||
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( | |||
('rtsp://', 'rtmp://', 'http://')) | |||
('rtsp://', 'rtmp://', 'http://', 'https://')) | |||
# Directories | |||
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run |
@@ -20,8 +20,8 @@ from PIL import Image, ExifTags | |||
from torch.utils.data import Dataset | |||
from tqdm import tqdm | |||
from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ | |||
clean_str | |||
from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ | |||
resample_segments, clean_str | |||
from utils.torch_utils import torch_distributed_zero_first | |||
# Parameters | |||
@@ -275,14 +275,20 @@ class LoadStreams: # multiple IP or RTSP cameras | |||
for i, s in enumerate(sources): | |||
# Start the thread to read frames from the video stream | |||
print(f'{i + 1}/{n}: {s}... ', end='') | |||
cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) | |||
url = eval(s) if s.isnumeric() else s | |||
if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video | |||
check_requirements(('pafy', 'youtube_dl')) | |||
import pafy | |||
url = pafy.new(url).getbest(preftype="mp4").url | |||
cap = cv2.VideoCapture(url) | |||
assert cap.isOpened(), f'Failed to open {s}' | |||
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) | |||
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) | |||
fps = cap.get(cv2.CAP_PROP_FPS) % 100 | |||
self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 | |||
_, self.imgs[i] = cap.read() # guarantee first frame | |||
thread = Thread(target=self.update, args=([i, cap]), daemon=True) | |||
print(f' success ({w}x{h} at {fps:.2f} FPS).') | |||
print(f' success ({w}x{h} at {self.fps:.2f} FPS).') | |||
thread.start() | |||
print('') # newline | |||
@@ -303,7 +309,7 @@ class LoadStreams: # multiple IP or RTSP cameras | |||
success, im = cap.retrieve() | |||
self.imgs[index] = im if success else self.imgs[index] * 0 | |||
n = 0 | |||
time.sleep(0.01) # wait time | |||
time.sleep(1 / self.fps) # wait time | |||
def __iter__(self): | |||
self.count = -1 | |||
@@ -444,7 +450,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing | |||
gb += self.imgs[i].nbytes | |||
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' | |||
pbar.close() | |||
def cache_labels(self, path=Path('./labels.cache'), prefix=''): | |||
# Cache dataset labels, check images and read shapes | |||
x = {} # dict | |||
@@ -489,7 +495,7 @@ class LoadImagesAndLabels(Dataset): # for training/testing | |||
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ | |||
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" | |||
pbar.close() | |||
if nf == 0: | |||
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') | |||
@@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ | |||
b[[1, 3]] = np.clip(b[[1, 3]], 0, h) | |||
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' | |||
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): | |||
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files | |||
Usage: from utils.datasets import *; autosplit('../coco128') |
@@ -91,17 +91,20 @@ def check_git_status(): | |||
print(e) | |||
def check_requirements(file='requirements.txt', exclude=()): | |||
# Check installed dependencies meet requirements | |||
def check_requirements(requirements='requirements.txt', exclude=()): | |||
# Check installed dependencies meet requirements (pass *.txt file or list of packages) | |||
import pkg_resources as pkg | |||
prefix = colorstr('red', 'bold', 'requirements:') | |||
file = Path(file) | |||
if not file.exists(): | |||
print(f"{prefix} {file.resolve()} not found, check failed.") | |||
return | |||
if isinstance(requirements, (str, Path)): # requirements.txt file | |||
file = Path(requirements) | |||
if not file.exists(): | |||
print(f"{prefix} {file.resolve()} not found, check failed.") | |||
return | |||
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] | |||
else: # list or tuple of packages | |||
requirements = [x for x in requirements if x not in exclude] | |||
n = 0 # number of packages updates | |||
requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] | |||
for r in requirements: | |||
try: | |||
pkg.require(r) | |||
@@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()): | |||
print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) | |||
if n: # if packages updated | |||
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ | |||
source = file.resolve() if 'file' in locals() else requirements | |||
s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ | |||
f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" | |||
print(emojis(s)) # emoji-safe | |||