
Update TorchScript suffix to `*.torchscript` (#5856)

modify Dataloader
Glenn Jocher (GitHub) · 2 years ago · commit 00e308f7be
5 changed files with 20 additions and 20 deletions:
  1. detect.py (+4, -4)
  2. export.py (+3, -3)
  3. models/common.py (+6, -6)
  4. utils/activations.py (+2, -2)
  5. val.py (+5, -5)

detect.py (+4, -4)

@@ -81,18 +81,18 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s)
     imgsz = check_img_size(imgsz, s=stride) # check image size

     # Half
-    half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
-    if pt:
+    half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
+    if pt or jit:
         model.model.half() if half else model.model.float()

     # Dataloader
     if webcam:
         view_img = check_imshow()
         cudnn.benchmark = True # set True to speed up constant image size inference
-        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
         bs = len(dataset) # batch_size
     else:
-        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
+        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
         bs = 1 # batch_size
     vid_path, vid_writer = [None] * bs, [None] * bs

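With `.torchscript` split out as its own suffix, `jit` becomes a separate backend flag, so the half-precision check and the model cast are gated on `pt or jit`, and the dataloaders can pass `auto=pt` directly (the traced model is exported at a fixed input shape, so auto letterbox padding stays off for it). A minimal standalone sketch of that gating, using a hypothetical `half_supported` helper rather than the repo's code:

from pathlib import Path

def half_supported(weights_path, cuda, half=True):
    # Hypothetical helper: mirrors the new check in detect.py above.
    suffix = Path(weights_path).suffix.lower()
    pt, jit, engine = (suffix == s for s in ('.pt', '.torchscript', '.engine'))
    half &= (pt or jit or engine) and cuda  # FP16 only for PyTorch/TorchScript/TensorRT on CUDA
    return half

print(half_supported('yolov5s.torchscript', cuda=True))  # True
print(half_supported('yolov5s.onnx', cuda=True))         # False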

export.py (+3, -3)

@@ -5,7 +5,7 @@ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by h
 Format                 | Example                 | Export `include=(...)` argument
 ---                    | ---                     | ---
 PyTorch                | yolov5s.pt              | -
-TorchScript            | yolov5s.torchscript.pt  | 'torchscript'
+TorchScript            | yolov5s.torchscript     | 'torchscript'
 ONNX                   | yolov5s.onnx            | 'onnx'
 CoreML                 | yolov5s.mlmodel         | 'coreml'
 TensorFlow SavedModel  | yolov5s_saved_model/    | 'saved_model'
@@ -19,7 +19,7 @@ Usage:

 Inference:
     $ python path/to/detect.py --weights yolov5s.pt
-                                         yolov5s.torchscript.pt
+                                         yolov5s.torchscript
                                          yolov5s.onnx
                                          yolov5s.mlmodel (under development)
                                          yolov5s_saved_model
@@ -66,7 +66,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:'
     # YOLOv5 TorchScript model export
     try:
         LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
-        f = file.with_suffix('.torchscript.pt')
+        f = file.with_suffix('.torchscript')

         ts = torch.jit.trace(model, im, strict=False)
         d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}

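`export_torchscript` now writes the bare `*.torchscript` file, and the metadata dict `d` (input shape, stride, class names) travels with the traced module so it can be recovered at load time, as the `jit` branch of `DetectMultiBackend` below does. The hunk does not show how `d` is attached; assuming the standard `torch.jit` extra-files mechanism, a self-contained sketch of that round trip:

import json
import torch
import torch.nn as nn

model = nn.Conv2d(3, 8, 3)              # stand-in for the YOLOv5 model
im = torch.zeros(1, 3, 640, 640)        # example input used for tracing
ts = torch.jit.trace(model, im, strict=False)

d = {"shape": list(im.shape), "stride": 32, "names": ["person", "car"]}  # illustrative values
ts.save("example.torchscript", _extra_files={"config.txt": json.dumps(d)})

# At inference time the metadata can be read back while loading:
extra_files = {"config.txt": ""}
loaded = torch.jit.load("example.torchscript", _extra_files=extra_files)
meta = json.loads(extra_files["config.txt"])
stride, names = int(meta["stride"]), meta["names"]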
models/common.py (+6, -6)

@@ -279,7 +279,7 @@ class DetectMultiBackend(nn.Module):
     def __init__(self, weights='yolov5s.pt', device=None, dnn=True):
         # Usage:
         #   PyTorch:      weights = *.pt
-        #   TorchScript:            *.torchscript.pt
+        #   TorchScript:            *.torchscript
         #   CoreML:                 *.mlmodel
         #   TensorFlow:             *_saved_model
         #   TensorFlow:             *.pb
@@ -289,10 +289,10 @@
         #   TensorRT:               *.engine
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
-        suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
+        suffix = Path(w).suffix.lower()
+        suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']
         check_suffix(w, suffixes) # check weights have acceptable suffix
-        pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans
-        jit = pt and 'torchscript' in w.lower()
+        pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans
         stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults
@@ -304,10 +304,10 @@
             stride, names = int(d['stride']), d['names']
         elif pt: # PyTorch
             from models.experimental import attempt_load # scoped to avoid circular import
-            model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)
+            model = attempt_load(weights, map_location=device)
             stride = int(model.stride.max()) # model stride
             names = model.module.names if hasattr(model, 'module') else model.names # get class names
-        elif coreml: # CoreML *.mlmodel
+        elif coreml: # CoreML
             import coremltools as ct
             model = ct.models.MLModel(w)
         elif dnn: # ONNX OpenCV DNN

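Backend selection in `DetectMultiBackend` is now a pure suffix match: each filename suffix maps to exactly one boolean, and the old `'torchscript' in w` substring check goes away. A standalone restatement of that mapping, for illustration only:

from pathlib import Path

SUFFIXES = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']

def backend_flags(w):
    # One boolean per suffix; a SavedModel directory has no suffix, hence the '' entry.
    suffix = Path(w).suffix.lower()
    pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in SUFFIXES)
    return dict(pt=pt, jit=jit, onnx=onnx, engine=engine,
                tflite=tflite, pb=pb, saved_model=saved_model, coreml=coreml)

print(backend_flags('yolov5s.pt'))           # pt=True, everything else False
print(backend_flags('yolov5s.torchscript'))  # jit=True without any substring check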
utils/activations.py (+2, -2)

@@ -18,8 +18,8 @@ class SiLU(nn.Module): # export-friendly version of nn.SiLU()
 class Hardswish(nn.Module): # export-friendly version of nn.Hardswish()
     @staticmethod
     def forward(x):
-        # return x * F.hardsigmoid(x) # for torchscript and CoreML
-        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for torchscript, CoreML and ONNX
+        # return x * F.hardsigmoid(x) # for TorchScript and CoreML
+        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX


 # Mish https://github.com/digantamisra98/Mish --------------------------------------------------------------------------

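Per the comments in the hunk above, the commented-out `F.hardsigmoid` form is marked as TorchScript/CoreML-only, while the `hardtanh` expression also exports to ONNX; the two forms are numerically identical. A quick check (illustrative, not part of the repo):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.linspace(-5, 5, steps=101)
export_friendly = x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # hardswish(x) = x * relu6(x + 3) / 6
print(torch.allclose(export_friendly, nn.Hardswish()(x)))  # True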
val.py (+5, -5)

@@ -111,7 +111,7 @@ def run(data,
     # Initialize/load model and set device
     training = model is not None
     if training: # called by train.py
-        device, pt, engine = next(model.parameters()).device, True, False # get model device, PyTorch model
+        device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model

         half &= device.type != 'cpu' # half precision only supported on CUDA
         model.half() if half else model.float()
@@ -124,10 +124,10 @@

         # Load model
         model = DetectMultiBackend(weights, device=device, dnn=dnn)
-        stride, pt, engine = model.stride, model.pt, model.engine
+        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
         imgsz = check_img_size(imgsz, s=stride) # check image size
-        half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
-        if pt:
+        half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA
+        if pt or jit:
             model.model.half() if half else model.model.float()
         elif engine:
             batch_size = model.batch_size
@@ -166,7 +166,7 @@
     pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
     for batch_i, (im, targets, paths, shapes) in enumerate(pbar):
         t1 = time_sync()
-        if pt or engine:
+        if pt or jit or engine:
             im = im.to(device, non_blocking=True)
             targets = targets.to(device)
         im = im.half() if half else im.float() # uint8 to fp16/32

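In the validation loop, the widened `pt or jit or engine` test decides whether each batch is moved onto the model's device before the uint8 to fp16/fp32 cast; only the PyTorch, TorchScript and TensorRT backends move the batch to the device here. A minimal sketch of that preprocessing with made-up inputs:

import torch

im = torch.randint(0, 256, (4, 3, 640, 640), dtype=torch.uint8)  # fake image batch
pt, jit, engine = False, True, False                             # e.g. a *.torchscript model
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
half = device.type != 'cpu'                                      # fp16 only on CUDA, mirroring the check above

if pt or jit or engine:
    im = im.to(device, non_blocking=True)
im = im.half() if half else im.float()  # uint8 to fp16/32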