Suppress `torch` AMP-CPU warnings (#6706)
This is a torch bug, but they seem unable or unwilling to fix it, so I'm creating a suppression in YOLOv5. Resolves https://github.com/ultralytics/yolov5/issues/6692
This commit is contained in:
parent
de9c25b35e
commit
4de8b24881
|
|
@ -9,6 +9,7 @@ import os
|
||||||
import platform
|
import platform
|
||||||
import subprocess
|
import subprocess
|
||||||
import time
|
import time
|
||||||
|
import warnings
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
@ -25,6 +26,9 @@ try:
|
||||||
except ImportError:
|
except ImportError:
|
||||||
thop = None
|
thop = None
|
||||||
|
|
||||||
|
# Suppress PyTorch warnings
|
||||||
|
warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling')
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
|
@contextmanager
|
||||||
def torch_distributed_zero_first(local_rank: int):
|
def torch_distributed_zero_first(local_rank: int):
|
||||||
|
|
@ -293,13 +297,9 @@ class EarlyStopping:
|
||||||
|
|
||||||
|
|
||||||
class ModelEMA:
|
class ModelEMA:
|
||||||
""" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
|
""" Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models
|
||||||
Keep a moving average of everything in the model state_dict (parameters and buffers).
|
Keeps a moving average of everything in the model state_dict (parameters and buffers)
|
||||||
This is intended to allow functionality like
|
For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
|
||||||
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
|
|
||||||
A smoothed version of the weights is necessary for some training schemes to perform well.
|
|
||||||
This class is sensitive where it is initialized in the sequence of model init,
|
|
||||||
GPU assignment and distributed training wrappers.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, model, decay=0.9999, updates=0):
|
def __init__(self, model, decay=0.9999, updates=0):
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue