@@ -21,14 +21,13 @@ def notebook_init(verbose=True): | |||
if is_colab(): | |||
shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory | |||
# System info | |||
if verbose: | |||
# System info | |||
# gb = 1 / 1000 ** 3 # bytes to GB | |||
gib = 1 / 1024 ** 3 # bytes to GiB | |||
gb = 1 << 30 # bytes to GiB (1024 ** 3) | |||
ram = psutil.virtual_memory().total | |||
total, used, free = shutil.disk_usage("/") | |||
display.clear_output() | |||
s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)' | |||
s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' | |||
else: | |||
s = '' | |||
@@ -34,11 +34,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): | |||
LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') | |||
return batch_size | |||
gb = 1 << 30 # bytes to GiB (1024 ** 3) | |||
d = str(device).upper() # 'CUDA:0' | |||
properties = torch.cuda.get_device_properties(device) # device properties | |||
t = properties.total_memory / 1024 ** 3 # (GiB) | |||
r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) | |||
a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) | |||
t = properties.total_memory / gb # (GiB) | |||
r = torch.cuda.memory_reserved(device) / gb # (GiB) | |||
a = torch.cuda.memory_allocated(device) / gb # (GiB) | |||
f = t - (r + a) # free inside reserved | |||
LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') | |||
@@ -223,11 +223,12 @@ def emojis(str=''): | |||
def file_size(path): | |||
# Return file/dir size (MB) | |||
mb = 1 << 20 # bytes to MiB (1024 ** 2) | |||
path = Path(path) | |||
if path.is_file(): | |||
return path.stat().st_size / 1E6 | |||
return path.stat().st_size / mb | |||
elif path.is_dir(): | |||
return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 | |||
return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb | |||
else: | |||
return 0.0 | |||
@@ -86,7 +86,7 @@ def select_device(device='', batch_size=0, newline=True): | |||
space = ' ' * (len(s) + 1) | |||
for i, d in enumerate(devices): | |||
p = torch.cuda.get_device_properties(i) | |||
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB | |||
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MiB | |||
else: | |||
s += 'CPU\n' | |||