* Update download script headers
* cleanup
* bug fix attempt
* bug fix attempt2
* bug fix attempt3
* cleanup
* modifyDataloader
@@ -1,7 +1,12 @@
 #!/bin/bash
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
 # Download latest models from https://github.com/ultralytics/yolov5/releases
-# Usage:
-#    $ bash path/to/download_weights.sh
+# YOLOv5 🚀 example usage: bash path/to/download_weights.sh
+# parent
+# └── yolov5
+#     ├── yolov5s.pt  ← downloads here
+#     ├── yolov5m.pt
+#     └── ...

 python - <<EOF
 from utils.google_utils import attempt_download
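The hunk cuts off at the first line of the Python heredoc. For orientation, a sketch of how `attempt_download` is typically driven from such a heredoc; the loop and the size-suffix list below are illustrative, not part of this diff:

```python
from utils.google_utils import attempt_download

for size in ['s', 'm', 'l', 'x']:  # assumed YOLOv5 model-size suffixes
    attempt_download(f'yolov5{size}.pt')  # no-op if the weights file already exists
```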
@@ -1,11 +1,11 @@
 #!/bin/bash
-# COCO 2017 dataset http://cocodataset.org
-# Download command: bash data/scripts/get_coco.sh
-# Train command: python train.py --data coco.yaml
-# Default dataset location is next to YOLOv5:
-#   /parent_folder
-#     /coco
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO 2017 dataset http://cocodataset.org
+# YOLOv5 🚀 example usage: bash data/scripts/get_coco.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco  ← downloads here

 # Download/unzip labels
 d='../datasets' # unzip directory
@@ -1,14 +1,14 @@
 #!/bin/bash
-# COCO128 dataset https://www.kaggle.com/ultralytics/coco128
-# Download command: bash data/scripts/get_coco128.sh
-# Train command: python train.py --data coco128.yaml
-# Default dataset location is next to /yolov5:
-#   /parent_folder
-#     /coco128
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
+# YOLOv5 🚀 example usage: bash data/scripts/get_coco128.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco128  ← downloads here

 # Download/unzip images and labels
-d='../' # unzip directory
+d='../datasets' # unzip directory
 url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
 f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB
 echo 'Downloading' $url$f ' ...'
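Both dataset scripts now unzip into a `datasets/` folder that sits beside the yolov5 repo rather than mixing datasets into its parent. A quick sanity check of the layout the new headers describe, as a sketch (paths assumed from the headers above):

```python
from pathlib import Path

# Assumed: run from the yolov5/ repo root after `bash data/scripts/get_coco128.sh`
coco128 = Path('..') / 'datasets' / 'coco128'
print(coco128.resolve(), coco128.is_dir())  # True once the download has finished
```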
@@ -78,8 +78,9 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     plots = not evolve  # create plots
     cuda = device.type != 'cpu'
     init_seeds(1 + RANK)
-    with open(data) as f:
-        data_dict = yaml.safe_load(f)  # data dict
+    with open(data, encoding='ascii', errors='ignore') as f:
+        data_dict = yaml.safe_load(f)
     nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
     names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
     assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
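The recurring `open(..., encoding='ascii', errors='ignore')` change in this hunk and the ones below pins the file encoding instead of trusting the locale default. A minimal sketch of the failure mode it guards against; `load_yaml` is a hypothetical helper, not YOLOv5 API:

```python
import yaml

def load_yaml(path):  # hypothetical helper
    # Without an explicit encoding, open() uses the locale default (e.g. cp1252
    # on Windows), so non-ASCII bytes in a data.yaml comment (such as the 🚀
    # emoji in these headers) can raise UnicodeDecodeError on read.
    # errors='ignore' silently drops any non-ASCII bytes instead.
    with open(path, encoding='ascii', errors='ignore') as f:
        return yaml.safe_load(f)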
@@ -60,11 +60,11 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640):
     print('')  # newline


-def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
     """ Creates kmeans-evolved anchors from training dataset

         Arguments:
-            path: path to dataset *.yaml, or a loaded dataset
+            dataset: path to data.yaml, or a loaded dataset
             n: number of anchors
             img_size: image size used for training
             thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
@@ -103,13 +103,11 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10
             print('%i,%i' % (round(x[0]), round(x[1])), end=',  ' if i < len(k) - 1 else '\n')  # use in *.cfg
         return k

-    if isinstance(path, str):  # *.yaml file
-        with open(path) as f:
+    if isinstance(dataset, str):  # *.yaml file
+        with open(dataset, encoding='ascii', errors='ignore') as f:
             data_dict = yaml.safe_load(f)  # model dict
         from utils.datasets import LoadImagesAndLabels
         dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
-    else:
-        dataset = path  # dataset

     # Get label wh
     shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
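With the parameter renamed from `path` to `dataset` and the `else` branch dropped, the same argument now carries both call styles. An illustrative usage, assuming the new signature above (the training path passed to `LoadImagesAndLabels` is an example value):

```python
from utils.autoanchor import kmean_anchors
from utils.datasets import LoadImagesAndLabels

# 1) pass a data.yaml path; kmean_anchors loads the dataset itself
k = kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0)

# 2) or pass an already-constructed dataset, skipping the YAML load
train_set = LoadImagesAndLabels('../datasets/coco128/images/train2017', augment=True, rect=True)
k = kmean_anchors(dataset=train_set, n=9, img_size=640, thr=4.0)
```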
@@ -909,7 +909,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False):
             return False, None, path

     zipped, data_dir, yaml_path = unzip(Path(path))
-    with open(check_file(yaml_path)) as f:
+    with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)  # data dict
     if zipped:
         data['path'] = data_dir  # TODO: should this be dir.resolve()?
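Per the signature in the hunk header, `dataset_stats` accepts either a dataset YAML or, via `unzip()` above, a zip of one. A hypothetical call, with keyword values assumed for illustration:

```python
from utils.datasets import dataset_stats

stats = dataset_stats('coco128.yaml', autodownload=True, verbose=True)
```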
@@ -8,9 +8,9 @@ WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'


 def create_dataset_artifact(opt):
-    with open(opt.data) as f:
+    with open(opt.data, encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)  # data dict
-    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
+    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')  # TODO: return value unused


 if __name__ == '__main__':
@@ -62,7 +62,7 @@ def check_wandb_resume(opt):

 def process_wandb_config_ddp_mode(opt):
-    with open(check_file(opt.data)) as f:
+    with open(check_file(opt.data), encoding='ascii', errors='ignore') as f:
         data_dict = yaml.safe_load(f)  # data dict
     train_dir, val_dir = None, None
     if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
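The last context line is the artifact-vs-local-path test used throughout these W&B helpers. Traced on a sketch value (the artifact string is assumed, for illustration):

```python
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

train = 'wandb-artifact://entity/project/coco128:latest'  # assumed example value
if isinstance(train, str) and train.startswith(WANDB_ARTIFACT_PREFIX):
    artifact_path = train[len(WANDB_ARTIFACT_PREFIX):]  # 'entity/project/coco128:latest'
```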
@@ -150,7 +150,7 @@ class WandbLogger():
                                                         opt.single_cls,
                                                         'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
             print("Created dataset config file ", config_path)
-            with open(config_path) as f:
+            with open(config_path, encoding='ascii', errors='ignore') as f:
                 wandb_data_dict = yaml.safe_load(f)
             return wandb_data_dict
@@ -226,7 +226,7 @@ class WandbLogger():
                 print("Saving model artifact on epoch ", epoch + 1)

     def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
-        with open(data_file) as f:
+        with open(data_file, encoding='ascii', errors='ignore') as f:
             data = yaml.safe_load(f)  # data dict
         check_dataset(data)
         nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
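The `(nc, names)` ternary in the last context line collapses every class to a single 'item' class when `single_cls` is set; traced on a sketch input (the data dict values are assumed):

```python
data = {'nc': 3, 'names': ['person', 'bicycle', 'car']}  # assumed example dict

for single_cls in (True, False):
    nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
    print(nc, names)  # -> 1 ['item']   then   3 ['person', 'bicycle', 'car']
```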
@@ -123,7 +123,7 @@ def run(data,
         # model = nn.DataParallel(model)

         # Data
-        with open(data) as f:
+        with open(data, encoding='ascii', errors='ignore') as f:
             data = yaml.safe_load(f)
         check_dataset(data)  # check