
Update script headers (#4163)

* Update download script headers

* cleanup

* bug fix attempt

* bug fix attempt2

* bug fix attempt3

* cleanup
Glenn Jocher · 3 years ago · commit 0ad6301c96
9 changed files with 36 additions and 32 deletions:

  1. data/scripts/download_weights.sh    (+7 -2)
  2. data/scripts/get_coco.sh            (+7 -7)
  3. data/scripts/get_coco128.sh         (+8 -8)
  4. train.py                            (+3 -2)
  5. utils/autoanchor.py                 (+4 -6)
  6. utils/datasets.py                   (+1 -1)
  7. utils/loggers/wandb/log_dataset.py  (+2 -2)
  8. utils/loggers/wandb/wandb_utils.py  (+3 -3)
  9. val.py                              (+1 -1)

data/scripts/download_weights.sh  (+7 -2)

 #!/bin/bash
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
 # Download latest models from https://github.com/ultralytics/yolov5/releases
-# Usage:
-#    $ bash path/to/download_weights.sh
+# YOLOv5 🚀 example usage: bash path/to/download_weights.sh
+# parent
+# └── yolov5
+#     ├── yolov5s.pt  ← downloads here
+#     ├── yolov5m.pt
+#     └── ...

 python - <<EOF
 from utils.google_utils import attempt_download
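
The hunk cuts off before the heredoc body. For context, the script feeds a short Python program to `python -` that calls attempt_download once per release model; a minimal sketch of that body, assuming the standard yolov5s/m/l/x model list (the exact list is not shown in this diff):

# Sketch of the heredoc body passed to `python -`; the model list is an
# assumption, not taken from this diff. attempt_download fetches the file
# from the GitHub releases if it is not already present locally.
from utils.google_utils import attempt_download

for size in ['s', 'm', 'l', 'x']:
    attempt_download(f'yolov5{size}.pt')  # yolov5s.pt, yolov5m.pt, ...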

data/scripts/get_coco.sh  (+7 -7)

 #!/bin/bash
-# COCO 2017 dataset http://cocodataset.org
-# Download command: bash data/scripts/get_coco.sh
-# Train command: python train.py --data coco.yaml
-# Default dataset location is next to YOLOv5:
-#   /parent_folder
-#     /coco
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO 2017 dataset http://cocodataset.org
+# YOLOv5 🚀 example usage: bash data/scripts/get_coco.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco  ← downloads here

 # Download/unzip labels
 d='../datasets'  # unzip directory

data/scripts/get_coco128.sh  (+8 -8)

 #!/bin/bash
-# COCO128 dataset https://www.kaggle.com/ultralytics/coco128
-# Download command: bash data/scripts/get_coco128.sh
-# Train command: python train.py --data coco128.yaml
-# Default dataset location is next to /yolov5:
-#   /parent_folder
-#     /coco128
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
+# YOLOv5 🚀 example usage: bash data/scripts/get_coco128.sh
+# parent
+# ├── yolov5
+# └── datasets
+#     └── coco128  ← downloads here

 # Download/unzip images and labels
-d='../'  # unzip directory
+d='../datasets'  # unzip directory
 url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
 f='coco128.zip'  # or 'coco2017labels-segments.zip', 68 MB
 echo 'Downloading' $url$f ' ...'
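
The new header documents the parent/datasets layout that the d='../' → d='../datasets' change actually implements: archives now extract into a shared datasets folder next to the yolov5 clone. The curl/unzip tail of the script is truncated above; a rough Python equivalent of that download-and-extract step, for readers following along (a sketch under those assumptions, not the verbatim script):

# Approximate Python equivalent of the script's download/unzip step.
# The real script uses curl + unzip; this mirrors its effect.
import urllib.request
import zipfile
from pathlib import Path

d = Path('../datasets')  # unzip directory, matching d='../datasets'
url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/'
f = 'coco128.zip'

d.mkdir(parents=True, exist_ok=True)
print('Downloading', url + f, '...')
urllib.request.urlretrieve(url + f, f)  # fetch the archive
with zipfile.ZipFile(f) as zf:
    zf.extractall(d)  # the zip contains a top-level coco128/ folder
Path(f).unlink()  # delete the archive after extraction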

train.py  (+3 -2)

     plots = not evolve  # create plots
     cuda = device.type != 'cpu'
     init_seeds(1 + RANK)
-    with open(data) as f:
-        data_dict = yaml.safe_load(f)  # data dict
+    with open(data, encoding='ascii', errors='ignore') as f:
+        data_dict = yaml.safe_load(f)

     nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
     names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
     assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}'  # check
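
This is the first of several identical changes: every yaml.safe_load call site now opens its file with encoding='ascii', errors='ignore'. The motivation is that the updated headers (and files derived from them) contain non-ASCII characters such as 🚀 and ←, while Python's default open() encoding is platform-dependent; forcing ASCII and ignoring errors drops those bytes instead of raising UnicodeDecodeError on machines with a non-UTF-8 locale. A minimal demonstration of the behaviour (the file name here is hypothetical):

# Shows the decode behaviour the diff relies on; 'demo.yaml' is hypothetical.
import yaml

with open('demo.yaml', 'w', encoding='utf-8') as f:
    f.write('# YOLOv5 🚀 dataset config\nnc: 80\n')

with open('demo.yaml', encoding='ascii', errors='ignore') as f:
    data_dict = yaml.safe_load(f)  # emoji bytes are dropped, parsing succeeds

print(data_dict)  # -> {'nc': 80}

Note that errors='ignore' also strips non-ASCII characters from actual values (e.g. class names), not just comments, so it trades strictness for robustness.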

utils/autoanchor.py  (+4 -6)

     print('')  # newline


-def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
     """ Creates kmeans-evolved anchors from training dataset

         Arguments:
-            path: path to dataset *.yaml, or a loaded dataset
+            dataset: path to data.yaml, or a loaded dataset
             n: number of anchors
             img_size: image size used for training
             thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0

...

             print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
         return k

-    if isinstance(path, str):  # *.yaml file
-        with open(path) as f:
+    if isinstance(dataset, str):  # *.yaml file
+        with open(dataset, encoding='ascii', errors='ignore') as f:
             data_dict = yaml.safe_load(f)  # model dict
         from utils.datasets import LoadImagesAndLabels
         dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
-    else:
-        dataset = path  # dataset

     # Get label wh
     shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
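
Renaming path to dataset also lets the else: dataset = path branch disappear: when a string is passed, the parameter is rebound to the loaded LoadImagesAndLabels instance, and when a dataset object is passed it is already the right thing. Both call styles keep working; a usage sketch (values illustrative):

# Usage sketch for the renamed signature; values are illustrative.
from utils.autoanchor import kmean_anchors

# 1) Pass a data YAML path; kmean_anchors builds the dataset itself.
anchors = kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0)

# 2) Pass an already-constructed dataset, e.g. a training run's dataset object.
# anchors = kmean_anchors(dataset=train_loader.dataset, n=9, img_size=640, thr=4.0)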

utils/datasets.py  (+1 -1)

         return False, None, path

     zipped, data_dir, yaml_path = unzip(Path(path))
-    with open(check_file(yaml_path)) as f:
+    with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)  # data dict
     if zipped:
         data['path'] = data_dir  # TODO: should this be dir.resolve()?

utils/loggers/wandb/log_dataset.py  (+2 -2)





 def create_dataset_artifact(opt):
-    with open(opt.data) as f:
+    with open(opt.data, encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)  # data dict
-    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
+    logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')  # TODO: return value unused


 if __name__ == '__main__':

utils/loggers/wandb/wandb_utils.py  (+3 -3)





 def process_wandb_config_ddp_mode(opt):
-    with open(check_file(opt.data)) as f:
+    with open(check_file(opt.data), encoding='ascii', errors='ignore') as f:
         data_dict = yaml.safe_load(f)  # data dict
     train_dir, val_dir = None, None
     if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):

...

                                                 opt.single_cls,
                                                 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
         print("Created dataset config file ", config_path)
-        with open(config_path) as f:
+        with open(config_path, encoding='ascii', errors='ignore') as f:
             wandb_data_dict = yaml.safe_load(f)
         return wandb_data_dict

...

         print("Saving model artifact on epoch ", epoch + 1)

     def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
-        with open(data_file) as f:
+        with open(data_file, encoding='ascii', errors='ignore') as f:
             data = yaml.safe_load(f)  # data dict
         check_dataset(data)
         nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
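
After this commit the open(..., encoding='ascii', errors='ignore') + yaml.safe_load pair appears in five different files. A natural follow-up would be to factor it into a single helper; a hypothetical sketch of what that could look like (yaml_load is not part of this commit):

# Hypothetical helper, NOT introduced by this commit: one ASCII-safe YAML read.
import yaml

def yaml_load(path):
    """Load a YAML file, dropping any non-ASCII bytes (e.g. the 🚀 header)."""
    with open(path, encoding='ascii', errors='ignore') as f:
        return yaml.safe_load(f)

# e.g. data_dict = yaml_load(opt.data)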

val.py  (+1 -1)

     # model = nn.DataParallel(model)

     # Data
-    with open(data) as f:
+    with open(data, encoding='ascii', errors='ignore') as f:
         data = yaml.safe_load(f)
     check_dataset(data)  # check


