Update `lrf: 0.1`, tested on YOLOv5x6 to 55.0 mAP@0.5:0.95, slightly higher than current.
@@ -4,7 +4,7 @@
 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials
 lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
-lrf: 0.2  # final OneCycleLR learning rate (lr0 * lrf)
+lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
 momentum: 0.937  # SGD momentum/Adam beta1
 weight_decay: 0.0005  # optimizer weight decay 5e-4
 warmup_epochs: 3.0  # warmup epochs (fractions ok)