You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

123 lines
4.5KB

  1. # Model validation metrics
  2. from pathlib import Path
  3. import matplotlib.pyplot as plt
  4. import numpy as np
  5. def fitness(x):
  6. # Model fitness as a weighted combination of metrics
  7. w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
  8. return (x[:, :4] * w).sum(1)
  9. def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
  10. """ Compute the average precision, given the recall and precision curves.
  11. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
  12. # Arguments
  13. tp: True positives (nparray, nx1 or nx10).
  14. conf: Objectness value from 0-1 (nparray).
  15. pred_cls: Predicted object classes (nparray).
  16. target_cls: True object classes (nparray).
  17. plot: Plot precision-recall curve at mAP@0.5
  18. save_dir: Plot save directory
  19. # Returns
  20. The average precision as computed in py-faster-rcnn.
  21. """
  22. # Sort by objectness
  23. i = np.argsort(-conf)
  24. tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
  25. # Find unique classes
  26. unique_classes = np.unique(target_cls)
  27. # Create Precision-Recall curve and compute AP for each class
  28. px, py = np.linspace(0, 1, 1000), [] # for plotting
  29. pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
  30. s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
  31. ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
  32. for ci, c in enumerate(unique_classes):
  33. i = pred_cls == c
  34. n_l = (target_cls == c).sum() # number of labels
  35. n_p = i.sum() # number of predictions
  36. if n_p == 0 or n_l == 0:
  37. continue
  38. else:
  39. # Accumulate FPs and TPs
  40. fpc = (1 - tp[i]).cumsum(0)
  41. tpc = tp[i].cumsum(0)
  42. # Recall
  43. recall = tpc / (n_l + 1e-16) # recall curve
  44. r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
  45. # Precision
  46. precision = tpc / (tpc + fpc) # precision curve
  47. p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
  48. # AP from recall-precision curve
  49. for j in range(tp.shape[1]):
  50. ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
  51. if j == 0:
  52. py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
  53. # Compute F1 score (harmonic mean of precision and recall)
  54. f1 = 2 * p * r / (p + r + 1e-16)
  55. if plot:
  56. plot_pr_curve(px, py, ap, save_dir, names)
  57. return p, r, ap, f1, unique_classes.astype('int32')
  58. def compute_ap(recall, precision):
  59. """ Compute the average precision, given the recall and precision curves.
  60. Source: https://github.com/rbgirshick/py-faster-rcnn.
  61. # Arguments
  62. recall: The recall curve (list).
  63. precision: The precision curve (list).
  64. # Returns
  65. The average precision as computed in py-faster-rcnn.
  66. """
  67. # Append sentinel values to beginning and end
  68. mrec = recall # np.concatenate(([0.], recall, [recall[-1] + 1E-3]))
  69. mpre = precision # np.concatenate(([0.], precision, [0.]))
  70. # Compute the precision envelope
  71. mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
  72. # Integrate area under curve
  73. method = 'interp' # methods: 'continuous', 'interp'
  74. if method == 'interp':
  75. x = np.linspace(0, 1, 101) # 101-point interp (COCO)
  76. ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
  77. else: # 'continuous'
  78. i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
  79. ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
  80. return ap, mpre, mrec
  81. def plot_pr_curve(px, py, ap, save_dir='.', names=()):
  82. fig, ax = plt.subplots(1, 1, figsize=(9, 6))
  83. py = np.stack(py, axis=1)
  84. if 0 < len(names) < 21: # show mAP in legend if < 10 classes
  85. for i, y in enumerate(py.T):
  86. ax.plot(px, y, linewidth=1, label=f'{names[i]} %.3f' % ap[i, 0]) # plot(recall, precision)
  87. else:
  88. ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision)
  89. ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
  90. ax.set_xlabel('Recall')
  91. ax.set_ylabel('Precision')
  92. ax.set_xlim(0, 1)
  93. ax.set_ylim(0, 1)
  94. plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
  95. fig.tight_layout()
  96. fig.savefig(Path(save_dir) / 'precision_recall_curve.png', dpi=250)