TensorRT转化代码 (TensorRT conversion code)

# Loss functions
import torch
import torch.nn as nn

from utils.metrics import bbox_iou
from utils.torch_utils import is_parallel


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # Return positive, negative label-smoothing BCE targets.
    # Label smoothing (https://wenku.baidu.com/view/27fdf1deadf8941ea76e58fafab069dc51224773.html):
    # a small fraction of training labels may be wrong, which hurts prediction quality, so assume
    # during training that labels can be erroneous and avoid over-confidence; with cross-entropy
    # this is simple to implement and is known as label smoothing.
    return 1.0 - 0.5 * eps, 0.5 * eps
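
# For example, smooth_BCE(eps=0.1) returns (0.95, 0.05): positive targets are trained
# toward 0.95 instead of 1.0 and negative targets toward 0.05 instead of 0.0.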


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing-label effects.
    # nn.BCEWithLogitsLoss combines the sigmoid operation and BCELoss (binary cross-entropy) in one class.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        # Scale the loss by alpha_factor to tolerate objects that are present in the image
        # but missing from the labels (false negatives)
        loss *= alpha_factor
        return loss.mean()
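
# Behavior of the blur factor above: a confident prediction on an unlabeled object gives
# dx = pred - true ≈ 1, so alpha_factor = 1 - exp(0 / (alpha + 1e-4)) ≈ 0 and the loss is
# suppressed; for dx well below 1 the exponential vanishes and the loss passes through unchanged.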


class FocalLoss(nn.Module):
    # Wraps focal loss around an existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    # Focal loss addresses the severe positive/negative sample imbalance of one-stage detectors by
    # down-weighting the many easy negatives during training.
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
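
# Minimal usage sketch (illustrative, with assumed shapes; not part of the original file):
#   criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
#   logits = torch.randn(8, 80)       # hypothetical (batch, num_classes) raw logits
#   labels = torch.zeros(8, 80)       # multi-label targets in [0, 1]
#   loss = criterion(logits, labels)  # scalar; the wrapped loss's original 'mean' reduction is kept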


class QFocalLoss(nn.Module):
    # Wraps quality focal loss around an existing loss_fcn(), i.e. criteria = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(QFocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        pred_prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = torch.abs(true - pred_prob) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
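
# Compared with FocalLoss, the modulating factor here is |true - p|^gamma rather than
# (1 - p_t)^gamma, so it also accepts soft quality targets in [0, 1] instead of only hard
# {0, 1} labels (Generalized Focal Loss, https://arxiv.org/abs/2006.04388).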


class ComputeLoss:
    # Compute losses: total loss = classification loss + objectness (confidence) loss + box regression loss
    def __init__(self, model, autobalance=False):
        super(ComputeLoss, self).__init__()
        self.sort_obj_iou = False
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria for the class and objectness scores;
        # the positive weights 'cls_pw' and 'obj_pw' come from hyp.scratch.yaml
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss, applied only when gamma > 0
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            # note: upstream YOLOv5 wraps with FocalLoss here; this variant uses QFocalLoss
            BCEcls, BCEobj = QFocalLoss(BCEcls, g), QFocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        # Per-feature-map objectness loss weights: the 80x80, 40x40 and 20x20 maps each get their
        # own coefficient, and the 80x80 map is weighted highest
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))
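
    # Hyperparameters consumed by this class (all defined in hyp.scratch.yaml): cls_pw / obj_pw
    # (BCE positive weights), label_smoothing, fl_gamma, anchor_t (anchor-match ratio threshold
    # used in build_targets), and the box / obj / cls loss gains applied in __call__.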

    def __call__(self, p, targets):  # predictions, targets; __call__ lets the instance be invoked directly as instance(p, targets)
        # p is the network output; targets holds the labeled boxes of every image in this batch
        device = targets.device  # get device (CUDA when training on GPU)
        # Initialize the loss components: class loss, box regression loss, objectness (confidence) loss
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        # Get the target classes, boxes (per prediction scale), indices and anchors
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions; iterate over the feature maps, e.g. 80x80, then 40x40, then 20x20
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx; locate the grid cells matched to each target
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj, initialized to zero

            n = b.shape[0]  # number of targets
            if n:
                # Select the predictions at the matched grid cells; pxy and pwh below decode the
                # box with dedicated formulas
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression: decode the predicted offsets into a real box xywh
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
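                # Ranges implied by the decode formulas: pxy = 2*sigmoid(t) - 0.5 lies in (-0.5, 1.5)
                # around the assigned cell, and pwh = (2*sigmoid(t))^2 * anchor lies in (0, 4) * anchor,
                # i.e. a predicted side can be at most 4x its anchor, consistent with the
                # anchor_t = 4 match threshold in build_targets.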
                # Box loss: with CIoU=True, bbox_iou computes CIoU (the IoU variant is selectable
                # via its arguments); note that tbox[i] holds the ground truth
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss, averaged over the matched targets

                # Objectness
                score_iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    sort_id = torch.argsort(score_iou)
                    b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]
                # Set the objectness target according to model.gr. Different anchors and predictions
                # match the gt boxes with different quality, so a uniform target would be suboptimal;
                # the prediction/gt IoU is therefore used as the confidence target to represent
                # prediction quality
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou  # iou ratio
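                # With self.gr = 1.0 (set in __init__), this reduces to tobj = score_iou: the
                # objectness branch is trained to predict the IoU of its own box with the target.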

                # Classification, only computed when there is more than one class
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE, computed per class independently
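                    # t is an (n, nc) matrix of smoothed one-hot targets: every entry starts at
                    # self.cn and the matched class of each row is set to self.cp.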

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            # Objectness loss, weighted by balance[i] so that each feature-map size contributes
            # with its own weight
            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        # Each component is weighted by its gain from the hyperparameter file
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        # Total loss = lbox + lobj + lcls, scaled by the batch size
        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()

    def build_targets(self, p, targets):
        # Build targets for compute_loss(); input targets are (image, class, x, y, w, h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter
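                # Shape-based matching example: with anchor_t = 4.0 (from the hyp file), a target
                # is kept for an anchor only if max(w_t/w_a, w_a/w_t, h_t/h_a, h_a/h_t) < 4, i.e.
                # neither side of the target may differ from the anchor by more than a factor of 4.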

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
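                # Worked example: a target centered at gxy = (12.3, 45.7) has x % 1 = 0.3 < 0.5, so
                # the cell to its left also matches, and its inverse y fraction is 0.3 < 0.5, so the
                # next cell in y matches as well: the target is assigned to 3 grid cells in total,
                # tripling the positive samples relative to its own cell alone.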
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
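

if __name__ == '__main__':
    # Hedged self-check added for illustration (not part of the upstream file). It exercises the
    # three standalone criteria on random data; run it from the YOLOv5 repo root so the utils.*
    # imports at the top resolve. ComputeLoss itself needs a full model; in train.py it is used as
    #     compute_loss = ComputeLoss(model)
    #     loss, loss_items = compute_loss(pred, targets.to(device))
    torch.manual_seed(0)
    logits = torch.randn(8, 80)                  # hypothetical (batch, num_classes) raw logits
    labels = (torch.rand(8, 80) > 0.95).float()  # sparse multi-label {0, 1} targets
    for criterion in (BCEBlurWithLogitsLoss(),
                      FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5),
                      QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)):
        print(f'{type(criterion).__name__}: {criterion(logits, labels).item():.6f}')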