# Loss functions

import torch
import torch.nn as nn

from utils.metrics import bbox_iou
from utils.torch_utils import is_parallel


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # Return positive, negative label-smoothing BCE targets.
    # Label smoothing: a small fraction of training labels are typically wrong, so the model
    # should not be trained to be fully confident in them. With (binary) cross-entropy this is
    # easy to implement by softening the hard 0/1 targets by eps.
    # Reference: https://wenku.baidu.com/view/27fdf1deadf8941ea76e58fafab069dc51224773.html
    return 1.0 - 0.5 * eps, 0.5 * eps
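
# Usage sketch (illustrative): with the default eps the hard 1/0 targets soften to 0.95/0.05,
# which ComputeLoss below uses as its positive/negative classification targets.
#   cp, cn = smooth_BCE(eps=0.1)
#   # cp = 0.95 (positive target), cn = 0.05 (negative target)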


class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing-label effects.
    # nn.BCEWithLogitsLoss combines the sigmoid and BCELoss (binary cross-entropy) in one class.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        # Down-weight the loss where an image contains an object that was never labeled
        # (an annotation false negative), i.e. confident predictions where true == 0
        loss *= alpha_factor
        return loss.mean()
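
# Usage sketch for BCEBlurWithLogitsLoss (illustrative shapes and values only):
#   criterion = BCEBlurWithLogitsLoss(alpha=0.05)
#   pred = torch.randn(8, 80)     # raw logits
#   true = torch.zeros(8, 80)     # labels, where some real objects may be unlabeled
#   loss = criterion(pred, true)  # confident predictions on zero-labeled cells are down-weighted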


class FocalLoss(nn.Module):
    # Wraps focal loss around an existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    # Focal loss addresses the severe positive/negative sample imbalance of one-stage detectors
    # by reducing the contribution of the many easy negatives during training.
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
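
# Usage sketch (illustrative), wrapping a BCE criterion as ComputeLoss does when fl_gamma > 0:
#   fl = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
#   loss = fl(torch.randn(8, 80), torch.zeros(8, 80))  # easy, well-classified examples contribute little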


class QFocalLoss(nn.Module):
    # Wraps quality focal loss around an existing loss_fcn(), i.e. criteria = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(QFocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        pred_prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = torch.abs(true - pred_prob) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
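
# Usage sketch (illustrative). Unlike FocalLoss, the |true - p|**gamma modulating factor also
# accepts soft quality targets in [0, 1] (e.g. IoU scores), following Generalized Focal Loss
# (https://arxiv.org/abs/2006.04388):
#   qfl = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
#   loss = qfl(torch.randn(8, 80), torch.rand(8, 80))  # soft targets allowed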


class ComputeLoss:
    # Compute the total loss = classification loss + objectness (confidence) loss + box regression loss
    def __init__(self, model, autobalance=False):
        super(ComputeLoss, self).__init__()
        self.sort_obj_iou = False
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria for the class and objectness losses; the 'cls_pw' and 'obj_pw'
        # positive-sample weights come from hyp.scratch.yaml
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss, applied only when gamma > 0
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = QFocalLoss(BCEcls, g), QFocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        # Per-layer loss weights for the 80x80, 40x40 and 20x20 feature maps; the 80x80 map
        # (small objects) clearly gets the largest weight
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7

        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets
        # __call__ lets an instance be invoked directly as compute_loss(p, targets).
        # p is the network output (one tensor per detection layer); targets holds the annotated
        # boxes of every image in the batch as (image, class, x, y, w, h).
        device = targets.device
        # Initialize the loss components: class loss, box regression loss, objectness (confidence) loss
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        # Get the target classes, boxes (per prediction scale), indices and matched anchors
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        # Iterate over the detection layers, e.g. 80x80 first, then 40x40, then 20x20
        for i, pi in enumerate(p):  # layer index, layer predictions
            # Use the indices to locate the grid cells responsible for each target
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj, initialized to 0

            n = b.shape[0]  # number of targets
            if n:
                # Take the predictions at the matched positions; pxy and pwh below decode the raw
                # outputs into a box via the YOLOv5 offset formulas
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression: decode the predicted xywh from the raw outputs
                pxy = ps[:, :2].sigmoid() * 2. - 0.5  # xy offset relative to the grid cell
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]  # wh as a scaled multiple of the anchor
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                # Box loss: with CIoU=True, bbox_iou computes the CIoU between prediction and
                # ground truth (tbox[i]); the mean over targets gives this layer's box loss
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                score_iou = iou.detach().clamp(0).type(tobj.dtype)
                if self.sort_obj_iou:
                    sort_id = torch.argsort(score_iou)
                    b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id]
                # Set the objectness targets according to self.gr. Different anchors match a gt box
                # with different quality, and so do the predicted boxes, so a uniform target would
                # be suboptimal; instead the prediction/gt IoU is used as the conf-branch target
                # to represent prediction quality.
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou  # iou ratio

                # Classification, computed only when there is more than one class
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE, computed per class

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            # Objectness loss, weighted by self.balance[i] so that each feature-map size
            # contributes with its own weight
            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        # Weight the components by the 'box', 'obj' and 'cls' hyperparameters
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        # Total loss = lbox + lobj + lcls, scaled by the batch size
        return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach()
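
    # Usage sketch (illustrative; assumes a YOLOv5-style model whose last module is Detect()
    # and whose .hyp dict is set):
    #   compute_loss = ComputeLoss(model)
    #   loss, loss_items = compute_loss(pred, targets)  # pred: per-layer outputs, targets: (n, 6)
    #   loss.backward()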

    def build_targets(self, p, targets):
        # Build targets for compute_loss(); input targets are (image, class, x, y, w, h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
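

# A minimal, self-contained sketch of the wh-ratio anchor matching used in build_targets()
# above (illustrative values; 'anchor_t' defaults to 4.0 in the hyperparameter files):
def _anchor_match_demo(anchor_t=4.0):
    anchors = torch.tensor([[10., 13.], [16., 30.], [33., 23.]])  # (na, 2) anchor wh in grid units
    gwh = torch.tensor([[12., 20.], [100., 8.]])  # (nt, 2) target wh in grid units
    r = gwh[None] / anchors[:, None]  # (na, nt, 2) wh ratios
    j = torch.max(r, 1. / r).max(2)[0] < anchor_t  # keep pairs within anchor_t ratio in both dims
    return j  # (na, nt) boolean mask: anchor i is matched to target j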