@@ -89,9 +89,10 @@ class QFocalLoss(nn.Module):
 
 
 class ComputeLoss:
+    sort_obj_iou = False
+
     # Compute losses
     def __init__(self, model, autobalance=False):
-        self.sort_obj_iou = False
         device = next(model.parameters()).device  # get model device
         h = model.hyp  # hyperparameters
@@ -111,26 +112,28 @@ class ComputeLoss:
         self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
         self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
         self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
+        self.device = device
         for k in 'na', 'nc', 'nl', 'anchors':
             setattr(self, k, getattr(det, k))

-    def __call__(self, p, targets):  # predictions, targets, model
-        device = targets.device
-        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
+    def __call__(self, p, targets):  # predictions, targets
+        lcls = torch.zeros(1, device=self.device)  # class loss
+        lbox = torch.zeros(1, device=self.device)  # box loss
+        lobj = torch.zeros(1, device=self.device)  # object loss
         tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

         # Losses
         for i, pi in enumerate(p):  # layer index, layer predictions
             b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
-            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj
+            tobj = torch.zeros(pi.shape[:4], device=self.device)  # target obj

             n = b.shape[0]  # number of targets
             if n:
-                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets
+                pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1)  # target-subset of predictions

                 # Regression
-                pxy = ps[:, :2].sigmoid() * 2 - 0.5
-                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
+                pxy = pxy.sigmoid() * 2 - 0.5
+                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
                 pbox = torch.cat((pxy, pwh), 1)  # predicted box
                 iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                 lbox += (1.0 - iou).mean()  # iou loss
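
A quick illustration of the tensor_split() change above: cutting the per-target prediction matrix at columns 2, 4 and 5 yields the same pieces the old ps[:, :2], ps[:, 2:4] and ps[:, 5:] slices produced. A minimal sketch, assuming a dummy tensor with 4 box/objectness columns plus nc class columns:

    import torch

    nc = 80                                   # assumed number of classes
    ps = torch.randn(7, 5 + nc)               # dummy target-subset predictions (7 targets)
    pxy, pwh, obj, pcls = ps.tensor_split((2, 4, 5), dim=1)  # cut at columns 2, 4, 5
    assert pxy.shape == (7, 2) and pwh.shape == (7, 2)
    assert obj.shape == (7, 1) and pcls.shape == (7, nc)
    assert torch.equal(pxy, ps[:, :2]) and torch.equal(pcls, ps[:, 5:])  # same slices as before
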
@@ -144,9 +147,9 @@ class ComputeLoss:

                 # Classification
                 if self.nc > 1:  # cls loss (only if multiple classes)
-                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
+                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets
                     t[range(n), tcls[i]] = self.cp
-                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE
+                    lcls += self.BCEcls(pcls, t)  # BCE

                 # Append targets to text file
                 # with open('targets.txt', 'a') as file:
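
For context on the classification targets above: self.cp and self.cn are the smoothed positive/negative BCE targets, and the refactor only swaps the ps[:, 5:] slice for the pre-split pcls. A rough sketch of how the target matrix is built, assuming smooth_BCE returns 1 - 0.5*eps and 0.5*eps as defined elsewhere in loss.py:

    import torch

    eps = 0.1                               # assumed label_smoothing hyperparameter
    cp, cn = 1.0 - 0.5 * eps, 0.5 * eps     # positive, negative BCE targets (smooth_BCE)
    n, nc = 3, 5                            # dummy target and class counts
    tcls_i = torch.tensor([0, 2, 4])        # dummy true-class indices for this layer
    t = torch.full((n, nc), cn)             # every class starts at the negative target
    t[range(n), tcls_i] = cp                # each row's true class gets the positive target
    # t is then scored against pcls with BCEWithLogitsLoss: lcls += self.BCEcls(pcls, t)
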
@@ -170,15 +173,15 @@ class ComputeLoss:
         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
         na, nt = self.na, targets.shape[0]  # number of anchors, targets
         tcls, tbox, indices, anch = [], [], [], []
-        gain = torch.ones(7, device=targets.device)  # normalized to gridspace gain
-        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
+        gain = torch.ones(7, device=self.device)  # normalized to gridspace gain
+        ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
         targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

         g = 0.5  # bias
         off = torch.tensor([[0, 0],
                             [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                             # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
-                            ], device=targets.device).float() * g  # offsets
+                            ], device=self.device).float() * g  # offsets

         for i in range(self.nl):
             anchors = self.anchors[i]
@@ -206,14 +209,12 @@ class ComputeLoss:
                 offsets = 0

             # Define
-            b, c = t[:, :2].long().T  # image, class
-            gxy = t[:, 2:4]  # grid xy
-            gwh = t[:, 4:6]  # grid wh
+            bc, gxy, gwh, a = t.unsafe_chunk(4, dim=1)  # (image, class), grid xy, grid wh, anchors
+            a, (b, c) = a.long().view(-1), bc.long().T  # anchors, image, class
             gij = (gxy - offsets).long()
-            gi, gj = gij.T  # grid xy indices
+            gi, gj = gij.T  # grid indices

             # Append
-            a = t[:, 6].long()  # anchor indices
             indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
             tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
             anch.append(anchors[a])  # anchors
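
The unsafe_chunk(4, dim=1) call above relies on t having exactly 7 columns (image, class, x, y, w, h, anchor index), so chunking into 4 pieces along dim 1 gives widths 2, 2, 2 and 1. A minimal sketch with dummy matched-target rows:

    import torch

    # dummy matched targets: (image, class, x, y, w, h, anchor index) per row
    t = torch.tensor([[0., 3., 10.2, 7.8, 1.5, 2.0, 1.],
                      [1., 0., 4.4, 5.6, 0.8, 0.9, 2.]])
    bc, gxy, gwh, a = t.unsafe_chunk(4, dim=1)  # widths 2, 2, 2, 1 for a 7-column tensor
    a, (b, c) = a.long().view(-1), bc.long().T  # anchor, image and class indices
    assert a.tolist() == [1, 2] and b.tolist() == [0, 1] and c.tolist() == [3, 0]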