You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

136 lines
5.1KB

  1. # YOLOv5 experimental modules
  2. import numpy as np
  3. import torch
  4. import torch.nn as nn
  5. import os
  6. from models.common import Conv, DWConv
  7. from utils.google_utils import attempt_download
  8. class CrossConv(nn.Module):
  9. # Cross Convolution Downsample
  10. def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
  11. # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
  12. super(CrossConv, self).__init__()
  13. c_ = int(c2 * e) # hidden channels
  14. self.cv1 = Conv(c1, c_, (1, k), (1, s))
  15. self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
  16. self.add = shortcut and c1 == c2
  17. def forward(self, x):
  18. return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
  19. class Sum(nn.Module):
  20. # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
  21. def __init__(self, n, weight=False): # n: number of inputs
  22. super(Sum, self).__init__()
  23. self.weight = weight # apply weights boolean
  24. self.iter = range(n - 1) # iter object
  25. if weight:
  26. self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights
  27. def forward(self, x):
  28. y = x[0] # no weight
  29. if self.weight:
  30. w = torch.sigmoid(self.w) * 2
  31. for i in self.iter:
  32. y = y + x[i + 1] * w[i]
  33. else:
  34. for i in self.iter:
  35. y = y + x[i + 1]
  36. return y
  37. class GhostConv(nn.Module):
  38. # Ghost Convolution https://github.com/huawei-noah/ghostnet
  39. def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
  40. super(GhostConv, self).__init__()
  41. c_ = c2 // 2 # hidden channels
  42. self.cv1 = Conv(c1, c_, k, s, None, g, act)
  43. self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)
  44. def forward(self, x):
  45. y = self.cv1(x)
  46. return torch.cat([y, self.cv2(y)], 1)
  47. class GhostBottleneck(nn.Module):
  48. # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
  49. def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
  50. super(GhostBottleneck, self).__init__()
  51. c_ = c2 // 2
  52. self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw
  53. DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
  54. GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
  55. self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),
  56. Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
  57. def forward(self, x):
  58. return self.conv(x) + self.shortcut(x)
  59. class MixConv2d(nn.Module):
  60. # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
  61. def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
  62. super(MixConv2d, self).__init__()
  63. groups = len(k)
  64. if equal_ch: # equal c_ per group
  65. i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices
  66. c_ = [(i == g).sum() for g in range(groups)] # intermediate channels
  67. else: # equal weight.numel() per group
  68. b = [c2] + [0] * groups
  69. a = np.eye(groups + 1, groups, k=-1)
  70. a -= np.roll(a, 1, axis=1)
  71. a *= np.array(k) ** 2
  72. a[0] = 1
  73. c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
  74. self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
  75. self.bn = nn.BatchNorm2d(c2)
  76. self.act = nn.LeakyReLU(0.1, inplace=True)
  77. def forward(self, x):
  78. return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
  79. class Ensemble(nn.ModuleList):
  80. # Ensemble of models
  81. def __init__(self):
  82. super(Ensemble, self).__init__()
  83. def forward(self, x, augment=False):
  84. y = []
  85. for module in self:
  86. y.append(module(x, augment)[0])
  87. # y = torch.stack(y).max(0)[0] # max ensemble
  88. # y = torch.stack(y).mean(0) # mean ensemble
  89. y = torch.cat(y, 1) # nms ensemble
  90. return y, None # inference, train output
  91. def attempt_load(weights, map_location=None):
  92. # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
  93. model = Ensemble()
  94. for w in weights if isinstance(weights, list) else [weights]:
  95. #attempt_download(w)
  96. assert os.path.exists(w),"%s not exists"
  97. ckpt = torch.load(w, map_location=map_location) # load
  98. model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model
  99. # Compatibility updates
  100. for m in model.modules():
  101. if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
  102. m.inplace = True # pytorch 1.7.0 compatibility
  103. elif type(m) is Conv:
  104. m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility
  105. if len(model) == 1:
  106. return model[-1] # return model
  107. else:
  108. print('Ensemble created with %s\n' % weights)
  109. for k in ['names', 'stride']:
  110. setattr(model, k, getattr(model[-1], k))
  111. return model # return ensemble