  1. """Point-wise Spatial Attention Network"""
  2. import torch
  3. import torch.nn as nn
  4. import torch.nn.functional as F
  5. from core.nn import CollectAttention, DistributeAttention
  6. from core.models.segbase import SegBaseModel
  7. from core.models.fcn import _FCNHead
  8. #运行失败,name '_C' is not defined。也是跟psa_block模块的实现有关:用到了自定义的torch.autograd.Function(里面用到了cpp文件,找不到文件出错)
  9. __all__ = ['PSANet', 'get_psanet', 'get_psanet_resnet50_voc', 'get_psanet_resnet101_voc',
  10. 'get_psanet_resnet152_voc', 'get_psanet_resnet50_citys', 'get_psanet_resnet101_citys',
  11. 'get_psanet_resnet152_citys']
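
# If the compiled '_C' extension is unavailable, a pure-PyTorch stand-in can
# unblock experimentation. This is only a sketch under assumptions, not the
# original CUDA kernel: it assumes CollectAttention/DistributeAttention reduce
# the over-complete adaption map to one H*W attention vector per position and
# normalize it over source positions. _SoftmaxAttention is a hypothetical name;
# to try it, swap it in for CollectAttention()/DistributeAttention() below.
class _SoftmaxAttention(nn.Module):
    """Hypothetical fallback: crop the adaption map to H*W channels and
    softmax-normalize over source positions."""

    def forward(self, x):
        n, c, h, w = x.size()
        attn = x[:, :h * w]                    # keep the first H*W maps (assumption)
        attn = attn.reshape(n, h * w, h * w)   # (N, source positions, target positions)
        attn = F.softmax(attn, dim=1)          # each target position sums to 1 over sources
        return attn.reshape(n, h * w, h, w)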
class PSANet(SegBaseModel):
    r"""PSANet

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default: 'resnet50'; one of
        'resnet50', 'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
    aux : bool
        Whether to use the auxiliary loss.

    Reference:
        Hengshuang Zhao, et al. "PSANet: Point-wise Spatial Attention Network
        for Scene Parsing." ECCV 2018.
    """

    def __init__(self, nclass, backbone='resnet', aux=False, pretrained_base=False, **kwargs):
        super(PSANet, self).__init__(nclass, aux, backbone, pretrained_base, **kwargs)
        self.head = _PSAHead(nclass, **kwargs)
        if aux:
            self.auxlayer = _FCNHead(1024, nclass, **kwargs)

        self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])

    def forward(self, x):
        size = x.size()[2:]
        _, _, c3, c4 = self.base_forward(x)
        outputs = list()
        x = self.head(c4)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.append(x)

        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.append(auxout)
        return tuple(outputs)
class _PSAHead(nn.Module):
    def __init__(self, nclass, norm_layer=nn.BatchNorm2d, **kwargs):
        super(_PSAHead, self).__init__()
        self.collect = _CollectModule(2048, 512, 60, 60, norm_layer, **kwargs)
        self.distribute = _DistributeModule(2048, 512, 60, 60, norm_layer, **kwargs)
        self.conv_post = nn.Sequential(
            nn.Conv2d(1024, 2048, 1, bias=False),
            norm_layer(2048),
            nn.ReLU(True))
        self.project = nn.Sequential(
            nn.Conv2d(4096, 512, 3, padding=1, bias=False),
            norm_layer(512),
            nn.ReLU(True),
            nn.Conv2d(512, nclass, 1))

    def forward(self, x):
        global_feature_collect = self.collect(x)
        global_feature_distribute = self.distribute(x)

        global_feature = torch.cat([global_feature_collect, global_feature_distribute], dim=1)
        out = self.conv_post(global_feature)
        out = F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=True)
        out = torch.cat([x, out], dim=1)
        out = self.project(out)

        return out
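
# Shape trace through _PSAHead, assuming a 480x480 input so that c4 is
# (N, 2048, 60, 60) with the dilated backbone's output stride of 8:
#   collect / distribute : (N, 512, 30, 30) each (attention is computed on
#                          the 2x-shrunk map)
#   cat                  : (N, 1024, 30, 30)
#   conv_post            : (N, 2048, 30, 30)
#   interpolate x2       : (N, 2048, 60, 60)
#   cat with x           : (N, 4096, 60, 60)
#   project              : (N, nclass, 60, 60)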
class _CollectModule(nn.Module):
    def __init__(self, in_channels, reduced_channels, feat_w, feat_h, norm_layer, **kwargs):
        super(_CollectModule, self).__init__()
        self.conv_reduce = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, 1, bias=False),
            norm_layer(reduced_channels),
            nn.ReLU(True))
        self.conv_adaption = nn.Sequential(
            nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False),
            norm_layer(reduced_channels),
            nn.ReLU(True),
            nn.Conv2d(reduced_channels, (feat_w - 1) * feat_h, 1, bias=False))
        self.collect_attention = CollectAttention()

        self.reduced_channels = reduced_channels
        self.feat_w = feat_w
        self.feat_h = feat_h

    def forward(self, x):
        x = self.conv_reduce(x)
        # shrink
        x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True)
        x_adaption = self.conv_adaption(x_shrink)
        ca = self.collect_attention(x_adaption)

        global_feature_collect_list = list()
        for i in range(x_shrink.shape[0]):
            x_shrink_i = x_shrink[i].view(self.reduced_channels, -1)
            ca_i = ca[i].view(ca.shape[1], -1)
            global_feature_collect_list.append(
                torch.mm(x_shrink_i, ca_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2))
        global_feature_collect = torch.cat(global_feature_collect_list)

        return global_feature_collect
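
# The per-sample loop above can be replaced by a single batched matrix
# multiply; a sketch of the equivalent computation (the same rewrite applies
# to _DistributeModule below):
#
#     n, c = x_shrink.shape[:2]
#     attn = ca.view(n, ca.shape[1], -1)               # (N, hw, hw)
#     feat = torch.bmm(x_shrink.view(n, c, -1), attn)  # (N, C, hw)
#     global_feature_collect = feat.view(n, c, self.feat_h // 2, self.feat_w // 2)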
class _DistributeModule(nn.Module):
    def __init__(self, in_channels, reduced_channels, feat_w, feat_h, norm_layer, **kwargs):
        super(_DistributeModule, self).__init__()
        self.conv_reduce = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, 1, bias=False),
            norm_layer(reduced_channels),
            nn.ReLU(True))
        self.conv_adaption = nn.Sequential(
            nn.Conv2d(reduced_channels, reduced_channels, 1, bias=False),
            norm_layer(reduced_channels),
            nn.ReLU(True),
            nn.Conv2d(reduced_channels, (feat_w - 1) * feat_h, 1, bias=False))
        self.distribute_attention = DistributeAttention()

        self.reduced_channels = reduced_channels
        self.feat_w = feat_w
        self.feat_h = feat_h

    def forward(self, x):
        x = self.conv_reduce(x)
        x_shrink = F.interpolate(x, scale_factor=1 / 2, mode='bilinear', align_corners=True)
        x_adaption = self.conv_adaption(x_shrink)
        da = self.distribute_attention(x_adaption)

        global_feature_distribute_list = list()
        for i in range(x_shrink.shape[0]):
            x_shrink_i = x_shrink[i].view(self.reduced_channels, -1)
            da_i = da[i].view(da.shape[1], -1)
            global_feature_distribute_list.append(
                torch.mm(x_shrink_i, da_i).view(1, self.reduced_channels, self.feat_h // 2, self.feat_w // 2))
        global_feature_distribute = torch.cat(global_feature_distribute_list)

        return global_feature_distribute
def get_psanet(dataset='pascal_voc', backbone='resnet50', pretrained=False, root='~/.torch/models',
               pretrained_base=False, **kwargs):
    acronyms = {
        'pascal_voc': 'pascal_voc',
        'pascal_aug': 'pascal_aug',
        'ade20k': 'ade',
        'coco': 'coco',
        'citys': 'citys',
    }
    # from ..data.dataloader import datasets
    # The number of classes is hard-coded to 4 here because the dataset lookup
    # above is commented out.
    model = PSANet(4, backbone=backbone, pretrained_base=pretrained_base, **kwargs)
    # Note: in the commented-out block below, the checkpoint name still reads
    # 'deeplabv3', which looks like a leftover from the file this was adapted from.
    # if pretrained:
    #     from .model_store import get_model_file
    #     device = torch.device(kwargs['local_rank'])
    #     model.load_state_dict(torch.load(get_model_file('deeplabv3_%s_%s' % (backbone, acronyms[dataset]), root=root),
    #                                      map_location=device))
    return model
def get_psanet_resnet50_voc(**kwargs):
    return get_psanet('pascal_voc', 'resnet50', **kwargs)


def get_psanet_resnet101_voc(**kwargs):
    return get_psanet('pascal_voc', 'resnet101', **kwargs)


def get_psanet_resnet152_voc(**kwargs):
    return get_psanet('pascal_voc', 'resnet152', **kwargs)


def get_psanet_resnet50_citys(**kwargs):
    return get_psanet('citys', 'resnet50', **kwargs)


def get_psanet_resnet101_citys(**kwargs):
    return get_psanet('citys', 'resnet101', **kwargs)


def get_psanet_resnet152_citys(**kwargs):
    return get_psanet('citys', 'resnet152', **kwargs)
if __name__ == '__main__':
    model = get_psanet_resnet50_voc()
    img = torch.randn(1, 3, 480, 480)
    output = model(img)
    print(output[0].shape)  # expected: torch.Size([1, 4, 480, 480])
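
    # With the auxiliary head enabled, forward returns a two-element tuple;
    # a usage sketch (aux is forwarded through **kwargs to PSANet.__init__):
    #
    #     model = get_psanet_resnet50_voc(aux=True)
    #     out, auxout = model(img)   # both (1, 4, 480, 480)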