darknet.py
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings

import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.registry import MODELS


class ResBlock(BaseModule):
    """The basic residual block used in Darknet. Each ResBlock consists of
    two ConvModules, and the input is added to the final output. Each
    ConvModule is composed of Conv, BN, and LeakyReLU. In the YOLOv3 paper,
    the first conv layer has half as many filters as the second one. The
    first conv layer has a 1x1 kernel and the second one a 3x3 kernel.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        assert in_channels % 2 == 0  # ensure in_channels is even
        half_in_channels = in_channels // 2

        # shortcut
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
        self.conv2 = ConvModule(
            half_in_channels, in_channels, 3, padding=1, **cfg)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = out + residual

        return out
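
# A quick sanity check (a sketch, assuming torch is importable): the 1x1
# conv halves the channels, the padded 3x3 conv restores them, and the
# residual add leaves the output shape identical to the input:
#
#   >>> import torch
#   >>> block = ResBlock(64)
#   >>> block(torch.rand(1, 64, 52, 52)).shape
#   torch.Size([1, 64, 52, 52])
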
@MODELS.register_module()
class Darknet(BaseModule):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only 53 is supported.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: True.
        pretrained (str, optional): Path to a pretrained model.
            Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """

    # Dict(depth: (layers, channels))
    arch_settings = {
        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
                               (512, 1024)))
    }

    def __init__(self,
                 depth=53,
                 out_indices=(3, 4, 5),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 norm_eval=True,
                 pretrained=None,
                 init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if depth not in self.arch_settings:
            raise KeyError(f'invalid depth {depth} for darknet')

        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.layers, self.channels = self.arch_settings[depth]

        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)

        self.cr_blocks = ['conv1']
        for i, n_layers in enumerate(self.layers):
            layer_name = f'conv_res_block{i + 1}'
            in_c, out_c = self.channels[i]
            self.add_module(
                layer_name,
                self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
            self.cr_blocks.append(layer_name)

        self.norm_eval = norm_eval

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(
                        type='Constant',
                        val=1,
                        layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        outs = []
        for i, layer_name in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if i in self.out_indices:
                outs.append(x)

        return tuple(outs)
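    # Note: out_indices indexes into self.cr_blocks, i.e.
    # ['conv1', 'conv_res_block1', ..., 'conv_res_block5'], so the default
    # (3, 4, 5) emits conv_res_block3/4/5: the (256, 52, 52), (512, 26, 26)
    # and (1024, 13, 13) maps shown in the class docstring example.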
    def _freeze_stages(self):
        if self.frozen_stages >= 0:
            for i in range(self.frozen_stages):
                m = getattr(self, self.cr_blocks[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
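    # Freezing sketch (an illustration, not part of the original file):
    # frozen_stages counts entries of self.cr_blocks from the front, so
    # frozen_stages=1 freezes only the stem conv1, and train() re-applies
    # the freeze after every mode switch:
    #
    #   >>> model = Darknet(depth=53, frozen_stages=1)
    #   >>> model.train()
    #   >>> any(p.requires_grad for p in model.conv1.parameters())
    #   False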
    @staticmethod
    def make_conv_res_block(in_channels,
                            out_channels,
                            res_repeat,
                            conv_cfg=None,
                            norm_cfg=dict(type='BN', requires_grad=True),
                            act_cfg=dict(type='LeakyReLU',
                                         negative_slope=0.1)):
        """In the Darknet backbone, a conv layer is usually followed by
        ResBlocks; this function builds such a stage. The conv layer always
        has a 3x3 kernel with stride=2, and its number of filters equals the
        out channels of the ResBlocks.

        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer.
                Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        model = nn.Sequential()
        model.add_module(
            'conv',
            ConvModule(
                in_channels, out_channels, 3, stride=2, padding=1, **cfg))
        for idx in range(res_repeat):
            model.add_module('res{}'.format(idx),
                             ResBlock(out_channels, **cfg))
        return model
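
# A minimal usage sketch (assuming torch is importable): the stride-2 conv
# halves the spatial resolution while the ResBlocks keep it fixed, so each
# stage downsamples by exactly 2x:
#
#   >>> import torch
#   >>> stage = Darknet.make_conv_res_block(32, 64, res_repeat=1)
#   >>> stage(torch.rand(1, 32, 416, 416)).shape
#   torch.Size([1, 64, 208, 208])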