vfnet_head.py

# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union

import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale
from mmcv.ops import DeformConv2d
from torch import Tensor

from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import (ConfigType, InstanceList, MultiConfig,
                         OptInstanceList, RangeType, reduce_mean)
from ..task_modules.prior_generators import MlvlPointGenerator
from ..task_modules.samplers import PseudoSampler
from ..utils import multi_apply
from .atss_head import ATSSHead
from .fcos_head import FCOSHead

INF = 1e8

@MODELS.register_module()
class VFNetHead(ATSSHead, FCOSHead):
    """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
    Detector.<https://arxiv.org/abs/2008.13367>`_.

    The VFNet predicts IoU-aware classification scores which mix the
    object presence confidence and object localization accuracy as the
    detection score. It is built on the FCOS architecture and uses ATSS
    for defining positive/negative training examples. The VFNet is trained
    with Varifocal Loss and employs star-shaped deformable convolution to
    extract features for a bbox.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple
            level points.
        center_sampling (bool): If true, use center sampling. Defaults to False.
        center_sample_radius (float): Radius of center sampling. Defaults to 1.5.
        sync_num_pos (bool): If true, synchronize the number of positive
            examples across GPUs. Defaults to True.
        gradient_mul (float): The multiplier to gradients from bbox refinement
            and recognition. Defaults to 0.1.
        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
            'stride'. Defaults to 'reg_denom'.
        loss_cls_fl (:obj:`ConfigDict` or dict): Config of focal loss.
        use_vfl (bool): If true, use varifocal loss for training.
            Defaults to True.
        loss_cls (:obj:`ConfigDict` or dict): Config of varifocal loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss,
            GIoU Loss.
        loss_bbox_refine (:obj:`ConfigDict` or dict): Config of localization
            refinement loss, GIoU Loss.
        norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and
            config norm layer. Defaults to norm_cfg=dict(type='GN',
            num_groups=32, requires_grad=True).
        use_atss (bool): If true, use ATSS to define positive/negative
            examples. Defaults to True.
        reg_decoded_bbox (bool): If true, the regression loss is applied
            directly on decoded bounding boxes. Defaults to True.
        anchor_generator (:obj:`ConfigDict` or dict): Config of anchor
            generator for ATSS.
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`]): Initialization config dict.

    Example:
        >>> self = VFNetHead(11, 7)
        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, bbox_pred_refine = self.forward(feats)
        >>> assert len(cls_score) == len(self.scales)
    """  # noqa: E501
    def __init__(self,
                 num_classes: int,
                 in_channels: int,
                 regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256),
                                              (256, 512), (512, INF)),
                 center_sampling: bool = False,
                 center_sample_radius: float = 1.5,
                 sync_num_pos: bool = True,
                 gradient_mul: float = 0.1,
                 bbox_norm_type: str = 'reg_denom',
                 loss_cls_fl: ConfigType = dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 use_vfl: bool = True,
                 loss_cls: ConfigType = dict(
                     type='VarifocalLoss',
                     use_sigmoid=True,
                     alpha=0.75,
                     gamma=2.0,
                     iou_weighted=True,
                     loss_weight=1.0),
                 loss_bbox: ConfigType = dict(
                     type='GIoULoss', loss_weight=1.5),
                 loss_bbox_refine: ConfigType = dict(
                     type='GIoULoss', loss_weight=2.0),
                 norm_cfg: ConfigType = dict(
                     type='GN', num_groups=32, requires_grad=True),
                 use_atss: bool = True,
                 reg_decoded_bbox: bool = True,
                 anchor_generator: ConfigType = dict(
                     type='AnchorGenerator',
                     ratios=[1.0],
                     octave_base_scale=8,
                     scales_per_octave=1,
                     center_offset=0.0,
                     strides=[8, 16, 32, 64, 128]),
                 init_cfg: MultiConfig = dict(
                     type='Normal',
                     layer='Conv2d',
                     std=0.01,
                     override=dict(
                         type='Normal',
                         name='vfnet_cls',
                         std=0.01,
                         bias_prob=0.01)),
                 **kwargs) -> None:
        # dcn base offsets, adapted from reppoints_head.py
        self.num_dconv_points = 9
        self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
        self.dcn_pad = int((self.dcn_kernel - 1) / 2)
        dcn_base = np.arange(-self.dcn_pad,
                             self.dcn_pad + 1).astype(np.float64)
        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
            (-1))
        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
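        # For the default 3x3 kernel the base offsets are the regular grid
        # positions (y, x) in row-major order:
        #   (-1, -1), (-1, 0), (-1, 1),
        #   ( 0, -1), ( 0, 0), ( 0, 1),
        #   ( 1, -1), ( 1, 0), ( 1, 1)
        # i.e. an interleaved y/x tensor of shape (1, 18, 1, 1).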
        super(FCOSHead, self).__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            norm_cfg=norm_cfg,
            init_cfg=init_cfg,
            **kwargs)
        self.regress_ranges = regress_ranges
        self.reg_denoms = [
            regress_range[-1] for regress_range in regress_ranges
        ]
        self.reg_denoms[-1] = self.reg_denoms[-2] * 2
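        # reg_denoms are the upper bounds of regress_ranges, used as
        # normalization denominators when bbox_norm_type == 'reg_denom';
        # the INF of the last level is replaced by twice the previous bound,
        # e.g. [64, 128, 256, 512, 1024] for the default ranges.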
        self.center_sampling = center_sampling
        self.center_sample_radius = center_sample_radius
        self.sync_num_pos = sync_num_pos
        self.bbox_norm_type = bbox_norm_type
        self.gradient_mul = gradient_mul
        self.use_vfl = use_vfl
        if self.use_vfl:
            self.loss_cls = MODELS.build(loss_cls)
        else:
            self.loss_cls = MODELS.build(loss_cls_fl)
        self.loss_bbox = MODELS.build(loss_bbox)
        self.loss_bbox_refine = MODELS.build(loss_bbox_refine)

        # for getting ATSS targets
        self.use_atss = use_atss
        self.reg_decoded_bbox = reg_decoded_bbox
        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
        self.anchor_center_offset = anchor_generator['center_offset']

        self.num_base_priors = self.prior_generator.num_base_priors[0]

        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg['assigner'])
            if self.train_cfg.get('sampler', None) is not None:
                self.sampler = TASK_UTILS.build(
                    self.train_cfg['sampler'], default_args=dict(context=self))
            else:
                self.sampler = PseudoSampler()

        # only used in `get_atss_targets` when `use_atss` is True
        self.atss_prior_generator = TASK_UTILS.build(anchor_generator)

        self.fcos_prior_generator = MlvlPointGenerator(
            anchor_generator['strides'],
            self.anchor_center_offset if self.use_atss else 0.5)

        # In order to reuse the `get_bboxes` in `BaseDenseHead`.
        # Only used in the testing phase.
        self.prior_generator = self.fcos_prior_generator
    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        super(FCOSHead, self)._init_cls_convs()
        super(FCOSHead, self)._init_reg_convs()
        self.relu = nn.ReLU()
        self.vfnet_reg_conv = ConvModule(
            self.feat_channels,
            self.feat_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            bias=self.conv_bias)
        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_reg_refine_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_cls_dconv = DeformConv2d(
            self.feat_channels,
            self.feat_channels,
            self.dcn_kernel,
            1,
            padding=self.dcn_pad)
        self.vfnet_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:
        """Forward features from the upstream network.

        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple:
                - cls_scores (list[Tensor]): Box iou-aware scores for each
                  scale level, each is a 4D-tensor, the channel number is
                  num_points * num_classes.
                - bbox_preds (list[Tensor]): Box offsets for each
                  scale level, each is a 4D-tensor, the channel number is
                  num_points * 4.
                - bbox_preds_refine (list[Tensor]): Refined Box offsets for
                  each scale level, each is a 4D-tensor, the channel
                  number is num_points * 4.
        """
        return multi_apply(self.forward_single, x, self.scales,
                           self.scales_refine, self.strides, self.reg_denoms)
    def forward_single(self, x: Tensor, scale: Scale, scale_refine: Scale,
                       stride: int, reg_denom: int) -> tuple:
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            scale_refine (:obj:`mmcv.cnn.Scale`): Learnable scale module to
                resize the refined bbox prediction.
            stride (int): The corresponding stride for feature maps,
                used to normalize the bbox prediction when
                bbox_norm_type = 'stride'.
            reg_denom (int): The corresponding regression range for feature
                maps, only used to normalize the bbox prediction when
                bbox_norm_type = 'reg_denom'.

        Returns:
            tuple: iou-aware cls scores for each box, bbox predictions and
            refined bbox predictions of input feature maps.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)

        # predict the bbox_pred of different level
        reg_feat_init = self.vfnet_reg_conv(reg_feat)
        if self.bbox_norm_type == 'reg_denom':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
        elif self.bbox_norm_type == 'stride':
            bbox_pred = scale(
                self.vfnet_reg(reg_feat_init)).float().exp() * stride
        else:
            raise NotImplementedError

        # compute star deformable convolution offsets
        # converting dcn_offset to reg_feat.dtype thus VFNet can be
        # trained with FP16
        dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
                                          stride).to(reg_feat.dtype)

        # refine the bbox_pred
        reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
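        # The refinement conv predicts a positive per-side scaling factor
        # (via exp) that multiplies the detached initial prediction, so the
        # refinement loss does not back-propagate into vfnet_reg through
        # this product.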
        bbox_pred_refine = scale_refine(
            self.vfnet_reg_refine(reg_feat)).float().exp()
        bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()

        # predict the iou-aware cls score
        cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
        cls_score = self.vfnet_cls(cls_feat)

        if self.training:
            return cls_score, bbox_pred, bbox_pred_refine
        else:
            return cls_score, bbox_pred_refine
    def star_dcn_offset(self, bbox_pred: Tensor, gradient_mul: float,
                        stride: int) -> Tensor:
        """Compute the star deformable conv offsets.

        Args:
            bbox_pred (Tensor): Predicted bbox distance offsets (l, t, r, b).
            gradient_mul (float): Gradient multiplier.
            stride (int): The corresponding stride for feature maps,
                used to project the bbox onto the feature map.

        Returns:
            Tensor: The offsets for deformable convolution.
        """
        dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
        bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
            gradient_mul * bbox_pred
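        # Only a fraction ``gradient_mul`` of the gradient that reaches
        # bbox_pred through the deformable-conv offsets flows back into the
        # initial regression branch; the remaining share is detached.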
        # map to the feature map scale
        bbox_pred_grad_mul = bbox_pred_grad_mul / stride
        N, C, H, W = bbox_pred.size()
        x1 = bbox_pred_grad_mul[:, 0, :, :]
        y1 = bbox_pred_grad_mul[:, 1, :, :]
        x2 = bbox_pred_grad_mul[:, 2, :, :]
        y2 = bbox_pred_grad_mul[:, 3, :, :]
        bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
            N, 2 * self.num_dconv_points, H, W)
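        # Offset channels come in (dy, dx) pairs for the 9 kernel points in
        # row-major order over the 3x3 grid. The assignments below place the
        # points on the predicted box: four corners, four edge midpoints and
        # the center, where x1/y1 are the left/top distances and x2/y2 the
        # right/bottom distances (in feature-map units). Channels left at
        # zero keep that coordinate on the box center.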
        bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1  # -y1
        bbox_pred_grad_mul_offset[:, 5, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 11, :, :] = x2  # x2
        bbox_pred_grad_mul_offset[:, 12, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1  # -x1
        bbox_pred_grad_mul_offset[:, 14, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 16, :, :] = y2  # y2
        bbox_pred_grad_mul_offset[:, 17, :, :] = x2  # x2
        dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset
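        # DeformConv2d samples at the regular kernel grid plus these offsets,
        # so subtracting dcn_base_offset makes the effective sampling
        # locations coincide with the star points written above.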
        return dcn_offset
    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            bbox_preds_refine: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box offsets for each
                scale level, each is a 4D-tensor, the channel number is
                num_points * 4.
            bbox_preds_refine (list[Tensor]): Refined Box offsets for
                each scale level, each is a 4D-tensor, the channel
                number is num_points * 4.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        all_level_points = self.fcos_prior_generator.grid_priors(
            featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device)
        labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
            cls_scores,
            all_level_points,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore=batch_gt_instances_ignore)

        num_imgs = cls_scores[0].size(0)
        # flatten cls_scores, bbox_preds and bbox_preds_refine
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3,
                              1).reshape(-1,
                                         self.cls_out_channels).contiguous()
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred in bbox_preds
        ]
        flatten_bbox_preds_refine = [
            bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
            for bbox_pred_refine in bbox_preds_refine
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds)
        flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
        flatten_labels = torch.cat(labels)
        flatten_bbox_targets = torch.cat(bbox_targets)
        # repeat points to align with bbox_preds
        flatten_points = torch.cat(
            [points.repeat(num_imgs, 1) for points in all_level_points])

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = torch.where(
            ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
        num_pos = len(pos_inds)

        pos_bbox_preds = flatten_bbox_preds[pos_inds]
        pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
        pos_labels = flatten_labels[pos_inds]

        # sync num_pos across all gpus
        if self.sync_num_pos:
            num_pos_avg_per_gpu = reduce_mean(
                pos_inds.new_tensor(num_pos).float()).item()
            num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
        else:
            num_pos_avg_per_gpu = num_pos

        pos_bbox_targets = flatten_bbox_targets[pos_inds]
        pos_points = flatten_points[pos_inds]

        pos_decoded_bbox_preds = self.bbox_coder.decode(
            pos_points, pos_bbox_preds)
        pos_decoded_target_preds = self.bbox_coder.decode(
            pos_points, pos_bbox_targets)
        iou_targets_ini = bbox_overlaps(
            pos_decoded_bbox_preds,
            pos_decoded_target_preds.detach(),
            is_aligned=True).clamp(min=1e-6)
        bbox_weights_ini = iou_targets_ini.clone().detach()
        bbox_avg_factor_ini = reduce_mean(
            bbox_weights_ini.sum()).clamp_(min=1).item()

        pos_decoded_bbox_preds_refine = \
            self.bbox_coder.decode(pos_points, pos_bbox_preds_refine)
        iou_targets_rf = bbox_overlaps(
            pos_decoded_bbox_preds_refine,
            pos_decoded_target_preds.detach(),
            is_aligned=True).clamp(min=1e-6)
        bbox_weights_rf = iou_targets_rf.clone().detach()
        bbox_avg_factor_rf = reduce_mean(
            bbox_weights_rf.sum()).clamp_(min=1).item()
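        # Both GIoU losses below are weighted per box by the (detached) IoU
        # between prediction and target, and normalized by the GPU-averaged
        # sum of those IoUs, so better-localized positives contribute more.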
        if num_pos > 0:
            loss_bbox = self.loss_bbox(
                pos_decoded_bbox_preds,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_ini,
                avg_factor=bbox_avg_factor_ini)

            loss_bbox_refine = self.loss_bbox_refine(
                pos_decoded_bbox_preds_refine,
                pos_decoded_target_preds.detach(),
                weight=bbox_weights_rf,
                avg_factor=bbox_avg_factor_rf)

            # build IoU-aware cls_score targets
            if self.use_vfl:
                pos_ious = iou_targets_rf.clone().detach()
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)
                cls_iou_targets[pos_inds, pos_labels] = pos_ious
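                # The varifocal-loss target at each positive location is the
                # IoU of the refined box with its GT, written into the
                # channel of the GT class; all other entries stay zero, so
                # negatives are pushed toward a zero score.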
        else:
            loss_bbox = pos_bbox_preds.sum() * 0
            loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
            if self.use_vfl:
                cls_iou_targets = torch.zeros_like(flatten_cls_scores)

        if self.use_vfl:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                cls_iou_targets,
                avg_factor=num_pos_avg_per_gpu)
        else:
            loss_cls = self.loss_cls(
                flatten_cls_scores,
                flatten_labels,
                weight=label_weights,
                avg_factor=num_pos_avg_per_gpu)

        return dict(
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_bbox_rf=loss_bbox_refine)
    def get_targets(
            self,
            cls_scores: List[Tensor],
            mlvl_points: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:
        """A wrapper for computing ATSS and FCOS targets for points in
        multiple images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            tuple:
                - labels_list (list[Tensor]): Labels of each level.
                - label_weights (Tensor/None): Label weights of all levels.
                - bbox_targets_list (list[Tensor]): Regression targets of each
                  level, (l, t, r, b).
                - bbox_weights (Tensor/None): Bbox weights of all levels.
        """
        if self.use_atss:
            return self.get_atss_targets(cls_scores, mlvl_points,
                                         batch_gt_instances, batch_img_metas,
                                         batch_gt_instances_ignore)
        else:
            self.norm_on_bbox = False
            return self.get_fcos_targets(mlvl_points, batch_gt_instances)

    def _get_targets_single(self, *args, **kwargs):
        """Avoid ambiguity in multiple inheritance."""
        if self.use_atss:
            return ATSSHead._get_targets_single(self, *args, **kwargs)
        else:
            return FCOSHead._get_targets_single(self, *args, **kwargs)

    def get_fcos_targets(self, points: List[Tensor],
                         batch_gt_instances: InstanceList) -> tuple:
        """Compute FCOS regression and classification targets for points in
        multiple images.

        Args:
            points (list[Tensor]): Points of each fpn level, each has shape
                (num_points, 2).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.

        Returns:
            tuple:
                - labels (list[Tensor]): Labels of each level.
                - label_weights: None, to be compatible with ATSS targets.
                - bbox_targets (list[Tensor]): BBox targets of each level.
                - bbox_weights: None, to be compatible with ATSS targets.
        """
        labels, bbox_targets = FCOSHead.get_targets(self, points,
                                                    batch_gt_instances)
        label_weights = None
        bbox_weights = None
        return labels, label_weights, bbox_targets, bbox_weights
    def get_anchors(self,
                    featmap_sizes: List[Tuple],
                    batch_img_metas: List[dict],
                    device: str = 'cuda') -> tuple:
        """Get anchors according to feature map sizes.

        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            batch_img_metas (list[dict]): Image meta info.
            device (str): Device for returned tensors.

        Returns:
            tuple:
                - anchor_list (list[Tensor]): Anchors of each image.
                - valid_flag_list (list[Tensor]): Valid flags of each image.
        """
        num_imgs = len(batch_img_metas)

        # since feature map sizes of all images are the same, we only compute
        # anchors for one time
        multi_level_anchors = self.atss_prior_generator.grid_priors(
            featmap_sizes, device=device)
        anchor_list = [multi_level_anchors for _ in range(num_imgs)]

        # for each image, we compute valid flags of multi level anchors
        valid_flag_list = []
        for img_id, img_meta in enumerate(batch_img_metas):
            multi_level_flags = self.atss_prior_generator.valid_flags(
                featmap_sizes, img_meta['pad_shape'], device=device)
            valid_flag_list.append(multi_level_flags)

        return anchor_list, valid_flag_list
    def get_atss_targets(
            self,
            cls_scores: List[Tensor],
            mlvl_points: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> tuple:
        """A wrapper for computing ATSS targets for points in multiple images.

        Args:
            cls_scores (list[Tensor]): Box iou-aware scores for each scale
                level with shape (N, num_points * num_classes, H, W).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            tuple:
                - labels_list (list[Tensor]): Labels of each level.
                - label_weights (Tensor): Label weights of all levels.
                - bbox_targets_list (list[Tensor]): Regression targets of each
                  level, (l, t, r, b).
                - bbox_weights (Tensor): Bbox weights of all levels.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(
            featmap_sizes
        ) == self.atss_prior_generator.num_levels == \
            self.fcos_prior_generator.num_levels

        device = cls_scores[0].device

        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, batch_img_metas, device=device)

        cls_reg_targets = ATSSHead.get_targets(
            self,
            anchor_list,
            valid_flag_list,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore,
            unmap_outputs=True)

        (anchor_list, labels_list, label_weights_list, bbox_targets_list,
         bbox_weights_list, avg_factor) = cls_reg_targets

        bbox_targets_list = [
            bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
        ]

        num_imgs = len(batch_img_metas)
        # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
        bbox_targets_list = self.transform_bbox_targets(
            bbox_targets_list, mlvl_points, num_imgs)

        labels_list = [labels.reshape(-1) for labels in labels_list]
        label_weights_list = [
            label_weights.reshape(-1) for label_weights in label_weights_list
        ]
        bbox_weights_list = [
            bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
        ]
        label_weights = torch.cat(label_weights_list)
        bbox_weights = torch.cat(bbox_weights_list)
        return labels_list, label_weights, bbox_targets_list, bbox_weights
    def transform_bbox_targets(self, decoded_bboxes: List[Tensor],
                               mlvl_points: List[Tensor],
                               num_imgs: int) -> List[Tensor]:
        """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.

        Args:
            decoded_bboxes (list[Tensor]): Regression targets of each level,
                in the form of (x1, y1, x2, y2).
            mlvl_points (list[Tensor]): Points of each fpn level, each has
                shape (num_points, 2).
            num_imgs (int): the number of images in a batch.

        Returns:
            bbox_targets (list[Tensor]): Regression targets of each level in
                the form of (l, t, r, b).
        """
        # TODO: Re-implemented in Class PointCoder
        assert len(decoded_bboxes) == len(mlvl_points)
        num_levels = len(decoded_bboxes)
        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
        bbox_targets = []
        for i in range(num_levels):
            bbox_target = self.bbox_coder.encode(mlvl_points[i],
                                                 decoded_bboxes[i])
            bbox_targets.append(bbox_target)

        return bbox_targets
    def _load_from_state_dict(self, state_dict: dict, prefix: str,
                              local_metadata: dict, strict: bool,
                              missing_keys: Union[List[str], str],
                              unexpected_keys: Union[List[str], str],
                              error_msgs: Union[List[str], str]) -> None:
        """Override the method in the parent class to avoid changing
        parameter names."""
        pass