base_dense_head.py 24 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import copy
  3. from abc import ABCMeta, abstractmethod
  4. from inspect import signature
  5. from typing import List, Optional, Tuple
  6. import torch
  7. from mmcv.ops import batched_nms
  8. from mmengine.config import ConfigDict
  9. from mmengine.model import BaseModule, constant_init
  10. from mmengine.structures import InstanceData
  11. from torch import Tensor
  12. from mmdet.structures import SampleList
  13. from mmdet.structures.bbox import (cat_boxes, get_box_tensor, get_box_wh,
  14. scale_boxes)
  15. from mmdet.utils import InstanceList, OptMultiConfig
  16. from ..test_time_augs import merge_aug_results
  17. from ..utils import (filter_scores_and_topk, select_single_mlvl,
  18. unpack_gt_instances)
class BaseDenseHead(BaseModule, metaclass=ABCMeta):
    """Base class for DenseHeads.

    1. The ``init_weights`` method is used to initialize densehead's
    model parameters. After detector initialization, ``init_weights``
    is triggered when ``detector.init_weights()`` is called externally.

    2. The ``loss`` method is used to calculate the loss of densehead,
    which includes two steps: (1) the densehead model performs forward
    propagation to obtain the feature maps (2) The ``loss_by_feat`` method
    is called based on the feature maps to calculate the loss.

    .. code:: text

        loss(): forward() -> loss_by_feat()

    3. The ``predict`` method is used to predict detection results,
    which includes two steps: (1) the densehead model performs forward
    propagation to obtain the feature maps (2) The ``predict_by_feat`` method
    is called based on the feature maps to predict detection results including
    post-processing.

    .. code:: text

        predict(): forward() -> predict_by_feat()

    4. The ``loss_and_predict`` method is used to return loss and detection
    results at the same time. It will call densehead's ``forward``,
    ``loss_by_feat`` and ``predict_by_feat`` methods in order. If one-stage is
    used as RPN, the densehead needs to return both losses and predictions.
    This predictions is used as the proposal of roihead.

    .. code:: text

        loss_and_predict(): forward() -> loss_by_feat() -> predict_by_feat()

    Args:
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self, init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        # `_raw_positive_infos` will be used in `get_positive_infos`, which
        # can get positive information.
        self._raw_positive_infos = dict()
  50. def init_weights(self) -> None:
  51. """Initialize the weights."""
  52. super().init_weights()
  53. # avoid init_cfg overwrite the initialization of `conv_offset`
  54. for m in self.modules():
  55. # DeformConv2dPack, ModulatedDeformConv2dPack
  56. if hasattr(m, 'conv_offset'):
  57. constant_init(m.conv_offset, 0)
  58. def get_positive_infos(self) -> InstanceList:
  59. """Get positive information from sampling results.
  60. Returns:
  61. list[:obj:`InstanceData`]: Positive information of each image,
  62. usually including positive bboxes, positive labels, positive
  63. priors, etc.
  64. """
  65. if len(self._raw_positive_infos) == 0:
  66. return None
  67. sampling_results = self._raw_positive_infos.get(
  68. 'sampling_results', None)
  69. assert sampling_results is not None
  70. positive_infos = []
  71. for sampling_result in enumerate(sampling_results):
  72. pos_info = InstanceData()
  73. pos_info.bboxes = sampling_result.pos_gt_bboxes
  74. pos_info.labels = sampling_result.pos_gt_labels
  75. pos_info.priors = sampling_result.pos_priors
  76. pos_info.pos_assigned_gt_inds = \
  77. sampling_result.pos_assigned_gt_inds
  78. pos_info.pos_inds = sampling_result.pos_inds
  79. positive_infos.append(pos_info)
  80. return positive_infos
  81. def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:
  82. """Perform forward propagation and loss calculation of the detection
  83. head on the features of the upstream network.
  84. Args:
  85. x (tuple[Tensor]): Features from the upstream network, each is
  86. a 4D-tensor.
  87. batch_data_samples (List[:obj:`DetDataSample`]): The Data
  88. Samples. It usually includes information such as
  89. `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
  90. Returns:
  91. dict: A dictionary of loss components.
  92. """
  93. outs = self(x)
  94. outputs = unpack_gt_instances(batch_data_samples)
  95. (batch_gt_instances, batch_gt_instances_ignore,
  96. batch_img_metas) = outputs
  97. loss_inputs = outs + (batch_gt_instances, batch_img_metas,
  98. batch_gt_instances_ignore)
  99. losses = self.loss_by_feat(*loss_inputs)
  100. return losses
    @abstractmethod
    def loss_by_feat(self, **kwargs) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Subclasses must override this; the expected signature is the head's
        forward outputs followed by ground-truth instances, image metas, and
        optionally ignored instances (see ``loss``/``loss_and_predict``).
        """
        pass
  106. def loss_and_predict(
  107. self,
  108. x: Tuple[Tensor],
  109. batch_data_samples: SampleList,
  110. proposal_cfg: Optional[ConfigDict] = None
  111. ) -> Tuple[dict, InstanceList]:
  112. """Perform forward propagation of the head, then calculate loss and
  113. predictions from the features and data samples.
  114. Args:
  115. x (tuple[Tensor]): Features from FPN.
  116. batch_data_samples (list[:obj:`DetDataSample`]): Each item contains
  117. the meta information of each image and corresponding
  118. annotations.
  119. proposal_cfg (ConfigDict, optional): Test / postprocessing
  120. configuration, if None, test_cfg would be used.
  121. Defaults to None.
  122. Returns:
  123. tuple: the return value is a tuple contains:
  124. - losses: (dict[str, Tensor]): A dictionary of loss components.
  125. - predictions (list[:obj:`InstanceData`]): Detection
  126. results of each image after the post process.
  127. """
  128. outputs = unpack_gt_instances(batch_data_samples)
  129. (batch_gt_instances, batch_gt_instances_ignore,
  130. batch_img_metas) = outputs
  131. outs = self(x)
  132. loss_inputs = outs + (batch_gt_instances, batch_img_metas,
  133. batch_gt_instances_ignore)
  134. losses = self.loss_by_feat(*loss_inputs)
  135. predictions = self.predict_by_feat(
  136. *outs, batch_img_metas=batch_img_metas, cfg=proposal_cfg)
  137. return losses, predictions
  138. def predict(self,
  139. x: Tuple[Tensor],
  140. batch_data_samples: SampleList,
  141. rescale: bool = False) -> InstanceList:
  142. """Perform forward propagation of the detection head and predict
  143. detection results on the features of the upstream network.
  144. Args:
  145. x (tuple[Tensor]): Multi-level features from the
  146. upstream network, each is a 4D-tensor.
  147. batch_data_samples (List[:obj:`DetDataSample`]): The Data
  148. Samples. It usually includes information such as
  149. `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
  150. rescale (bool, optional): Whether to rescale the results.
  151. Defaults to False.
  152. Returns:
  153. list[obj:`InstanceData`]: Detection results of each image
  154. after the post process.
  155. """
  156. batch_img_metas = [
  157. data_samples.metainfo for data_samples in batch_data_samples
  158. ]
  159. outs = self(x)
  160. predictions = self.predict_by_feat(
  161. *outs, batch_img_metas=batch_img_metas, rescale=rescale)
  162. return predictions
  163. def predict_by_feat(self,
  164. cls_scores: List[Tensor],
  165. bbox_preds: List[Tensor],
  166. score_factors: Optional[List[Tensor]] = None,
  167. batch_img_metas: Optional[List[dict]] = None,
  168. cfg: Optional[ConfigDict] = None,
  169. rescale: bool = False,
  170. with_nms: bool = True) -> InstanceList:
  171. """Transform a batch of output features extracted from the head into
  172. bbox results.
  173. Note: When score_factors is not None, the cls_scores are
  174. usually multiplied by it then obtain the real score used in NMS,
  175. such as CenterNess in FCOS, IoU branch in ATSS.
  176. Args:
  177. cls_scores (list[Tensor]): Classification scores for all
  178. scale levels, each is a 4D-tensor, has shape
  179. (batch_size, num_priors * num_classes, H, W).
  180. bbox_preds (list[Tensor]): Box energies / deltas for all
  181. scale levels, each is a 4D-tensor, has shape
  182. (batch_size, num_priors * 4, H, W).
  183. score_factors (list[Tensor], optional): Score factor for
  184. all scale level, each is a 4D-tensor, has shape
  185. (batch_size, num_priors * 1, H, W). Defaults to None.
  186. batch_img_metas (list[dict], Optional): Batch image meta info.
  187. Defaults to None.
  188. cfg (ConfigDict, optional): Test / postprocessing
  189. configuration, if None, test_cfg would be used.
  190. Defaults to None.
  191. rescale (bool): If True, return boxes in original image space.
  192. Defaults to False.
  193. with_nms (bool): If True, do nms before return boxes.
  194. Defaults to True.
  195. Returns:
  196. list[:obj:`InstanceData`]: Object detection results of each image
  197. after the post process. Each item usually contains following keys.
  198. - scores (Tensor): Classification scores, has a shape
  199. (num_instance, )
  200. - labels (Tensor): Labels of bboxes, has a shape
  201. (num_instances, ).
  202. - bboxes (Tensor): Has a shape (num_instances, 4),
  203. the last dimension 4 arrange as (x1, y1, x2, y2).
  204. """
  205. assert len(cls_scores) == len(bbox_preds)
  206. if score_factors is None:
  207. # e.g. Retina, FreeAnchor, Foveabox, etc.
  208. with_score_factors = False
  209. else:
  210. # e.g. FCOS, PAA, ATSS, AutoAssign, etc.
  211. with_score_factors = True
  212. assert len(cls_scores) == len(score_factors)
  213. num_levels = len(cls_scores)
  214. featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
  215. mlvl_priors = self.prior_generator.grid_priors(
  216. featmap_sizes,
  217. dtype=cls_scores[0].dtype,
  218. device=cls_scores[0].device)
  219. result_list = []
  220. for img_id in range(len(batch_img_metas)):
  221. img_meta = batch_img_metas[img_id]
  222. cls_score_list = select_single_mlvl(
  223. cls_scores, img_id, detach=True)
  224. bbox_pred_list = select_single_mlvl(
  225. bbox_preds, img_id, detach=True)
  226. if with_score_factors:
  227. score_factor_list = select_single_mlvl(
  228. score_factors, img_id, detach=True)
  229. else:
  230. score_factor_list = [None for _ in range(num_levels)]
  231. results = self._predict_by_feat_single(
  232. cls_score_list=cls_score_list,
  233. bbox_pred_list=bbox_pred_list,
  234. score_factor_list=score_factor_list,
  235. mlvl_priors=mlvl_priors,
  236. img_meta=img_meta,
  237. cfg=cfg,
  238. rescale=rescale,
  239. with_nms=with_nms)
  240. result_list.append(results)
  241. return result_list
    def _predict_by_feat_single(self,
                                cls_score_list: List[Tensor],
                                bbox_pred_list: List[Tensor],
                                score_factor_list: List[Tensor],
                                mlvl_priors: List[Tensor],
                                img_meta: dict,
                                cfg: ConfigDict,
                                rescale: bool = False,
                                with_nms: bool = True) -> InstanceData:
        """Transform a single image's features extracted from the head into
        bbox results.

        Args:
            cls_score_list (list[Tensor]): Box scores from all scale
                levels of a single image, each item has shape
                (num_priors * num_classes, H, W).
            bbox_pred_list (list[Tensor]): Box energies / deltas from
                all scale levels of a single image, each item has shape
                (num_priors * 4, H, W).
            score_factor_list (list[Tensor]): Score factor from all scale
                levels of a single image, each item has shape
                (num_priors * 1, H, W). Items may be ``None`` for heads
                without a score-factor branch.
            mlvl_priors (list[Tensor]): Each element in the list is
                the priors of a single level in feature pyramid. In all
                anchor-based methods, it has shape (num_priors, 4). In
                all anchor-free methods, it has shape (num_priors, 2)
                when `with_stride=True`, otherwise it still has shape
                (num_priors, 4).
            img_meta (dict): Image meta info.
            cfg (mmengine.Config): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.

        Returns:
            :obj:`InstanceData`: Detection results of each image
            after the post process.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        # A None first item means no score-factor branch (the caller fills
        # the whole list with None in that case).
        if score_factor_list[0] is None:
            # e.g. Retina, FreeAnchor, etc.
            with_score_factors = False
        else:
            # e.g. FCOS, PAA, ATSS, etc.
            with_score_factors = True

        cfg = self.test_cfg if cfg is None else cfg
        # Deep-copy so that per-call mutation of the cfg cannot leak into
        # `self.test_cfg` shared across images.
        cfg = copy.deepcopy(cfg)
        img_shape = img_meta['img_shape']
        nms_pre = cfg.get('nms_pre', -1)

        mlvl_bbox_preds = []
        mlvl_valid_priors = []
        mlvl_scores = []
        mlvl_labels = []
        if with_score_factors:
            mlvl_score_factors = []
        else:
            mlvl_score_factors = None
        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \
                enumerate(zip(cls_score_list, bbox_pred_list,
                              score_factor_list, mlvl_priors)):

            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]

            # Flatten (C, H, W) maps to per-prior rows before filtering.
            dim = self.bbox_coder.encode_size
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)
            if with_score_factors:
                score_factor = score_factor.permute(1, 2,
                                                    0).reshape(-1).sigmoid()
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            if self.use_sigmoid_cls:
                scores = cls_score.sigmoid()
            else:
                # remind that we set FG labels to [0, num_class-1]
                # since mmdet v2.0
                # BG cat_id: num_class
                scores = cls_score.softmax(-1)[:, :-1]

            # After https://github.com/open-mmlab/mmdetection/pull/6268/,
            # this operation keeps fewer bboxes under the same `nms_pre`.
            # There is no difference in performance for most models. If you
            # find a slight drop in performance, you can set a larger
            # `nms_pre` than before.
            score_thr = cfg.get('score_thr', 0)

            results = filter_scores_and_topk(
                scores, score_thr, nms_pre,
                dict(bbox_pred=bbox_pred, priors=priors))
            scores, labels, keep_idxs, filtered_results = results

            bbox_pred = filtered_results['bbox_pred']
            priors = filtered_results['priors']

            # Keep the score factors aligned with the surviving priors.
            if with_score_factors:
                score_factor = score_factor[keep_idxs]

            mlvl_bbox_preds.append(bbox_pred)
            mlvl_valid_priors.append(priors)
            mlvl_scores.append(scores)
            mlvl_labels.append(labels)

            if with_score_factors:
                mlvl_score_factors.append(score_factor)

        # Decode all levels at once; clip to the (padded) input image shape.
        bbox_pred = torch.cat(mlvl_bbox_preds)
        priors = cat_boxes(mlvl_valid_priors)
        bboxes = self.bbox_coder.decode(priors, bbox_pred, max_shape=img_shape)

        results = InstanceData()
        results.bboxes = bboxes
        results.scores = torch.cat(mlvl_scores)
        results.labels = torch.cat(mlvl_labels)
        if with_score_factors:
            results.score_factors = torch.cat(mlvl_score_factors)

        return self._bbox_post_process(
            results=results,
            cfg=cfg,
            rescale=rescale,
            with_nms=with_nms,
            img_meta=img_meta)
    def _bbox_post_process(self,
                           results: InstanceData,
                           cfg: ConfigDict,
                           rescale: bool = False,
                           with_nms: bool = True,
                           img_meta: Optional[dict] = None) -> InstanceData:
        """bbox post-processing method.

        The boxes would be rescaled to the original image scale and do
        the nms operation. Usually `with_nms` is False is used for aug test.

        Args:
            results (:obj:`InstaceData`): Detection instance results,
                each item has shape (num_bboxes, ).
            cfg (ConfigDict): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Default to False.
            with_nms (bool): If True, do nms before return boxes.
                Default to True.
            img_meta (dict, optional): Image meta info. Defaults to None.

        Returns:
            :obj:`InstanceData`: Detection results of each image
            after the post process.
            Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        if rescale:
            assert img_meta.get('scale_factor') is not None
            # Invert the factor used during preprocessing to map boxes back
            # to the original image scale.
            scale_factor = [1 / s for s in img_meta['scale_factor']]
            results.bboxes = scale_boxes(results.bboxes, scale_factor)

        if hasattr(results, 'score_factors'):
            # TODO: Add sqrt operation in order to be consistent with
            #  the paper.
            # Fold the factor (e.g. centerness) into the final NMS score.
            score_factors = results.pop('score_factors')
            results.scores = results.scores * score_factors

        # filter small size bboxes
        if cfg.get('min_bbox_size', -1) >= 0:
            w, h = get_box_wh(results.bboxes)
            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
            if not valid_mask.all():
                results = results[valid_mask]

        # TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg
        if with_nms and results.bboxes.numel() > 0:
            bboxes = get_box_tensor(results.bboxes)
            det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,
                                                results.labels, cfg.nms)
            results = results[keep_idxs]
            # some nms would reweight the score, such as softnms
            results.scores = det_bboxes[:, -1]
            results = results[:cfg.max_per_img]

        return results
    def aug_test(self,
                 aug_batch_feats,
                 aug_batch_img_metas,
                 rescale=False,
                 with_ori_nms=False,
                 **kwargs):
        """Test function with test time augmentation.

        Args:
            aug_batch_feats (list[tuple[Tensor]]): The outer list
                indicates test-time augmentations and inner tuple
                indicate the multi-level feats from
                FPN, each Tensor should have a shape (B, C, H, W),
            aug_batch_img_metas (list[list[dict]]): Meta information
                of images under the different test-time augs
                (multiscale, flip, etc.). The outer list indicates
                test-time augmentations and the inner list indicates
                the batch of images.
            rescale (bool, optional): Whether to rescale the results.
                Defaults to False.
            with_ori_nms (bool): Whether execute the nms in original head.
                Defaults to False. It will be `True` when the head is
                adopted as `rpn_head`.

        Returns:
            list(obj:`InstanceData`): Detection results of the
            input images. Each item usually contains\
            following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance,)
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances,).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        # TODO: remove this for detr and deformdetr
        # NOTE(review): this path still introspects the legacy
        # `get_results` / `_get_results_single` API rather than
        # `predict_by_feat`; both must accept `with_nms` for TTA to work.
        sig_of_get_results = signature(self.get_results)
        get_results_args = [
            p.name for p in sig_of_get_results.parameters.values()
        ]
        get_results_single_sig = signature(self._get_results_single)
        get_results_single_sig_args = [
            p.name for p in get_results_single_sig.parameters.values()
        ]
        assert ('with_nms' in get_results_args) and \
            ('with_nms' in get_results_single_sig_args), \
            f'{self.__class__.__name__}' \
            'does not support test-time augmentation '

        num_imgs = len(aug_batch_img_metas[0])
        aug_batch_results = []
        # Run the head once per augmentation; NMS may be deferred to the
        # merge step below (`with_ori_nms=False`).
        for x, img_metas in zip(aug_batch_feats, aug_batch_img_metas):
            outs = self.forward(x)
            batch_instance_results = self.get_results(
                *outs,
                img_metas=img_metas,
                cfg=self.test_cfg,
                rescale=False,
                with_nms=with_ori_nms,
                **kwargs)
            aug_batch_results.append(batch_instance_results)

        # after merging, bboxes will be rescaled to the original image
        batch_results = merge_aug_results(aug_batch_results,
                                          aug_batch_img_metas)

        final_results = []
        for img_id in range(num_imgs):
            results = batch_results[img_id]
            # Cross-augmentation NMS on the merged detections.
            det_bboxes, keep_idxs = batched_nms(results.bboxes, results.scores,
                                                results.labels,
                                                self.test_cfg.nms)
            results = results[keep_idxs]
            # some nms operation may reweight the score such as softnms
            results.scores = det_bboxes[:, -1]
            results = results[:self.test_cfg.max_per_img]
            if rescale:
                # all results have been mapped to the original scale
                # in `merge_aug_results`, so just pass
                pass
            else:
                # map to the first aug image scale
                scale_factor = results.bboxes.new_tensor(
                    aug_batch_img_metas[0][img_id]['scale_factor'])
                results.bboxes = \
                    results.bboxes * scale_factor

            final_results.append(results)

        return final_results