# d2_wrapper.py
  1. # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union

from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor

from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import BaseBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmdet.utils import ConfigType
from .base import BaseDetector

try:
    import detectron2
    from detectron2.config import get_cfg
    from detectron2.modeling import build_model
    from detectron2.structures.masks import BitMasks as D2_BitMasks
    from detectron2.structures.masks import PolygonMasks as D2_PolygonMasks
    from detectron2.utils.events import EventStorage
except ImportError:
    # Detectron2 is an optional dependency; Detectron2Wrapper.__init__
    # raises an informative ImportError when it is missing.
    detectron2 = None
  21. def _to_cfgnode_list(cfg: ConfigType,
  22. config_list: list = [],
  23. father_name: str = 'MODEL') -> tuple:
  24. """Convert the key and value of mmengine.ConfigDict into a list.
  25. Args:
  26. cfg (ConfigDict): The detectron2 model config.
  27. config_list (list): A list contains the key and value of ConfigDict.
  28. Defaults to [].
  29. father_name (str): The father name add before the key.
  30. Defaults to "MODEL".
  31. Returns:
  32. tuple:
  33. - config_list: A list contains the key and value of ConfigDict.
  34. - father_name (str): The father name add before the key.
  35. Defaults to "MODEL".
  36. """
  37. for key, value in cfg.items():
  38. name = f'{father_name}.{key.upper()}'
  39. if isinstance(value, ConfigDict) or isinstance(value, dict):
  40. config_list, fater_name = \
  41. _to_cfgnode_list(value, config_list, name)
  42. else:
  43. config_list.append(name)
  44. config_list.append(value)
  45. return config_list, father_name
  46. def convert_d2_pred_to_datasample(data_samples: SampleList,
  47. d2_results_list: list) -> SampleList:
  48. """Convert the Detectron2's result to DetDataSample.
  49. Args:
  50. data_samples (list[:obj:`DetDataSample`]): The batch
  51. data samples. It usually includes information such
  52. as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
  53. d2_results_list (list): The list of the results of Detectron2's model.
  54. Returns:
  55. list[:obj:`DetDataSample`]: Detection results of the
  56. input images. Each DetDataSample usually contain
  57. 'pred_instances'. And the ``pred_instances`` usually
  58. contains following keys.
  59. - scores (Tensor): Classification scores, has a shape
  60. (num_instance, )
  61. - labels (Tensor): Labels of bboxes, has a shape
  62. (num_instances, ).
  63. - bboxes (Tensor): Has a shape (num_instances, 4),
  64. the last dimension 4 arrange as (x1, y1, x2, y2).
  65. """
  66. assert len(data_samples) == len(d2_results_list)
  67. for data_sample, d2_results in zip(data_samples, d2_results_list):
  68. d2_instance = d2_results['instances']
  69. results = InstanceData()
  70. results.bboxes = d2_instance.pred_boxes.tensor
  71. results.scores = d2_instance.scores
  72. results.labels = d2_instance.pred_classes
  73. if d2_instance.has('pred_masks'):
  74. results.masks = d2_instance.pred_masks
  75. data_sample.pred_instances = results
  76. return data_samples
@MODELS.register_module()
class Detectron2Wrapper(BaseDetector):
    """Wrapper of a Detectron2 model.

    Input/output formats of this class follow MMDetection's convention, so a
    Detectron2 model can be trained and evaluated in MMDetection.

    Args:
        detector (:obj:`ConfigDict` or dict): The module config of
            Detectron2.
        bgr_to_rgb (bool): whether to convert image from BGR to RGB.
            Defaults to False.
        rgb_to_bgr (bool): whether to convert image from RGB to BGR.
            Defaults to False.
    """

    def __init__(self,
                 detector: ConfigType,
                 bgr_to_rgb: bool = False,
                 rgb_to_bgr: bool = False) -> None:
        if detectron2 is None:
            raise ImportError('Please install Detectron2 first')
        assert not (bgr_to_rgb and rgb_to_bgr), (
            '`bgr2rgb` and `rgb2bgr` cannot be set to True at the same time')
        super().__init__()
        # Either direction of swap is the same channel flip ([2, 1, 0]);
        # it is applied per image in ``_convert_to_d2_inputs``.
        self._channel_conversion = rgb_to_bgr or bgr_to_rgb
        # Flatten the mmengine config into Detectron2's
        # ``merge_from_list`` key/value format rooted at "MODEL".
        cfgnode_list, _ = _to_cfgnode_list(detector)
        self.cfg = get_cfg()
        self.cfg.merge_from_list(cfgnode_list)
        self.d2_model = build_model(self.cfg)
        # Detectron2 models record training metrics through an active
        # EventStorage; one is kept for the wrapper's lifetime and
        # entered around each loss computation.
        self.storage = EventStorage()

    def init_weights(self) -> None:
        """Initialization Backbone.

        NOTE: The initialization of other layers are in Detectron2,
        if users want to change the initialization way, please
        change the code in Detectron2.
        """
        from detectron2.checkpoint import DetectionCheckpointer
        checkpointer = DetectionCheckpointer(model=self.d2_model)
        # Loads the weights file configured at cfg.MODEL.WEIGHTS;
        # ``checkpointables=[]`` restores model weights only.
        checkpointer.load(self.cfg.MODEL.WEIGHTS, checkpointables=[])

    def loss(self, batch_inputs: Tensor,
             batch_data_samples: SampleList) -> Union[dict, tuple]:
        """Calculate losses from a batch of inputs and data samples.

        The inputs will first convert to the Detectron2 type and feed into
        D2 models.

        Args:
            batch_inputs (Tensor): Input images of shape (N, C, H, W).
                These should usually be mean centered and std scaled.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.

        Returns:
            dict: A dictionary of loss components.
        """
        d2_batched_inputs = self._convert_to_d2_inputs(
            batch_inputs=batch_inputs,
            batch_data_samples=batch_data_samples,
            training=True)
        with self.storage as storage:  # noqa
            losses = self.d2_model(d2_batched_inputs)
        # storage contains some training information, such as cls_accuracy.
        # you can use storage.latest() to get the detail information
        return losses

    def predict(self, batch_inputs: Tensor,
                batch_data_samples: SampleList) -> SampleList:
        """Predict results from a batch of inputs and data samples with post-
        processing.

        The inputs will first convert to the Detectron2 type and feed into
        D2 models. And the results will convert back to the MMDet type.

        Args:
            batch_inputs (Tensor): Input images of shape (N, C, H, W).
                These should usually be mean centered and std scaled.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.

        Returns:
            list[:obj:`DetDataSample`]: Detection results of the
            input images. Each DetDataSample usually contain
            'pred_instances'. And the ``pred_instances`` usually
            contains following keys.

                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        d2_batched_inputs = self._convert_to_d2_inputs(
            batch_inputs=batch_inputs,
            batch_data_samples=batch_data_samples,
            training=False)
        # results in detectron2 has already rescale
        d2_results_list = self.d2_model(d2_batched_inputs)
        batch_data_samples = convert_d2_pred_to_datasample(
            data_samples=batch_data_samples, d2_results_list=d2_results_list)
        return batch_data_samples

    def _forward(self, *args, **kwargs):
        """Network forward process.

        Usually includes backbone, neck and head forward without any post-
        processing. Not supported for wrapped Detectron2 models, which only
        expose loss/inference entry points.
        """
        raise NotImplementedError(
            f'`_forward` is not implemented in {self.__class__.__name__}')

    def extract_feat(self, *args, **kwargs):
        """Extract features from images.

        `extract_feat` will not be used in obj:``Detectron2Wrapper``.
        """
        # Intentionally a no-op: feature extraction happens inside the
        # wrapped Detectron2 model.
        pass

    def _convert_to_d2_inputs(self,
                              batch_inputs: Tensor,
                              batch_data_samples: SampleList,
                              training: bool = True) -> list:
        """Convert inputs type to support Detectron2's model.

        Args:
            batch_inputs (Tensor): Input images of shape (N, C, H, W).
                These should usually be mean centered and std scaled.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
            training (bool): Whether to enable training time processing
                (filtering of empty GT instances). Defaults to True.

        Returns:
            list[dict]: A list of dict, which will be fed into Detectron2's
            model. And the dict usually contains following keys.

                - image (Tensor): Image in (C, H, W) format.
                - instances (Instances): GT Instance.
                - height (int): the output height resolution of the model
                - width (int): the output width resolution of the model
        """
        from detectron2.data.detection_utils import filter_empty_instances
        from detectron2.structures import Boxes, Instances

        batched_d2_inputs = []
        for image, data_samples in zip(batch_inputs, batch_data_samples):
            d2_inputs = dict()
            # deal with metainfo
            meta_info = data_samples.metainfo
            d2_inputs['file_name'] = meta_info['img_path']
            # ``ori_shape`` drives D2's output resolution: predictions are
            # rescaled back to the original image size.
            d2_inputs['height'], d2_inputs['width'] = meta_info['ori_shape']
            d2_inputs['image_id'] = meta_info['img_id']
            # deal with image: swap channel order when BGR<->RGB conversion
            # was requested at construction time.
            if self._channel_conversion:
                image = image[[2, 1, 0], ...]
            d2_inputs['image'] = image
            # deal with gt_instances
            gt_instances = data_samples.gt_instances
            d2_instances = Instances(meta_info['img_shape'])
            gt_boxes = gt_instances.bboxes
            # TODO: use mmdet.structures.box.get_box_tensor after PR 8658
            # has merged
            if isinstance(gt_boxes, BaseBoxes):
                gt_boxes = gt_boxes.tensor
            d2_instances.gt_boxes = Boxes(gt_boxes)
            d2_instances.gt_classes = gt_instances.labels
            if gt_instances.get('masks', None) is not None:
                gt_masks = gt_instances.masks
                # Translate MMDet mask containers into their Detectron2
                # counterparts; both libraries use the same raw formats.
                if isinstance(gt_masks, PolygonMasks):
                    d2_instances.gt_masks = D2_PolygonMasks(gt_masks.masks)
                elif isinstance(gt_masks, BitmapMasks):
                    d2_instances.gt_masks = D2_BitMasks(gt_masks.masks)
                else:
                    raise TypeError('The type of `gt_mask` can be '
                                    '`PolygonMasks` or `BitMasks`, but get '
                                    f'{type(gt_masks)}.')
            # convert to cpu and convert back to cuda to avoid
            # some potential error
            if training:
                device = gt_boxes.device
                d2_instances = filter_empty_instances(
                    d2_instances.to('cpu')).to(device)
            d2_inputs['instances'] = d2_instances
            batched_d2_inputs.append(d2_inputs)
        return batched_d2_inputs