test_coco_wholebody_metric.py

# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import tempfile
from collections import defaultdict
from unittest import TestCase

import numpy as np
from mmengine.fileio import dump, load
from xtcocotools.coco import COCO

from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.evaluation.metrics import CocoWholeBodyMetric


class TestCocoWholeBodyMetric(TestCase):

    def setUp(self):
        """Set up some variables that are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> doCleanups()
        """
        self.tmp_dir = tempfile.TemporaryDirectory()

        self.ann_file_coco = 'tests/data/coco/test_coco_wholebody.json'
        meta_info_coco = dict(
            from_file='configs/_base_/datasets/coco_wholebody.py')
        self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco)
        self.coco = COCO(self.ann_file_coco)
        self.dataset_meta_coco['CLASSES'] = self.coco.loadCats(
            self.coco.getCatIds())

        self.topdown_data_coco = self._convert_ann_to_topdown_batch_data(
            self.ann_file_coco)
        assert len(self.topdown_data_coco) == 14
        self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data(
            self.ann_file_coco)
        assert len(self.bottomup_data_coco) == 4
        self.target_coco = {
            'coco-wholebody/AP': 1.0,
            'coco-wholebody/AP .5': 1.0,
            'coco-wholebody/AP .75': 1.0,
            'coco-wholebody/AP (M)': 1.0,
            'coco-wholebody/AP (L)': 1.0,
            'coco-wholebody/AR': 1.0,
            'coco-wholebody/AR .5': 1.0,
            'coco-wholebody/AR .75': 1.0,
            'coco-wholebody/AR (M)': 1.0,
            'coco-wholebody/AR (L)': 1.0,
        }
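
    # The helpers below turn raw COCO-WholeBody annotations into
    # (data_batch, data_samples) pairs with batch size 1, i.e. the interface
    # consumed by CocoWholeBodyMetric.process(). The predicted keypoints are
    # copied from the ground truth, which is why every AP/AR target above
    # is exactly 1.0.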
    def _convert_ann_to_topdown_batch_data(self, ann_file):
        """Convert annotations to topdown-style batch data."""
        topdown_data = []
        db = load(ann_file)
        imgid2info = dict()
        for img in db['images']:
            imgid2info[img['id']] = img
        for ann in db['annotations']:
            w, h = ann['bbox'][2], ann['bbox'][3]
            bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4)
            bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2)
            _keypoints = np.array(ann['keypoints'] + ann['foot_kpts'] +
                                  ann['face_kpts'] + ann['lefthand_kpts'] +
                                  ann['righthand_kpts']).reshape(1, -1, 3)

            gt_instances = {
                'bbox_scales': bbox_scales,
                'bbox_scores': np.ones((1, ), dtype=np.float32),
                'bboxes': bboxes,
            }
            pred_instances = {
                'keypoints': _keypoints[..., :2],
                'keypoint_scores': _keypoints[..., -1],
            }

            data = {'inputs': None}
            data_sample = {
                'id': ann['id'],
                'img_id': ann['image_id'],
                'category_id': ann.get('category_id', 1),
                'gt_instances': gt_instances,
                'pred_instances': pred_instances,
                # dummy image_shape for testing
                'ori_shape': [640, 480],
                # store the raw annotation info to test without ann_file
                'raw_ann_info': copy.deepcopy(ann),
            }

            # batch size = 1
            data_batch = [data]
            data_samples = [data_sample]
            topdown_data.append((data_batch, data_samples))

        return topdown_data
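
    # Unlike the topdown data above, each bottomup sample groups all annotated
    # instances of one image together and carries no bounding boxes, so only
    # bbox_scores are provided in gt_instances.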
    def _convert_ann_to_bottomup_batch_data(self, ann_file):
        """Convert annotations to bottomup-style batch data."""
        img2ann = defaultdict(list)
        db = load(ann_file)
        for ann in db['annotations']:
            img2ann[ann['image_id']].append(ann)

        bottomup_data = []
        for img_id, anns in img2ann.items():
            _keypoints = []
            for ann in anns:
                _keypoints.append(ann['keypoints'] + ann['foot_kpts'] +
                                  ann['face_kpts'] + ann['lefthand_kpts'] +
                                  ann['righthand_kpts'])
            keypoints = np.array(_keypoints).reshape((len(anns), -1, 3))

            gt_instances = {
                'bbox_scores': np.ones((len(anns)), dtype=np.float32)
            }
            pred_instances = {
                'keypoints': keypoints[..., :2],
                'keypoint_scores': keypoints[..., -1],
            }

            data = {'inputs': None}
            data_sample = {
                'id': [ann['id'] for ann in anns],
                'img_id': img_id,
                'gt_instances': gt_instances,
                'pred_instances': pred_instances
            }

            # batch size = 1
            data_batch = [data]
            data_samples = [data_sample]
            bottomup_data.append((data_batch, data_samples))

        return bottomup_data

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_init(self):
        """test metric init method."""
        # test score_mode option
        with self.assertRaisesRegex(ValueError,
                                    '`score_mode` should be one of'):
            _ = CocoWholeBodyMetric(
                ann_file=self.ann_file_coco, score_mode='invalid')

        # test nms_mode option
        with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'):
            _ = CocoWholeBodyMetric(
                ann_file=self.ann_file_coco, nms_mode='invalid')

        # test format_only option
        with self.assertRaisesRegex(
                AssertionError,
                '`outfile_prefix` can not be None when `format_only` is True'):
            _ = CocoWholeBodyMetric(
                ann_file=self.ann_file_coco,
                format_only=True,
                outfile_prefix=None)

    def test_other_methods(self):
        """test other useful methods."""
        # test `_sort_and_unique_bboxes` method
        metric_coco = CocoWholeBodyMetric(
            ann_file=self.ann_file_coco, score_mode='bbox', nms_mode='none')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)
        # process one extra (duplicated) sample
        data_batch, data_samples = self.topdown_data_coco[0]
        metric_coco.process(data_batch, data_samples)
        # the evaluation size accounts for the extra sample
        eval_results = metric_coco.evaluate(
            size=len(self.topdown_data_coco) + 1)
        self.assertDictEqual(eval_results, self.target_coco)

    def test_format_only(self):
        """test `format_only` option."""
        metric_coco = CocoWholeBodyMetric(
            ann_file=self.ann_file_coco,
            format_only=True,
            outfile_prefix=f'{self.tmp_dir.name}/test',
            score_mode='bbox_keypoint',
            nms_mode='oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco
        # process one sample
        data_batch, data_samples = self.topdown_data_coco[0]
        metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=1)
        self.assertDictEqual(eval_results, {})
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json')))

        # test when gt annotations are absent
        db_ = load(self.ann_file_coco)
        del db_['annotations']
        tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json')
        dump(db_, tmp_ann_file, sort_keys=True, indent=4)
        with self.assertRaisesRegex(
                AssertionError,
                'Ground truth annotations are required for evaluation'):
            _ = CocoWholeBodyMetric(ann_file=tmp_ann_file, format_only=False)

    def test_bottomup_evaluate(self):
        """test bottomup-style COCO metric evaluation."""
        # case 1: score_mode='bbox', nms_mode='none'
        metric_coco = CocoWholeBodyMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test',
            score_mode='bbox',
            nms_mode='none')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.bottomup_data_coco:
            metric_coco.process(data_batch, data_samples)

        eval_results = metric_coco.evaluate(size=len(self.bottomup_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json')))

    def test_topdown_evaluate(self):
        """test topdown-style COCO metric evaluation."""
        # case 1: score_mode='bbox', nms_mode='none'
        metric_coco = CocoWholeBodyMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test1',
            score_mode='bbox',
            nms_mode='none')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)

        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test1.keypoints.json')))

        # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms'
        metric_coco = CocoWholeBodyMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test2',
            score_mode='bbox_keypoint',
            nms_mode='oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)

        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test2.keypoints.json')))

        # case 3: score_mode='bbox_rle', nms_mode='soft_oks_nms'
        metric_coco = CocoWholeBodyMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test3',
            score_mode='bbox_rle',
            nms_mode='soft_oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)

        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test3.keypoints.json')))

        # case 4: test without providing ann_file
        metric_coco = CocoWholeBodyMetric(
            outfile_prefix=f'{self.tmp_dir.name}/test4')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)

        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        # test whether the annotations are converted to COCO format
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test4.gt.json')))
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test4.keypoints.json')))
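
# To run this module locally (the path below is an assumption; adjust it to
# where the file lives in your mmpose checkout), something like this should
# work from the repository root:
#     pytest tests/test_evaluation/test_metrics/test_coco_wholebody_metric.py -v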