# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import tempfile
from collections import defaultdict
from unittest import TestCase

import numpy as np
from mmengine.fileio import dump, load
from xtcocotools.coco import COCO

from mmpose.datasets.datasets.utils import parse_pose_metainfo
from mmpose.evaluation.metrics import CocoMetric
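

# This suite exercises CocoMetric on three small fixtures (COCO, CrowdPose
# and AP-10K) in both top-down and bottom-up styles. The ground-truth
# keypoints are fed back in as predictions, so the unperturbed cases are
# expected to score AP/AR = 1.0.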
class TestCocoMetric(TestCase):

    def setUp(self):
        """Setup some variables which are used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.tmp_dir = tempfile.TemporaryDirectory()

        self.ann_file_coco = 'tests/data/coco/test_coco.json'
        meta_info_coco = dict(from_file='configs/_base_/datasets/coco.py')
        self.dataset_meta_coco = parse_pose_metainfo(meta_info_coco)
        self.coco = COCO(self.ann_file_coco)
        self.dataset_meta_coco['CLASSES'] = self.coco.loadCats(
            self.coco.getCatIds())

        self.topdown_data_coco = self._convert_ann_to_topdown_batch_data(
            self.ann_file_coco)
        assert len(self.topdown_data_coco) == 14
        self.bottomup_data_coco = self._convert_ann_to_bottomup_batch_data(
            self.ann_file_coco)
        assert len(self.bottomup_data_coco) == 4
        self.target_coco = {
            'coco/AP': 1.0,
            'coco/AP .5': 1.0,
            'coco/AP .75': 1.0,
            'coco/AP (M)': 1.0,
            'coco/AP (L)': 1.0,
            'coco/AR': 1.0,
            'coco/AR .5': 1.0,
            'coco/AR .75': 1.0,
            'coco/AR (M)': 1.0,
            'coco/AR (L)': 1.0,
        }

        self.ann_file_crowdpose = 'tests/data/crowdpose/test_crowdpose.json'
        self.coco_crowdpose = COCO(self.ann_file_crowdpose)
        meta_info_crowdpose = dict(
            from_file='configs/_base_/datasets/crowdpose.py')
        self.dataset_meta_crowdpose = parse_pose_metainfo(meta_info_crowdpose)
        self.dataset_meta_crowdpose['CLASSES'] = self.coco_crowdpose.loadCats(
            self.coco_crowdpose.getCatIds())

        self.topdown_data_crowdpose = self._convert_ann_to_topdown_batch_data(
            self.ann_file_crowdpose)
        assert len(self.topdown_data_crowdpose) == 5
        self.bottomup_data_crowdpose = \
            self._convert_ann_to_bottomup_batch_data(self.ann_file_crowdpose)
        assert len(self.bottomup_data_crowdpose) == 2
        self.target_crowdpose = {
            'crowdpose/AP': 1.0,
            'crowdpose/AP .5': 1.0,
            'crowdpose/AP .75': 1.0,
            'crowdpose/AR': 1.0,
            'crowdpose/AR .5': 1.0,
            'crowdpose/AR .75': 1.0,
            'crowdpose/AP(E)': -1.0,
            'crowdpose/AP(M)': 1.0,
            'crowdpose/AP(H)': -1.0,
        }

        self.ann_file_ap10k = 'tests/data/ap10k/test_ap10k.json'
        self.coco_ap10k = COCO(self.ann_file_ap10k)
        meta_info_ap10k = dict(from_file='configs/_base_/datasets/ap10k.py')
        self.dataset_meta_ap10k = parse_pose_metainfo(meta_info_ap10k)
        self.dataset_meta_ap10k['CLASSES'] = self.coco_ap10k.loadCats(
            self.coco_ap10k.getCatIds())

        self.topdown_data_ap10k = self._convert_ann_to_topdown_batch_data(
            self.ann_file_ap10k)
        assert len(self.topdown_data_ap10k) == 2
        self.bottomup_data_ap10k = self._convert_ann_to_bottomup_batch_data(
            self.ann_file_ap10k)
        assert len(self.bottomup_data_ap10k) == 2
        self.target_ap10k = {
            'coco/AP': 1.0,
            'coco/AP .5': 1.0,
            'coco/AP .75': 1.0,
            'coco/AP (M)': -1.0,
            'coco/AP (L)': 1.0,
            'coco/AR': 1.0,
            'coco/AR .5': 1.0,
            'coco/AR .75': 1.0,
            'coco/AR (M)': -1.0,
            'coco/AR (L)': 1.0,
        }
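
    # Top-down pipelines predict one pose per detected instance, so each
    # annotation becomes its own (data_batch, data_samples) pair with batch
    # size 1. The GT box, a 1.25x-padded bbox scale, and the GT keypoints
    # (split into xy coordinates and visibility-as-score) are packed the way
    # CocoMetric.process() consumes them.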
    def _convert_ann_to_topdown_batch_data(self, ann_file):
        """Convert annotations to topdown-style batch data."""
        topdown_data = []
        db = load(ann_file)
        imgid2info = dict()
        for img in db['images']:
            imgid2info[img['id']] = img

        for ann in db['annotations']:
            w, h = ann['bbox'][2], ann['bbox'][3]
            bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4)
            bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2)
            keypoints = np.array(ann['keypoints']).reshape((1, -1, 3))

            gt_instances = {
                'bbox_scales': bbox_scales,
                'bbox_scores': np.ones((1, ), dtype=np.float32),
                'bboxes': bboxes,
            }
            pred_instances = {
                'keypoints': keypoints[..., :2],
                'keypoint_scores': keypoints[..., -1],
            }

            data = {'inputs': None}
            data_sample = {
                'id': ann['id'],
                'img_id': ann['image_id'],
                'category_id': ann.get('category_id', 1),
                'gt_instances': gt_instances,
                'pred_instances': pred_instances,
                # dummy image_shape for testing
                'ori_shape': [640, 480],
                # store the raw annotation info to test without ann_file
                'raw_ann_info': copy.deepcopy(ann),
            }

            # add crowd_index to data_sample if it is present in the
            # image_info
            if 'crowdIndex' in imgid2info[ann['image_id']]:
                data_sample['crowd_index'] = imgid2info[
                    ann['image_id']]['crowdIndex']

            # batch size = 1
            data_batch = [data]
            data_samples = [data_sample]
            topdown_data.append((data_batch, data_samples))

        return topdown_data
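
    # Bottom-up pipelines predict all poses in an image at once, so here the
    # annotations are grouped by image_id and each image becomes a single
    # sample whose instance axis has length len(anns).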
    def _convert_ann_to_bottomup_batch_data(self, ann_file):
        """Convert annotations to bottomup-style batch data."""
        img2ann = defaultdict(list)
        db = load(ann_file)
        for ann in db['annotations']:
            img2ann[ann['image_id']].append(ann)

        bottomup_data = []
        for img_id, anns in img2ann.items():
            keypoints = np.array([ann['keypoints'] for ann in anns]).reshape(
                (len(anns), -1, 3))

            gt_instances = {
                'bbox_scores': np.ones((len(anns), ), dtype=np.float32)
            }
            pred_instances = {
                'keypoints': keypoints[..., :2],
                'keypoint_scores': keypoints[..., -1],
            }

            data = {'inputs': None}
            data_sample = {
                'id': [ann['id'] for ann in anns],
                'img_id': img_id,
                'gt_instances': gt_instances,
                'pred_instances': pred_instances
            }

            # batch size = 1
            data_batch = [data]
            data_samples = [data_sample]
            bottomup_data.append((data_batch, data_samples))
        return bottomup_data

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_init(self):
        """test metric init method."""
        # test score_mode option
        with self.assertRaisesRegex(ValueError,
                                    '`score_mode` should be one of'):
            _ = CocoMetric(ann_file=self.ann_file_coco, score_mode='invalid')

        # test nms_mode option
        with self.assertRaisesRegex(ValueError, '`nms_mode` should be one of'):
            _ = CocoMetric(ann_file=self.ann_file_coco, nms_mode='invalid')

        # test format_only option
        with self.assertRaisesRegex(
                AssertionError,
                '`outfile_prefix` can not be None when `format_only` is True'):
            _ = CocoMetric(
                ann_file=self.ann_file_coco,
                format_only=True,
                outfile_prefix=None)
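
    # `_sort_and_unique_bboxes` is expected to deduplicate instances before
    # evaluation: one sample is processed twice below, yet the results must
    # still match the clean single-pass targets.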
    def test_other_methods(self):
        """test other useful methods."""
        # test `_sort_and_unique_bboxes` method
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco, score_mode='bbox', nms_mode='none')
        metric_coco.dataset_meta = self.dataset_meta_coco
        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)
        # process one extra (duplicated) sample
        data_batch, data_samples = self.topdown_data_coco[0]
        metric_coco.process(data_batch, data_samples)
        # the extra sample is counted in `size`
        eval_results = metric_coco.evaluate(
            size=len(self.topdown_data_coco) + 1)
        self.assertDictEqual(eval_results, self.target_coco)
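
    # With `format_only=True` the metric only dumps predictions to
    # `<outfile_prefix>.keypoints.json` and returns an empty dict instead of
    # computing AP/AR. Conversely, evaluating with `format_only=False` must
    # fail early when the ann_file has no GT annotations.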
    def test_format_only(self):
        """test `format_only` option."""
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            format_only=True,
            outfile_prefix=f'{self.tmp_dir.name}/test',
            score_mode='bbox_keypoint',
            nms_mode='oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco
        # process one sample
        data_batch, data_samples = self.topdown_data_coco[0]
        metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=1)
        self.assertDictEqual(eval_results, {})
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json')))

        # test when gt annotations are absent
        db_ = load(self.ann_file_coco)
        del db_['annotations']
        tmp_ann_file = osp.join(self.tmp_dir.name, 'temp_ann.json')
        dump(db_, tmp_ann_file, sort_keys=True, indent=4)
        with self.assertRaisesRegex(
                AssertionError,
                'Ground truth annotations are required for evaluation'):
            _ = CocoMetric(ann_file=tmp_ann_file, format_only=False)

    def test_bottomup_evaluate(self):
        """test bottomup-style COCO metric evaluation."""
        # case 1: score_mode='bbox', nms_mode='none'
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test',
            score_mode='bbox',
            nms_mode='none')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.bottomup_data_coco:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(self.bottomup_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test.keypoints.json')))
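
    # The alignment test rebuilds the top-down samples with deliberately
    # perturbed predictions (x scaled by 0.98, y by 1.02, visibility scores
    # by 0.8) so that AP/AR fall strictly below 1.0; the hard-coded targets
    # are the values the original TopDownCocoDataset evaluation produced,
    # guarding CocoMetric against regressions.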
    def test_topdown_alignment(self):
        """Test whether the output of CocoMetric and the original
        TopDownCocoDataset are the same."""
        topdown_data = []
        db = load(self.ann_file_coco)

        for ann in db['annotations']:
            w, h = ann['bbox'][2], ann['bbox'][3]
            bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4)
            bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2)

            keypoints = np.array(
                ann['keypoints'], dtype=np.float32).reshape(1, -1, 3)
            keypoints[..., 0] = keypoints[..., 0] * 0.98
            keypoints[..., 1] = keypoints[..., 1] * 1.02
            keypoints[..., 2] = keypoints[..., 2] * 0.8

            gt_instances = {
                'bbox_scales': bbox_scales,
                'bbox_scores': np.ones((1, ), dtype=np.float32) * 0.98,
                'bboxes': bboxes,
            }
            pred_instances = {
                'keypoints': keypoints[..., :2],
                'keypoint_scores': keypoints[..., -1],
            }

            data = {'inputs': None}
            data_sample = {
                'id': ann['id'],
                'img_id': ann['image_id'],
                'gt_instances': gt_instances,
                'pred_instances': pred_instances
            }

            # batch size = 1
            data_batch = [data]
            data_samples = [data_sample]
            topdown_data.append((data_batch, data_samples))

        # case 1:
        # typical setting: score_mode='bbox_keypoint', nms_mode='oks_nms'
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test_align1',
            score_mode='bbox_keypoint',
            nms_mode='oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in topdown_data:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(topdown_data))

        target = {
            'coco/AP': 0.5287458745874587,
            'coco/AP .5': 0.9042904290429042,
            'coco/AP .75': 0.5009900990099009,
            'coco/AP (M)': 0.42475247524752474,
            'coco/AP (L)': 0.6219554455445544,
            'coco/AR': 0.5833333333333333,
            'coco/AR .5': 0.9166666666666666,
            'coco/AR .75': 0.5833333333333334,
            'coco/AR (M)': 0.44000000000000006,
            'coco/AR (L)': 0.6857142857142857,
        }

        for key in eval_results.keys():
            self.assertAlmostEqual(eval_results[key], target[key])

        self.assertTrue(
            osp.isfile(
                osp.join(self.tmp_dir.name, 'test_align1.keypoints.json')))

        # case 2: score_mode='bbox_rle', nms_mode='oks_nms'
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test_align2',
            score_mode='bbox_rle',
            nms_mode='oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in topdown_data:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(topdown_data))

        target = {
            'coco/AP': 0.5004950495049505,
            'coco/AP .5': 0.8836633663366337,
            'coco/AP .75': 0.4679867986798679,
            'coco/AP (M)': 0.42475247524752474,
            'coco/AP (L)': 0.5814108910891089,
            'coco/AR': 0.5833333333333333,
            'coco/AR .5': 0.9166666666666666,
            'coco/AR .75': 0.5833333333333334,
            'coco/AR (M)': 0.44000000000000006,
            'coco/AR (L)': 0.6857142857142857,
        }

        for key in eval_results.keys():
            self.assertAlmostEqual(eval_results[key], target[key])

        self.assertTrue(
            osp.isfile(
                osp.join(self.tmp_dir.name, 'test_align2.keypoints.json')))

        # case 3: score_mode='bbox_keypoint', nms_mode='soft_oks_nms'
        topdown_data = []
        anns = db['annotations']
        for i, ann in enumerate(anns):
            w, h = ann['bbox'][2], ann['bbox'][3]
            bboxes = np.array(ann['bbox'], dtype=np.float32).reshape(-1, 4)
            bbox_scales = np.array([w * 1.25, h * 1.25]).reshape(-1, 2)

            keypoints = np.array(
                ann['keypoints'], dtype=np.float32).reshape(1, -1, 3)
            keypoints[..., 0] = keypoints[..., 0] * (1 - i / 100)
            keypoints[..., 1] = keypoints[..., 1] * (1 + i / 100)
            keypoints[..., 2] = keypoints[..., 2] * (1 - i / 100)

            gt_instances0 = {
                'bbox_scales': bbox_scales,
                'bbox_scores': np.ones((1, ), dtype=np.float32),
                'bboxes': bboxes,
            }
            pred_instances0 = {
                'keypoints': keypoints[..., :2],
                'keypoint_scores': keypoints[..., -1],
            }

            data0 = {'inputs': None}
            data_sample0 = {
                'id': ann['id'],
                'img_id': ann['image_id'],
                'gt_instances': gt_instances0,
                'pred_instances': pred_instances0
            }

            keypoints = np.array(
                ann['keypoints'], dtype=np.float32).reshape(1, -1, 3)
            keypoints[..., 0] = keypoints[..., 0] * (1 + i / 100)
            keypoints[..., 1] = keypoints[..., 1] * (1 - i / 100)
            keypoints[..., 2] = keypoints[..., 2] * (1 - 2 * i / 100)

            gt_instances1 = {
                'bbox_scales': bbox_scales,
                'bboxes': bboxes,
                'bbox_scores': np.ones(
                    (1, ), dtype=np.float32) * (1 - 2 * i / 100)
            }
            pred_instances1 = {
                'keypoints': keypoints[..., :2],
                'keypoint_scores': keypoints[..., -1],
            }

            data1 = {'inputs': None}
            data_sample1 = {
                'id': ann['id'] + 1,
                'img_id': ann['image_id'],
                'gt_instances': gt_instances1,
                'pred_instances': pred_instances1
            }

            # batch size = 2
            data_batch = [data0, data1]
            data_samples = [data_sample0, data_sample1]
            topdown_data.append((data_batch, data_samples))

        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test_align3',
            score_mode='bbox_keypoint',
            keypoint_score_thr=0.2,
            nms_thr=0.9,
            nms_mode='soft_oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in topdown_data:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(topdown_data) * 2)

        target = {
            'coco/AP': 0.17073707370737073,
            'coco/AP .5': 0.25055005500550054,
            'coco/AP .75': 0.10671067106710669,
            'coco/AP (M)': 0.0,
            'coco/AP (L)': 0.29315181518151806,
            'coco/AR': 0.2416666666666666,
            'coco/AR .5': 0.3333333333333333,
            'coco/AR .75': 0.16666666666666666,
            'coco/AR (M)': 0.0,
            'coco/AR (L)': 0.41428571428571426,
        }

        for key in eval_results.keys():
            self.assertAlmostEqual(eval_results[key], target[key])

        self.assertTrue(
            osp.isfile(
                osp.join(self.tmp_dir.name, 'test_align3.keypoints.json')))
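
    # End-to-end top-down evaluation across score_mode/nms_mode combinations,
    # with and without an ann_file (the no-ann_file cases rebuild the GT from
    # the stored `raw_ann_info` and dump it to `<outfile_prefix>.gt.json`),
    # and on the CrowdPose and AP-10K fixtures.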
    def test_topdown_evaluate(self):
        """test topdown-style COCO metric evaluation."""
        # case 1: score_mode='bbox', nms_mode='none'
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test1',
            score_mode='bbox',
            nms_mode='none')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test1.keypoints.json')))

        # case 2: score_mode='bbox_keypoint', nms_mode='oks_nms'
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test2',
            score_mode='bbox_keypoint',
            nms_mode='oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test2.keypoints.json')))

        # case 3: score_mode='bbox_rle', nms_mode='soft_oks_nms'
        metric_coco = CocoMetric(
            ann_file=self.ann_file_coco,
            outfile_prefix=f'{self.tmp_dir.name}/test3',
            score_mode='bbox_rle',
            nms_mode='soft_oks_nms')
        metric_coco.dataset_meta = self.dataset_meta_coco

        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test3.keypoints.json')))

        # case 4: test without providing ann_file
        metric_coco = CocoMetric(outfile_prefix=f'{self.tmp_dir.name}/test4')
        metric_coco.dataset_meta = self.dataset_meta_coco
        # process samples
        for data_batch, data_samples in self.topdown_data_coco:
            metric_coco.process(data_batch, data_samples)
        eval_results = metric_coco.evaluate(size=len(self.topdown_data_coco))
        self.assertDictEqual(eval_results, self.target_coco)
        # test whether the annotations are converted to COCO format
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test4.gt.json')))
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test4.keypoints.json')))

        # case 5: test CrowdPose dataset
        metric_crowdpose = CocoMetric(
            ann_file=self.ann_file_crowdpose,
            outfile_prefix=f'{self.tmp_dir.name}/test5',
            use_area=False,
            iou_type='keypoints_crowd',
            prefix='crowdpose')
        metric_crowdpose.dataset_meta = self.dataset_meta_crowdpose
        # process samples
        for data_batch, data_samples in self.topdown_data_crowdpose:
            metric_crowdpose.process(data_batch, data_samples)
        eval_results = metric_crowdpose.evaluate(
            size=len(self.topdown_data_crowdpose))
        self.assertDictEqual(eval_results, self.target_crowdpose)
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test5.keypoints.json')))

        # case 6: test CrowdPose dataset without ann_file
        metric_crowdpose = CocoMetric(
            outfile_prefix=f'{self.tmp_dir.name}/test6',
            use_area=False,
            iou_type='keypoints_crowd',
            prefix='crowdpose')
        metric_crowdpose.dataset_meta = self.dataset_meta_crowdpose
        # process samples
        for data_batch, data_samples in self.topdown_data_crowdpose:
            metric_crowdpose.process(data_batch, data_samples)
        eval_results = metric_crowdpose.evaluate(
            size=len(self.topdown_data_crowdpose))
        self.assertDictEqual(eval_results, self.target_crowdpose)
        # test whether the annotations are converted to COCO format
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test6.gt.json')))
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test6.keypoints.json')))

        # case 7: test AP10k dataset
        metric_ap10k = CocoMetric(
            ann_file=self.ann_file_ap10k,
            outfile_prefix=f'{self.tmp_dir.name}/test7')
        metric_ap10k.dataset_meta = self.dataset_meta_ap10k
        # process samples
        for data_batch, data_samples in self.topdown_data_ap10k:
            metric_ap10k.process(data_batch, data_samples)
        eval_results = metric_ap10k.evaluate(size=len(self.topdown_data_ap10k))
        for key in self.target_ap10k:
            self.assertAlmostEqual(eval_results[key], self.target_ap10k[key])
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test7.keypoints.json')))

        # case 8: test AP10k dataset without ann_file
        metric_ap10k = CocoMetric(outfile_prefix=f'{self.tmp_dir.name}/test8')
        metric_ap10k.dataset_meta = self.dataset_meta_ap10k
        # process samples
        for data_batch, data_samples in self.topdown_data_ap10k:
            metric_ap10k.process(data_batch, data_samples)
        eval_results = metric_ap10k.evaluate(size=len(self.topdown_data_ap10k))
        for key in self.target_ap10k:
            self.assertAlmostEqual(eval_results[key], self.target_ap10k[key])
        # test whether the annotations are converted to COCO format
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test8.gt.json')))
        self.assertTrue(
            osp.isfile(osp.join(self.tmp_dir.name, 'test8.keypoints.json')))