test_crowdhuman_metric.py

import os.path as osp
import tempfile
from unittest import TestCase

import numpy as np
import torch

from mmdet.evaluation import CrowdHumanMetric
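

# CrowdHumanMetric reports the three standard CrowdHuman benchmark metrics:
# AP (average precision), mMR (log-average miss rate, lower is better) and
# JI (Jaccard index), all computed from the predictions fed to `process()`.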
class TestCrowdHumanMetric(TestCase):

    def _create_dummy_results(self):
        # Boxes start out in (x, y, w, h) form; adding the x/y offsets
        # converts them in place to the (x1, y1, x2, y2) form the metric
        # expects, e.g. [1330, 317, 418, 1338] -> [1330, 317, 1748, 1655].
        bboxes = np.array([[1330, 317, 418, 1338], [792, 24, 723, 2017],
                           [693, 291, 307, 894], [522, 290, 285, 826],
                           [728, 336, 175, 602], [92, 337, 267, 681]])
        bboxes[:, 2:4] += bboxes[:, 0:2]
        scores = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
        return dict(
            bboxes=torch.from_numpy(bboxes), scores=torch.from_numpy(scores))

    def setUp(self):
        self.tmp_dir = tempfile.TemporaryDirectory()
        self.ann_file_path = \
            'tests/data/crowdhuman_dataset/test_annotation_train.odgt'

    def tearDown(self):
        self.tmp_dir.cleanup()

    def test_init(self):
        # An unsupported metric name should be rejected at construction time.
        with self.assertRaisesRegex(KeyError, 'metric should be one of'):
            CrowdHumanMetric(ann_file=self.ann_file_path, metric='unknown')

    def test_evaluate(self):
        # Create dummy predictions for a single image.
        dummy_pred = self._create_dummy_results()

        crowdhuman_metric = CrowdHumanMetric(
            ann_file=self.ann_file_path,
            outfile_prefix=f'{self.tmp_dir.name}/test')
        crowdhuman_metric.process({}, [
            dict(
                pred_instances=dummy_pred,
                img_id='283554,35288000868e92d4',
                ori_shape=(1640, 1640))
        ])
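        # `process()` follows the mmengine BaseMetric interface: the first
        # argument is the data batch (unused here) and the second a list of
        # per-image prediction dicts; `evaluate(size=...)` then aggregates
        # them, with `size` the number of samples in the dataset.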
        eval_results = crowdhuman_metric.evaluate(size=1)
        target = {
            'crowd_human/mAP': 0.8333,
            'crowd_human/mMR': 0.0,
            'crowd_human/JI': 1.0
        }
        self.assertDictEqual(eval_results, target)
        # A result JSON should be written under the given outfile_prefix.
        self.assertTrue(osp.isfile(osp.join(self.tmp_dir.name, 'test.json')))
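

# Entry point so this file can also be run directly with
# `python test_crowdhuman_metric.py`; the surrounding suite is typically
# driven by pytest, which discovers TestCase subclasses on its own.
if __name__ == '__main__':
    import unittest
    unittest.main()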