# test_topdown.py
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import unittest
  3. from unittest import TestCase
  4. import torch
  5. from parameterized import parameterized
  6. from mmpose.structures import PoseDataSample
  7. from mmpose.testing import get_packed_inputs, get_pose_estimator_cfg
  8. from mmpose.utils import register_all_modules
  9. configs = [
  10. 'body_2d_keypoint/topdown_heatmap/coco/'
  11. 'td-hm_hrnet-w32_8xb64-210e_coco-256x192.py',
  12. 'configs/body_2d_keypoint/topdown_regression/coco/'
  13. 'td-reg_res50_8xb64-210e_coco-256x192.py',
  14. 'configs/body_2d_keypoint/simcc/coco/'
  15. 'simcc_mobilenetv2_wo-deconv-8xb64-210e_coco-256x192.py',
  16. ]
  17. configs_with_devices = [(config, ('cpu', 'cuda')) for config in configs]
  18. class TestTopdownPoseEstimator(TestCase):
  19. def setUp(self) -> None:
  20. register_all_modules()
  21. @parameterized.expand(configs)
  22. def test_init(self, config):
  23. model_cfg = get_pose_estimator_cfg(config)
  24. model_cfg.backbone.init_cfg = None
  25. from mmpose.models import build_pose_estimator
  26. model = build_pose_estimator(model_cfg)
  27. self.assertTrue(model.backbone)
  28. self.assertTrue(model.head)
  29. if model_cfg.get('neck', None):
  30. self.assertTrue(model.neck)
  31. @parameterized.expand(configs_with_devices)
  32. def test_forward_loss(self, config, devices):
  33. model_cfg = get_pose_estimator_cfg(config)
  34. model_cfg.backbone.init_cfg = None
  35. from mmpose.models import build_pose_estimator
  36. for device in devices:
  37. model = build_pose_estimator(model_cfg)
  38. if device == 'cuda':
  39. if not torch.cuda.is_available():
  40. return unittest.skip('test requires GPU and torch+cuda')
  41. model = model.cuda()
  42. packed_inputs = get_packed_inputs(2)
  43. data = model.data_preprocessor(packed_inputs, training=True)
  44. losses = model.forward(**data, mode='loss')
  45. self.assertIsInstance(losses, dict)
  46. @parameterized.expand(configs_with_devices)
  47. def test_forward_predict(self, config, devices):
  48. model_cfg = get_pose_estimator_cfg(config)
  49. model_cfg.backbone.init_cfg = None
  50. from mmpose.models import build_pose_estimator
  51. for device in devices:
  52. model = build_pose_estimator(model_cfg)
  53. if device == 'cuda':
  54. if not torch.cuda.is_available():
  55. return unittest.skip('test requires GPU and torch+cuda')
  56. model = model.cuda()
  57. packed_inputs = get_packed_inputs(2)
  58. model.eval()
  59. with torch.no_grad():
  60. data = model.data_preprocessor(packed_inputs, training=True)
  61. batch_results = model.forward(**data, mode='predict')
  62. self.assertEqual(len(batch_results), 2)
  63. self.assertIsInstance(batch_results[0], PoseDataSample)
  64. @parameterized.expand(configs_with_devices)
  65. def test_forward_tensor(self, config, devices):
  66. model_cfg = get_pose_estimator_cfg(config)
  67. model_cfg.backbone.init_cfg = None
  68. from mmpose.models import build_pose_estimator
  69. for device in devices:
  70. model = build_pose_estimator(model_cfg)
  71. if device == 'cuda':
  72. if not torch.cuda.is_available():
  73. return unittest.skip('test requires GPU and torch+cuda')
  74. model = model.cuda()
  75. packed_inputs = get_packed_inputs(2)
  76. data = model.data_preprocessor(packed_inputs, training=True)
  77. batch_results = model.forward(**data, mode='tensor')
  78. self.assertIsInstance(batch_results, (tuple, torch.Tensor))