import copy
import os
import tempfile
import unittest

import torch
from mmengine import Config, MMLogger
from mmengine.dataset import Compose
from mmengine.model import BaseModel
from torch.utils.data import Dataset

from mmdet.registry import DATASETS, MODELS
from mmdet.utils import register_all_modules
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
                                   InferenceBenchmark)
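
# Minimal stand-ins registered with MMDetection so the benchmark utilities can
# build a model and datasets without real data: ToyDetector is an empty
# BaseModel, ToyDataset returns random tensors through an identity pipeline,
# and ToyFullInitDataset additionally provides a no-op ``full_init``.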
@MODELS.register_module()
class ToyDetector(BaseModel):

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, *args, **kwargs):
        pass


@DATASETS.register_module()
class ToyDataset(Dataset):
    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        self.pipeline = Compose([lambda x: x])

    def __len__(self):
        return self.data.size(0)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

@DATASETS.register_module()
class ToyFullInitDataset(Dataset):
    METAINFO = dict()  # type: ignore
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        self.pipeline = Compose([lambda x: x])

    def __len__(self):
        return self.data.size(0)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def full_init(self):
        pass

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])
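
# End-to-end checks for InferenceBenchmark: a ToyDetector checkpoint is saved
# to a temporary file, then the benchmark is run with repeats and with the
# cudnn_benchmark, mp_cfg, fp16 and custom-logger options. Requires CUDA.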
class TestInferenceBenchmark(unittest.TestCase):

    def setUp(self) -> None:
        register_all_modules()
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1),
                env_cfg=dict(dist_cfg=dict(backend='nccl'))))
        self.max_iter = 10
        self.log_interval = 5

    @unittest.skipIf(not torch.cuda.is_available(),
                     'test requires GPU and torch+cuda')
    def test_init_and_run(self):
        checkpoint_path = os.path.join(tempfile.gettempdir(),
                                       'checkpoint.pth')
        torch.save(ToyDetector().state_dict(), checkpoint_path)

        cfg = copy.deepcopy(self.cfg)
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        results = inference_benchmark.run()

        self.assertTrue(isinstance(results, dict))
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)
        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)

        results = inference_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        self.assertTrue(inference_benchmark.data_loader.num_workers == 0)
        self.assertTrue(inference_benchmark.data_loader.batch_size == 1)

        # test repeat
        results = inference_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)

        # test cudnn_benchmark
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.cudnn_benchmark = True
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)

        # test mp_cfg
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.cudnn_benchmark = True
        cfg.env_cfg.mp_cfg = {
            'mp_start_method': 'fork',
            'opencv_num_threads': 1
        }
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)

        # test fp16
        cfg = copy.deepcopy(self.cfg)
        cfg.fp16 = True
        inference_benchmark = InferenceBenchmark(cfg, checkpoint_path, False,
                                                 False, self.max_iter,
                                                 self.log_interval)
        inference_benchmark.run(1)

        # test logger
        logger = MMLogger.get_instance(
            'mmdet', log_file='temp.log', log_level='INFO')
        inference_benchmark = InferenceBenchmark(
            cfg,
            checkpoint_path,
            False,
            False,
            self.max_iter,
            self.log_interval,
            logger=logger)
        inference_benchmark.run(1)
        self.assertTrue(os.path.exists('temp.log'))

        os.remove(checkpoint_path)
        os.remove('temp.log')
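
# Checks for DataLoaderBenchmark: verifies that the train/val/test dataloader
# settings are picked up, that repeated runs return one FPS entry per repeat,
# and that an unknown dataset_type raises an AssertionError.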
class TestDataLoaderBenchmark(unittest.TestCase):

    def setUp(self) -> None:
        register_all_modules()
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                train_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=True),
                    batch_size=2,
                    num_workers=1),
                val_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=1,
                    num_workers=2),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1),
                env_cfg=dict(dist_cfg=dict(backend='nccl'))))
        self.max_iter = 5
        self.log_interval = 1
        self.num_warmup = 1

    def test_init_and_run(self):
        cfg = copy.deepcopy(self.cfg)
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        results = dataloader_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 2)

        # test repeat
        results = dataloader_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)

        # test dataset_type input parameters error
        with self.assertRaises(AssertionError):
            DataLoaderBenchmark(cfg, False, 'training', self.max_iter,
                                self.log_interval, self.num_warmup)

        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'val',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 2)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 1)

        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'test',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        self.assertTrue(dataloader_benchmark.data_loader.num_workers == 1)
        self.assertTrue(dataloader_benchmark.data_loader.batch_size == 3)

        # test mp_cfg
        cfg = copy.deepcopy(self.cfg)
        cfg.env_cfg.mp_cfg = {
            'mp_start_method': 'fork',
            'opencv_num_threads': 1
        }
        dataloader_benchmark = DataLoaderBenchmark(cfg, False, 'train',
                                                   self.max_iter,
                                                   self.log_interval,
                                                   self.num_warmup)
        dataloader_benchmark.run(1)
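
# Checks for DatasetBenchmark on the train/test/val splits: verifies repeated
# runs, the AssertionError raised for an unknown dataset_type, and the
# full_init code path via ToyFullInitDataset.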
class TestDatasetBenchmark(unittest.TestCase):

    def setUp(self) -> None:
        register_all_modules()
        self.cfg = Config(
            dict(
                model=dict(type='ToyDetector'),
                train_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=True),
                    batch_size=2,
                    num_workers=1),
                val_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=1,
                    num_workers=2),
                test_dataloader=dict(
                    dataset=dict(type='ToyDataset'),
                    sampler=dict(type='DefaultSampler', shuffle=False),
                    batch_size=3,
                    num_workers=1)))
        self.max_iter = 5
        self.log_interval = 1
        self.num_warmup = 1

    def test_init_and_run(self):
        cfg = copy.deepcopy(self.cfg)
        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        results = dataset_benchmark.run(1)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 1)

        # test repeat
        results = dataset_benchmark.run(3)
        self.assertTrue('avg_fps' in results)
        self.assertTrue('fps_list' in results)
        self.assertEqual(len(results['fps_list']), 3)

        # test test dataset
        dataset_benchmark = DatasetBenchmark(cfg, 'test', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)

        # test val dataset
        dataset_benchmark = DatasetBenchmark(cfg, 'val', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)

        # test dataset_type input parameters error
        with self.assertRaises(AssertionError):
            DatasetBenchmark(cfg, 'training', self.max_iter,
                             self.log_interval, self.num_warmup)

        # test full_init
        cfg = copy.deepcopy(self.cfg)
        cfg.test_dataloader.dataset = dict(type='ToyFullInitDataset')
        dataset_benchmark = DatasetBenchmark(cfg, 'train', self.max_iter,
                                             self.log_interval,
                                             self.num_warmup)
        dataset_benchmark.run(1)
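
# Optional entry point (an addition, not required when the suite is collected
# by a test runner such as pytest): lets this file be executed directly.
if __name__ == '__main__':
    unittest.main()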