analyze_results.py

# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from multiprocessing import Pool

import mmcv
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.fileio import load
from mmengine.registry import init_default_scope
from mmengine.runner import Runner
from mmengine.structures import InstanceData, PixelData
from mmengine.utils import ProgressBar, check_file_exist, mkdir_or_exist

from mmdet.datasets import get_loading_pipeline
from mmdet.evaluation import eval_map
from mmdet.registry import DATASETS, RUNNERS
from mmdet.structures import DetDataSample
from mmdet.utils import replace_cfg_vals, update_data_root
from mmdet.visualization import DetLocalVisualizer
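
# Typical invocation (paths are illustrative; `results.pkl` is the prediction
# file dumped by the test script for the same config):
#   python analyze_results.py \
#       configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
#       results.pkl \
#       show_dir/ \
#       --topk 20 --show-score-thr 0.3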


def bbox_map_eval(det_result, annotation, nproc=4):
    """Evaluate mAP of single image det result.

    Args:
        det_result (list[list]): [[cls1_det, cls2_det, ...], ...].
            The outer list indicates images, and the inner list indicates
            per-class detected bboxes.
        annotation (dict): Ground truth annotations where keys of
            annotations are:

            - bboxes: numpy array of shape (n, 4)
            - labels: numpy array of shape (n, )
            - bboxes_ignore (optional): numpy array of shape (k, 4)
            - labels_ignore (optional): numpy array of shape (k, )

        nproc (int): Processes used for computing mAP.
            Default: 4.

    Returns:
        float: mAP
    """
    # use only bbox det result
    if isinstance(det_result, tuple):
        bbox_det_result = [det_result[0]]
    else:
        bbox_det_result = [det_result]

    # mAP
    iou_thrs = np.linspace(
        .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)

    processes = []
    workers = Pool(processes=nproc)
    for thr in iou_thrs:
        p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), {
            'iou_thr': thr,
            'logger': 'silent',
            'nproc': 1
        })
        processes.append(p)

    workers.close()
    workers.join()

    mean_aps = []
    for p in processes:
        mean_aps.append(p.get()[0])

    return sum(mean_aps) / len(mean_aps)
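
# For reference, the per-image `det_result` consumed by `bbox_map_eval` (and
# built in `detection_evaluate` below) is a list with one (n, 5) array of
# [x1, y1, x2, y2, score] rows per class. A purely illustrative two-class
# example:
#   det_result = [
#       np.zeros((0, 5)),                        # class 0: no detections
#       np.array([[10., 10., 50., 60., 0.9]]),   # class 1: one detection
#   ]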


class ResultVisualizer:
    """Display and save evaluation results.

    Args:
        show (bool): Whether to show the image. Default: False.
        wait_time (float): Value of waitKey param. Default: 0.
        score_thr (float): Minimum score of bboxes to be shown.
            Default: 0.
        runner (:obj:`Runner`): The runner of the visualization process.
    """

    def __init__(self, show=False, wait_time=0, score_thr=0, runner=None):
        self.show = show
        self.wait_time = wait_time
        self.score_thr = score_thr
        self.visualizer = DetLocalVisualizer()
        self.runner = runner
        self.evaluator = runner.test_evaluator

    def _save_image_gts_results(self,
                                dataset,
                                results,
                                performances,
                                out_dir=None,
                                task='det'):
        """Display or save images with ground truths and predictions from a
        model.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Object detection or panoptic segmentation
                results from the test results pkl file.
            performances (list): (index, performance) pairs with samples'
                indices in the dataset and the model's performance on them.
            out_dir (str, optional): The directory where images will be
                saved. Default: None.
            task (str): The task to be performed. Default: 'det'.
        """
        mkdir_or_exist(out_dir)

        for performance_info in performances:
            index, performance = performance_info
            data_info = dataset[index]
            data_info['gt_instances'] = data_info['instances']

            # calc save file path
            filename = data_info['img_path']
            fname, name = osp.splitext(osp.basename(filename))
            save_filename = fname + '_' + str(round(performance, 3)) + name
            out_file = osp.join(out_dir, save_filename)

            if task == 'det':
                gt_instances = InstanceData()
                gt_instances.bboxes = results[index]['gt_instances']['bboxes']
                gt_instances.labels = results[index]['gt_instances']['labels']

                pred_instances = InstanceData()
                pred_instances.bboxes = results[index]['pred_instances'][
                    'bboxes']
                pred_instances.labels = results[index]['pred_instances'][
                    'labels']
                pred_instances.scores = results[index]['pred_instances'][
                    'scores']

                data_samples = DetDataSample()
                data_samples.pred_instances = pred_instances
                data_samples.gt_instances = gt_instances

            elif task == 'seg':
                gt_panoptic_seg = PixelData()
                gt_panoptic_seg.sem_seg = results[index]['gt_seg_map']

                pred_panoptic_seg = PixelData()
                pred_panoptic_seg.sem_seg = results[index][
                    'pred_panoptic_seg']['sem_seg']

                data_samples = DetDataSample()
                data_samples.pred_panoptic_seg = pred_panoptic_seg
                data_samples.gt_panoptic_seg = gt_panoptic_seg

            img = mmcv.imread(filename, channel_order='rgb')
            self.visualizer.add_datasample(
                'image',
                img,
                data_samples,
                show=self.show,
                draw_gt=False,
                pred_score_thr=self.score_thr,
                out_file=out_file)
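
    # Images written by `_save_image_gts_results` are named
    # `<stem>_<rounded performance><ext>` under `show_dir/good` or
    # `show_dir/bad`, e.g. an image `000123.jpg` with mAP 0.873 would be
    # saved as `000123_0.873.jpg` (names here are illustrative).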

    def evaluate_and_show(self,
                          dataset,
                          results,
                          topk=20,
                          show_dir='work_dir'):
        """Evaluate and show results.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Object detection or panoptic segmentation
                results from the test results pkl file.
            topk (int): Number of the highest topk and
                lowest topk after evaluation index sorting. Default: 20.
            show_dir (str, optional): The directory where painted images
                will be saved. Default: 'work_dir'.
        """
        self.visualizer.dataset_meta = dataset.metainfo

        assert topk > 0
        if (topk * 2) > len(dataset):
            topk = len(dataset) // 2

        good_dir = osp.abspath(osp.join(show_dir, 'good'))
        bad_dir = osp.abspath(osp.join(show_dir, 'bad'))

        if 'pred_panoptic_seg' in results[0].keys():
            good_samples, bad_samples = self.panoptic_evaluate(
                dataset, results, topk=topk)
            self._save_image_gts_results(
                dataset, results, good_samples, good_dir, task='seg')
            self._save_image_gts_results(
                dataset, results, bad_samples, bad_dir, task='seg')
        elif 'pred_instances' in results[0].keys():
            good_samples, bad_samples = self.detection_evaluate(
                dataset, results, topk=topk)
            self._save_image_gts_results(
                dataset, results, good_samples, good_dir, task='det')
            self._save_image_gts_results(
                dataset, results, bad_samples, bad_dir, task='det')
        else:
            raise KeyError("expect 'pred_panoptic_seg' or 'pred_instances' "
                           'in dict result')

    def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):
        """Evaluation for object detection.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Object detection results from the test
                results pkl file.
            topk (int): Number of the highest topk and
                lowest topk after evaluation index sorting. Default: 20.
            eval_fn (callable, optional): Eval function. Default: None.

        Returns:
            tuple: A tuple of good samples and bad samples.

                - good_mAPs (list[tuple]): (index, mAP) pairs of the
                  best-performing samples in the dataset.
                - bad_mAPs (list[tuple]): (index, mAP) pairs of the
                  worst-performing samples in the dataset.
        """
        if eval_fn is None:
            eval_fn = bbox_map_eval
        else:
            assert callable(eval_fn)

        prog_bar = ProgressBar(len(results))
        _mAPs = {}
        data_info = {}
        for i, result in enumerate(results):
            # dataset[i] should not be accessed directly, because there is
            # a risk of mismatch; use prepare_data instead
            data_info = dataset.prepare_data(i)
            data_info['bboxes'] = data_info['gt_bboxes'].tensor
            data_info['labels'] = data_info['gt_bboxes_labels']

            pred = result['pred_instances']
            pred_bboxes = pred['bboxes'].cpu().numpy()
            pred_scores = pred['scores'].cpu().numpy()
            pred_labels = pred['labels'].cpu().numpy()

            dets = []
            for label in range(len(dataset.metainfo['classes'])):
                index = np.where(pred_labels == label)[0]
                pred_bbox_scores = np.hstack(
                    [pred_bboxes[index], pred_scores[index].reshape((-1, 1))])
                dets.append(pred_bbox_scores)
            mAP = eval_fn(dets, data_info)

            _mAPs[i] = mAP
            prog_bar.update()

        # sort in ascending order of mAP, so the last `topk` entries are the
        # best-performing samples and the first `topk` are the worst
        _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
        good_mAPs = _mAPs[-topk:]
        bad_mAPs = _mAPs[:topk]
        return good_mAPs, bad_mAPs

    def panoptic_evaluate(self, dataset, results, topk=20):
        """Evaluation for panoptic segmentation.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Panoptic segmentation results from the test
                results pkl file.
            topk (int): Number of the highest topk and
                lowest topk after evaluation index sorting. Default: 20.

        Returns:
            tuple: A tuple of good samples and bad samples.

                - good_pqs (list[tuple]): (index, PQ) pairs of the
                  best-performing samples in the dataset.
                - bad_pqs (list[tuple]): (index, PQ) pairs of the
                  worst-performing samples in the dataset.
        """
        pqs = {}
        prog_bar = ProgressBar(len(results))

        for i in range(len(results)):
            data_sample = {}
            for k in dataset[i].keys():
                data_sample[k] = dataset[i][k]

            for k in results[i].keys():
                data_sample[k] = results[i][k]

            self.evaluator.process([data_sample])
            metrics = self.evaluator.evaluate(1)

            pqs[i] = metrics['coco_panoptic/PQ']
            prog_bar.update()

        # sort in ascending order of PQ, so the last `topk` entries are the
        # best-performing samples and the first `topk` are the worst
        pqs = list(sorted(pqs.items(), key=lambda kv: kv[1]))
        good_pqs = pqs[-topk:]
        bad_pqs = pqs[:topk]
        return good_pqs, bad_pqs


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMDet eval image prediction result for each')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'prediction_path', help='path to the test result pkl file')
    parser.add_argument(
        'show_dir', help='directory where painted images will be saved')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=0,
        help='the interval of show (s), 0 is block')
    parser.add_argument(
        '--topk',
        default=20,
        type=int,
        help='number of the highest and lowest topk images to save '
        'after index sorting')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0,
        help='score threshold (default: 0.)')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    return args
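
# An illustrative `--cfg-options` override (the exact keys depend on the
# config being analyzed):
#   --cfg-options test_dataloader.dataset.data_root=data/coco/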


def main():
    args = parse_args()

    check_file_exist(args.prediction_path)

    cfg = Config.fromfile(args.config)

    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)

    # update data root according to MMDET_DATASETS
    update_data_root(cfg)

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    init_default_scope(cfg.get('default_scope', 'mmdet'))

    cfg.test_dataloader.dataset.test_mode = True

    cfg.test_dataloader.pop('batch_size', 0)
    if cfg.train_dataloader.dataset.type in ('MultiImageMixDataset',
                                             'ClassBalancedDataset',
                                             'RepeatDataset', 'ConcatDataset'):
        cfg.test_dataloader.dataset.pipeline = get_loading_pipeline(
            cfg.train_dataloader.dataset.dataset.pipeline)
    else:
        cfg.test_dataloader.dataset.pipeline = get_loading_pipeline(
            cfg.train_dataloader.dataset.pipeline)
    dataset = DATASETS.build(cfg.test_dataloader.dataset)
    outputs = load(args.prediction_path)

    cfg.work_dir = args.show_dir
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)

    result_visualizer = ResultVisualizer(args.show, args.wait_time,
                                         args.show_score_thr, runner)
    result_visualizer.evaluate_and_show(
        dataset, outputs, topk=args.topk, show_dir=args.show_dir)


if __name__ == '__main__':
    main()