# coco_occluded_separated_recall.py
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. from argparse import ArgumentParser
  3. import mmengine
  4. from mmengine.logging import print_log
  5. from mmdet.datasets import CocoDataset
  6. from mmdet.evaluation import CocoOccludedSeparatedMetric
  7. def main():
  8. parser = ArgumentParser(
  9. description='Compute recall of COCO occluded and separated masks '
  10. 'presented in paper https://arxiv.org/abs/2210.10046.')
  11. parser.add_argument('result', help='result file (pkl format) path')
  12. parser.add_argument('--out', help='file path to save evaluation results')
  13. parser.add_argument(
  14. '--score-thr',
  15. type=float,
  16. default=0.3,
  17. help='Score threshold for the recall calculation. Defaults to 0.3')
  18. parser.add_argument(
  19. '--iou-thr',
  20. type=float,
  21. default=0.75,
  22. help='IoU threshold for the recall calculation. Defaults to 0.75.')
  23. parser.add_argument(
  24. '--ann',
  25. default='data/coco/annotations/instances_val2017.json',
  26. help='coco annotation file path')
  27. args = parser.parse_args()
  28. results = mmengine.load(args.result)
  29. assert 'masks' in results[0]['pred_instances'], \
  30. 'The results must be predicted by instance segmentation model.'
  31. metric = CocoOccludedSeparatedMetric(
  32. ann_file=args.ann, iou_thr=args.iou_thr, score_thr=args.score_thr)
  33. metric.dataset_meta = CocoDataset.METAINFO
  34. for datasample in results:
  35. metric.process(data_batch=None, data_samples=[datasample])
  36. metric_res = metric.compute_metrics(metric.results)
  37. if args.out is not None:
  38. mmengine.dump(metric_res, args.out)
  39. print_log(f'Evaluation results have been saved to {args.out}.')
# Standard script guard: run only when executed directly, not on import.
if __name__ == '__main__':
    main()