eval_metric.py

# Copyright (c) OpenMMLab. All rights reserved.
import argparse

import mmengine
from mmengine import Config, DictAction
from mmengine.evaluator import Evaluator
from mmengine.registry import init_default_scope

from mmdet.registry import DATASETS


def parse_args():
    parser = argparse.ArgumentParser(
        description='Evaluate the metrics of results saved in pkl format')
    parser.add_argument('config', help='Config of the model')
    parser.add_argument('pkl_results', help='Results in pickle format')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='Override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # Register 'mmdet' (or the scope named in the config) as the default
    # registry scope so DATASETS and metric lookups resolve correctly.
    init_default_scope(cfg.get('default_scope', 'mmdet'))

    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # The dataset is built only so its metainfo (class names, etc.) can be
    # attached to the evaluator; no model or images are needed here.
    dataset = DATASETS.build(cfg.test_dataloader.dataset)
    predictions = mmengine.load(args.pkl_results)

    evaluator = Evaluator(cfg.val_evaluator)
    evaluator.dataset_meta = dataset.metainfo
    # Compute metrics over the saved predictions without running inference.
    eval_results = evaluator.offline_evaluate(predictions)
    print(eval_results)


if __name__ == '__main__':
    main()
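
Usage sketch: a typical invocation looks like the one below. The config and result paths are illustrative, and the pickle file is assumed to have been produced beforehand, e.g. with tools/test.py and its --out flag:

python tools/analysis_tools/eval_metric.py \
    configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
    results.pkl \
    --cfg-options val_evaluator.classwise=True

The --cfg-options override assumes the config's val_evaluator is a CocoMetric, whose classwise option enables per-class results; any key in the loaded config can be overridden the same way.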