# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp

import mmengine
from mmengine.config import Config, DictAction
from mmengine.hooks import Hook
from mmengine.runner import Runner
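
# Example invocation (illustrative only; the config and checkpoint paths below
# are hypothetical placeholders):
#   python tools/test.py configs/some_pose_config.py path/to/checkpoint.pth \
#       --work-dir work_dirs/eval --dump work_dirs/eval/results.pkl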


def parse_args():
    parser = argparse.ArgumentParser(
        description='MMPose test (and eval) model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir', help='the directory to save evaluation results')
    parser.add_argument('--out', help='the file to save metric results.')
    parser.add_argument(
        '--dump',
        type=str,
        help='dump predictions to a pickle file for offline evaluation')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        default={},
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. For example, '
        "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    parser.add_argument(
        '--show-dir',
        help='directory where the visualization images will be saved.')
    parser.add_argument(
        '--show',
        action='store_true',
        help='whether to display the prediction results in a window.')
    parser.add_argument(
        '--interval',
        type=int,
        default=1,
        help='visualize one sample in every `interval` samples.')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='display time (in seconds) of each window.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args


def merge_args(cfg, args):
    """Merge CLI arguments to config."""
    # -------------------- visualization --------------------
    if args.show or (args.show_dir is not None):
        assert 'visualization' in cfg.default_hooks, \
            'PoseVisualizationHook is not set in the ' \
            '`default_hooks` field of config. Please set ' \
            '`visualization=dict(type="PoseVisualizationHook")`'

        cfg.default_hooks.visualization.enable = True
        cfg.default_hooks.visualization.show = args.show
        if args.show:
            cfg.default_hooks.visualization.wait_time = args.wait_time
        cfg.default_hooks.visualization.out_dir = args.show_dir
        cfg.default_hooks.visualization.interval = args.interval

    # -------------------- Dump predictions --------------------
    if args.dump is not None:
        assert args.dump.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        dump_metric = dict(type='DumpResults', out_file_path=args.dump)
        if isinstance(cfg.test_evaluator, (list, tuple)):
            # append the dump metric to the existing list of evaluators
            cfg.test_evaluator = [*cfg.test_evaluator, dump_metric]
        else:
            cfg.test_evaluator = [cfg.test_evaluator, dump_metric]

    return cfg
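
# Illustrative effect of `merge_args` (assumed example): with `--dump
# results.pkl`, `cfg.test_evaluator` becomes a list whose last entry is
# `dict(type='DumpResults', out_file_path='results.pkl')`, so predictions are
# written to disk alongside the regular metrics.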


def main():
    args = parse_args()

    # load config
    cfg = Config.fromfile(args.config)
    cfg = merge_args(cfg, args)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
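        # e.g. (hypothetical path): for `configs/td_hm_hrnet.py` with no
        # --work-dir given, results go under `./work_dirs/td_hm_hrnet`.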

    cfg.load_from = args.checkpoint

    # build the runner from config
    runner = Runner.from_cfg(cfg)

    if args.out:

        # A small hook that dumps the evaluation metrics to `args.out`.
        class SaveMetricHook(Hook):

            def after_test_epoch(self, _, metrics=None):
                if metrics is not None:
                    mmengine.dump(metrics, args.out)

        # Lowest priority, so it runs after all other test hooks.
        runner.register_hook(SaveMetricHook(), 'LOWEST')

    # start testing
    runner.test()


if __name__ == '__main__':
    main()