test_torchserver.py

# Copyright (c) OpenMMLab. All rights reserved.
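"""Smoke-test a TorchServe deployment of an MMPose model.

Runs pose estimation on a single image twice, once through the native
MMPose APIs and once through a running TorchServe endpoint, and writes
both visualizations to ``--out-dir`` for a side-by-side comparison.
"""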
import os
import os.path as osp
import warnings
from argparse import ArgumentParser

import requests

from mmpose.apis import (inference_bottom_up_pose_model,
                         inference_top_down_pose_model, init_pose_model,
                         vis_pose_result)
from mmpose.models import AssociativeEmbedding, TopDown

def parse_args():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--out-dir', default='vis_results', help='Visualization output path')
    args = parser.parse_args()
    return args
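
# Example invocation (file names and the served model name are illustrative,
# not from the original):
#   python test_torchserver.py demo.jpg hrnet_w48_coco_256x192.py \
#       hrnet_w48_coco_256x192.pth hrnet --inference-addr 127.0.0.1:8080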

def main(args):
    os.makedirs(args.out_dir, exist_ok=True)

    # Run inference on a single image with the native MMPose APIs.
    model = init_pose_model(args.config, args.checkpoint, device=args.device)
    if isinstance(model, TopDown):
        pytorch_result, _ = inference_top_down_pose_model(
            model, args.img, person_results=None)
    elif isinstance(model, (AssociativeEmbedding, )):
        pytorch_result, _ = inference_bottom_up_pose_model(model, args.img)
    else:
        raise NotImplementedError()

    vis_pose_result(
        model,
        args.img,
        pytorch_result,
        out_file=osp.join(args.out_dir, 'pytorch_result.png'))

    # Run inference on the same image through the TorchServe endpoint.
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
        server_result = response.json()

    vis_pose_result(
        model,
        args.img,
        server_result,
        out_file=osp.join(args.out_dir, 'torchserve_result.png'))
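
    # Optional sanity check (a minimal sketch, not part of the original tool):
    # compare the native and served keypoints numerically. This assumes both
    # results are lists of dicts carrying a 'keypoints' entry, the same format
    # vis_pose_result consumes above; the tolerance is illustrative.
    import numpy as np
    for native, served in zip(pytorch_result, server_result):
        assert np.allclose(
            np.asarray(native['keypoints']),
            np.asarray(served['keypoints']),
            atol=1e-3), 'Native and TorchServe keypoints diverge'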


if __name__ == '__main__':
    args = parse_args()
    main(args)

    # The ANSI style codes below match those used by the colorama package.
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This tool will be deprecated in the future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
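
# Server-side setup for reference (a sketch; the model store path and .mar
# name are illustrative). TorchServe must be running before this script is
# invoked, e.g.:
#   torchserve --start --model-store ./model_store --models hrnet=hrnet.mar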