# mmpose_handler.py
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import base64
  3. import os
  4. import mmcv
  5. import torch
  6. from mmpose.apis import (inference_bottom_up_pose_model,
  7. inference_top_down_pose_model, init_pose_model)
  8. from mmpose.models.detectors import AssociativeEmbedding, TopDown
  9. try:
  10. from ts.torch_handler.base_handler import BaseHandler
  11. except ImportError:
  12. raise ImportError('Please install torchserve.')
  13. class MMPoseHandler(BaseHandler):
  14. def initialize(self, context):
  15. properties = context.system_properties
  16. self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
  17. self.device = torch.device(self.map_location + ':' +
  18. str(properties.get('gpu_id')) if torch.cuda.
  19. is_available() else self.map_location)
  20. self.manifest = context.manifest
  21. model_dir = properties.get('model_dir')
  22. serialized_file = self.manifest['model']['serializedFile']
  23. checkpoint = os.path.join(model_dir, serialized_file)
  24. self.config_file = os.path.join(model_dir, 'config.py')
  25. self.model = init_pose_model(self.config_file, checkpoint, self.device)
  26. self.initialized = True
  27. def preprocess(self, data):
  28. images = []
  29. for row in data:
  30. image = row.get('data') or row.get('body')
  31. if isinstance(image, str):
  32. image = base64.b64decode(image)
  33. image = mmcv.imfrombytes(image)
  34. images.append(image)
  35. return images
  36. def inference(self, data, *args, **kwargs):
  37. if isinstance(self.model, TopDown):
  38. results = self._inference_top_down_pose_model(data)
  39. elif isinstance(self.model, (AssociativeEmbedding, )):
  40. results = self._inference_bottom_up_pose_model(data)
  41. else:
  42. raise NotImplementedError(
  43. f'Model type {type(self.model)} is not supported.')
  44. return results
  45. def _inference_top_down_pose_model(self, data):
  46. results = []
  47. for image in data:
  48. # use dummy person bounding box
  49. preds, _ = inference_top_down_pose_model(
  50. self.model, image, person_results=None)
  51. results.append(preds)
  52. return results
  53. def _inference_bottom_up_pose_model(self, data):
  54. results = []
  55. for image in data:
  56. preds, _ = inference_bottom_up_pose_model(self.model, image)
  57. results.append(preds)
  58. return results
  59. def postprocess(self, data):
  60. output = [[{
  61. 'keypoints': pred['keypoints'].tolist()
  62. } for pred in preds] for preds in data]
  63. return output