# human_pose.py
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. executor_cfg = dict(
  3. # Basic configurations of the executor
  4. name='Pose Estimation',
  5. camera_id=0,
  6. # Define nodes.
  7. # The configuration of a node usually includes:
  8. # 1. 'type': Node class name
  9. # 2. 'name': Node name
  10. # 3. I/O buffers (e.g. 'input_buffer', 'output_buffer'): specify the
  11. # input and output buffer names. This may depend on the node class.
  12. # 4. 'enable_key': assign a hot-key to toggle enable/disable this node.
  13. # This may depend on the node class.
  14. # 5. Other class-specific arguments
  15. nodes=[
  16. # 'DetectorNode':
  17. # This node performs object detection from the frame image using an
  18. # MMDetection model.
  19. dict(
  20. type='DetectorNode',
  21. name='detector',
  22. model_config='projects/rtmpose/rtmdet/person/'
  23. 'rtmdet_nano_320-8xb32_coco-person.py',
  24. model_checkpoint='https://download.openmmlab.com/mmpose/v1/'
  25. 'projects/rtmpose/rtmdet_nano_8xb32-100e_coco-obj365-person-05d8511e.pth', # noqa
  26. input_buffer='_input_', # `_input_` is an executor-reserved buffer
  27. output_buffer='det_result'),
  28. # 'TopdownPoseEstimatorNode':
  29. # This node performs keypoint detection from the frame image using an
  30. # MMPose top-down model. Detection results is needed.
  31. dict(
  32. type='TopdownPoseEstimatorNode',
  33. name='human pose estimator',
  34. model_config='projects/rtmpose/rtmpose/body_2d_keypoint/'
  35. 'rtmpose-t_8xb256-420e_coco-256x192.py',
  36. model_checkpoint='https://download.openmmlab.com/mmpose/v1/'
  37. 'projects/rtmpose/rtmpose-tiny_simcc-aic-coco_pt-aic-coco_420e-256x192-cfc8f33d_20230126.pth', # noqa
  38. labels=['person'],
  39. input_buffer='det_result',
  40. output_buffer='human_pose'),
  41. # 'ObjectAssignerNode':
  42. # This node binds the latest model inference result with the current
  43. # frame. (This means the frame image and inference result may be
  44. # asynchronous).
  45. dict(
  46. type='ObjectAssignerNode',
  47. name='object assigner',
  48. frame_buffer='_frame_', # `_frame_` is an executor-reserved buffer
  49. object_buffer='human_pose',
  50. output_buffer='frame'),
  51. # 'ObjectVisualizerNode':
  52. # This node draw the pose visualization result in the frame image.
  53. # Pose results is needed.
  54. dict(
  55. type='ObjectVisualizerNode',
  56. name='object visualizer',
  57. enable_key='v',
  58. enable=True,
  59. show_bbox=True,
  60. must_have_keypoint=False,
  61. show_keypoint=True,
  62. input_buffer='frame',
  63. output_buffer='vis'),
  64. # 'NoticeBoardNode':
  65. # This node show a notice board with given content, e.g. help
  66. # information.
  67. dict(
  68. type='NoticeBoardNode',
  69. name='instruction',
  70. enable_key='h',
  71. enable=True,
  72. input_buffer='vis',
  73. output_buffer='vis_notice',
  74. content_lines=[
  75. 'This is a demo for pose visualization and simple image '
  76. 'effects. Have fun!', '', 'Hot-keys:',
  77. '"v": Pose estimation result visualization',
  78. '"h": Show help information',
  79. '"m": Show diagnostic information', '"q": Exit'
  80. ],
  81. ),
  82. # 'MonitorNode':
  83. # This node show diagnostic information in the frame image. It can
  84. # be used for debugging or monitoring system resource status.
  85. dict(
  86. type='MonitorNode',
  87. name='monitor',
  88. enable_key='m',
  89. enable=False,
  90. input_buffer='vis_notice',
  91. output_buffer='display'),
  92. # 'RecorderNode':
  93. # This node save the output video into a file.
  94. dict(
  95. type='RecorderNode',
  96. name='recorder',
  97. out_video_file='webcam_api_demo.mp4',
  98. input_buffer='display',
  99. output_buffer='_display_'
  100. # `_display_` is an executor-reserved buffer
  101. )
  102. ])