# rtmpose-m_8xb64-210e_mpii-256x256.py
  1. _base_ = ['../../../_base_/default_runtime.py']
  2. # runtime
  3. max_epochs = 210
  4. stage2_num_epochs = 30
  5. base_lr = 4e-3
  6. train_cfg = dict(max_epochs=max_epochs, val_interval=10)
  7. # optimizer
  8. optim_wrapper = dict(
  9. type='OptimWrapper',
  10. optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
  11. paramwise_cfg=dict(
  12. norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
  13. # learning policy
  14. param_scheduler = [
  15. dict(
  16. type='LinearLR',
  17. start_factor=1.0e-5,
  18. by_epoch=False,
  19. begin=0,
  20. end=1000),
  21. dict(
  22. # use cosine lr from 210 to 420 epoch
  23. type='CosineAnnealingLR',
  24. eta_min=base_lr * 0.05,
  25. begin=max_epochs // 2,
  26. end=max_epochs,
  27. T_max=max_epochs // 2,
  28. by_epoch=True,
  29. convert_to_iter_based=True),
  30. ]
  31. # automatically scaling LR based on the actual training batch size
  32. auto_scale_lr = dict(base_batch_size=1024)
  33. # codec settings
  34. codec = dict(
  35. type='SimCCLabel',
  36. input_size=(256, 256),
  37. sigma=(5.66, 5.66),
  38. simcc_split_ratio=2.0,
  39. normalize=False,
  40. use_dark=False)
  41. # model settings
  42. model = dict(
  43. type='TopdownPoseEstimator',
  44. data_preprocessor=dict(
  45. type='PoseDataPreprocessor',
  46. mean=[123.675, 116.28, 103.53],
  47. std=[58.395, 57.12, 57.375],
  48. bgr_to_rgb=True),
  49. backbone=dict(
  50. _scope_='mmdet',
  51. type='CSPNeXt',
  52. arch='P5',
  53. expand_ratio=0.5,
  54. deepen_factor=0.67,
  55. widen_factor=0.75,
  56. out_indices=(4, ),
  57. channel_attention=True,
  58. norm_cfg=dict(type='SyncBN'),
  59. act_cfg=dict(type='SiLU'),
  60. init_cfg=dict(
  61. type='Pretrained',
  62. prefix='backbone.',
  63. checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
  64. 'rtmposev1/cspnext-m_udp-aic-coco_210e-256x192-f2f7d6f6_20230130.pth' # noqa
  65. )),
  66. head=dict(
  67. type='RTMCCHead',
  68. in_channels=768,
  69. out_channels=16,
  70. input_size=codec['input_size'],
  71. in_featuremap_size=(8, 8),
  72. simcc_split_ratio=codec['simcc_split_ratio'],
  73. final_layer_kernel_size=7,
  74. gau_cfg=dict(
  75. hidden_dims=256,
  76. s=128,
  77. expansion_factor=2,
  78. dropout_rate=0.,
  79. drop_path=0.,
  80. act_fn='SiLU',
  81. use_rel_bias=False,
  82. pos_enc=False),
  83. loss=dict(
  84. type='KLDiscretLoss',
  85. use_target_weight=True,
  86. beta=10.,
  87. label_softmax=True),
  88. decoder=codec),
  89. test_cfg=dict(flip_test=True))
  90. # base dataset settings
  91. dataset_type = 'MpiiDataset'
  92. data_mode = 'topdown'
  93. data_root = 'data/mpii/'
  94. backend_args = dict(backend='local')
  95. # backend_args = dict(
  96. # backend='petrel',
  97. # path_mapping=dict({
  98. # f'{data_root}': 's3://openmmlab/datasets/pose/MPI/',
  99. # f'{data_root}': 's3://openmmlab/datasets/pose/MPI/'
  100. # }))
  101. # pipelines
  102. train_pipeline = [
  103. dict(type='LoadImage', backend_args=backend_args),
  104. dict(type='GetBBoxCenterScale'),
  105. dict(type='RandomFlip', direction='horizontal'),
  106. dict(type='RandomHalfBody'),
  107. dict(
  108. type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
  109. dict(type='TopdownAffine', input_size=codec['input_size']),
  110. dict(type='mmdet.YOLOXHSVRandomAug'),
  111. dict(
  112. type='Albumentation',
  113. transforms=[
  114. dict(type='Blur', p=0.1),
  115. dict(type='MedianBlur', p=0.1),
  116. dict(
  117. type='CoarseDropout',
  118. max_holes=1,
  119. max_height=0.4,
  120. max_width=0.4,
  121. min_holes=1,
  122. min_height=0.2,
  123. min_width=0.2,
  124. p=1.),
  125. ]),
  126. dict(type='GenerateTarget', encoder=codec),
  127. dict(type='PackPoseInputs')
  128. ]
  129. val_pipeline = [
  130. dict(type='LoadImage', backend_args=backend_args),
  131. dict(type='GetBBoxCenterScale'),
  132. dict(type='TopdownAffine', input_size=codec['input_size']),
  133. dict(type='PackPoseInputs')
  134. ]
  135. train_pipeline_stage2 = [
  136. dict(type='LoadImage', backend_args=backend_args),
  137. dict(type='GetBBoxCenterScale'),
  138. dict(type='RandomFlip', direction='horizontal'),
  139. dict(type='RandomHalfBody'),
  140. dict(
  141. type='RandomBBoxTransform',
  142. shift_factor=0.,
  143. scale_factor=[0.75, 1.25],
  144. rotate_factor=60),
  145. dict(type='TopdownAffine', input_size=codec['input_size']),
  146. dict(type='mmdet.YOLOXHSVRandomAug'),
  147. dict(
  148. type='Albumentation',
  149. transforms=[
  150. dict(type='Blur', p=0.1),
  151. dict(type='MedianBlur', p=0.1),
  152. dict(
  153. type='CoarseDropout',
  154. max_holes=1,
  155. max_height=0.4,
  156. max_width=0.4,
  157. min_holes=1,
  158. min_height=0.2,
  159. min_width=0.2,
  160. p=0.5),
  161. ]),
  162. dict(type='GenerateTarget', encoder=codec),
  163. dict(type='PackPoseInputs')
  164. ]
  165. # data loaders
  166. train_dataloader = dict(
  167. batch_size=64,
  168. num_workers=10,
  169. persistent_workers=True,
  170. sampler=dict(type='DefaultSampler', shuffle=True),
  171. dataset=dict(
  172. type=dataset_type,
  173. data_root=data_root,
  174. data_mode=data_mode,
  175. ann_file='annotations/mpii_train.json',
  176. data_prefix=dict(img='images/'),
  177. pipeline=train_pipeline,
  178. ))
  179. val_dataloader = dict(
  180. batch_size=32,
  181. num_workers=10,
  182. persistent_workers=True,
  183. drop_last=False,
  184. sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
  185. dataset=dict(
  186. type=dataset_type,
  187. data_root=data_root,
  188. data_mode=data_mode,
  189. ann_file='annotations/mpii_val.json',
  190. headbox_file=f'{data_root}/annotations/mpii_gt_val.mat',
  191. data_prefix=dict(img='images/'),
  192. test_mode=True,
  193. pipeline=val_pipeline,
  194. ))
  195. test_dataloader = val_dataloader
  196. # hooks
  197. default_hooks = dict(
  198. checkpoint=dict(save_best='PCK', rule='greater', max_keep_ckpts=1))
  199. custom_hooks = [
  200. dict(
  201. type='EMAHook',
  202. ema_type='ExpMomentumEMA',
  203. momentum=0.0002,
  204. update_buffers=True,
  205. priority=49),
  206. dict(
  207. type='mmdet.PipelineSwitchHook',
  208. switch_epoch=max_epochs - stage2_num_epochs,
  209. switch_pipeline=train_pipeline_stage2)
  210. ]
  211. # evaluators
  212. val_evaluator = dict(type='MpiiPCKAccuracy')
  213. test_evaluator = val_evaluator