# rtmdet_l_8xb32-300e_coco.py

_base_ = [
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py',
    '../_base_/datasets/coco_detection.py', './rtmdet_tta.py'
]
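# RTMDet-L detector: CSPNeXt backbone and CSPNeXtPAFPN neck (deepen_factor and
# widen_factor of 1 select the "L" model size) with the decoupled
# RTMDetSepBNHead. The data_preprocessor keeps BGR channel order
# (bgr_to_rgb=False) and normalizes with the per-channel mean/std below.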
model = dict(
    type='RTMDet',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.53, 116.28, 123.675],
        std=[57.375, 57.12, 58.395],
        bgr_to_rgb=False,
        batch_augments=None),
    backbone=dict(
        type='CSPNeXt',
        arch='P5',
        expand_ratio=0.5,
        deepen_factor=1,
        widen_factor=1,
        channel_attention=True,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU', inplace=True)),
    neck=dict(
        type='CSPNeXtPAFPN',
        in_channels=[256, 512, 1024],
        out_channels=256,
        num_csp_blocks=3,
        expand_ratio=0.5,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU', inplace=True)),
    bbox_head=dict(
        type='RTMDetSepBNHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=2,
        feat_channels=256,
        anchor_generator=dict(
            type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
        bbox_coder=dict(type='DistancePointBBoxCoder'),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        with_objectness=False,
        exp_on_reg=True,
        share_conv=True,
        pred_kernel_size=1,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU', inplace=True)),
    train_cfg=dict(
        assigner=dict(type='DynamicSoftLabelAssigner', topk=13),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=30000,
        min_bbox_size=0,
        score_thr=0.001,
        nms=dict(type='nms', iou_threshold=0.65),
        max_per_img=300),
)
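# Stage-1 training pipeline: cached Mosaic, random resize of a 1280x1280 canvas
# in the ratio range 0.1-2.0, crop/pad to 640x640, HSV jitter, random flip and
# cached MixUp.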
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
    dict(
        type='RandomResize',
        scale=(1280, 1280),
        ratio_range=(0.1, 2.0),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='YOLOXHSVRandomAug'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
    dict(
        type='CachedMixUp',
        img_scale=(640, 640),
        ratio_range=(1.0, 1.0),
        max_cached_images=20,
        pad_val=(114, 114, 114)),
    dict(type='PackDetInputs')
]
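# Stage-2 training pipeline: the same augmentations without Mosaic and MixUp;
# the PipelineSwitchHook at the bottom of this file enables it for the final
# stage2_num_epochs epochs.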
train_pipeline_stage2 = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomResize',
        scale=(640, 640),
        ratio_range=(0.1, 2.0),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='YOLOXHSVRandomAug'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
    dict(type='PackDetInputs')
]
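# Test pipeline: keep-ratio resize to 640x640, then pad to a square input.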
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(640, 640), keep_ratio=True),
    dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
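# Per-GPU batch size of 32; with the 8 GPUs implied by the file name (8xb32)
# the effective training batch size is 256.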
train_dataloader = dict(
    batch_size=32,
    num_workers=10,
    batch_sampler=None,
    pin_memory=True,
    dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(
    batch_size=5, num_workers=10, dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
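# 300-epoch schedule: validate every 10 epochs, switching to every epoch once
# the stage-2 pipeline takes over for the last 20 epochs (dynamic_intervals).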
max_epochs = 300
stage2_num_epochs = 20
base_lr = 0.004
interval = 10
train_cfg = dict(
    max_epochs=max_epochs,
    val_interval=interval,
    dynamic_intervals=[(max_epochs - stage2_num_epochs, 1)])
val_evaluator = dict(proposal_nums=(100, 1, 10))
test_evaluator = val_evaluator
# optimizer
optim_wrapper = dict(
    _delete_=True,  # replace, rather than merge with, the inherited optimizer
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        # no weight decay on norm layers and biases
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
# learning rate
param_scheduler = [
    dict(
        # linear warm-up over the first 1000 iterations
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=1000),
    dict(
        # use cosine lr from 150 to 300 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]
# hooks
default_hooks = dict(
    checkpoint=dict(
        interval=interval,
        max_keep_ckpts=3  # only keep latest 3 checkpoints
    ))
custom_hooks = [
    dict(
        # keep an exponential-moving-average copy of the model weights
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49),
    dict(
        # drop Mosaic/MixUp for the last stage2_num_epochs epochs
        type='PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]
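# Usage sketch (not part of the config, assuming a standard MMDetection
# checkout where this file lives at configs/rtmdet/rtmdet_l_8xb32-300e_coco.py):
#
#   # single GPU
#   python tools/train.py configs/rtmdet/rtmdet_l_8xb32-300e_coco.py
#   # 8 GPUs, matching the 8xb32 schedule
#   bash tools/dist_train.sh configs/rtmdet/rtmdet_l_8xb32-300e_coco.py 8
#
# A trained checkpoint can then be loaded for inference with
# mmdet.apis.init_detector(config, checkpoint) and run with inference_detector.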