# atss_swin-l-p4-w12_fpn_dyhead_ms-2x_coco.py
  1. _base_ = [
  2. '../_base_/datasets/coco_detection.py',
  3. '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
  4. ]
  5. pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
  6. model = dict(
  7. type='ATSS',
  8. data_preprocessor=dict(
  9. type='DetDataPreprocessor',
  10. mean=[123.675, 116.28, 103.53],
  11. std=[58.395, 57.12, 57.375],
  12. bgr_to_rgb=True,
  13. pad_size_divisor=128),
  14. backbone=dict(
  15. type='SwinTransformer',
  16. pretrain_img_size=384,
  17. embed_dims=192,
  18. depths=[2, 2, 18, 2],
  19. num_heads=[6, 12, 24, 48],
  20. window_size=12,
  21. mlp_ratio=4,
  22. qkv_bias=True,
  23. qk_scale=None,
  24. drop_rate=0.,
  25. attn_drop_rate=0.,
  26. drop_path_rate=0.2,
  27. patch_norm=True,
  28. out_indices=(1, 2, 3),
  29. # Please only add indices that would be used
  30. # in FPN, otherwise some parameter will not be used
  31. with_cp=False,
  32. convert_weights=True,
  33. init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
  34. neck=[
  35. dict(
  36. type='FPN',
  37. in_channels=[384, 768, 1536],
  38. out_channels=256,
  39. start_level=0,
  40. add_extra_convs='on_output',
  41. num_outs=5),
  42. dict(
  43. type='DyHead',
  44. in_channels=256,
  45. out_channels=256,
  46. num_blocks=6,
  47. # disable zero_init_offset to follow official implementation
  48. zero_init_offset=False)
  49. ],
  50. bbox_head=dict(
  51. type='ATSSHead',
  52. num_classes=80,
  53. in_channels=256,
  54. pred_kernel_size=1, # follow DyHead official implementation
  55. stacked_convs=0,
  56. feat_channels=256,
  57. anchor_generator=dict(
  58. type='AnchorGenerator',
  59. ratios=[1.0],
  60. octave_base_scale=8,
  61. scales_per_octave=1,
  62. strides=[8, 16, 32, 64, 128],
  63. center_offset=0.5), # follow DyHead official implementation
  64. bbox_coder=dict(
  65. type='DeltaXYWHBBoxCoder',
  66. target_means=[.0, .0, .0, .0],
  67. target_stds=[0.1, 0.1, 0.2, 0.2]),
  68. loss_cls=dict(
  69. type='FocalLoss',
  70. use_sigmoid=True,
  71. gamma=2.0,
  72. alpha=0.25,
  73. loss_weight=1.0),
  74. loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
  75. loss_centerness=dict(
  76. type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
  77. # training and testing settings
  78. train_cfg=dict(
  79. assigner=dict(type='ATSSAssigner', topk=9),
  80. allowed_border=-1,
  81. pos_weight=-1,
  82. debug=False),
  83. test_cfg=dict(
  84. nms_pre=1000,
  85. min_bbox_size=0,
  86. score_thr=0.05,
  87. nms=dict(type='nms', iou_threshold=0.6),
  88. max_per_img=100))
  89. # dataset settings
  90. train_pipeline = [
  91. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  92. dict(type='LoadAnnotations', with_bbox=True),
  93. dict(
  94. type='RandomResize',
  95. scale=[(2000, 480), (2000, 1200)],
  96. keep_ratio=True,
  97. backend='pillow'),
  98. dict(type='RandomFlip', prob=0.5),
  99. dict(type='PackDetInputs')
  100. ]
  101. test_pipeline = [
  102. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  103. dict(type='Resize', scale=(2000, 1200), keep_ratio=True, backend='pillow'),
  104. dict(type='LoadAnnotations', with_bbox=True),
  105. dict(
  106. type='PackDetInputs',
  107. meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
  108. 'scale_factor'))
  109. ]
  110. train_dataloader = dict(
  111. dataset=dict(
  112. _delete_=True,
  113. type='RepeatDataset',
  114. times=2,
  115. dataset=dict(
  116. type={{_base_.dataset_type}},
  117. data_root={{_base_.data_root}},
  118. ann_file='annotations/instances_train2017.json',
  119. data_prefix=dict(img='train2017/'),
  120. filter_cfg=dict(filter_empty_gt=True, min_size=32),
  121. pipeline=train_pipeline,
  122. backend_args={{_base_.backend_args}})))
  123. val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
  124. test_dataloader = val_dataloader
  125. # optimizer
  126. optim_wrapper = dict(
  127. _delete_=True,
  128. type='OptimWrapper',
  129. optimizer=dict(
  130. type='AdamW', lr=0.00005, betas=(0.9, 0.999), weight_decay=0.05),
  131. paramwise_cfg=dict(
  132. custom_keys={
  133. 'absolute_pos_embed': dict(decay_mult=0.),
  134. 'relative_position_bias_table': dict(decay_mult=0.),
  135. 'norm': dict(decay_mult=0.)
  136. }),
  137. clip_grad=None)