atss_r50-caffe_fpn_dyhead_1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=128),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            start_level=1,
            add_extra_convs='on_output',
            num_outs=5),
        dict(
            type='DyHead',
            in_channels=256,
            out_channels=256,
            num_blocks=6,
            # disable zero_init_offset to follow official implementation
            zero_init_offset=False)
    ],
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        pred_kernel_size=1,  # follow DyHead official implementation
        stacked_convs=0,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128],
            center_offset=0.5),  # follow DyHead official implementation
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optim_wrapper = dict(optimizer=dict(lr=0.01))
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend='pillow'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
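
A minimal usage sketch, not part of the config itself: loading the file with MMEngine's Config API to inspect the merged settings before training. It assumes an MMDetection checkout where the file sits at configs/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py and that mmengine is installed; the path is illustrative.

from mmengine.config import Config

# Parse the config; the _base_ files and the {{_base_.backend_args}}
# references are resolved at load time.
cfg = Config.fromfile('configs/dyhead/atss_r50-caffe_fpn_dyhead_1x_coco.py')

# The neck is a two-stage list: an FPN followed by a 6-block DyHead.
print(cfg.model.neck[1]['type'], cfg.model.neck[1]['num_blocks'])

# The dataloaders inherited from coco_detection.py now use the
# pillow-backend Resize pipelines defined in this file.
print(cfg.train_dataloader['dataset']['pipeline'])

Training would then typically be launched with MMDetection's standard tools/train.py entry point, passing this config path as its argument.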