centernet-update_r50-caffe_fpn_ms-1x_coco.py

_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
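
# Note on the inherited bases (assuming the standard MMDetection 3.x layout):
# coco_detection.py provides the COCO dataloaders, default pipelines,
# evaluators and `backend_args`; schedule_1x.py provides the 12-epoch SGD
# schedule that is partially overridden below; default_runtime.py provides
# default hooks, logging and runtime settings.
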
model = dict(
    type='CenterNet',
    # use caffe img_norm
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[103.530, 116.280, 123.675],
        std=[1.0, 1.0, 1.0],
        bgr_to_rgb=False,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5,
        # There is a chance to get 40.3 after switching init_cfg;
        # otherwise it is about 39.9~40.1
        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='CenterNetUpdateHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        hm_min_radius=4,
        hm_min_overlap=0.8,
        more_pos_thresh=0.2,
        more_pos_topk=9,
        soft_weight_on_reg=False,
        loss_cls=dict(
            type='GaussianFocalLoss',
            pos_weight=0.25,
            neg_weight=0.75,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
    ),
    train_cfg=None,
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
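
# Minimal sketch of deriving a variant from this config (hypothetical file
# name; with MMEngine config inheritance only the overridden keys need to be
# repeated, the rest is merged from this file):
#
#   _base_ = './centernet-update_r50-caffe_fpn_ms-1x_coco.py'
#   model = dict(test_cfg=dict(score_thr=0.3, max_per_img=50))
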
# single-scale training is about 39.3
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='RandomChoiceResize',
        scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                (1333, 768), (1333, 800)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
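
# RandomChoiceResize picks one of the six scales per image: with
# keep_ratio=True the shorter edge is resized towards the randomly chosen
# value (640~800) while the longer edge stays capped at 1333. This is the
# multi-scale ("ms") training that lifts the result above the ~39.3
# single-scale baseline noted above.
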
# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.00025,
        by_epoch=False,
        begin=0,
        end=4000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=12,
        by_epoch=True,
        milestones=[8, 11],
        gamma=0.1)
]
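
# Resulting schedule, given the optimizer lr=0.01 set below: the LR warms up
# linearly from 0.01 * 0.00025 = 2.5e-6 to 0.01 over the first 4000
# iterations, then MultiStepLR multiplies it by 0.1 at the end of epoch 8 and
# again at epoch 11 (0.01 -> 0.001 -> 0.0001) within the 12-epoch run.
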
optim_wrapper = dict(
    optimizer=dict(lr=0.01),
    # Experiments show that there is no need to turn on clip_grad.
    paramwise_cfg=dict(norm_decay_mult=0.))
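
# Only the learning rate is overridden here; the optimizer type, momentum and
# weight decay are inherited from schedule_1x.py (SGD in the standard base
# config). norm_decay_mult=0. disables weight decay on normalization layers.
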
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
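
# Sketch of the linear scaling rule applied when auto LR scaling is enabled
# (e.g. `python tools/train.py <this config> --auto-scale-lr`, assuming the
# standard MMDetection entry point): the LR is multiplied by
# actual_batch_size / base_batch_size, so 4 GPUs x 2 images per GPU would use
# 0.01 * 8 / 16 = 0.005.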