ssd300_voc0712.py

_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    bbox_head=dict(
        num_classes=20,
        anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
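# The overrides above adapt the base SSD300 model (configured for COCO) to
# PASCAL VOC: 20 foreground classes, and the (0.2, 0.9) anchor scale range
# used for VOC in the original SSD paper instead of the base config's COCO
# setting.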
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
input_size = 300
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Expand',
        mean={{_base_.model.data_preprocessor.mean}},
        to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='PackDetInputs')
]
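# `{{_base_.xxx}}` is MMEngine's reference syntax for values defined in the
# inherited base configs; the values are substituted when this file is parsed,
# so Expand pads with the same per-channel mean (and channel order) that the
# model's data preprocessor uses.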
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    # avoid bboxes being resized
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
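# Loading annotations after Resize keeps the ground-truth boxes in
# original-image coordinates, which is what the VOC-style evaluator expects.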
train_dataloader = dict(
    batch_size=8,
    num_workers=3,
    dataset=dict(  # RepeatDataset
        # the dataset is repeated 10 times, and the training schedule is 2x
        # (24 epochs), so the actual number of passes over the data is
        # 24 * 10 = 240.
        times=10,
        dataset=dict(  # ConcatDataset
            # VOCDataset adds a different `dataset_type` to each dataset's
            # metainfo, which raises an error when the datasets are combined
            # with ConcatDataset. Adding `ignore_keys` avoids this error.
            ignore_keys=['dataset_type'],
            datasets=[
                dict(
                    type=dataset_type,
                    data_root=data_root,
                    ann_file='VOC2007/ImageSets/Main/trainval.txt',
                    data_prefix=dict(sub_data_root='VOC2007/'),
                    filter_cfg=dict(filter_empty_gt=True, min_size=32),
                    pipeline=train_pipeline),
                dict(
                    type=dataset_type,
                    data_root=data_root,
                    ann_file='VOC2012/ImageSets/Main/trainval.txt',
                    data_prefix=dict(sub_data_root='VOC2012/'),
                    filter_cfg=dict(filter_empty_gt=True, min_size=32),
                    pipeline=train_pipeline)
            ])))
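# The wrapper `type`s (RepeatDataset around ConcatDataset) come from the base
# voc0712.py dataset config, so only the fields that differ are overridden
# here: training uses both the VOC2007 and VOC2012 trainval splits.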
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
custom_hooks = [
    dict(type='NumClassCheckHook'),
    dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
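# NumClassCheckHook verifies that the head's `num_classes` matches the number
# of classes in the dataset metainfo; CheckInvalidLossHook checks every 50
# iterations that the loss is still finite (not NaN/Inf).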
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))
# learning policy
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=24,
        by_epoch=True,
        milestones=[16, 20],
        gamma=0.1)
]
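# The LR is warmed up linearly from lr * 0.001 to lr over the first 500
# iterations (`by_epoch=False`), then decayed by 10x at epochs 16 and 20 of
# the 24-epoch (2x) schedule.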
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
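# When automatic LR scaling is enabled (e.g. via `--auto-scale-lr` in
# tools/train.py), the optimizer LR is scaled linearly by
# (actual total batch size) / base_batch_size.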