# ssd300_coco.py
  1. _base_ = [
  2. '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
  3. '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
  4. ]
  5. # dataset settings
  6. input_size = 300
  7. train_pipeline = [
  8. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  9. dict(type='LoadAnnotations', with_bbox=True),
  10. dict(
  11. type='Expand',
  12. mean={{_base_.model.data_preprocessor.mean}},
  13. to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
  14. ratio_range=(1, 4)),
  15. dict(
  16. type='MinIoURandomCrop',
  17. min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
  18. min_crop_size=0.3),
  19. dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
  20. dict(type='RandomFlip', prob=0.5),
  21. dict(
  22. type='PhotoMetricDistortion',
  23. brightness_delta=32,
  24. contrast_range=(0.5, 1.5),
  25. saturation_range=(0.5, 1.5),
  26. hue_delta=18),
  27. dict(type='PackDetInputs')
  28. ]
  29. test_pipeline = [
  30. dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
  31. dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
  32. dict(type='LoadAnnotations', with_bbox=True),
  33. dict(
  34. type='PackDetInputs',
  35. meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
  36. 'scale_factor'))
  37. ]
  38. train_dataloader = dict(
  39. batch_size=8,
  40. num_workers=2,
  41. batch_sampler=None,
  42. dataset=dict(
  43. _delete_=True,
  44. type='RepeatDataset',
  45. times=5,
  46. dataset=dict(
  47. type={{_base_.dataset_type}},
  48. data_root={{_base_.data_root}},
  49. ann_file='annotations/instances_train2017.json',
  50. data_prefix=dict(img='train2017/'),
  51. filter_cfg=dict(filter_empty_gt=True, min_size=32),
  52. pipeline=train_pipeline,
  53. backend_args={{_base_.backend_args}})))
  54. val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
  55. test_dataloader = val_dataloader
  56. # optimizer
  57. optim_wrapper = dict(
  58. type='OptimWrapper',
  59. optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
  60. custom_hooks = [
  61. dict(type='NumClassCheckHook'),
  62. dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
  63. ]
  64. # NOTE: `auto_scale_lr` is for automatically scaling LR,
  65. # USER SHOULD NOT CHANGE ITS VALUES.
  66. # base_batch_size = (8 GPUs) x (8 samples per GPU)
  67. auto_scale_lr = dict(base_batch_size=64)