# ssd300_32xb8-36e_openimages.py
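# SSD300 on Open Images V6: 36 effective epochs (12 scheduled epochs x 3
# dataset repeats) on 32 GPUs with 8 images each, as encoded in the filename.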

_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
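# The Open Images boxable label set used here has 601 classes; the anchor
# base-size ratio range is widened relative to the inherited ssd300.py base.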
model = dict(
    bbox_head=dict(
        num_classes=601,
        anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
input_size = 300
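# Standard SSD training augmentation: photometric distortion, zoom-out
# expansion, min-IoU random cropping, a fixed 300x300 resize and random flip.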
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean={{_base_.model.data_preprocessor.mean}},
        to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
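# Test pipeline: annotations are loaded *after* Resize so the ground-truth
# boxes are not resized, and the raw `instances` are packed into the meta
# keys for Open Images evaluation.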
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    # avoid bboxes being resized
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'instances'))
]
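# RepeatDataset wraps the training set 3x, so the 12-epoch schedule below
# yields the 36 effective epochs named in the config filename.
# `batch_sampler=None` drops any batch sampler the base dataset config
# may define.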
train_dataloader = dict(
    batch_size=8,  # 32 GPUs x 8 images each -> total batch size 256
    batch_sampler=None,
    dataset=dict(
        _delete_=True,
        type='RepeatDataset',
        times=3,  # repeat 3 times: 12 scheduled epochs -> 36 effective epochs
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/oidv6-train-annotations-bbox.csv',
            data_prefix=dict(img='OpenImages/train/'),
            label_file='annotations/class-descriptions-boxable.csv',
            hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
            meta_file='annotations/train-image-metas.pkl',
            pipeline=train_pipeline)))
val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
test_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline))
# optimizer
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4))
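# lr=0.04 is set for the total batch size of 256; `auto_scale_lr` at the
# bottom lets the runner rescale it for other total batch sizes when LR
# auto-scaling is enabled.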
# learning rate
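# Linear warmup over the first 20k iterations, then 10x LR drops at
# epochs 8 and 11.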
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.001,
        by_epoch=False,
        begin=0,
        end=20000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=12,
        by_epoch=True,
        milestones=[8, 11],
        gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USERS SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)