# lsj-100e_coco-instance.py

_base_ = '../_base_/default_runtime.py'

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)

# Example of using a different file client.
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the prefix (LMDB and Memcache
# are not supported yet).
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (`file_client_args` in versions before
# 3.0.0rc6):
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None
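
# `backend_args = None` falls back to reading images and annotations from
# the local filesystem; the commented-out petrel example above would remap
# the local paths onto the s3 bucket instead.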

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=image_size,
        ratio_range=(0.1, 2.0),
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_type='absolute_range',
        crop_size=image_size,
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
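
# The RandomResize (0.1x-2.0x) followed by a fixed 1024x1024 RandomCrop is
# the large-scale jittering (LSJ) augmentation the config is named after;
# FilterAnnotations then drops boxes that became degenerate (narrower than
# 0.01 px in either dimension) during the resize/crop.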

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

# Use RepeatDataset to speed up training; the resulting schedule is worked
# out in the note after this dataloader.
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='RepeatDataset',
        times=4,  # set anywhere from 2 to 16 for 50e - 400e training.
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instances_train2017.json',
            data_prefix=dict(img='train2017/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))
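
# Effective schedule: RepeatDataset iterates train2017 `times` times per
# epoch, so max_epochs = 25 with times = 4 amounts to 25 * 4 = 100 data
# epochs ("100e"). Likewise times = 2 gives 50e and times = 16 gives 400e,
# without touching max_epochs or the LR milestones below.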

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
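
# CocoMetric reports both box AP ('bbox') and mask AP ('segm') on val2017;
# format_only=False computes the metrics instead of only writing result
# files for offline submission.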

max_epochs = 25
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# optimizer assumes bs=64
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))

# learning rate
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.067, by_epoch=False, begin=0,
        end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[22, 24],
        gamma=0.1)
]
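
# Schedule in numbers: the LR warms up linearly from 0.1 * 0.067 = 0.0067
# to 0.1 over the first 500 iterations, then drops 10x at epochs 22 and 24
# (0.1 -> 0.01 -> 0.001), i.e. at 88% and 96% of the effective 100 epochs.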

# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
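
# A minimal usage sketch (the config path and GPU count are assumptions,
# not part of this file): with MMDetection's standard launchers, enabling
# LR auto-scaling rescales the LR linearly against base_batch_size, e.g.
# 8 GPUs x 2 samples = batch 16, so lr becomes 0.1 * 16 / 64 = 0.025.
#
#   bash tools/dist_train.sh configs/common/lsj-100e_coco-instance.py 8 \
#       --auto-scale-lr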