# Config: detr_r50_8xb2-500e_coco.py (MMDetection)
  1. _base_ = './detr_r50_8xb2-150e_coco.py'
  2. # learning policy
  3. max_epochs = 500
  4. train_cfg = dict(
  5. type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=10)
  6. param_scheduler = [
  7. dict(
  8. type='MultiStepLR',
  9. begin=0,
  10. end=max_epochs,
  11. by_epoch=True,
  12. milestones=[334],
  13. gamma=0.1)
  14. ]
  15. # only keep latest 2 checkpoints
  16. default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
  17. # NOTE: `auto_scale_lr` is for automatically scaling LR,
  18. # USER SHOULD NOT CHANGE ITS VALUES.
  19. # base_batch_size = (8 GPUs) x (2 samples per GPU)
  20. auto_scale_lr = dict(base_batch_size=16)