faster-rcnn_r50_fpn_1x_cityscapes.py

_base_ = [
    '../_base_/models/faster-rcnn_r50_fpn.py',
    '../_base_/datasets/cityscapes_detection.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
    # Skip the ImageNet-pretrained backbone init; weights come from the COCO
    # checkpoint loaded via `load_from` below.
    backbone=dict(init_cfg=None),
    roi_head=dict(
        bbox_head=dict(
            # Cityscapes has 8 instance categories
            num_classes=8,
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optim_wrapper = dict(optimizer=dict(lr=0.01))
# learning rate
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=8,
        by_epoch=True,
        # [7] yields higher performance than [6]
        milestones=[7],
        gamma=0.1)
]
# actual epoch = 8 * 8 = 64: the Cityscapes dataset config repeats the train
# set 8 times per epoch, so 8 training epochs correspond to 64 passes.
train_cfg = dict(max_epochs=8)
# For better, more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'  # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 sample per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
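
# Usage sketch (an assumption, not part of the upstream config): with the
# MMEngine-based MMDetection 3.x entry points, this config can be launched
# from a separate training script, for example:
#
#   from mmengine.config import Config
#   from mmengine.runner import Runner
#
#   # hypothetical path; adjust to where this config lives in your checkout
#   cfg = Config.fromfile('configs/cityscapes/faster-rcnn_r50_fpn_1x_cityscapes.py')
#   cfg.work_dir = './work_dirs/faster-rcnn_r50_fpn_1x_cityscapes'
#   Runner.from_cfg(cfg).train()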