_base_ = '../_base_/default_runtime.py'

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)

# Example of using a different file client.
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix
# (LMDB and Memcached are not supported yet).
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (named `file_client_args` in versions
# before 3.0.0rc6).
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
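# With Method 1, relative paths in the dataset configs are joined with the
# prefixed `data_root`, so e.g. the train `ann_file` would resolve to
# 's3://openmmlab/datasets/detection/coco/annotations/instances_train2017.json'.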
backend_args = None

# Standard Scale Jittering (SSJ) resizes and crops an image
# with a resize range of 0.8 to 1.25 of the original image size.
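# With image_size = (1024, 1024), RandomResize below samples a target scale
# between roughly 819x819 and 1280x1280 (1024 * 0.8 = 819, 1024 * 1.25 = 1280)
# before the result is cropped to 1024x1024.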
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=image_size,
        ratio_range=(0.8, 1.25),
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_type='absolute_range',
        crop_size=image_size,
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
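
# 2 samples per GPU; with the 32-GPU setup assumed by `auto_scale_lr` below,
# this gives the total batch size of 64 that the optimizer settings expect.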
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='InfiniteSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
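# Set `format_only=True` (together with an `outfile_prefix`) to dump
# predictions in COCO json format without computing metrics, e.g. for a
# test-dev submission.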

# The model is trained for 270k iterations with a total batch size of 64,
# which is roughly equivalent to 144 epochs.
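# (270,000 iterations x 64 images per iteration = ~17.3M images seen;
# COCO train2017 contains ~118k images.)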
max_iters = 270000
train_cfg = dict(
    type='IterBasedTrainLoop', max_iters=max_iters, val_interval=10000)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# The optimizer settings assume a total batch size of 64.
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
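
# A minimal sketch of the linear scaling rule this implies: for a different
# total batch size, scale `lr` proportionally, e.g.
#   lr = 0.1 * 16 / 64  # = 0.025 for a total batch size of 16
# (`auto_scale_lr` below automates this rescaling when it is enabled).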

# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
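# (0.9 * 270000 = 243000, 0.95 * 270000 = 256500, 0.975 * 270000 = 263250,
# matching `milestones` below)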
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
        end=1000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_iters,
        by_epoch=False,
        milestones=[243000, 256500, 263250],
        gamma=0.1)
]
default_hooks = dict(checkpoint=dict(by_epoch=False, interval=10000))
log_processor = dict(by_epoch=False)

# NOTE: `auto_scale_lr` is for automatically scaling LR.
# USERS SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
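
# Usage note: in recent MMDetection 3.x versions automatic LR scaling is
# opt-in, e.g. (hypothetical invocation)
#   python tools/train.py <config> --auto-scale-lr
# which rescales `lr` by (actual total batch size / base_batch_size).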