_base_ = [
    '../_base_/models/mask-rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
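# The four base files provide, in order: the Mask R-CNN R-50 FPN model, the
# COCO instance-segmentation dataset, the 1x training schedule, and the
# default runtime; everything below overrides pieces of those bases.
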
# TODO: delete custom_imports after mmcls supports auto import
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)

checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth'  # noqa
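
# The backbone is swapped for the ConvNeXt-T implementation registered by
# mmcls (_delete_=True discards the ResNet-50 settings inherited from the
# base config). init_cfg loads the ImageNet-1k checkpoint above, keeping only
# the weights under the 'backbone.' prefix, and the FPN neck's in_channels
# are set to the four ConvNeXt-T stage widths (96, 192, 384, 768).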
model = dict(
    backbone=dict(
        _delete_=True,
        type='mmcls.ConvNeXt',
        arch='tiny',
        out_indices=[0, 1, 2, 3],
        drop_path_rate=0.4,
        layer_scale_init_value=1.0,
        gap_before_final_norm=False,
        init_cfg=dict(
            type='Pretrained', checkpoint=checkpoint_file,
            prefix='backbone.')),
    neck=dict(in_channels=[96, 192, 384, 768]))

# augmentation strategy originates from DETR / Sparse R-CNN
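# RandomChoice samples one of its two transform branches per image: either a
# single multi-scale resize, or a coarse resize followed by an absolute-range
# random crop and a second multi-scale resize.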
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[
            [
                dict(
                    type='RandomChoiceResize',
                    scales=[(480, 1333), (512, 1333), (544, 1333),
                            (576, 1333), (608, 1333), (640, 1333),
                            (672, 1333), (704, 1333), (736, 1333),
                            (768, 1333), (800, 1333)],
                    keep_ratio=True)
            ],
            [
                dict(
                    type='RandomChoiceResize',
                    scales=[(400, 1333), (500, 1333), (600, 1333)],
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='RandomChoiceResize',
                    scales=[(480, 1333), (512, 1333), (544, 1333),
                            (576, 1333), (608, 1333), (640, 1333),
                            (672, 1333), (704, 1333), (736, 1333),
                            (768, 1333), (800, 1333)],
                    keep_ratio=True)
            ]
        ]),
    dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)

# learning rate
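# linear warmup over the first 1000 iterations, then the standard step decay
# of a 3x schedule: lr is multiplied by 0.1 at epochs 27 and 33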
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
        end=1000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[27, 33],
        gamma=0.1)
]

# Enable automatic-mixed-precision training with AmpOptimWrapper.
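# LearningRateDecayOptimizerConstructor applies layer-wise lr decay: each
# earlier layer group has its lr scaled by an extra factor of 0.95, so the
# pretrained shallow stages are updated more conservatively than the heads.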
optim_wrapper = dict(
    type='AmpOptimWrapper',
    constructor='LearningRateDecayOptimizerConstructor',
    paramwise_cfg={
        'decay_rate': 0.95,
        'decay_type': 'layer_wise',
        'num_layers': 6
    },
    optimizer=dict(
        _delete_=True,
        type='AdamW',
        lr=0.0001,
        betas=(0.9, 0.999),
        weight_decay=0.05,
    ))
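
# A minimal usage sketch, assuming this file lives at
# configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
# (the path is an assumption; adjust it to your checkout):
#
#   # single-GPU training
#   python tools/train.py \
#       configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py
#
#   # distributed training on 8 GPUs
#   bash tools/dist_train.sh \
#       configs/convnext/mask-rcnn_convnext-t-p4-w7_fpn_amp-ms-crop-3x_coco.py 8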