_base_ = [
    'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
    'mmdet::_base_/datasets/coco_instance.py',
    'mmdet::_base_/schedules/schedule_1x.py',
    'mmdet::_base_/default_runtime.py'
]
# Requires the mmclassification dev-1.x branch.
# Importing mmcls.models triggers register_module() in mmcls so that
# 'mmcls.ConvNeXt' below can be resolved from the model registry.
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth'  # noqa
image_size = (1024, 1024)
model = dict(
    backbone=dict(
        _delete_=True,  # discard the ResNet-50 backbone settings from the base config
        type='mmcls.ConvNeXt',
        arch='base',
        out_indices=[0, 1, 2, 3],
        drop_path_rate=0.4,  # stochastic depth rate; larger variants use higher rates
        layer_scale_init_value=0.,  # disable layer scale when using GRN
        gap_before_final_norm=False,
        use_grn=True,  # Global Response Normalization, the key addition in ConvNeXt V2
        init_cfg=dict(
            type='Pretrained', checkpoint=checkpoint_file,
            prefix='backbone.')),
    neck=dict(in_channels=[128, 256, 512, 1024]),  # ConvNeXt-B stage output channels
    test_cfg=dict(
        rpn=dict(nms=dict(type='nms')),  # the RPN stage keeps standard NMS
        rcnn=dict(nms=dict(type='soft_nms'))))  # Soft-NMS only on final R-CNN outputs
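# Optional sanity check for the model definition above, kept commented out so
# this config stays free of side effects. The file path is a hypothetical
# placeholder, and an MMDetection dev-3.x environment is assumed:
#   import mmcls.models  # registers mmcls.ConvNeXt in the model registry
#   from mmengine.config import Config
#   from mmdet.registry import MODELS
#   cfg = Config.fromfile('path/to/this_config.py')
#   model = MODELS.build(cfg.model)  # ConvNeXt-V2-B + FPN + Mask R-CNN heads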
# Large-scale jitter (LSJ) augmentation pipeline.
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=image_size,
        ratio_range=(0.1, 2.0),
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_type='absolute_range',
        crop_size=image_size,
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
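# Net effect of the pipeline above: each image is rescaled so its longer side
# falls roughly between 102 and 2048 pixels (0.1-2.0 x 1024), then a 1024x1024
# window is cropped, exposing the model to a wide range of object scales.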
train_dataloader = dict(
    batch_size=4,  # total batch size 32 = 8 GPUs x 4 images each
    num_workers=8,
    dataset=dict(pipeline=train_pipeline))
max_epochs = 36  # 3x schedule
train_cfg = dict(max_epochs=max_epochs)
# learning rate: linear warmup for the first 1000 iterations, then step decay
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
        end=1000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[27, 33],
        gamma=0.1)
]
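# Resulting schedule with the base lr of 1e-4 set below: lr ramps linearly
# from 1e-4 * 0.001 = 1e-7 to 1e-4 over the first 1000 iterations, holds at
# 1e-4 until epoch 27, drops to 1e-5 until epoch 33, and finishes at 1e-6
# for the remaining epochs.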
# Enable automatic mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    constructor='LearningRateDecayOptimizerConstructor',
    paramwise_cfg={
        'decay_rate': 0.95,
        'decay_type': 'layer_wise',  # lr decays by 0.95 per layer from head to stem
        'num_layers': 12
    },
    optimizer=dict(
        _delete_=True,  # replace the SGD optimizer inherited from schedule_1x with AdamW
        type='AdamW',
        lr=0.0001,
        betas=(0.9, 0.999),
        weight_decay=0.05,
    ))
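# Rough effect of the layer-wise decay above: each earlier layer's lr is
# scaled by a further factor of 0.95, so with num_layers=12 the earliest
# parameters train at about 0.95 ** 12 ≈ 0.54 of the base lr (the exact
# exponent depends on how LearningRateDecayOptimizerConstructor indexes
# layers).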
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))  # keep only the latest checkpoint
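# Usage sketch, assuming this file is saved inside an MMDetection (dev-3.x)
# checkout; the config path below is a hypothetical placeholder:
#   single GPU: python tools/train.py path/to/this_config.py
#   8 GPUs:     bash tools/dist_train.sh path/to/this_config.py 8
# If the total batch size differs from 32, scale the base lr accordingly.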