mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py

_base_ = ['./mask2former_swin-t-p4-w7-224_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth'  # noqa
depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        pretrain_img_size=384,
        embed_dims=128,
        depths=depths,
        num_heads=[4, 8, 16, 32],
        window_size=12,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(in_channels=[128, 256, 512, 1024]))
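# The in_channels above are the four Swin-B stage widths,
# i.e. embed_dims * 2**i = [128, 256, 512, 1024].
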
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
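# Apply the reduced-lr, zero-decay multipliers to every per-block norm
# layer in the backbone, e.g. 'backbone.stages.2.blocks.17.norm'.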
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
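# Patch-merging downsample layers only exist between stages, so the
# final stage is skipped via range(len(depths) - 1).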
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
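
For reference, MMEngine's optimizer constructor resolves custom_keys by substring matching against each parameter's full name, trying longer keys first so the most specific entry wins. The snippet below is a minimal standalone sketch of that rule, not part of the config and not MMEngine's actual implementation: resolve_multipliers is a hypothetical helper, the parameter names are illustrative, and custom_keys refers to the dict built in the config above.

# Sketch only: approximates MMEngine's custom-key matching, where keys
# are tried longest-first and the first substring hit wins.
def resolve_multipliers(param_name, custom_keys):
    """Return (lr_mult, decay_mult) for a parameter name."""
    for key in sorted(custom_keys, key=len, reverse=True):
        if key in param_name:
            cfg = custom_keys[key]
            return cfg.get('lr_mult', 1.0), cfg.get('decay_mult', 1.0)
    return 1.0, 1.0  # default: full lr, full weight decay

# A per-block norm weight hits its stage-specific key -> (0.1, 0.0)
print(resolve_multipliers(
    'backbone.stages.0.blocks.0.norm1.weight', custom_keys))
# An attention weight falls through to the plain 'backbone' key -> (0.1, 1.0)
print(resolve_multipliers(
    'backbone.stages.0.blocks.0.attn.w_msa.qkv.weight', custom_keys))
# A query embedding in the head keeps full lr but no decay -> (1.0, 0.0)
print(resolve_multipliers('panoptic_head.query_embed.weight', custom_keys))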