_base_ = '../common/lsj-200e_coco-instance.py'
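
# The base config provides the COCO instance-segmentation dataloaders, the
# large-scale-jitter (LSJ) training pipeline and the 200-epoch schedule; this
# file only overrides the model, the per-GPU batch size and the optimizer.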
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
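# With LSJ, images are randomly rescaled and cropped to varying sizes;
# `BatchFixedSizePad` then pads every batch to a fixed 1024x1024 input.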

# model settings
model = dict(
    type='SOLO',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        batch_augments=batch_augments),
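    # The mean/std above are the standard ImageNet statistics in RGB order;
    # `bgr_to_rgb=True` converts the loaded BGR images before normalization.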
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        num_outs=5),
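    # SOLO predicts instance masks on an S x S grid at each FPN level:
    # `num_grids` sets S per level, `scale_ranges` routes instances to levels
    # by their scale, and `pos_scale` shrinks the GT box used to pick the
    # positive grid cells.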
    mask_head=dict(
        type='SOLOHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=7,
        feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        pos_scale=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cls_down_index=0,
        loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
    # model training and testing settings
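    # At test time the top `nms_pre` candidates are kept and duplicate masks
    # are suppressed with Matrix NMS; `kernel` and `sigma` control its decay.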
    test_cfg=dict(
        nms_pre=500,
        score_thr=0.1,
        mask_thr=0.5,
        filter_thr=0.05,
        kernel='gaussian',  # gaussian/linear
        sigma=2.0,
        max_per_img=100))
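
# `batch_size` below is per GPU: with the intended 8 GPUs the total batch is 64.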
train_dataloader = dict(batch_size=8, num_workers=4)

# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    optimizer=dict(
        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
    clip_grad=dict(max_norm=35, norm_type=2))
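# The lr follows linear scaling: the SOLO baseline uses lr=0.01 at a total
# batch of 16, so the 64-image batch here scales it by 4 (hence `0.01 * 4`).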

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
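
# A typical launch, assuming the standard MMDetection layout and that this
# file lives under configs/solo/ (the filename below is illustrative):
#   bash tools/dist_train.sh configs/solo/solo_r50_fpn_8xb8-lsj-200e_coco.py 8
# When training with a different number of GPUs or samples per GPU, pass
# `--auto-scale-lr` so the LR is rescaled from `base_batch_size`.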