# FCOS R-50-FPN config: large-scale-jitter (LSJ) 200-epoch COCO schedule,
# trained with AMP. Inherits dataset/schedule/runtime from the `_base_` file.
_base_ = '../common/lsj-200e_coco-detection.py'

image_size = (1024, 1024)
# Pad every image in the batch to a fixed 1024x1024 canvas.
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]

# model settings
model = dict(
    type='FCOS',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        # ImageNet mean/std matching the torchvision-pretrained backbone.
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        batch_augments=batch_augments),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + stage 1 of the pretrained backbone
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',  # use P5
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='FCOSHead',
        num_classes=80,  # COCO
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_on_bbox=True,
        centerness_on_reg=True,
        dcn_on_last_conv=False,
        center_sampling=True,
        conv_bias=True,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # testing settings
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

train_dataloader = dict(batch_size=8, num_workers=4)

# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    # lr = 0.01 * 4: base SGD lr scaled for the 8x8=64 global batch size.
    optimizer=dict(
        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
    paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.),
    clip_grad=dict(max_norm=35, norm_type=2))

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)