_base_ = [
    '../_base_/datasets/coco_instance.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
    mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True)
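# Normalization constants carried over from the original YOLACT repo
# (here in RGB order); DetDataPreprocessor below consumes them directly.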
# model settings
input_size = 550
model = dict(
    type='YOLACT',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=img_norm_cfg['mean'],
        std=img_norm_cfg['std'],
        bgr_to_rgb=img_norm_cfg['to_rgb'],
        pad_mask=True),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,  # do not freeze stem
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=False,  # update the statistics of bn
        zero_init_residual=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
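    # FPN over C3-C5 plus an extra conv on the input feature yields five
    # levels (P3-P7); bilinear upsampling (instead of mmdet's default
    # nearest) follows the original YOLACT implementation.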
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5,
        upsample_cfg=dict(mode='bilinear')),
    bbox_head=dict(
        type='YOLACTHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=3,
            scales_per_octave=1,
            base_sizes=[8, 16, 32, 64, 128],
            ratios=[0.5, 1.0, 2.0],
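            # Feature maps for a 550x550 input are 69/35/18/9/5 cells wide,
            # so the strides below evaluate to ~7.97, 15.71, 30.56, 61.11
            # and 110.0, with anchors centered on each grid cell.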
            strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
            centers=[(550 * 0.5 / x, 550 * 0.5 / x)
                     for x in [69, 35, 18, 9, 5]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            reduction='none',
            loss_weight=1.0),
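        # reduction='none' keeps per-anchor classification losses so OHEM
        # (use_ohem=True below, ratio set by neg_pos_ratio in train_cfg)
        # can select the hardest negatives before averaging.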
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
        num_head_convs=1,
        num_protos=32,
        use_ohem=True),
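    # The protonet predicts 32 prototype masks that are combined linearly
    # with per-box coefficients from the bbox head; the auxiliary semantic
    # segmentation branch is only used at training time.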
    mask_head=dict(
        type='YOLACTProtonet',
        in_channels=256,
        num_protos=32,
        num_classes=80,
        max_masks_to_train=100,
        loss_mask_weight=6.125,
        with_seg_branch=True,
        loss_segm=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        sampler=dict(type='PseudoSampler'),  # YOLACT should use PseudoSampler
        # smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        mask_thr=0.5,
        iou_thr=0.5,
        top_k=200,
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
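    # Expand + MinIoURandomCrop is the SSD-style zoom-out/zoom-in scale
    # augmentation; boxes smaller than 4x4 px were already dropped above.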
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='PackDetInputs')
]
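# Inference uses the same non-ratio-preserving 550x550 squash as training,
# so no padding or aspect-ratio bookkeeping is needed at test time.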
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=8,
    num_workers=4,
    batch_sampler=None,
    dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
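# batch_sampler=None overrides the aspect-ratio grouping batch sampler from
# the base dataset config; grouping is moot when every image is squashed to
# the same square size.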

max_epochs = 55
# training schedule for 55e
train_cfg = dict(
    type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')

# learning rate
param_scheduler = [
    dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=500),
    dict(
        type='MultiStepLR',
        begin=0,
        end=max_epochs,
        by_epoch=True,
        milestones=[20, 42, 49, 52],
        gamma=0.1)
]
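# i.e. a 500-iteration linear warmup from 0.1x the base LR, then 10x step
# decay at epochs 20, 42, 49 and 52 of the 55-epoch schedule.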

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4))

custom_hooks = [
    dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
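# CheckInvalidLossHook asserts the loss is still finite every 50 iterations,
# aborting the run early if training diverges to NaN/Inf.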

env_cfg = dict(cudnn_benchmark=True)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (1 GPU) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
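# With `--auto-scale-lr` passed to the training script, MMEngine rescales the
# LR by (num_gpus * batch_size) / base_batch_size relative to this
# single-GPU, batch-8 setup.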