# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'

# Example of using a different file client.
# Method 1: simply set the data root and let the file I/O module infer the
# backend from the path prefix (LMDB and Memcached are not yet supported).
# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: use `backend_args` (named `file_client_args` in versions
# before 3.0.0rc6).
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None
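
# Candidate augmentation pools for RandAugment: each inner list is one
# candidate transform, and `aug_num=1` below applies one randomly chosen
# candidate per image.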
color_space = [
    [dict(type='ColorTransform')],
    [dict(type='AutoContrast')],
    [dict(type='Equalize')],
    [dict(type='Sharpness')],
    [dict(type='Posterize')],
    [dict(type='Solarize')],
    [dict(type='Color')],
    [dict(type='Contrast')],
    [dict(type='Brightness')],
]
geometric = [
    [dict(type='Rotate')],
    [dict(type='ShearX')],
    [dict(type='ShearY')],
    [dict(type='TranslateX')],
    [dict(type='TranslateY')],
]
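# The target scale for RandomResize is sampled uniformly between
# (1333, 400) and (1333, 1200).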
scale = [(1333, 400), (1333, 1200)]
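# Branch names used by MultiBranch: labeled images populate only the 'sup'
# branch, while each unlabeled image is packed into an 'unsup_teacher'
# (weakly augmented) and an 'unsup_student' (strongly augmented) view.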
branch_field = ['sup', 'unsup_teacher', 'unsup_student']

# Pipeline used to augment the labeled data,
# which will be sent to the student model for supervised training.
sup_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomResize', scale=scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='RandAugment', aug_space=color_space, aug_num=1),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(
        type='MultiBranch',
        branch_field=branch_field,
        sup=dict(type='PackDetInputs'))
]

# Pipeline used to weakly augment the unlabeled data,
# which will be sent to the teacher model to predict pseudo instances.
weak_pipeline = [
    dict(type='RandomResize', scale=scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
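    # 'homography_matrix' records the geometric transforms applied to each
    # view, so the teacher's pseudo boxes can be projected into the
    # student's differently augmented view.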
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'flip', 'flip_direction',
                   'homography_matrix')),
]

# Pipeline used to strongly augment the unlabeled data,
# which will be sent to the student model for unsupervised training.
strong_pipeline = [
    dict(type='RandomResize', scale=scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomOrder',
        transforms=[
            dict(type='RandAugment', aug_space=color_space, aug_num=1),
            dict(type='RandAugment', aug_space=geometric, aug_num=1),
        ]),
    dict(type='RandomErasing', n_patches=(1, 5), ratio=(0, 0.2)),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'flip', 'flip_direction',
                   'homography_matrix')),
]

# Pipeline used to augment the unlabeled data into two different views.
unsup_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadEmptyAnnotations'),
    dict(
        type='MultiBranch',
        branch_field=branch_field,
        unsup_teacher=weak_pipeline,
        unsup_student=strong_pipeline,
    )
]

test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

batch_size = 5
num_workers = 5
# There are two common semi-supervised learning settings on the COCO dataset:
# (1) Split train2017 into labeled and unlabeled subsets by a fixed
#     percentage, such as 1%, 2%, 5% or 10%. The labeled and unlabeled
#     annotation files are named
#     instances_train2017.{fold}@{percent}.json and
#     instances_train2017.{fold}@{percent}-unlabeled.json, where `fold` is
#     used for cross-validation and `percent` is the proportion of labeled
#     data in train2017.
# (2) Use train2017 as the labeled dataset and unlabeled2017 as the
#     unlabeled dataset. The labeled annotation file is
#     instances_train2017.json; the unlabeled annotation file used below is
#     instances_unlabeled2017.json, typically generated from the downloaded
#     image_info_unlabeled2017.json by adding the missing 'categories' field.
# This config uses setting (2) by default.
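# Example of switching to setting (1), a sketch rather than part of the
# default config. Adjust the two dataset dicts below, e.g. for fold 1 with
# 10% labeled data (the 'semi_anns/' directory is an assumption about where
# the split annotation files are stored):
#   labeled_dataset:   ann_file='semi_anns/instances_train2017.1@10.json'
#   unlabeled_dataset: ann_file='semi_anns/instances_train2017.1@10-unlabeled.json'
#                      data_prefix=dict(img='train2017/')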
labeled_dataset = dict(
    type=dataset_type,
    data_root=data_root,
    ann_file='annotations/instances_train2017.json',
    data_prefix=dict(img='train2017/'),
    filter_cfg=dict(filter_empty_gt=True, min_size=32),
    pipeline=sup_pipeline,
    backend_args=backend_args)

unlabeled_dataset = dict(
    type=dataset_type,
    data_root=data_root,
    ann_file='annotations/instances_unlabeled2017.json',
    data_prefix=dict(img='unlabeled2017/'),
    filter_cfg=dict(filter_empty_gt=False),
    pipeline=unsup_pipeline,
    backend_args=backend_args)

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=num_workers,
    persistent_workers=True,
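    # GroupMultiSourceSampler draws each batch of `batch_size` images from the
    # concatenated dataset with a labeled:unlabeled ratio of `source_ratio`,
    # i.e. 1 labeled and 4 unlabeled images per batch of 5 here.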
    sampler=dict(
        type='GroupMultiSourceSampler',
        batch_size=batch_size,
        source_ratio=[1, 4]),
    dataset=dict(
        type='ConcatDataset', datasets=[labeled_dataset, unlabeled_dataset]))

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator