# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'

# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix
# (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/WIDERFace/'
# Method 2: use `backend_args` (named `file_client_args` in versions
# before 3.0.0rc6)
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None
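# With `backend_args=None` the default local-disk backend is used, so
# `data_root` above is read directly from the local filesystem.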

img_scale = (640, 640)  # square scale based on the VGA width (640 px)

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=img_scale, keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=img_scale, keep_ratio=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
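# The meta keys packed above (`ori_shape`, `scale_factor`, ...) let the
# evaluator rescale predictions made on the resized image back to the
# original image size.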

train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='train.txt',
        data_prefix=dict(img='WIDER_train'),
        filter_cfg=dict(filter_empty_gt=True, bbox_min_size=17, min_size=32),
        pipeline=train_pipeline))
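# During training, `filter_cfg` drops samples with no usable supervision:
# images without any ground-truth box and images whose shorter side is below
# `min_size`; boxes smaller than `bbox_min_size` pixels are treated as
# ignore regions rather than positive samples.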

val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='val.txt',
        data_prefix=dict(img='WIDER_val'),
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader
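# The annotations of the WIDER Face test split are not publicly released,
# so the validation split is reused for testing.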

val_evaluator = dict(
    # TODO: support WiderFace-Evaluation for easy, medium, hard cases
    type='VOCMetric',
    metric='mAP',
    eval_mode='11points')
test_evaluator = val_evaluator
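
# A minimal, hypothetical usage sketch (the file path below is an assumption
# based on the usual mmdetection layout, not part of this config): build the
# training dataloader with mmengine after registering the mmdet modules.
#
#   from mmengine.config import Config
#   from mmengine.runner import Runner
#   from mmdet.utils import register_all_modules
#
#   register_all_modules()  # registers WIDERFaceDataset, transforms, metrics
#   cfg = Config.fromfile('configs/_base_/datasets/wider_face.py')
#   train_loader = Runner.build_dataloader(cfg.train_dataloader)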