coco_instance_semantic.py

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'

# Example of using a different file client.
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the prefix
# (does not support LMDB or Memcache yet).
# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: use `backend_args` (named `file_client_args` in versions
# before 3.0.0rc6).
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None
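# Note: the `backend_args` value set here is passed to every
# `LoadImageFromFile` transform, dataset, and evaluator below, so changing
# it in this one place switches the storage backend for all file I/O in
# this config.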
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this step.
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric=['bbox', 'segm'],
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator
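
# --- Usage sketch (not part of the original file) ---
# This dataset config is normally inherited by a model config through
# MMEngine's `_base_` mechanism. The relative path and the overridden keys
# below are illustrative assumptions; adjust them to your repo layout.
#
# _base_ = ['../_base_/datasets/coco_instance_semantic.py']
# # Only the keys you want to change need to be repeated; dicts are merged
# # with the base config.
# train_dataloader = dict(batch_size=4, num_workers=4)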