cityscapes_instance.py

# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'

# Example of using a different file client.
# Method 1: simply set the data root and let the file I/O module
# infer the storage backend from the path prefix
# (LMDB and Memcached are not supported yet).
# data_root = 's3://openmmlab/datasets/segmentation/cityscapes/'
# Method 2: use backend_args (named file_client_args in versions
# before 3.0.0rc6):
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/segmentation/',
#         'data/': 's3://openmmlab/datasets/segmentation/'
#     }))
backend_args = None
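
# Training-time augmentation: a resize scale is sampled between
# (2048, 800) and (2048, 1024) with the aspect ratio kept, followed by a
# random horizontal flip with probability 0.5.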
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='RandomResize',
        scale=[(2048, 800), (2048, 1024)],
        keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
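
# Test-time pipeline: a single fixed-scale resize; annotations are loaded
# here only so that evaluation metrics can be computed on the val split.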
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
    # If you don't have ground-truth annotations, delete this step.
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
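
# The Cityscapes train split is small, so RepeatDataset repeats it 8 times
# per epoch; AspectRatioBatchSampler groups images with similar aspect
# ratios into the same batch.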
train_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file='annotations/instancesonly_filtered_gtFine_train.json',
            data_prefix=dict(img='leftImg8bit/train/'),
            filter_cfg=dict(filter_empty_gt=True, min_size=32),
            pipeline=train_pipeline,
            backend_args=backend_args)))
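
# Validation runs the deterministic test pipeline on the val split without
# shuffling; the test dataloader simply reuses it.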
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instancesonly_filtered_gtFine_val.json',
        data_prefix=dict(img='leftImg8bit/val/'),
        test_mode=True,
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader
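
# Two evaluators run on the val split: COCO-style bbox/segm mAP and the
# official Cityscapes instance-segmentation metric, whose result files are
# written under outfile_prefix.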
val_evaluator = [
    dict(
        type='CocoMetric',
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        metric=['bbox', 'segm'],
        backend_args=backend_args),
    dict(
        type='CityScapesMetric',
        seg_prefix=data_root + 'gtFine/val',
        outfile_prefix='./work_dirs/cityscapes_metric/instance',
        backend_args=backend_args)
]
test_evaluator = val_evaluator

# To run inference on the test split and format the output results for
# submission, replace the test dataloader and evaluator with:
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file='annotations/instancesonly_filtered_gtFine_test.json',
#         data_prefix=dict(img='leftImg8bit/test/'),
#         test_mode=True,
#         filter_cfg=dict(filter_empty_gt=True, min_size=32),
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CityScapesMetric',
#     format_only=True,
#     outfile_prefix='./work_dirs/cityscapes_metric/test')
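
# Usage sketch (the paths below are illustrative assumptions, not part of
# this file): a dataset config like this one is normally pulled into a full
# model config through mmengine's _base_ inheritance, or loaded directly
# with mmengine's Config for inspection, e.g.:
#
# _base_ = [
#     '../_base_/datasets/cityscapes_instance.py',  # this file (assumed location)
#     '../_base_/models/mask-rcnn_r50_fpn.py',      # hypothetical model base
# ]
#
# from mmengine.config import Config
# cfg = Config.fromfile('configs/_base_/datasets/cityscapes_instance.py')
# print(cfg.train_dataloader.dataset.type)  # -> 'RepeatDataset'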