coco_detection.py

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'

# Example to use a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the prefix (does not support LMDB
# and Memcache yet)

# data_root = 's3://openmmlab/datasets/detection/coco/'

# Method 2: Use `backend_args` (named `file_client_args` in versions
# before 3.0.0rc6)
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
backend_args = None

train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, delete this step
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    batch_sampler=dict(type='AspectRatioBatchSampler'),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
        filter_cfg=dict(filter_empty_gt=True, min_size=32),
        pipeline=train_pipeline,
        backend_args=backend_args))
val_dataloader = dict(
    batch_size=1,
    num_workers=2,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotations/instances_val2017.json',
        data_prefix=dict(img='val2017/'),
        test_mode=True,
        pipeline=test_pipeline,
        backend_args=backend_args))
test_dataloader = val_dataloader

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'annotations/instances_val2017.json',
    metric='bbox',
    format_only=False,
    backend_args=backend_args)
test_evaluator = val_evaluator

# inference on the test dataset and
# format the output results for submission.
# test_dataloader = dict(
#     batch_size=1,
#     num_workers=2,
#     persistent_workers=True,
#     drop_last=False,
#     sampler=dict(type='DefaultSampler', shuffle=False),
#     dataset=dict(
#         type=dataset_type,
#         data_root=data_root,
#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#         data_prefix=dict(img='test2017/'),
#         test_mode=True,
#         pipeline=test_pipeline))
# test_evaluator = dict(
#     type='CocoMetric',
#     metric='bbox',
#     format_only=True,
#     ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#     outfile_prefix='./work_dirs/coco_detection/test')
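
# Example (illustrative sketch): a downstream model config typically inherits
# this dataset config via `_base_` and overrides only the fields it needs;
# dict fields are merged, so unspecified keys keep the values defined above.
# The relative path assumes the standard configs/_base_/datasets/ layout, and
# the override values below are arbitrary examples.
# _base_ = ['../_base_/datasets/coco_detection.py']
# train_dataloader = dict(batch_size=4, num_workers=4)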