# download_dataset.py
import argparse
import tarfile
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile

import torch
from mmengine.utils.path import mkdir_or_exist
  10. def parse_args():
  11. parser = argparse.ArgumentParser(
  12. description='Download datasets for training')
  13. parser.add_argument(
  14. '--dataset-name', type=str, help='dataset name', default='coco2017')
  15. parser.add_argument(
  16. '--save-dir',
  17. type=str,
  18. help='the dir to save dataset',
  19. default='data/coco')
  20. parser.add_argument(
  21. '--unzip',
  22. action='store_true',
  23. help='whether unzip dataset or not, zipped files will be saved')
  24. parser.add_argument(
  25. '--delete',
  26. action='store_true',
  27. help='delete the download zipped files')
  28. parser.add_argument(
  29. '--threads', type=int, help='number of threading', default=4)
  30. args = parser.parse_args()
  31. return args
  32. def download(url, dir, unzip=True, delete=False, threads=1):
  33. def download_one(url, dir):
  34. f = dir / Path(url).name
  35. if Path(url).is_file():
  36. Path(url).rename(f)
  37. elif not f.exists():
  38. print(f'Downloading {url} to {f}')
  39. torch.hub.download_url_to_file(url, f, progress=True)
  40. if unzip and f.suffix in ('.zip', '.tar'):
  41. print(f'Unzipping {f.name}')
  42. if f.suffix == '.zip':
  43. ZipFile(f).extractall(path=dir)
  44. elif f.suffix == '.tar':
  45. TarFile(f).extractall(path=dir)
  46. if delete:
  47. f.unlink()
  48. print(f'Delete {f}')
  49. dir = Path(dir)
  50. if threads > 1:
  51. pool = ThreadPool(threads)
  52. pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
  53. pool.close()
  54. pool.join()
  55. else:
  56. for u in [url] if isinstance(url, (str, Path)) else url:
  57. download_one(u, dir)
  58. def download_objects365v2(url, dir, unzip=True, delete=False, threads=1):
  59. def download_single(url, dir):
  60. if 'train' in url:
  61. saving_dir = dir / Path('train_zip')
  62. mkdir_or_exist(saving_dir)
  63. f = saving_dir / Path(url).name
  64. unzip_dir = dir / Path('train')
  65. mkdir_or_exist(unzip_dir)
  66. elif 'val' in url:
  67. saving_dir = dir / Path('val')
  68. mkdir_or_exist(saving_dir)
  69. f = saving_dir / Path(url).name
  70. unzip_dir = dir / Path('val')
  71. mkdir_or_exist(unzip_dir)
  72. else:
  73. raise NotImplementedError
  74. if Path(url).is_file():
  75. Path(url).rename(f)
  76. elif not f.exists():
  77. print(f'Downloading {url} to {f}')
  78. torch.hub.download_url_to_file(url, f, progress=True)
  79. if unzip and str(f).endswith('.tar.gz'):
  80. print(f'Unzipping {f.name}')
  81. tar = tarfile.open(f)
  82. tar.extractall(path=unzip_dir)
  83. if delete:
  84. f.unlink()
  85. print(f'Delete {f}')
  86. # process annotations
  87. full_url = []
  88. for _url in url:
  89. if 'zhiyuan_objv2_train.tar.gz' in _url or \
  90. 'zhiyuan_objv2_val.json' in _url:
  91. full_url.append(_url)
  92. elif 'train' in _url:
  93. for i in range(51):
  94. full_url.append(f'{_url}patch{i}.tar.gz')
  95. elif 'val/images/v1' in _url:
  96. for i in range(16):
  97. full_url.append(f'{_url}patch{i}.tar.gz')
  98. elif 'val/images/v2' in _url:
  99. for i in range(16, 44):
  100. full_url.append(f'{_url}patch{i}.tar.gz')
  101. else:
  102. raise NotImplementedError
  103. dir = Path(dir)
  104. if threads > 1:
  105. pool = ThreadPool(threads)
  106. pool.imap(lambda x: download_single(*x), zip(full_url, repeat(dir)))
  107. pool.close()
  108. pool.join()
  109. else:
  110. for u in full_url:
  111. download_single(u, dir)
  112. def main():
  113. args = parse_args()
  114. path = Path(args.save_dir)
  115. if not path.exists():
  116. path.mkdir(parents=True, exist_ok=True)
  117. data2url = dict(
  118. # TODO: Support for downloading Panoptic Segmentation of COCO
  119. coco2017=[
  120. 'http://images.cocodataset.org/zips/train2017.zip',
  121. 'http://images.cocodataset.org/zips/val2017.zip',
  122. 'http://images.cocodataset.org/zips/test2017.zip',
  123. 'http://images.cocodataset.org/zips/unlabeled2017.zip',
  124. 'http://images.cocodataset.org/annotations/annotations_trainval2017.zip', # noqa
  125. 'http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip', # noqa
  126. 'http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip', # noqa
  127. 'http://images.cocodataset.org/annotations/image_info_test2017.zip', # noqa
  128. 'http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip', # noqa
  129. ],
  130. lvis=[
  131. 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
  132. 'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
  133. ],
  134. voc2007=[
  135. 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa
  136. 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa
  137. 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa
  138. ],
  139. # Note: There is no download link for Objects365-V1 right now. If you
  140. # would like to download Objects365-V1, please visit
  141. # http://www.objects365.org/ to concat the author.
  142. objects365v2=[
  143. # training annotations
  144. 'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/zhiyuan_objv2_train.tar.gz', # noqa
  145. # validation annotations
  146. 'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/zhiyuan_objv2_val.json', # noqa
  147. # training url root
  148. 'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/', # noqa
  149. # validation url root_1
  150. 'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v1/', # noqa
  151. # validation url root_2
  152. 'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v2/' # noqa
  153. ])
  154. url = data2url.get(args.dataset_name, None)
  155. if url is None:
  156. print('Only support COCO, VOC, LVIS, and Objects365v2 now!')
  157. return
  158. if args.dataset_name == 'objects365v2':
  159. download_objects365v2(
  160. url,
  161. dir=path,
  162. unzip=args.unzip,
  163. delete=args.delete,
  164. threads=args.threads)
  165. else:
  166. download(
  167. url,
  168. dir=path,
  169. unzip=args.unzip,
  170. delete=args.delete,
  171. threads=args.threads)
  172. if __name__ == '__main__':
  173. main()