JeffreyJsam committed on
Commit
34441e2
·
verified ·
1 Parent(s): 9d49b0c

Update utils/download_swim.py

Browse files
Files changed (1) hide show
  1. utils/download_swim.py +2 -8
utils/download_swim.py CHANGED
@@ -1,23 +1,17 @@
1
  """
2
  download_swim.py
3
-
4
  Streams and downloads the full paired dataset (images + label txt files) from a Hugging Face Hub repository.
5
  It recursively processes all available chunk subfolders (e.g., '000', '001', ...) under given parent paths.
6
-
7
  Features:
8
  - Recursively discovers subdirs (chunks) using HfFileSystem
9
  - Optionally flattens the directory structure by removing the deepest chunk level
10
  - Saves each .png image with its corresponding .txt label
11
-
12
  Use this script if you want to download the complete dataset for model training or offline access.
13
-
14
  Usage:
15
  # Download all chunks (flattened)
16
  python utils/download_swim.py --output-dir ./SWiM --flatten
17
-
18
  # Download specific chunks
19
  python utils/download_swim.py --chunks 000 001 002 --flatten False
20
-
21
  Arguments:
22
  --repo-id Hugging Face dataset repository ID
23
  --images-parent Parent directory for image chunks (e.g., Baseline/images/train)
@@ -56,7 +50,7 @@ def sample_dataset(
56
  labels_parent: str,
57
  output_dir: str,
58
  # max_files: int = 500,
59
- flatten: bool = True,
60
  chunks: list = None
61
  ):
62
  total_downloaded = 0
@@ -126,7 +120,7 @@ def parse_args():
126
  parser.add_argument("--labels-parent", default="Baseline/labels", help="Parent directory for label chunks.")
127
  parser.add_argument("--output-dir", default="./SWiM", help="Where to save sampled data.")
128
  #parser.add_argument("--count", type=int, default=500, help="How many samples to download in total.")
129
- parser.add_argument("--flatten", default=True, type=bool, help="Save all samples in a single folder without subdirectories.")
130
  parser.add_argument("--chunks", nargs="*", default=None, help="Specific chunk names to sample (e.g. 000 001). Leave empty to process all.")
131
  return parser.parse_args()
132
 
 
1
  """
2
  download_swim.py
 
3
  Streams and downloads the full paired dataset (images + label txt files) from a Hugging Face Hub repository.
4
  It recursively processes all available chunk subfolders (e.g., '000', '001', ...) under given parent paths.
 
5
  Features:
6
  - Recursively discovers subdirs (chunks) using HfFileSystem
7
  - Optionally flattens the directory structure by removing the deepest chunk level
8
  - Saves each .png image with its corresponding .txt label
 
9
  Use this script if you want to download the complete dataset for model training or offline access.
 
10
  Usage:
11
  # Download all chunks (flattened)
12
  python utils/download_swim.py --output-dir ./SWiM --flatten
 
13
  # Download specific chunks
14
  python utils/download_swim.py --chunks 000 001 002 --flatten False
 
15
  Arguments:
16
  --repo-id Hugging Face dataset repository ID
17
  --images-parent Parent directory for image chunks (e.g., Baseline/images/train)
 
50
  labels_parent: str,
51
  output_dir: str,
52
  # max_files: int = 500,
53
+ flatten: bool,
54
  chunks: list = None
55
  ):
56
  total_downloaded = 0
 
120
  parser.add_argument("--labels-parent", default="Baseline/labels", help="Parent directory for label chunks.")
121
  parser.add_argument("--output-dir", default="./SWiM", help="Where to save sampled data.")
122
  #parser.add_argument("--count", type=int, default=500, help="How many samples to download in total.")
123
+ parser.add_argument("--flatten", action='store_true', help="Save all samples in a single folder without subdirectories.")
124
  parser.add_argument("--chunks", nargs="*", default=None, help="Specific chunk names to sample (e.g. 000 001). Leave empty to process all.")
125
  return parser.parse_args()
126