Support skeleton (#155)
* pose * pose * pose * pose * 你的提交消息 * pose * pose * Delete train1.sh * pretreatment * configs * pose * reference * Update gaittr.py * naming * naming * Update transform.py * update for datasets * update README * update name and README * update * Update transform.py
This commit is contained in:
+11
-2
@@ -24,15 +24,24 @@ ls *.tgz | xargs -n1 tar xzvf
|
||||
|
||||
After unpacking these compressed files, run this command:
|
||||
|
||||
Step2 : To rearrange directory of GREW dataset, turning to id-type-view structure, Run
|
||||
Step2-1 : To rearrange directory of GREW dataset(for silhouette), turning to id-type-view structure, Run
|
||||
```
|
||||
python datasets/GREW/rearrange_GREW.py --input_path Path_of_GREW-raw --output_path Path_of_GREW-rearranged
|
||||
```
|
||||
Step2-2 : To rearrange directory of GREW dataset(for pose), turning to id-type-view structure, Run
|
||||
```
|
||||
python datasets/GREW/rearrange_GREW_pose.py --input_path Path_of_GREW-pose --output_path Path_of_GREW-pose-rearranged
|
||||
```
|
||||
|
||||
Step3: Transforming images to pickle file, run
|
||||
Step3-1: Transforming images to pickle file, run
|
||||
```
|
||||
python datasets/pretreatment.py --input_path Path_of_GREW-rearranged --output_path Path_of_GREW-pkl --dataset GREW
|
||||
```
|
||||
Step3-2: Transforming pose txts to pickle file, run
|
||||
```
|
||||
python datasets/pretreatment.py --input_path Path_of_GREW-pose-rearranged --output_path Path_of_GREW-pose-pkl --pose --dataset GREW
|
||||
```
|
||||
|
||||
Then you will see the structure like:
|
||||
|
||||
- Processed
|
||||
|
||||
@@ -0,0 +1,92 @@
|
||||
import argparse
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
from tqdm import tqdm
|
||||
|
||||
# Expected sequence/subject counts for GREW; used only as the tqdm
# progress totals in rearrange_test / rearrange_train below.
TOTAL_Test = 24000
TOTAL_Train = 20000
|
||||
|
||||
def rearrange_train(train_path: Path, output_path: Path) -> None:
    """Symlink GREW training pose txts into the id-type-view layout.

    Each ``<sid>/<sub_seq>/*_2d_pose.txt`` is linked to
    ``<output_path>/<sid>train/00/<sub_seq>/pose_<name>``.

    Args:
        train_path (Path): Root of the raw GREW ``train`` split.
        output_path (Path): Root of the rearranged output tree.
    """
    progress = tqdm(total=TOTAL_Train)
    for sid in train_path.iterdir():
        if not sid.is_dir():
            continue
        for sub_seq in sid.iterdir():
            if not sub_seq.is_dir():
                continue
            # PERF: src/dst are invariant per sequence — hoist them (and the
            # makedirs call) out of the per-file loop.
            src = os.path.join(train_path, sid.name, sub_seq.name)
            dst = os.path.join(output_path, sid.name + 'train', '00', sub_seq.name)
            os.makedirs(dst, exist_ok=True)
            for subfile in os.listdir(sub_seq):
                if not subfile.endswith('_2d_pose.txt'):
                    continue
                link_path = os.path.join(dst, 'pose_' + subfile)
                # BUG FIX: the original checked ``subfile in os.listdir(dst)``
                # but the link is created as ``pose_<subfile>``, so the check
                # never matched and re-running the script always raised
                # FileExistsError from os.symlink. Test the actual link name.
                if not os.path.exists(link_path):
                    os.symlink(os.path.join(src, subfile), link_path)
        # NOTE(review): progress is advanced once per subject to match
        # TOTAL_Train=20000 (subject count) — confirm against the raw layout.
        progress.update(1)
|
||||
|
||||
def rearrange_test(test_path: Path, output_path: Path) -> None:
    """Symlink GREW test pose txts (gallery + probe) into id-type-view layout.

    Gallery sequences keep their subject id and are numbered ``01``, ``02``,
    ...; all probe sequences are collected under ``<output_path>/probe/03/``.

    Args:
        test_path (Path): Root of the raw GREW ``test`` split (contains the
            ``gallery`` and ``probe`` folders).
        output_path (Path): Root of the rearranged output tree.
    """
    gallery = Path(os.path.join(test_path, 'gallery'))
    probe = Path(os.path.join(test_path, 'probe'))
    progress = tqdm(total=TOTAL_Test)
    # for gallery
    for sid in gallery.iterdir():
        if not sid.is_dir():
            continue
        cnt = 1
        for sub_seq in sid.iterdir():
            if not sub_seq.is_dir():
                continue
            # PERF: hoist the loop-invariant paths and makedirs out of the
            # per-file loop.
            src = os.path.join(gallery, sid.name, sub_seq.name)
            dst = os.path.join(output_path, sid.name, '%02d' % cnt, sub_seq.name)
            os.makedirs(dst, exist_ok=True)
            for subfile in sorted(os.listdir(sub_seq)):
                if not subfile.endswith('_2d_pose.txt'):
                    continue
                link_path = os.path.join(dst, 'pose_' + subfile)
                # BUG FIX: the original tested ``subfile in os.listdir(dst)``
                # but creates the link as ``pose_<subfile>``, so re-running
                # always raised FileExistsError. Check the real link name.
                if not os.path.exists(link_path):
                    os.symlink(os.path.join(src, subfile), link_path)
            cnt += 1
            progress.update(1)
    # for probe: sequences are not grouped by subject in the raw layout
    for sub_seq in probe.iterdir():
        if not sub_seq.is_dir():
            continue
        src = os.path.join(probe, sub_seq.name)
        dst = os.path.join(output_path, 'probe', '03', sub_seq.name)
        os.makedirs(dst, exist_ok=True)
        for subfile in os.listdir(sub_seq):
            if not subfile.endswith('_2d_pose.txt'):
                continue
            link_path = os.path.join(dst, 'pose_' + subfile)
            # same FileExistsError fix as the gallery branch above
            if not os.path.exists(link_path):
                os.symlink(os.path.join(src, subfile), link_path)
        progress.update(1)
|
||||
|
||||
def rearrange_GREW(input_path: Path, output_path: Path) -> None:
    """Dispatch each top-level GREW split folder to its rearranger."""
    os.makedirs(output_path, exist_ok=True)

    for folder in input_path.iterdir():
        if not folder.is_dir():
            continue

        print(f'Rearranging {folder}')
        split = folder.name
        if split == 'train':
            rearrange_train(folder, output_path)
        if split == 'test':
            rearrange_test(folder, output_path)
        if split == 'distractor':
            # the distractor split is deliberately left untouched
            pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: rearrange a raw GREW pose tree into the id-type-view
    # layout expected by OpenGait's pretreatment step.
    parser = argparse.ArgumentParser(description='GREW rearrange tool')
    parser.add_argument('-i', '--input_path', required=True, type=str,
                        help='Root path of raw dataset.')
    parser.add_argument('-o', '--output_path', default='GREW_rearranged', type=str,
                        help='Root path for output.')

    args = parser.parse_args()

    # Resolve to absolute paths so the symlinks created by the rearrangers do
    # not break if the current working directory later changes.
    input_path = Path(args.input_path).resolve()
    output_path = Path(args.output_path).resolve()
    rearrange_GREW(input_path, output_path)
|
||||
@@ -0,0 +1,19 @@
|
||||
# Datasets for MSGG
|
||||
MSGG requires converting the pose keypoint format of other datasets (such as CASIA-B, GREW, and Gait3D) from the COCO-17 layout to the pyramid keypoint input format.
|
||||
|
||||
## Data Pretreatment
|
||||
```python
|
||||
python datasets/MSGG/pyramid_keypoints_msgg.py --input_path Path_of_pose_pkl --output_path Path_of_pose_pyramid_pkl
|
||||
```
|
||||
|
||||
## Citation
|
||||
```
|
||||
@article{peng2023learning,
|
||||
title={Learning rich features for gait recognition by integrating skeletons and silhouettes},
|
||||
author={Peng, Yunjie and Ma, Kang and Zhang, Yang and He, Zhiqiang},
|
||||
journal={Multimedia Tools and Applications},
|
||||
pages={1--22},
|
||||
year={2023},
|
||||
publisher={Springer}
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,94 @@
|
||||
import os
|
||||
import os.path as osp
|
||||
import numpy as np
|
||||
import pdb
|
||||
import argparse
|
||||
import pickle
|
||||
|
||||
# COCO-17 keypoint layout: joint name -> index into the source pose arrays.
ORG_KEYPOINTS = {
    'nose'          :0,
    'left_eye'      :1,
    'right_eye'     :2,
    'left_ear'      :3,
    'right_ear'     :4,
    'left_shoulder' :5,
    'right_shoulder':6,
    'left_elbow'    :7,
    'right_elbow'   :8,
    'left_wrist'    :9,
    'right_wrist'   :10,
    'left_hip'      :11,
    'right_hip'     :12,
    'left_knee'     :13,
    'right_knee'    :14,
    'left_ankle'    :15,
    'right_ankle'   :16,
}
|
||||
|
||||
# Target 12-keypoint layout (the pyramid/MSGG input order): new index -> joint
# name. Face joints from COCO-17 are dropped; only limb/torso joints remain.
NEW_KEYPOINTS = {
    0: 'right_shoulder',
    1: 'right_elbow',
    2: 'right_knee',
    3: 'right_hip',
    4: 'left_elbow',
    5: 'left_knee',
    6: 'left_shoulder',
    7: 'right_wrist',
    8: 'right_ankle',
    9: 'left_hip',
    10: 'left_wrist',
    11: 'left_ankle',
}
|
||||
|
||||
def get_index_mapping(new_keypoints=None, org_keypoints=None):
    """Build the mapping from target keypoint indices to source indices.

    Generalized (backward-compatibly) to accept explicit layouts; calling it
    with no arguments behaves exactly as before.

    Args:
        new_keypoints (dict, optional): ``{new_index: joint_name}``; defaults
            to the module-level ``NEW_KEYPOINTS``.
        org_keypoints (dict, optional): ``{joint_name: source_index}``;
            defaults to the module-level ``ORG_KEYPOINTS``.

    Returns:
        dict: ``{new_index: source_index}`` for every entry of new_keypoints.
    """
    if new_keypoints is None:
        new_keypoints = NEW_KEYPOINTS
    if org_keypoints is None:
        org_keypoints = ORG_KEYPOINTS
    # Idiom: the original index loop collapses to a dict comprehension.
    return {idx: org_keypoints[name] for idx, name in new_keypoints.items()}
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: walk an id-type-view tree of per-view keypoint pickles
    # and rewrite each [T, N, 3] array as a [T, 12, 3] pyramid-layout array.
    parser = argparse.ArgumentParser(description='OpenGait dataset pretreatment module.')
    parser.add_argument('-i', '--input_path', default='', type=str, help='Root path of raw dataset.')
    parser.add_argument('-o', '--output_path', default='', type=str, help='Output path of pickled dataset.')
    args = parser.parse_args()

    # new keypoint index -> COCO-17 index (see NEW_KEYPOINTS / ORG_KEYPOINTS)
    index_mapping = get_index_mapping()
    data_path = args.input_path
    des_path = args.output_path

    id_list = sorted(os.listdir(data_path))
    for _id in id_list:
        type_list = sorted(os.listdir(osp.join(data_path, _id)))
        for _type in type_list:
            view_list = sorted(os.listdir(osp.join(data_path, _id, _type)))
            for _view in view_list:
                seq_info = [_id, _type, _view]
                seq_info_str = '-'.join(seq_info)
                # mirror the id/type/view directory structure in the output
                seq_dir = osp.join(data_path, *seq_info)
                des_dir = osp.join(des_path, *seq_info)
                if osp.exists(des_dir) is False:
                    os.makedirs(des_dir)

                # NOTE(review): keypoints_list is computed but never used.
                keypoints_list = os.listdir(seq_dir)
                # the per-view pickle is named after the view folder itself
                pkl_name = "{}.pkl".format(_view)
                seq_path = osp.join(seq_dir, pkl_name)
                save_path = osp.join(des_dir, pkl_name)
                seq_path_exists = osp.exists(seq_path)

                # skip (but report) sequences whose pickle is missing
                if seq_path_exists is False:
                    print("seq:{} input:{}. ".format(seq_info_str, seq_path_exists))
                    continue
                with open(seq_path, 'rb') as f:
                    keypoints_data = pickle.load(f)
                to_pickle = []
                # remap every frame's joints into the 12-keypoint layout
                for keypoint in keypoints_data:
                    mapped_keypoints = np.zeros((12, 3))
                    for i in range(mapped_keypoints.shape[0]):
                        mapped_keypoints[i] = keypoint[index_mapping[i]]
                    to_pickle.append(mapped_keypoints)
                keypoints = np.stack(to_pickle)
                # NOTE(review): the output file handle is never closed
                # explicitly; consider `with open(...)` here.
                pickle.dump(keypoints, open(save_path, 'wb'))

                print("FINISHED: " + "-".join(seq_info))
|
||||
|
||||
@@ -35,15 +35,27 @@ python datasets/OUMVLP/extractor.py --input_path Path_of_OUMVLP-base --output_pa
|
||||
......
|
||||
......
|
||||
```
|
||||
Step3 : To rearrange directory of OUMVLP dataset, turning to id-type-view structure, Run
|
||||
Step3-1 : To rearrange directory of OUMVLP dataset(for silhouette), turning to id-type-view structure, Run
|
||||
```
|
||||
python datasets/OUMVLP/rearrange_OUMVLP.py --input_path Path_of_OUMVLP-raw --output_path Path_of_OUMVLP-rearranged
|
||||
```
|
||||
Step3-2 : To rearrange directory of OUMVLP dataset(for pose), turning to id-type-view structure, Run
|
||||
```
|
||||
python datasets/OUMVLP/rearrange_OUMVLP_pose.py --input_path Path_of_OUMVLP-pose --output_path Path_of_OUMVLP-pose-rearranged
|
||||
```
|
||||
|
||||
Step4: Transforming images to pickle file, run
|
||||
Step4-1: Transforming images to pickle file, run
|
||||
```
|
||||
python datasets/pretreatment.py --input_path Path_of_OUMVLP-rearranged --output_path Path_of_OUMVLP-pkl
|
||||
```
|
||||
Step4-2: Transforming pose txts to pickle file, run
|
||||
```
|
||||
python datasets/pretreatment.py --input_path Path_of_OUMVLP-pose-rearranged --output_path Path_of_OUMVLP-pose-pkl --pose --dataset OUMVLP
|
||||
```
|
||||
Generate the 17-keypoint pose format from the 18-keypoint pose format:
|
||||
```
|
||||
python datasets/OUMVLP/rearrange_OUMVLP_pose.py --input_path Path_of_OUMVLP-pose18 --output_path Path_of_OUMVLP-pose17
|
||||
```
|
||||
|
||||
- Processed
|
||||
```
|
||||
|
||||
@@ -0,0 +1,85 @@
|
||||
import pickle
|
||||
from tqdm import tqdm
|
||||
from pathlib import Path
|
||||
import os
|
||||
import os.path as osp
|
||||
import argparse
|
||||
import logging
|
||||
|
||||
'''
|
||||
Generate the 17-keypoint pose format from the 18-keypoint pose format.
|
||||
OUMVLP 17
|
||||
# keypoints = {
|
||||
# 0: "nose",
|
||||
# 1: "left_eye",
|
||||
# 2: "right_eye",
|
||||
# 3: "left_ear",
|
||||
# 4: "right_ear",
|
||||
# 5: "left_shoulder",
|
||||
# 6: "right_shoulder",
|
||||
# 7: "left_elbow",
|
||||
# 8: "right_elbow",
|
||||
# 9: "left_wrist",
|
||||
# 10: "right_wrist",
|
||||
# 11: "left_hip",
|
||||
# 12: "right_hip",
|
||||
# 13: "left_knee",
|
||||
# 14: "right_knee",
|
||||
# 15: "left_ankle",
|
||||
# 16: "right_ankle"
|
||||
# }
|
||||
OUMVLP 18
|
||||
mask=[0,15,14,17,16,5,2,6,3,7,4,11,8,12,9,13,10]
|
||||
# keypoints = {
|
||||
# 0: "nose",
|
||||
# 1: "neck",
|
||||
# 2: "Rshoulder",
|
||||
# 3: "Relbow",
|
||||
# 4: "Rwrist",
|
||||
# 5: "Lshoudler",
|
||||
# 6: "Lelbow",
|
||||
# 7: "Lwrist",
|
||||
# 8: "Rhip",
|
||||
# 9: "Rknee",
|
||||
# 10: "Rankle",
|
||||
# 11: "Lhip",
|
||||
# 12: "Lknee",
|
||||
# 13: "Lankle",
|
||||
# 14: "Reye",
|
||||
# 15: "Leye",
|
||||
# 16: "Rear",
|
||||
# 17: "Lear"
|
||||
# }
|
||||
'''
|
||||
|
||||
def ToOUMVLP17(input_path: Path, output_path: Path):
    """Convert OUMVLP 18-keypoint pose pickles to the 17-keypoint layout.

    Reads each ``<subject>/<seq>/<view>/<view>.pkl`` array of shape [T, 18, 3]
    and writes the reordered [T, 17, 3] array to the mirrored path under
    ``output_path`` (see the module docstring for both joint orders).

    Args:
        input_path (Path): Root of the 18-keypoint pickle tree.
        output_path (Path): Root of the 17-keypoint output tree.
    """
    # output joint i is taken from input joint mask[i] (18-point OpenPose
    # order -> 17-point order; the 'neck' joint is dropped)
    mask = [0, 15, 14, 17, 16, 5, 2, 6, 3, 7, 4, 11, 8, 12, 9, 13, 10]
    TOTAL_SUBJECTS = 10307  # used only as the tqdm progress total
    progress = tqdm(total=TOTAL_SUBJECTS)

    for subject in input_path.iterdir():
        # Robustness fix: skip stray files at every level of the tree; the
        # original iterated them and crashed on .iterdir().
        if not subject.is_dir():
            continue
        output_subject = subject.name
        for seq in subject.iterdir():
            if not seq.is_dir():
                continue
            output_seq = seq.name
            for view in seq.iterdir():
                if not view.is_dir():
                    continue
                src = os.path.join(view, f"{view.name}.pkl")
                dst = os.path.join(output_path, output_subject, output_seq, view.name)
                os.makedirs(dst, exist_ok=True)
                with open(src, 'rb') as f:
                    srcdata = pickle.load(f)  # [T, 18, 3]
                data = srcdata[..., mask, :].copy()  # [T, 17, 3]
                pkl_path = os.path.join(dst, f'{view.name}.pkl')
                # BUG FIX: pickle.dump(data, open(...)) leaked the file
                # handle; close it deterministically.
                with open(pkl_path, 'wb') as f:
                    pickle.dump(data, f)
        # NOTE(review): progress advanced per subject to match TOTAL_SUBJECTS;
        # confirm against the original indentation, which was lost.
        progress.update(1)
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: convert an 18-keypoint OUMVLP pose-pickle tree into the
    # 17-keypoint layout.
    parser = argparse.ArgumentParser(description='OpenGait dataset pretreatment module.')
    parser.add_argument('-i', '--input_path', default='', type=str, help='Root path of raw dataset.')
    parser.add_argument('-o', '--output_path', default='', type=str, help='Output path of pickled dataset.')
    parser.add_argument('-l', '--log_to_file', default='./pretreatment.log', type=str, help='Log file path. Default: ./pretreatment.log')
    args = parser.parse_args()

    # BUG FIX: logging was never configured, so the INFO messages below were
    # dropped and the -l/--log_to_file option was silently ignored. Mirror the
    # basicConfig style used by datasets/pretreatment.py.
    logging.basicConfig(level=logging.INFO, filename=args.log_to_file, filemode='w',
                        format='[%(asctime)s - %(levelname)s]: %(message)s')
    logging.info('Begin')
    ToOUMVLP17(input_path=Path(args.input_path), output_path=Path(args.output_path))
    logging.info('Done')
|
||||
@@ -0,0 +1,44 @@
|
||||
import argparse
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Tuple
|
||||
from tqdm import tqdm
|
||||
|
||||
|
||||
TOTAL_SUBJECTS = 10307
|
||||
|
||||
|
||||
def sanitize(name: str) -> Tuple[str, str]:
    """Split a raw OUMVLP folder name ``'<view>_<seq>'`` into (view, seq).

    BUG FIX: the annotation promises ``Tuple[str, str]`` but ``str.split``
    returns a list; unpack and return an actual 2-tuple, which also fails
    loudly (ValueError) on malformed names instead of propagating a bad list.
    """
    view, seq = name.split('_')
    return view, seq
|
||||
|
||||
|
||||
def rearrange(input_path: Path, output_path: Path) -> None:
    """Symlink OUMVLP pose jsons into the id-type-view layout.

    Each ``<subject>/<view>_<seq>/*.json`` is linked to
    ``<output_path>/<subject>/<seq>/<view>/``.

    Args:
        input_path (Path): Root of the raw OUMVLP pose tree.
        output_path (Path): Root of the rearranged output tree.
    """
    os.makedirs(output_path, exist_ok=True)
    progress = tqdm(total=TOTAL_SUBJECTS)
    for folder in input_path.iterdir():
        # Robustness fix: ignore stray files at the dataset root.
        if not folder.is_dir():
            continue
        subject = folder.name
        for sid in folder.iterdir():
            if not sid.is_dir():
                continue
            view, seq = sanitize(sid.name)
            src = os.path.join(input_path, subject, sid.name)
            dst = os.path.join(output_path, subject, seq, view)
            os.makedirs(dst, exist_ok=True)
            # PERF: snapshot the destination listing once instead of calling
            # os.listdir(dst) for every source file (was O(n^2) per folder).
            existing = set(os.listdir(dst))
            for subfile in os.listdir(src):
                if subfile.endswith('.json') and subfile not in existing:
                    os.symlink(os.path.join(src, subfile),
                               os.path.join(dst, subfile))
        # one tick per subject, matching TOTAL_SUBJECTS
        progress.update(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: rearrange a raw OUMVLP pose tree into the id-type-view
    # layout expected by OpenGait's pretreatment step.
    parser = argparse.ArgumentParser(description='OUMVLP rearrange tool')
    parser.add_argument('-i', '--input_path', required=True, type=str,
                        help='Root path of raw dataset.')
    parser.add_argument('-o', '--output_path', default='OUMVLP_rearranged', type=str,
                        help='Root path for output.')

    args = parser.parse_args()

    # Resolve to absolute paths so the created symlinks do not break when the
    # current working directory changes.
    input_path = Path(args.input_path).resolve()
    output_path = Path(args.output_path).resolve()
    rearrange(input_path, output_path)
|
||||
@@ -12,7 +12,7 @@ from typing import Tuple
|
||||
import cv2
|
||||
import numpy as np
|
||||
from tqdm import tqdm
|
||||
|
||||
import json
|
||||
|
||||
def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbose: bool = False, dataset='CASIAB') -> None:
|
||||
"""Reads a group of images and saves the data in pickle format.
|
||||
@@ -127,6 +127,92 @@ def pretreat(input_path: Path, output_path: Path, img_size: int = 64, workers: i
|
||||
progress.update(1)
|
||||
logging.info('Done')
|
||||
|
||||
def txts2pickle(txt_groups: Tuple, output_path: Path, verbose: bool = False, dataset='CASIAB') -> None:
    """Read one (sid, seq, view) group of pose files and pickle the keypoints.

    The stacked [T, N, 3] keypoint array is written to
    ``<output_path>/<sid>/<seq>/<view>/<view>.pkl``.

    Args:
        txt_groups (Tuple): ``((sid, seq, view), [pose file paths])``. For
            OUMVLP the files are OpenPose .json; otherwise comma-separated
            .txt whose first two values are skipped.
        output_path (Path): Output root path.
        verbose (bool, optional): Display debug info. Defaults to False.
        dataset (str, optional): Dataset name. Defaults to 'CASIAB'.
    """
    sinfo = txt_groups[0]
    txt_paths = txt_groups[1]
    to_pickle = []
    if dataset == 'OUMVLP':
        for txt_file in sorted(txt_paths):
            try:
                with open(txt_file) as f:
                    jsondata = json.load(f)
                # frames with no detected person are skipped
                if len(jsondata['people']) == 0:
                    continue
                data = np.array(jsondata["people"][0]["pose_keypoints_2d"]).reshape(-1, 3)
                to_pickle.append(data)
            except Exception:
                # BUG FIX: was a bare ``except:`` that only print()ed the
                # path; keep the deliberate best-effort skip but record the
                # failure (with traceback) in the log instead.
                logging.exception(f'Failed to parse {txt_file}')
    else:
        for txt_file in sorted(txt_paths):
            if verbose:
                logging.debug(f'Reading sid {sinfo[0]}, seq {sinfo[1]}, view {sinfo[2]} from {txt_file}')
            # skip the two leading metadata values, then (x, y, conf) triples
            data = np.genfromtxt(txt_file, delimiter=',')[2:].reshape(-1, 3)
            to_pickle.append(data)

    if to_pickle:
        dst_path = os.path.join(output_path, *sinfo)
        keypoints = np.stack(to_pickle)
        os.makedirs(dst_path, exist_ok=True)
        pkl_path = os.path.join(dst_path, f'{sinfo[2]}.pkl')
        if verbose:
            logging.debug(f'Saving {pkl_path}...')
        # BUG FIX: close the pickle file handle deterministically (was
        # pickle.dump(..., open(...)), which leaked the handle).
        with open(pkl_path, 'wb') as f:
            pickle.dump(keypoints, f)
        logging.info(f'Saved {len(to_pickle)} valid frames\' keypoints to {pkl_path}.')

    if len(to_pickle) < 5:
        logging.warning(f'{sinfo} has less than 5 valid data.')
|
||||
|
||||
|
||||
|
||||
def pretreat_pose(input_path: Path, output_path: Path, workers: int = 4, verbose: bool = False, dataset='CASIAB') -> None:
    """Group pose files by (sid, seq, view) and pickle them in parallel.

    Args:
        input_path (Path): Dataset root path (id-type-view layout).
        output_path (Path): Output path.
        workers (int, optional): Number of pool workers. Defaults to 4.
        verbose (bool, optional): Display debug info. Defaults to False.
        dataset (str, optional): Dataset name; 'OUMVLP' uses OpenPose .json
            pose files, everything else comma-separated .txt. Defaults to
            'CASIAB'.
    """
    txt_groups = defaultdict(list)
    logging.info(f'Listing {input_path}')
    total_files = 0
    # The two original branches differed only in the glob pattern — merge them.
    pattern = '*.json' if dataset == 'OUMVLP' else '*.txt'
    for file_path in input_path.rglob(pattern):
        if verbose:
            logging.debug(f'Adding {file_path}')
        # last four path components are <sid>/<seq>/<view>/<file>
        *_, sid, seq, view, _ = file_path.as_posix().split('/')
        txt_groups[(sid, seq, view)].append(file_path)
        total_files += 1

    logging.info(f'Total files listed: {total_files}')

    progress = tqdm(total=len(txt_groups), desc='Pretreating', unit='folder')

    with mp.Pool(workers) as pool:
        logging.info(f'Start pretreating {input_path}')
        # BUG FIX: was ``dataset=args.dataset`` — it reached into the global
        # argparse namespace instead of using this function's own parameter,
        # breaking any programmatic caller and NameError-ing on import use.
        for _ in pool.imap_unordered(partial(txts2pickle, output_path=output_path, verbose=verbose, dataset=dataset), txt_groups.items()):
            progress.update(1)
    logging.info('Done')
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = argparse.ArgumentParser(description='OpenGait dataset pretreatment module.')
|
||||
@@ -137,6 +223,7 @@ if __name__ == '__main__':
|
||||
parser.add_argument('-r', '--img_size', default=64, type=int, help='Image resizing size. Default 64')
|
||||
parser.add_argument('-d', '--dataset', default='CASIAB', type=str, help='Dataset for pretreatment.')
|
||||
parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Display debug info.')
|
||||
parser.add_argument('-p', '--pose', default=False, action='store_true', help='Processing pose.')
|
||||
args = parser.parse_args()
|
||||
|
||||
logging.basicConfig(level=logging.INFO, filename=args.log_file, filemode='w', format='[%(asctime)s - %(levelname)s]: %(message)s')
|
||||
@@ -146,5 +233,7 @@ if __name__ == '__main__':
|
||||
logging.info('Verbose mode is on.')
|
||||
for k, v in args.__dict__.items():
|
||||
logging.debug(f'{k}: {v}')
|
||||
|
||||
pretreat(input_path=Path(args.input_path), output_path=Path(args.output_path), img_size=args.img_size, workers=args.n_workers, verbose=args.verbose, dataset=args.dataset)
|
||||
if args.pose:
|
||||
pretreat_pose(input_path=Path(args.input_path), output_path=Path(args.output_path), workers=args.n_workers, verbose=args.verbose, dataset=args.dataset)
|
||||
else:
|
||||
pretreat(input_path=Path(args.input_path), output_path=Path(args.output_path), img_size=args.img_size, workers=args.n_workers, verbose=args.verbose, dataset=args.dataset)
|
||||
|
||||
Reference in New Issue
Block a user