Update ParsingGait (#160)
* Update ParsingGait * Clear up the confusion Clear up the confusion about gait3d and gait3d-parsing. * Update 0.get_started.md * Add BaseParsingCuttingTransform * Update gcn.py * Create gaitbase_gait3d_parsing_btz32x2_fixed.yaml * Add gait3d_parsing config file * Update 1.model_zoo.md Update Gait3D-Parsing checkpoints * Update 1.model_zoo.md add configuration * Update 1.model_zoo.md center text --------- Co-authored-by: Junhao Liang <43094337+darkliang@users.noreply.github.com>
This commit is contained in:
@@ -0,0 +1,43 @@
|
||||
# Gait3D-Parsing
|
||||
These are the pre-processing instructions for the Gait3D-Parsing dataset. The dataset homepage can be found [here](https://gait3d.github.io/gait3d-parsing-hp/). The dataset is not publicly available; you need to request access in order to download it. This README explains how to extract the original dataset and convert it to a format suitable for OpenGait.
|
||||
## Data Preparation
|
||||
https://github.com/Gait3D/Gait3D-Benchmark#data-preparation
|
||||
## Data Pretreatment
|
||||
```bash
|
||||
python datasets/Gait3D-Parsing/pretreatment_gps.py -i 'Gait3D/2D_Parsings' -o 'Gait3D-pars-64-64-pkl' -r 64 -p
|
||||
```
|
||||
|
||||
## Train
|
||||
### ParsingGait model:
|
||||
`CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --master_port 12345 --nproc_per_node=4 opengait/main.py --cfgs ./configs/parsinggait/parsinggait_gait3d_parsing.yaml --phase train`
|
||||
|
||||
## Citation
|
||||
If you use this dataset in your research, please cite the following paper:
|
||||
```
|
||||
@inproceedings{zheng2023parsinggait,
|
||||
title={Parsing is All You Need for Accurate Gait Recognition in the Wild},
|
||||
author={Jinkai Zheng and Xinchen Liu and Shuai Wang and Lihao Wang and Chenggang Yan and Wu Liu},
|
||||
booktitle={ACM International Conference on Multimedia (ACM MM)},
|
||||
year={2023}
|
||||
}
|
||||
|
||||
@inproceedings{zheng2022gait3d,
|
||||
title={Gait Recognition in the Wild with Dense 3D Representations and A Benchmark},
|
||||
author={Jinkai Zheng and Xinchen Liu and Wu Liu and Lingxiao He and Chenggang Yan and Tao Mei},
|
||||
booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
|
||||
year={2022}
|
||||
}
|
||||
```
|
||||
If you think the re-implementation of OpenGait is useful, please cite the following paper:
|
||||
```
|
||||
@misc{fan2022opengait,
|
||||
title={OpenGait: Revisiting Gait Recognition Toward Better Practicality},
|
||||
author={Chao Fan and Junhao Liang and Chuanfu Shen and Saihui Hou and Yongzhen Huang and Shiqi Yu},
|
||||
year={2022},
|
||||
eprint={2211.06597},
|
||||
archivePrefix={arXiv},
|
||||
primaryClass={cs.CV}
|
||||
}
|
||||
```
|
||||
## Acknowledgements
|
||||
This dataset was collected by [Zheng et al.](https://gait3d.github.io/). The pre-processing instructions are modified from the [Gait3D-Benchmark repository](https://github.com/Gait3D/Gait3D-Benchmark).
|
||||
@@ -0,0 +1,164 @@
|
||||
# This source is based on https://github.com/AbnerHqC/GaitSet/blob/master/pretreatment.py
|
||||
import argparse
|
||||
import logging
|
||||
import multiprocessing as mp
|
||||
import os
|
||||
import pickle
|
||||
from collections import defaultdict
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from typing import Tuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from tqdm import tqdm
|
||||
|
||||
|
||||
def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbose: bool = False, parsing: bool = False, dataset='CASIAB') -> None:
    """Reads a group of images and saves the data in pickle format.

    Args:
        img_groups (Tuple): Tuple of (sid, seq, view) and list of image paths.
        output_path (Path): Output path.
        img_size (int, optional): Image resizing size. Defaults to 64.
        verbose (bool, optional): Display debug info. Defaults to False.
        parsing (bool, optional): Treat inputs as parsing label maps. The
            silhouette used for cropping is derived by binarizing the labels,
            and nearest-neighbor interpolation is used so label values survive
            resizing. Defaults to False.
        dataset (str, optional): Dataset name; 'GREW' frames are stored as-is
            without cropping or resizing. Defaults to 'CASIAB'.
    """
    sinfo = img_groups[0]
    img_paths = img_groups[1]
    to_pickle = []
    for img_file in sorted(img_paths):
        if verbose:
            logging.debug(f'Reading sid {sinfo[0]}, seq {sinfo[1]}, view {sinfo[2]} from {img_file}')

        img = cv2.imread(str(img_file), cv2.IMREAD_GRAYSCALE)
        if img is None:
            # cv2.imread returns None for missing/corrupted files; skip them
            # instead of crashing on the next attribute access.
            logging.warning(f'{img_file} could not be read.')
            continue

        if dataset == 'GREW':
            to_pickle.append(img.astype('uint8'))
            continue

        if parsing:
            # Any non-zero parsing label counts as foreground.
            img_sil = (img > 0).astype('uint8') * 255
        else:
            img_sil = img
        if img_sil.sum() <= 10000:
            if verbose:
                logging.debug(f'Image sum: {img_sil.sum()}')
            logging.warning(f'{img_file} has no data.')
            continue

        # Get the upper and lower points
        y_sum = img_sil.sum(axis=1)
        y_top = (y_sum != 0).argmax(axis=0)
        y_btm = (y_sum != 0).cumsum(axis=0).argmax(axis=0)
        img = img[y_top: y_btm + 1, :]
        img_sil = img_sil[y_top: y_btm + 1, :]

        # As the height of a person is larger than the width,
        # use the height to calculate resize ratio.
        ratio = img.shape[1] / img.shape[0]
        ratio_sil = img_sil.shape[1] / img_sil.shape[0]
        assert ratio == ratio_sil
        if parsing:
            # INTER_NEAREST keeps discrete parsing labels intact (no blending).
            img = cv2.resize(img, (int(img_size * ratio), img_size), interpolation=cv2.INTER_NEAREST)
            img_sil = cv2.resize(img_sil, (int(img_size * ratio), img_size), interpolation=cv2.INTER_NEAREST)
        else:
            img = cv2.resize(img, (int(img_size * ratio), img_size), interpolation=cv2.INTER_CUBIC)
            img_sil = cv2.resize(img_sil, (int(img_size * ratio), img_size), interpolation=cv2.INTER_CUBIC)

        # Get the median of the x-axis and take it as the person's x-center.
        x_csum = img_sil.sum(axis=0).cumsum()
        x_center = None
        for idx, csum in enumerate(x_csum):
            if csum > img_sil.sum() / 2:
                x_center = idx
                break

        # Must be an identity check: a valid center at column 0 is falsy and
        # would be wrongly skipped by `if not x_center`.
        if x_center is None:
            logging.warning(f'{img_file} has no center.')
            continue

        # Get the left and right points
        half_width = img_size // 2
        left = x_center - half_width
        right = x_center + half_width
        if left <= 0 or right >= img.shape[1]:
            # Pad half a window on both sides so the crop stays in bounds.
            left += half_width
            right += half_width
            _ = np.zeros((img.shape[0], half_width))
            img = np.concatenate([_, img, _], axis=1)

        to_pickle.append(img[:, left: right].astype('uint8'))

    if to_pickle:
        to_pickle = np.asarray(to_pickle)
        dst_path = os.path.join(output_path, *sinfo)
        os.makedirs(dst_path, exist_ok=True)
        pkl_path = os.path.join(dst_path, f'{sinfo[2]}.pkl')
        if verbose:
            logging.debug(f'Saving {pkl_path}...')
        pickle.dump(to_pickle, open(pkl_path, 'wb'))
        logging.info(f'Saved {len(to_pickle)} valid frames to {pkl_path}.')

    if len(to_pickle) < 5:
        logging.warning(f'{sinfo} has less than 5 valid data.')
|
||||
|
||||
|
||||
|
||||
def pretreat(input_path: Path, output_path: Path, img_size: int = 64, workers: int = 4, verbose: bool = False, parsing: bool = False, dataset: str = 'CASIAB') -> None:
    """Reads a dataset and saves the data in pickle format.

    Args:
        input_path (Path): Dataset root path.
        output_path (Path): Output path.
        img_size (int, optional): Image resizing size. Defaults to 64.
        workers (int, optional): Number of thread workers. Defaults to 4.
        verbose (bool, optional): Display debug info. Defaults to False.
        parsing (bool, optional): Forwarded to imgs2pickle; enables
            parsing-map handling. Defaults to False.
        dataset (str, optional): Dataset name forwarded to imgs2pickle.
            Defaults to 'CASIAB'.
    """
    img_groups = defaultdict(list)
    logging.info(f'Listing {input_path}')
    total_files = 0
    for img_path in input_path.rglob('*.png'):
        # Skip pre-computed Gait Energy Images.
        if 'gei.png' in img_path.as_posix():
            continue
        if verbose:
            logging.debug(f'Adding {img_path}')
        # Expected layout: .../<subject-id>/<sequence>/<view>/<frame>.png
        *_, sid, seq, view, _ = img_path.as_posix().split('/')
        img_groups[(sid, seq, view)].append(img_path)
        total_files += 1

    logging.info(f'Total files listed: {total_files}')

    # Context manager guarantees the progress bar is closed even if a
    # worker raises (the original leaked it).
    with tqdm(total=len(img_groups), desc='Pretreating', unit='folder') as progress:
        with mp.Pool(workers) as pool:
            logging.info(f'Start pretreating {input_path}')
            for _ in pool.imap_unordered(partial(imgs2pickle, output_path=output_path, img_size=img_size, verbose=verbose, parsing=parsing, dataset=dataset), img_groups.items()):
                progress.update(1)
    logging.info('Done')
|
||||
|
||||
|
||||
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='OpenGait dataset pretreatment module.')
    parser.add_argument('-i', '--input_path', default='', type=str, help='Root path of raw dataset.')
    parser.add_argument('-o', '--output_path', default='', type=str, help='Output path of pickled dataset.')
    parser.add_argument('-l', '--log_file', default='./pretreatment.log', type=str, help='Log file path. Default: ./pretreatment.log')
    parser.add_argument('-n', '--n_workers', default=4, type=int, help='Number of thread workers. Default: 4')
    parser.add_argument('-r', '--img_size', default=64, type=int, help='Image resizing size. Default 64')
    parser.add_argument('-d', '--dataset', default='CASIAB', type=str, help='Dataset for pretreatment.')
    parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Display debug info.')
    # Fixed help text: it was copy-pasted from --verbose ("Display debug info.").
    parser.add_argument('-p', '--parsing', default=False, action='store_true', help='Pretreat parsing maps instead of silhouettes.')
    args = parser.parse_args()

    # Log to a file so stdout stays clean for the tqdm progress bar.
    logging.basicConfig(level=logging.INFO, filename=args.log_file, filemode='w', format='[%(asctime)s - %(levelname)s]: %(message)s')

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.info('Verbose mode is on.')
        for k, v in args.__dict__.items():
            logging.debug(f'{k}: {v}')

    print(f"parsing: {args.parsing}")
    pretreat(input_path=Path(args.input_path), output_path=Path(args.output_path), img_size=args.img_size, workers=args.n_workers, verbose=args.verbose, parsing=args.parsing, dataset=args.dataset)
|
||||
@@ -5,7 +5,7 @@ https://github.com/Gait3D/Gait3D-Benchmark#data-preparation
|
||||
## Data Pretreatment
|
||||
```bash
|
||||
python datasets/pretreatment.py --input_path 'Gait3D/2D_Silhouettes' --output_path 'Gait3D-sils-64-64-pkl'
|
||||
python datasets/pretreatment_smpl.py --input_path 'Gait3D/3D_SMPLs' --output_path 'Gait3D-smpls-pkl'
|
||||
python datasets/Gait3D/pretreatment_smpl.py --input_path 'Gait3D/3D_SMPLs' --output_path 'Gait3D-smpls-pkl'
|
||||
|
||||
(optional) python datasets/pretreatment.py --input_path 'Gait3D/2D_Silhouettes' --img_size 128 --output_path 'Gait3D-sils-128-128-pkl'
|
||||
|
||||
|
||||
@@ -0,0 +1,83 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Author : jinkai Zheng
|
||||
date: 2021/10/30
|
||||
E-mail: zhengjinkai3@qq.com
|
||||
"""
|
||||
|
||||
|
||||
import os.path as osp
|
||||
import time
|
||||
import os
|
||||
import threading
|
||||
import itertools
|
||||
import numpy as np
|
||||
import pickle
|
||||
import argparse
|
||||
|
||||
|
||||
# Command-line interface: raw SMPL .npz root in, pickled output root out.
# Parsed at import time; `opt` is consumed by the __main__ section below.
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('-i', '--input_path', type=str, default='', help='Root path of raw dataset.')
parser.add_argument('-o', '--output_path', type=str, default='', help='Root path for output.')
opt = parser.parse_args()
|
||||
|
||||
|
||||
def get_pickle(thread_id, id_list, save_dir):
    """Convert per-frame SMPL .npz files into one .pkl per sequence.

    Walks <data_dir>/<sid>/<cam>/<seq>/*.npz (``data_dir`` is a module-level
    global assigned in the __main__ section), stacks each frame's 85-D SMPL
    parameter vector (3-D cam + 72-D pose + 10-D shape), and writes the stack
    to <save_dir>/<sid>/<cam>/<seq>/<seq>.pkl.

    Args:
        thread_id: Worker index, used only for progress printing.
        id_list: Subject IDs assigned to this worker.
        save_dir: Output root directory.
    """
    for sid in sorted(id_list):  # renamed from `id` to avoid shadowing the builtin
        print(f"Process threadID-PID: {thread_id}-{sid}")
        cam_list = os.listdir(osp.join(data_dir, sid))
        cam_list.sort()
        for cam in cam_list:
            seq_list = os.listdir(osp.join(data_dir, sid, cam))
            seq_list.sort()
            for seq in seq_list:
                npz_list = os.listdir(osp.join(data_dir, sid, cam, seq))
                npz_list.sort()
                smpl_paras_fras = []
                for npz in npz_list:
                    npz_path = osp.join(data_dir, sid, cam, seq, npz)
                    # 'results' stores an object array; [()] unwraps it, [0]
                    # selects person index 0.
                    frame = np.load(npz_path, allow_pickle=True)['results'][()][0]
                    smpl_cam = frame['cam']  # 3-D
                    smpl_pose = frame['poses']  # 72-D
                    smpl_shape = frame['betas']  # 10-D
                    smpl_paras = np.concatenate((smpl_cam, smpl_pose, smpl_shape), 0)
                    smpl_paras_fras.append(smpl_paras)
                smpl_paras_fras = np.asarray(smpl_paras_fras)

                out_dir = osp.join(save_dir, sid, cam, seq)
                # exist_ok=True: the original crashed with FileExistsError
                # when the script was re-run over an existing output tree.
                os.makedirs(out_dir, exist_ok=True)
                smpl_paras_fras_pkl = os.path.join(out_dir, '{}.pkl'.format(seq))
                pickle.dump(smpl_paras_fras, open(smpl_paras_fras_pkl, 'wb'))
|
||||
|
||||
|
||||
if __name__ == '__main__':

    data_dir = opt.input_path
    save_dir = opt.output_path

    start_time = time.time()
    maxnum_thread = 8

    all_ids = sorted(os.listdir(data_dir))
    num_ids = len(all_ids)

    # Round-robin split of subject IDs over the worker threads.
    threads = []
    for thread_id in range(maxnum_thread):
        indices = itertools.islice(range(num_ids), thread_id, num_ids, maxnum_thread)
        id_list = [all_ids[i] for i in indices]
        worker = threading.Thread(target=get_pickle, args=(thread_id, id_list, save_dir))
        worker.start()
        threads.append(worker)

    for worker in threads:
        worker.join()

    time_elapsed = time.time() - start_time
    # Fixed copy-pasted message: this script pretreats data, it does not train.
    print('Pretreatment complete in {:.0f}h {:.0f}m {:.0f}s'.format(
        time_elapsed // 3600,
        (time_elapsed - (time_elapsed // 3600) * 3600) // 60,
        time_elapsed % 60))
|
||||
Reference in New Issue
Block a user