support CCPG

This commit is contained in:
darkliang
2023-06-27 21:25:38 +08:00
parent 9ff2520c05
commit 78171dba91
6 changed files with 625 additions and 2 deletions
+206
View File
@@ -0,0 +1,206 @@
{
"TRAIN_SET": [
"000",
"001",
"002",
"003",
"004",
"005",
"006",
"007",
"008",
"009",
"010",
"011",
"012",
"013",
"014",
"015",
"016",
"017",
"018",
"019",
"020",
"021",
"022",
"023",
"024",
"025",
"026",
"027",
"028",
"029",
"030",
"031",
"032",
"033",
"034",
"035",
"036",
"037",
"038",
"039",
"040",
"041",
"042",
"043",
"044",
"045",
"046",
"047",
"048",
"049",
"050",
"051",
"052",
"053",
"054",
"055",
"056",
"057",
"058",
"059",
"060",
"061",
"062",
"063",
"064",
"065",
"066",
"067",
"068",
"069",
"070",
"071",
"072",
"073",
"074",
"075",
"076",
"077",
"078",
"079",
"080",
"081",
"082",
"083",
"084",
"085",
"086",
"087",
"088",
"089",
"090",
"091",
"092",
"093",
"094",
"095",
"096",
"097",
"098",
"099"
],
"TEST_SET": [
"100",
"101",
"102",
"103",
"104",
"105",
"106",
"107",
"108",
"109",
"110",
"111",
"112",
"113",
"114",
"115",
"116",
"117",
"118",
"119",
"120",
"121",
"122",
"123",
"124",
"125",
"126",
"127",
"128",
"129",
"130",
"131",
"132",
"133",
"134",
"135",
"136",
"137",
"138",
"139",
"140",
"141",
"142",
"143",
"144",
"145",
"146",
"147",
"148",
"149",
"150",
"151",
"152",
"153",
"154",
"155",
"156",
"157",
"158",
"159",
"160",
"161",
"162",
"163",
"164",
"165",
"166",
"167",
"168",
"169",
"170",
"171",
"172",
"173",
"174",
"175",
"176",
"177",
"178",
"179",
"180",
"181",
"182",
"183",
"184",
"185",
"186",
"187",
"188",
"189",
"190",
"191",
"192",
"193",
"194",
"195",
"196",
"197",
"198",
"199"
]
}
+27
View File
@@ -0,0 +1,27 @@
# The CCPG Benchmark
A Cloth-Changing Benchmark for Person re-identification and Gait Recognition (CCPG).
The original dataset is described [here](https://github.com/BNU-IVC/CCPG). It is not publicly available; you need to request access in order to download it.
## Data Pretreatment
```bash
python datasets/CCPG/organize_ccpg.py --sil_path 'CCPG/CCPG_D_MASK_FACE_SHOE' --rgb_path 'CCPG/CCPG_G_SIL' --output_path 'CCPG/CCPG-end2end-pkl'
```
## Train
### GaitBase model:
`CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs ./configs/gaitbase/gaitbase_ccpg.yaml --phase train`
## Citation
If you use this dataset in your research, please cite the following paper:
```
@InProceedings{Li_2023_CVPR,
author = {Li, Weijia and Hou, Saihui and Zhang, Chunjie and Cao, Chunshui and Liu, Xu and Huang, Yongzhen and Zhao, Yao},
title = {An In-Depth Exploration of Person Re-Identification and Gait Recognition in Cloth-Changing Conditions},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2023},
pages = {13824-13833}
}
```
+101
View File
@@ -0,0 +1,101 @@
import os
import pickle
import numpy as np
import cv2
from tqdm import tqdm
import argparse
T_W = 64
T_H = 64
def cut_img(img):
    """Crop a silhouette to its occupied rows, center it horizontally,
    and resize it to a (T_H, T_W) uint8 image.

    Args:
        img: 2-D grayscale silhouette array; non-zero pixels mark the
            subject.

    Returns:
        A (T_H, T_W) ``uint8`` array, or ``None`` when the silhouette is
        empty or its horizontal center cannot be determined.
    """
    # A silhouette contains too little white pixels
    # might be not valid for identification.
    y = img.sum(axis=1)
    # Bail out early on an all-black frame: the original flow would crop
    # to a single row and resize it to a pathologically wide (T_H * W)
    # image before finally returning None via the x_center check below.
    if not y.any():
        return None
    # Get the top and bottom point (first/last rows with any white pixel).
    y_top = (y != 0).argmax(axis=0)
    y_btm = (y != 0).cumsum(axis=0).argmax(axis=0)
    img = img[y_top:y_btm + 1, :]
    # As the height of a person is larger than the width,
    # use the height to calculate resize ratio.
    _r = img.shape[1] / img.shape[0]
    _t_w = int(T_H * _r)
    img = cv2.resize(img, (_t_w, T_H), interpolation=cv2.INTER_AREA)
    # Get the median of x axis and regard it as the x center of the person.
    sum_point = img.sum()
    sum_column = img.sum(axis=0).cumsum()
    x_center = -1
    for i in range(sum_column.size):
        if sum_column[i] > sum_point / 2:
            x_center = i
            break
    if x_center < 0:
        return None
    h_T_W = int(T_W / 2)
    left = x_center - h_T_W
    right = x_center + h_T_W
    if left <= 0 or right >= img.shape[1]:
        # The crop window sticks out of the image: pad half a window of
        # zeros on each side and shift the window accordingly.
        left += h_T_W
        right += h_T_W
        _ = np.zeros((img.shape[0], h_T_W))
        img = np.concatenate([_, img, _], axis=1)
    img = img[:, left:right]
    return img.astype('uint8')
if __name__ == '__main__':
    # Walk the raw CCPG directory tree (id/type/view/frames), pair each RGB
    # frame with its same-stem ".png" segmentation mask, and pickle four
    # per-view arrays: resized RGB frames, resized masks, per-frame
    # width/height ratios, and cut_img-aligned masks.
    parser = argparse.ArgumentParser(description='CCPG dataset Preprocessing.')
    parser.add_argument('--sil_path', default='', type=str,
                        help='Root path of raw silhouette dataset.')
    parser.add_argument('--rgb_path', default='', type=str,
                        help='Root path of raw RGB dataset.')
    parser.add_argument('-o', '--output_path', default='',
                        type=str, help='Output path of pickled dataset.')
    args = parser.parse_args()
    # Target size (width, height) for the resized RGB frames and masks.
    RGB_SIZE = (128, 128)
    # NOTE(review): subject IDs are listed from --sil_path, but every file
    # below (including seg_path) is read from --rgb_path. This assumes both
    # trees share the same id/type/view layout — confirm whether seg_path
    # was meant to use args.sil_path instead.
    for _id in tqdm(sorted(os.listdir(args.sil_path))):
        for _type in sorted(os.listdir(os.path.join(args.rgb_path, _id))):
            for _view in sorted(os.listdir(os.path.join(args.rgb_path, _id, _type))):
                imgs = []
                segs = []
                ratios = []
                aligned_segs = []
                for img_file in sorted(os.listdir(os.path.join(args.rgb_path, _id, _type, _view))):
                    # Mask shares the frame's stem with a ".png" extension.
                    seg_file = img_file.split(".")[0]+".png"
                    img_path = os.path.join(
                        args.rgb_path, _id, _type, _view, img_file)
                    seg_path = os.path.join(
                        args.rgb_path, _id, _type, _view, seg_file)
                    # Frames without a mask are skipped (best-effort).
                    if not os.path.exists(seg_path):
                        print("Not Found: "+seg_path)
                        continue
                    img = cv2.imread(img_path)
                    seg = cv2.imread(seg_path, cv2.IMREAD_GRAYSCALE)
                    # Original aspect ratio (width / height) of the frame,
                    # kept because the resize below discards it.
                    ratio = img.shape[1]/img.shape[0]
                    # NOTE(review): cut_img can return None (empty mask);
                    # the None is appended as-is, which makes the pickled
                    # aligned-sils array an object array — confirm intended.
                    aligned_seg = cut_img(seg)
                    # BGR -> RGB, then HWC -> CHW after resizing.
                    img = np.transpose(cv2.cvtColor(cv2.resize(
                        img, RGB_SIZE), cv2.COLOR_BGR2RGB), (2, 0, 1))
                    imgs.append(img)
                    segs.append(cv2.resize(
                        seg, RGB_SIZE))
                    aligned_segs.append(aligned_seg)
                    ratios.append(ratio)
                if len(imgs) > 0:
                    # One output directory per view; four pickles inside it.
                    output_path = os.path.join(
                        args.output_path, _id, _type, _view)
                    os.makedirs(output_path, exist_ok=True)
                    pickle.dump(np.asarray(imgs), open(os.path.join(
                        output_path, _view+"-rgbs.pkl"), "wb"))
                    pickle.dump(np.asarray(segs), open(os.path.join(
                        output_path, _view+"-sils.pkl"), "wb"))
                    pickle.dump(np.asarray(ratios), open(os.path.join(
                        output_path, _view+"-ratios.pkl"), "wb"))
                    pickle.dump(np.asarray(aligned_segs), open(os.path.join(
                        output_path, _view+"-aligned-sils.pkl"), "wb"))
                else:
                    print("No imgs Found: " +
                          os.path.join(args.rgb_path, _id, _type, _view))
                    continue