add Gait3D support
This commit is contained in:
@@ -0,0 +1,104 @@
|
||||
data_cfg:
|
||||
dataset_name: Gait3D
|
||||
dataset_root: your_path
|
||||
dataset_partition: datasets/Gait3D/Gait3D.json
|
||||
data_in_use: [true, false]
|
||||
num_workers: 1
|
||||
remove_no_gallery: false # Remove probe if no gallery for it
|
||||
test_dataset_name: Gait3D
|
||||
|
||||
evaluator_cfg:
|
||||
enable_float16: true
|
||||
restore_ckpt_strict: true
|
||||
restore_hint: 60000
|
||||
save_name: Baseline
|
||||
eval_func: evaluate_Gait3D
|
||||
sampler:
|
||||
batch_shuffle: false
|
||||
batch_size: 16
|
||||
sample_type: all_ordered # all indicates whole sequence used to test, while ordered means input sequence by its natural order; Other options: fixed_unordered
|
||||
frames_all_limit: 720 # limit the number of sampled frames to prevent out of memory
|
||||
metric: euc # cos euc
|
||||
transform:
|
||||
- type: BaseSilCuttingTransform
|
||||
|
||||
loss_cfg:
|
||||
- loss_term_weight: 1.0
|
||||
margin: 0.2
|
||||
type: TripletLoss
|
||||
log_prefix: triplet
|
||||
- loss_term_weight: 0.1
|
||||
scale: 16
|
||||
type: CrossEntropyLoss
|
||||
log_prefix: softmax
|
||||
log_accuracy: true
|
||||
|
||||
model_cfg:
|
||||
model: Baseline
|
||||
backbone_cfg:
|
||||
in_channels: 1
|
||||
layers_cfg: # Layers configuration for automatically model construction
|
||||
- BC-64
|
||||
- BC-64
|
||||
- M
|
||||
- BC-128
|
||||
- BC-128
|
||||
- M
|
||||
- BC-256
|
||||
- BC-256
|
||||
# - M
|
||||
# - BC-512
|
||||
# - BC-512
|
||||
type: Plain
|
||||
SeparateFCs:
|
||||
in_channels: 256
|
||||
out_channels: 256
|
||||
parts_num: 31
|
||||
SeparateBNNecks:
|
||||
class_num: 3000
|
||||
in_channels: 256
|
||||
parts_num: 31
|
||||
bin_num:
|
||||
- 16
|
||||
- 8
|
||||
- 4
|
||||
- 2
|
||||
- 1
|
||||
|
||||
optimizer_cfg:
|
||||
lr: 0.1
|
||||
momentum: 0.9
|
||||
solver: SGD
|
||||
weight_decay: 0.0005
|
||||
|
||||
scheduler_cfg:
|
||||
gamma: 0.1
|
||||
milestones: # Learning Rate Reduction at each milestones
|
||||
- 20000
|
||||
- 40000
|
||||
- 50000
|
||||
scheduler: MultiStepLR
|
||||
|
||||
trainer_cfg:
|
||||
enable_float16: true # half-precision float for memory reduction and speedup
|
||||
fix_BN: false
|
||||
log_iter: 100
|
||||
with_test: 10000
|
||||
restore_ckpt_strict: true
|
||||
restore_hint: 0
|
||||
save_iter: 10000
|
||||
save_name: Baseline
|
||||
sync_BN: true
|
||||
total_iter: 60000
|
||||
sampler:
|
||||
batch_shuffle: true
|
||||
batch_size:
|
||||
- 32 # TripletSampler, batch_size[0] indicates Number of Identity
|
||||
- 4 # batch_size[1] indicates Samples sequence for each Identity
|
||||
frames_num_fixed: 30 # fixed frames number for training
|
||||
frames_num_max: 50 # max frames number for unfixed training
|
||||
frames_num_min: 25 # min frames number for unfixed training
|
||||
sample_type: fixed_unordered # fixed control input frames number, unordered for controlling order of input tensor; Other options: unfixed_ordered or all_ordered
|
||||
type: TripletSampler
|
||||
transform:
|
||||
- type: BaseSilCuttingTransform
|
||||
@@ -0,0 +1,108 @@
|
||||
data_cfg:
|
||||
dataset_name: Gait3D
|
||||
dataset_root: your_path
|
||||
dataset_partition: datasets/Gait3D/Gait3D.json
|
||||
data_in_use: [true, true]
|
||||
num_workers: 1
|
||||
remove_no_gallery: false # Remove probe if no gallery for it
|
||||
test_dataset_name: Gait3D
|
||||
|
||||
evaluator_cfg:
|
||||
enable_float16: true
|
||||
restore_ckpt_strict: true
|
||||
restore_hint: 60000
|
||||
save_name: SMPLGait
|
||||
eval_func: evaluate_Gait3D
|
||||
sampler:
|
||||
batch_shuffle: false
|
||||
batch_size: 16
|
||||
sample_type: all_ordered # all indicates whole sequence used to test, while ordered means input sequence by its natural order; Other options: fixed_unordered
|
||||
frames_all_limit: 720 # limit the number of sampled frames to prevent out of memory
|
||||
metric: euc # cos euc
|
||||
transform:
|
||||
- type: BaseSilCuttingTransform
|
||||
img_w: 64
|
||||
- type: NoOperation
|
||||
|
||||
loss_cfg:
|
||||
- loss_term_weight: 1.0
|
||||
margin: 0.2
|
||||
type: TripletLoss
|
||||
log_prefix: triplet
|
||||
- loss_term_weight: 0.1
|
||||
scale: 16
|
||||
type: CrossEntropyLoss
|
||||
log_prefix: softmax
|
||||
log_accuracy: true
|
||||
|
||||
model_cfg:
|
||||
model: SMPLGait
|
||||
backbone_cfg:
|
||||
in_channels: 1
|
||||
layers_cfg: # Layers configuration for automatically model construction
|
||||
- BC-64
|
||||
- BC-64
|
||||
- M
|
||||
- BC-128
|
||||
- BC-128
|
||||
- M
|
||||
- BC-256
|
||||
- BC-256
|
||||
# - M
|
||||
# - BC-512
|
||||
# - BC-512
|
||||
type: Plain
|
||||
SeparateFCs:
|
||||
in_channels: 256
|
||||
out_channels: 256
|
||||
parts_num: 31
|
||||
SeparateBNNecks:
|
||||
class_num: 3000
|
||||
in_channels: 256
|
||||
parts_num: 31
|
||||
bin_num:
|
||||
- 16
|
||||
- 8
|
||||
- 4
|
||||
- 2
|
||||
- 1
|
||||
|
||||
optimizer_cfg:
|
||||
lr: 0.1
|
||||
momentum: 0.9
|
||||
solver: SGD
|
||||
weight_decay: 0.0005
|
||||
|
||||
scheduler_cfg:
|
||||
gamma: 0.1
|
||||
milestones: # Learning Rate Reduction at each milestones
|
||||
- 20000
|
||||
- 40000
|
||||
- 50000
|
||||
scheduler: MultiStepLR
|
||||
|
||||
trainer_cfg:
|
||||
enable_float16: true # half-precision float for memory reduction and speedup
|
||||
fix_BN: false
|
||||
log_iter: 100
|
||||
with_test: 10000
|
||||
restore_ckpt_strict: true
|
||||
restore_hint: 0
|
||||
save_iter: 10000
|
||||
save_name: SMPLGait
|
||||
sync_BN: true
|
||||
total_iter: 60000
|
||||
sampler:
|
||||
batch_shuffle: true
|
||||
batch_size:
|
||||
- 32 # TripletSampler, batch_size[0] indicates Number of Identity
|
||||
- 4 # batch_size[1] indicates Samples sequence for each Identity
|
||||
frames_num_fixed: 30 # fixed frames number for training
|
||||
frames_num_max: 50 # max frames number for unfixed training
|
||||
frames_num_min: 25 # min frames number for unfixed training
|
||||
sample_type: fixed_unordered # fixed control input frames number, unordered for controlling order of input tensor; Other options: unfixed_ordered or all_ordered
|
||||
type: TripletSampler
|
||||
transform:
|
||||
- type: BaseSilCuttingTransform
|
||||
img_w: 64
|
||||
- type: NoOperation
|
||||
@@ -13,7 +13,7 @@ Download URL: http://www.cbsr.ia.ac.cn/GaitDatasetB-silh.zip
|
||||
......
|
||||
......
|
||||
```
|
||||
- Run `python misc/pretreatment.py --input_path CASIA-B --output_path CASIA-B-pkl`
|
||||
- Run `python datasets/pretreatment.py --input_path CASIA-B --output_path CASIA-B-pkl`
|
||||
- Processed
|
||||
```
|
||||
CASIA-B-pkl
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,33 @@
|
||||
# Gait3D
|
||||
This is the pre-processing instructions for the Gait3D dataset. The original dataset can be found [here](https://gait3d.github.io/). The original dataset is not publicly available. You need to request access to the dataset in order to download it. This README explains how to extract the original dataset and convert it to a format suitable for OpenGait.
|
||||
## Data Preparation
|
||||
https://github.com/Gait3D/Gait3D-Benchmark#data-preparation
|
||||
## Data Pretreatment
|
||||
```python
|
||||
python datasets/pretreatment.py --input_path 'Gait3D/2D_Silhouettes' --output_path 'Gait3D-sils-64-64-pkl'
|
||||
python datasets/pretreatment_smpl.py --input_path 'Gait3D/3D_SMPLs' --output_path 'Gait3D-smpls-pkl'
|
||||
|
||||
(optional) python datasets/pretreatment.py --input_path 'Gait3D/2D_Silhouettes' --img_size 128 --output_path 'Gait3D-sils-128-128-pkl'
|
||||
|
||||
python datasets/Gait3D/merge_two_modality.py --sils_path 'Gait3D-sils-64-64-pkl' --smpls_path 'Gait3D-smpls-pkl' --output_path 'Gait3D-merged-pkl' --link 'hard'
|
||||
```
|
||||
|
||||
## Train
|
||||
### Baseline model:
|
||||
`CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs ./configs/baseline/baseline_Gait3D.yaml --phase train`
|
||||
### SMPLGait model:
|
||||
`CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs ./configs/smplgait/smplgait.yaml --phase train`
|
||||
|
||||
## Citation
|
||||
If you use this dataset in your research, please cite the following paper:
|
||||
```
|
||||
@inproceedings{zheng2022gait3d,
|
||||
title={Gait Recognition in the Wild with Dense 3D Representations and A Benchmark},
|
||||
author={Jinkai Zheng and Xinchen Liu and Wu Liu and Lingxiao He and Chenggang Yan and Tao Mei},
|
||||
booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
|
||||
year={2022}
|
||||
}
|
||||
```
|
||||
|
||||
## Acknowledgements
|
||||
This dataset was collected by [Zheng et al.](https://gait3d.github.io/). The pre-processing instructions are based on the [Gait3D-Benchmark](https://github.com/Gait3D/Gait3D-Benchmark) repository.
|
||||
@@ -0,0 +1,46 @@
|
||||
import os
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
import shutil
|
||||
|
||||
|
||||
def merge(sils_path, smpls_path, output_path, link):
    """Merge silhouette and SMPL sequence files into a single dataset tree.

    For every sequence file found under ``sils_path/<id>/<type>/<view>/``,
    the silhouette file and its SMPL counterpart (same relative path under
    ``smpls_path``) are placed side by side in
    ``output_path/<id>/<type>/<view>/`` with ``sils-`` / ``smpls-`` name
    prefixes.

    Args:
        sils_path: root of the pickled silhouette dataset.
        smpls_path: root of the pickled SMPL dataset (mirrors sils_path layout).
        output_path: root of the merged output tree (created on demand).
        link: 'hard' -> os.link, 'soft' -> os.symlink, anything else -> copy.
    """
    # Pick the materialization strategy once, up front.
    place = {'hard': os.link, 'soft': os.symlink}.get(link, shutil.copyfile)

    for subj in os.listdir(sils_path):
        subj_dir = os.path.join(sils_path, subj)
        for cond in os.listdir(subj_dir):
            cond_dir = os.path.join(subj_dir, cond)
            for view in os.listdir(cond_dir):
                view_dir = os.path.join(cond_dir, view)
                dst_dir = os.path.join(output_path, subj, cond, view)
                for seq in os.listdir(view_dir):
                    # Destination directory is only created when the view
                    # actually contains sequences.
                    os.makedirs(dst_dir, exist_ok=True)
                    # Silhouette file lives under view_dir; the SMPL file
                    # sits at the mirrored location under smpls_path.
                    place(os.path.join(view_dir, seq),
                          os.path.join(dst_dir, "sils-" + seq))
                    place(os.path.join(smpls_path, subj, cond, view, seq),
                          os.path.join(dst_dir, "smpls-" + seq))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point: collect the three dataset roots plus the
    # link strategy, then hand everything to merge().
    cli = argparse.ArgumentParser(description='Gait3D dataset mergence.')
    cli.add_argument('--sils_path', default='', type=str,
                     help='Root path of raw silhs dataset.')
    cli.add_argument('--smpls_path', default='', type=str,
                     help='Root path of raw smpls dataset.')
    cli.add_argument('-o', '--output_path', default='', type=str,
                     help='Output path of pickled dataset.')
    cli.add_argument('-l', '--link', default='hard', type=str,
                     choices=['hard', 'soft', 'copy'],
                     help='Link type of output data.')
    opts = cli.parse_args()

    merge(sils_path=Path(opts.sils_path),
          smpls_path=Path(opts.smpls_path),
          output_path=Path(opts.output_path),
          link=opts.link)
|
||||
|
||||
@@ -6,19 +6,19 @@ This is the official support for competition of [Human Identification at a Dista
|
||||
Download the raw dataset from the [official link](http://hid2022.iapr-tc4.org/). You will get three compressed files, i.e. `train.tar`, `HID2022_test_gallery.zip` and `HID2022_test_probe.zip`.
|
||||
After unpacking these three files, run this command:
|
||||
```shell
|
||||
python misc/HID/pretreatment_HID.py --input_train_path="train" --input_gallery_path="HID2022_test_gallery" --input_probe_path="HID2022_test_probe" --output_path="HID-128-pkl"
|
||||
python datasets/HID/pretreatment_HID.py --input_train_path="train" --input_gallery_path="HID2022_test_gallery" --input_probe_path="HID2022_test_probe" --output_path="HID-128-pkl"
|
||||
```
|
||||
|
||||
## Train the dataset
|
||||
Modify the `dataset_root` in `./misc/HID/baseline_hid.yaml`, and then run this command:
|
||||
Modify the `dataset_root` in `configs/baseline/baseline_hid.yaml`, and then run this command:
|
||||
```shell
|
||||
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs ./misc/HID/baseline_hid.yaml --phase train
|
||||
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs configs/baseline/baseline_hid.yaml --phase train
|
||||
```
|
||||
You can also download the [trained model](https://github.com/ShiqiYu/OpenGait/releases/download/v1.1/pretrained_hid_model.zip) and place it in `output` after unzipping.
|
||||
|
||||
## Get the submission file
|
||||
```shell
|
||||
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs ./misc/HID/baseline_hid.yaml --phase test
|
||||
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 opengait/main.py --cfgs configs/baseline/baseline_hid.yaml --phase test
|
||||
```
|
||||
The result will be generated in your working directory.
|
||||
|
||||
|
||||
+9
-7
@@ -39,16 +39,18 @@
|
||||
| [GaitPart(CVPR2020)](http://home.ustc.edu.cn/~saihui/papers/cvpr2020_gaitpart.pdf) | 47.6 | [gaitpart.yaml](../configs/gaitpart/gaitpart_GREW.yaml) | 64x44 | - | - |
|
||||
| [GaitGL(ICCV2021)](https://openaccess.thecvf.com/content/ICCV2021/papers/Lin_Gait_Recognition_via_Effective_Global-Local_Feature_Representation_and_Local_Temporal_ICCV_2021_paper.pdf) | 41.5 | [gaitgl.yaml](../configs/gaitgl/gaitgl_GREW.yaml) | 64x44 | - | - |
|
||||
| [GaitGL(BNNeck)(ICCV2021)](https://openaccess.thecvf.com/content/ICCV2021/papers/Lin_Gait_Recognition_via_Effective_Global-Local_Feature_Representation_and_Local_Temporal_ICCV_2021_paper.pdf) | 51.7 | [gaitgl.yaml](../configs/gaitgl/gaitgl_GREW_BNNeck.yaml) | 64x44 | - | - |
|
||||
| [GaitBase](https://arxiv.org/abs/2211.06597) | 60.1 | -| 64x44 | - | - |
|
||||
| [GaitBase](https://arxiv.org/abs/2211.06597) | 60.1 | - | 64x44 | - | - |
|
||||
|
||||
|
||||
## [Gait3D](https://github.com/Gait3D/Gait3D-Benchmark)
|
||||
| Model | `Rank@1` | Configuration | Input Size | Inference Time | Model Size |
|
||||
| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-----------: | :--------: | :------------: | :--------: |
|
||||
| [GaitSet(AAAI2019)](https://arxiv.org/pdf/1811.06186.pdf) | 36.7 | - | 64x44 | - | - |
|
||||
| [GaitPart(CVPR2020)](http://home.ustc.edu.cn/~saihui/papers/cvpr2020_gaitpart.pdf) | 28.2 | - | 64x44 | - | - |
|
||||
| [GaitGL(ICCV2021)](https://openaccess.thecvf.com/content/ICCV2021/papers/Lin_Gait_Recognition_via_Effective_Global-Local_Feature_Representation_and_Local_Temporal_ICCV_2021_paper.pdf) | 29.7 | - | 64x44 | - | - |
|
||||
| [GaitBase](https://arxiv.org/abs/2211.06597) | 65.6 | - | 64x44 | - | - |
|
||||
| Model | `Rank@1` | Configuration | Input Size | Input modality |
|
||||
| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------: | :-----------: | :--------: | :--------:|
|
||||
| Baseline | 46.9 | [baseline_Gait3D.yaml](../configs/baseline/baseline_Gait3D.yaml) | 64x44 | Silhouette |
|
||||
| [GaitSet(AAAI2019)](https://arxiv.org/pdf/1811.06186.pdf) | - | - | 64x44 | Silhouette |
|
||||
| [GaitPart(CVPR2020)](http://home.ustc.edu.cn/~saihui/papers/cvpr2020_gaitpart.pdf) | - | - | 64x44 | Silhouette |
|
||||
| [GaitGL(ICCV2021)](https://openaccess.thecvf.com/content/ICCV2021/papers/Lin_Gait_Recognition_via_Effective_Global-Local_Feature_Representation_and_Local_Temporal_ICCV_2021_paper.pdf) | - | - | 64x44 | Silhouette |
|
||||
| [SMPLGait(CVPR 2022)](https://gait3d.github.io/) | 48.3(46.3) | [smplgait.yaml](../configs/smplgait/smplgait.yaml) | 64x44 | Silhouette + SMPL|
|
||||
| [GaitBase](https://arxiv.org/abs/2211.06597) | 65.6 | - | 64x44 | Silhouette |
|
||||
|
||||
|
||||
------------------------------------------
|
||||
|
||||
@@ -3,7 +3,7 @@ from time import strftime, localtime
|
||||
import numpy as np
|
||||
from utils import get_msg_mgr, mkdir
|
||||
|
||||
from .metric import mean_iou, cuda_dist, compute_ACC_mAP
|
||||
from .metric import mean_iou, cuda_dist, compute_ACC_mAP, evaluate_rank
|
||||
from .re_rank import re_ranking
|
||||
|
||||
|
||||
@@ -225,3 +225,43 @@ def evaluate_segmentation(data, dataset):
|
||||
miou = mean_iou(pred, labels)
|
||||
get_msg_mgr().log_info('mIOU: %.3f' % (miou.mean()))
|
||||
return {"scalar/test_accuracy/mIOU": miou}
|
||||
|
||||
def evaluate_Gait3D(data, conf, metric='euc'):
    """Evaluate embeddings on the Gait3D protocol.

    Splits sequences into probe/gallery using the PROBE_SET listed in the
    dataset partition file, then reports Rank-1/5/10, mAP and mINP.

    Args:
        data: dict with 'embeddings', 'labels', 'types', 'views', all
            aligned on the sequence axis.
        conf: evaluation configuration (kept for interface compatibility;
            currently unused).
        metric: distance metric forwarded to cuda_dist ('euc' or 'cos').

    Returns:
        dict of scalar results keyed for the test-accuracy summary.
    """
    import json  # local import, mirrors the original lazy load

    msg_mgr = get_msg_mgr()
    features, labels, cams, time_seqs = (data['embeddings'], data['labels'],
                                         data['types'], data['views'])

    # A sequence is a probe iff its "<label>-<type>-<view>" key is listed in
    # the partition file; everything else forms the gallery.
    # NOTE: use a context manager so the file handle is closed (the previous
    # revision leaked it via json.load(open(...))).
    with open('./datasets/Gait3D/Gait3D.json', 'rb') as f:
        probe_sets = json.load(f)['PROBE_SET']
    probe_mask = np.array(['-'.join([lbl, ty, sq]) in probe_sets
                           for lbl, ty, sq in zip(labels, cams, time_seqs)])

    probe_features = features[probe_mask]
    gallery_features = features[~probe_mask]
    probe_lbls = np.asarray(labels)[probe_mask]
    gallery_lbls = np.asarray(labels)[~probe_mask]

    results = {}
    msg_mgr.log_info(f"The test metric you choose is {metric}.")
    dist = cuda_dist(probe_features, gallery_features, metric).cpu().numpy()
    cmc, all_AP, all_INP = evaluate_rank(dist, probe_lbls, gallery_lbls)

    mAP = np.mean(all_AP)
    mINP = np.mean(all_INP)
    for r in [1, 5, 10]:
        results['scalar/test_accuracy/Rank-{}'.format(r)] = cmc[r - 1] * 100
    results['scalar/test_accuracy/mAP'] = mAP * 100
    results['scalar/test_accuracy/mINP'] = mINP * 100

    msg_mgr.log_info(results)
    return results
|
||||
@@ -86,3 +86,73 @@ def compute_ACC_mAP(distmat, q_pids, g_pids, q_views=None, g_views=None, rank=1)
|
||||
mAP = np.mean(all_AP)
|
||||
|
||||
return ACC, mAP
|
||||
|
||||
|
||||
def evaluate_rank(distmat, p_lbls, g_lbls, max_rank=50):
    """Compute CMC, per-probe AP and per-probe INP for a probe/gallery split.

    Adapted from
    https://github.com/Gait3D/Gait3D-Benchmark/blob/72beab994c137b902d826f4b9f9e95b107bebd78/lib/utils/rank.py#L12-L63
    The previous revision duplicated the per-probe accumulation block, so
    each valid probe's CMC/INP was appended twice and ``num_valid_p`` was
    incremented twice; the duplication is removed here (the averaged CMC
    was unaffected, but ``all_INP`` carried duplicate entries).

    Args:
        distmat: (num_probe, num_gallery) distance matrix.
        p_lbls: probe identity labels, shape (num_probe,).
        g_lbls: gallery identity labels, shape (num_gallery,).
        max_rank: CMC curve is truncated to this rank.

    Returns:
        (all_cmc, all_AP, all_INP) where all_cmc is a float32 array of
        length max_rank averaged over valid probes, and all_AP / all_INP
        are per-probe lists.

    Raises:
        AssertionError: if no probe identity appears in the gallery.
    """
    num_p, num_g = distmat.shape

    if num_g < max_rank:
        max_rank = num_g
        print('Note: number of gallery samples is quite small, got {}'.format(num_g))

    # For each probe row, gallery indices ordered by increasing distance.
    indices = np.argsort(distmat, axis=1)
    # matches[i, k] == 1 iff the k-th closest gallery item shares probe i's label.
    matches = (g_lbls[indices] == p_lbls[:, np.newaxis]).astype(np.int32)

    all_cmc = []
    all_AP = []
    all_INP = []
    num_valid_p = 0.  # number of probes that have at least one gallery match

    for p_idx in range(num_p):
        # Binary vector: positions with value 1 are correct matches.
        raw_cmc = matches[p_idx]
        if not np.any(raw_cmc):
            # Probe identity does not appear in the gallery; skip it.
            continue

        cmc = raw_cmc.cumsum()

        # INP: precision at the rank where the hardest (last) correct match
        # is retrieved.
        pos_idx = np.where(raw_cmc == 1)
        max_pos_idx = np.max(pos_idx)
        inp = cmc[max_pos_idx] / (max_pos_idx + 1.0)
        all_INP.append(inp)

        cmc[cmc > 1] = 1
        all_cmc.append(cmc[:max_rank])
        num_valid_p += 1.

        # Average precision, see
        # https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
        num_rel = raw_cmc.sum()
        tmp_cmc = raw_cmc.cumsum()
        tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
        tmp_cmc = np.asarray(tmp_cmc) * raw_cmc
        AP = tmp_cmc.sum() / num_rel
        all_AP.append(AP)

    assert num_valid_p > 0, 'Error: all probe identities do not appear in gallery'

    all_cmc = np.asarray(all_cmc).astype(np.float32)
    all_cmc = all_cmc.sum(0) / num_valid_p

    return all_cmc, all_AP, all_INP
|
||||
|
||||
@@ -0,0 +1,101 @@
|
||||
'''
|
||||
Modified from https://github.com/Gait3D/Gait3D-Benchmark/blob/72beab994c137b902d826f4b9f9e95b107bebd78/lib/modeling/models/smplgait.py
|
||||
'''
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
from torch.autograd import Variable
|
||||
|
||||
from ..base_model import BaseModel
|
||||
from ..modules import SetBlockWrapper, HorizontalPoolingPyramid, PackSequenceWrapper, SeparateFCs, SeparateBNNecks
|
||||
|
||||
|
||||
class SMPLGait(BaseModel):
    """Silhouette gait recognition assisted by per-frame SMPL body features.

    A small MLP maps each frame's 85-d SMPL vector to a 16x16 transform
    that is applied (via batch matrix multiply) to the zero-padded
    silhouette feature maps before temporal and horizontal pooling.
    """

    def __init__(self, cfgs, is_training):
        super().__init__(cfgs, is_training)

    def build_network(self, model_cfg):
        """Build the silhouette backbone, pooling/embedding heads and the SMPL MLP."""
        # Baseline silhouette branch.
        self.Backbone = self.get_backbone(model_cfg['backbone_cfg'])
        self.Backbone = SetBlockWrapper(self.Backbone)
        self.FCs = SeparateFCs(**model_cfg['SeparateFCs'])
        self.BNNecks = SeparateBNNecks(**model_cfg['SeparateBNNecks'])
        # Temporal pooling: max over the sequence dimension.
        self.TP = PackSequenceWrapper(torch.max)
        self.HPP = HorizontalPoolingPyramid(bin_num=model_cfg['bin_num'])

        # SMPL branch: 85-d SMPL parameters -> 256 = 16*16 transform entries.
        self.fc1 = nn.Linear(85, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, 256)
        self.bn1 = nn.BatchNorm1d(128)
        self.bn2 = nn.BatchNorm1d(256)
        self.bn3 = nn.BatchNorm1d(256)
        self.dropout2 = nn.Dropout(p=0.2)
        self.dropout3 = nn.Dropout(p=0.2)

    def forward(self, inputs):
        """Run one forward pass.

        Args:
            inputs: (ipts, labs, _, _, seqL) as produced by the data loader;
                ipts[0] holds silhouettes [n, s, h, w] and ipts[1] the
                per-frame SMPL vectors [n, s, d] (d == 85, see build_network).

        Returns:
            dict with 'training_feat' (triplet/softmax inputs),
            'visual_summary' and 'inference_feat' (embeddings).
        """
        ipts, labs, _, _, seqL = inputs

        sils = ipts[0]  # [n, s, h, w]
        smpls = ipts[1]  # [n, s, d]

        # --- SMPL branch: produce one 16x16 transform per frame ---
        n, s, d = smpls.size()
        sps = smpls.view(-1, d)
        del smpls

        sps = F.relu(self.bn1(self.fc1(sps)))
        sps = F.relu(self.bn2(self.dropout2(self.fc2(sps))))  # (B, 256)
        sps = F.relu(self.bn3(self.dropout3(self.fc3(sps))))  # (B, 256)
        sps = sps.reshape(n, 1, s, 16, 16)
        # Add the identity so the learned transform starts as a residual
        # around the identity mapping.
        iden = Variable(torch.eye(16)).unsqueeze(
            0).repeat(n, 1, s, 1, 1)  # [n, 1, s, 16, 16]
        if sps.is_cuda:
            iden = iden.cuda()
        sps_trans = sps + iden  # [n, 1, s, 16, 16]

        # Ensure an explicit channel dimension: [n, s, h, w] -> [n, 1, s, h, w].
        if len(sils.size()) == 4:
            sils = sils.unsqueeze(1)

        del ipts
        outs = self.Backbone(sils)  # [n, c, s, h, w]
        outs_n, outs_c, outs_s, outs_h, outs_w = outs.size()

        # Zero-pad feature maps from [h, w] to square [h, h] so they can be
        # multiplied by the 16x16 SMPL transforms (assumes the backbone
        # output height is 16 — TODO confirm for other input sizes).
        zero_tensor = Variable(torch.zeros(
            (outs_n, outs_c, outs_s, outs_h, outs_h-outs_w)))
        if outs.is_cuda:
            zero_tensor = zero_tensor.cuda()
        # [n, s, c, h, h] [n, s, c, 16, 16]
        outs = torch.cat([outs, zero_tensor], -1)
        outs = outs.reshape(outs_n*outs_c*outs_s, outs_h,
                            outs_h)  # [n*c*s, 16, 16]

        # Broadcast each frame's transform across all feature channels.
        sps = sps_trans.repeat(1, outs_c, 1, 1, 1).reshape(
            outs_n * outs_c * outs_s, 16, 16)

        # Apply the SMPL-conditioned spatial transform per (frame, channel).
        outs_trans = torch.bmm(outs, sps)
        outs_trans = outs_trans.reshape(outs_n, outs_c, outs_s, outs_h, outs_h)

        # Temporal Pooling, TP
        outs_trans = self.TP(outs_trans, seqL, options={"dim": 2})[
            0]  # [n, c, h, w]
        # Horizontal Pooling Matching, HPM
        feat = self.HPP(outs_trans)  # [n, c, p]
        embed_1 = self.FCs(feat)  # [n, c, p]

        embed_2, logits = self.BNNecks(embed_1)  # [n, c, p]

        n, _, s, h, w = sils.size()
        retval = {
            'training_feat': {
                'triplet': {'embeddings': embed_1, 'labels': labs},
                'softmax': {'logits': logits, 'labels': labs}
            },
            'visual_summary': {
                'image/sils': sils.view(n*s, 1, h, w)
            },
            'inference_feat': {
                'embeddings': embed_1
            }
        }
        return retval
|
||||
Reference in New Issue
Block a user