support CASIA-E dataset
This commit is contained in:
@@ -0,0 +1,102 @@
|
|||||||
|
data_cfg:
|
||||||
|
dataset_name: CASIA-E
|
||||||
|
dataset_root: your_path
|
||||||
|
dataset_partition: ./datasets/CASIA-E/CASIA-E.json
|
||||||
|
num_workers: 1
|
||||||
|
remove_no_gallery: false # Remove probe if no gallery for it
|
||||||
|
test_dataset_name: CASIA-E
|
||||||
|
|
||||||
|
evaluator_cfg:
|
||||||
|
enable_float16: true
|
||||||
|
restore_ckpt_strict: true
|
||||||
|
restore_hint: 60000
|
||||||
|
save_name: GaitBase
|
||||||
|
#eval_func: GREW_submission
|
||||||
|
sampler:
|
||||||
|
batch_shuffle: false
|
||||||
|
batch_size: 16
|
||||||
|
sample_type: all_ordered # all indicates whole sequence used to test, while ordered means input sequence by its natural order; Other options: fixed_unordered
|
||||||
|
frames_all_limit: 720 # limit the number of sampled frames to prevent out of memory
|
||||||
|
metric: euc # cos
|
||||||
|
transform:
|
||||||
|
- type: BaseSilCuttingTransform
|
||||||
|
|
||||||
|
loss_cfg:
|
||||||
|
- loss_term_weight: 1.0
|
||||||
|
margin: 0.2
|
||||||
|
type: TripletLoss
|
||||||
|
log_prefix: triplet
|
||||||
|
- loss_term_weight: 1.0
|
||||||
|
scale: 16
|
||||||
|
type: CrossEntropyLoss
|
||||||
|
log_prefix: softmax
|
||||||
|
log_accuracy: true
|
||||||
|
|
||||||
|
model_cfg:
|
||||||
|
model: Baseline
|
||||||
|
backbone_cfg:
|
||||||
|
type: ResNet9
|
||||||
|
block: BasicBlock
|
||||||
|
channels: # Layers configuration for automatically model construction
|
||||||
|
- 64
|
||||||
|
- 128
|
||||||
|
- 256
|
||||||
|
- 512
|
||||||
|
layers:
|
||||||
|
- 1
|
||||||
|
- 1
|
||||||
|
- 1
|
||||||
|
- 1
|
||||||
|
strides:
|
||||||
|
- 1
|
||||||
|
- 2
|
||||||
|
- 2
|
||||||
|
- 1
|
||||||
|
maxpool: false
|
||||||
|
SeparateFCs:
|
||||||
|
in_channels: 512
|
||||||
|
out_channels: 256
|
||||||
|
parts_num: 16
|
||||||
|
SeparateBNNecks:
|
||||||
|
class_num: 200
|
||||||
|
in_channels: 256
|
||||||
|
parts_num: 16
|
||||||
|
bin_num:
|
||||||
|
- 16
|
||||||
|
|
||||||
|
optimizer_cfg:
|
||||||
|
lr: 0.1
|
||||||
|
momentum: 0.9
|
||||||
|
solver: SGD
|
||||||
|
weight_decay: 0.0005
|
||||||
|
|
||||||
|
scheduler_cfg:
|
||||||
|
gamma: 0.1
|
||||||
|
milestones: # Learning Rate Reduction at each milestones
|
||||||
|
- 20000
|
||||||
|
- 40000
|
||||||
|
- 50000
|
||||||
|
scheduler: MultiStepLR
|
||||||
|
trainer_cfg:
|
||||||
|
enable_float16: true # half-precision float for memory reduction and speedup
|
||||||
|
fix_BN: false
|
||||||
|
with_test: false
|
||||||
|
log_iter: 100
|
||||||
|
restore_ckpt_strict: true
|
||||||
|
restore_hint: 0
|
||||||
|
save_iter: 20000
|
||||||
|
save_name: GaitBase
|
||||||
|
sync_BN: true
|
||||||
|
total_iter: 60000
|
||||||
|
sampler:
|
||||||
|
batch_shuffle: true
|
||||||
|
batch_size:
|
||||||
|
- 8 # TripletSampler, batch_size[0] indicates Number of Identity
|
||||||
|
- 32 # batch_size[1] indicates the number of sample sequences for each Identity
|
||||||
|
frames_num_fixed: 30 # fixed frames number for training
|
||||||
|
frames_num_max: 40 # max frames number for unfixed training
|
||||||
|
frames_num_min: 20 # min frames number for unfixed training
|
||||||
|
sample_type: fixed_unordered # fixed control input frames number, unordered for controlling order of input tensor; Other options: unfixed_ordered or all_ordered
|
||||||
|
type: TripletSampler
|
||||||
|
transform:
|
||||||
|
- type: BaseSilCuttingTransform
|
||||||
@@ -0,0 +1,82 @@
|
|||||||
|
data_cfg:
|
||||||
|
dataset_name: CASIA-E
|
||||||
|
dataset_root: your_path
|
||||||
|
dataset_partition: ./datasets/CASIA-E/CASIA-E.json
|
||||||
|
num_workers: 4
|
||||||
|
remove_no_gallery: false
|
||||||
|
test_dataset_name: CASIA-E
|
||||||
|
|
||||||
|
evaluator_cfg:
|
||||||
|
enable_float16: false
|
||||||
|
restore_ckpt_strict: true
|
||||||
|
restore_hint: 120000
|
||||||
|
save_name: GaitPart
|
||||||
|
sampler:
|
||||||
|
batch_size: 4
|
||||||
|
sample_type: all_ordered
|
||||||
|
type: InferenceSampler
|
||||||
|
metric: euc # cos
|
||||||
|
|
||||||
|
loss_cfg:
|
||||||
|
loss_term_weight: 1.0
|
||||||
|
margin: 0.2
|
||||||
|
type: TripletLoss
|
||||||
|
log_prefix: triplet
|
||||||
|
|
||||||
|
model_cfg:
|
||||||
|
model: GaitPart
|
||||||
|
backbone_cfg:
|
||||||
|
in_channels: 1
|
||||||
|
layers_cfg:
|
||||||
|
- BC-32
|
||||||
|
- BC-32
|
||||||
|
- M
|
||||||
|
- BC-64
|
||||||
|
- BC-64
|
||||||
|
- M
|
||||||
|
- FC-128-3
|
||||||
|
- FC-128-3
|
||||||
|
- FC-256-3
|
||||||
|
- FC-256-3
|
||||||
|
type: Plain
|
||||||
|
SeparateFCs:
|
||||||
|
in_channels: 256
|
||||||
|
out_channels: 256
|
||||||
|
parts_num: 16
|
||||||
|
bin_num:
|
||||||
|
- 16
|
||||||
|
|
||||||
|
optimizer_cfg:
|
||||||
|
lr: 0.0001
|
||||||
|
momentum: 0.9
|
||||||
|
solver: Adam
|
||||||
|
weight_decay: 0.0
|
||||||
|
|
||||||
|
scheduler_cfg:
|
||||||
|
gamma: 0.1
|
||||||
|
milestones:
|
||||||
|
- 100000
|
||||||
|
scheduler: MultiStepLR
|
||||||
|
|
||||||
|
trainer_cfg:
|
||||||
|
enable_float16: true
|
||||||
|
fix_BN: false
|
||||||
|
log_iter: 100
|
||||||
|
with_test: false
|
||||||
|
restore_ckpt_strict: true
|
||||||
|
restore_hint: 0
|
||||||
|
save_iter: 120000
|
||||||
|
save_name: GaitPart
|
||||||
|
sync_BN: false
|
||||||
|
total_iter: 120000
|
||||||
|
sampler:
|
||||||
|
batch_shuffle: false
|
||||||
|
batch_size:
|
||||||
|
- 8
|
||||||
|
- 32
|
||||||
|
frames_num_fixed: 30
|
||||||
|
frames_num_max: 50
|
||||||
|
frames_num_min: 25
|
||||||
|
frames_skip_num: 10
|
||||||
|
sample_type: fixed_ordered
|
||||||
|
type: TripletSampler
|
||||||
@@ -0,0 +1,77 @@
|
|||||||
|
data_cfg:
|
||||||
|
dataset_name: CASIA-E
|
||||||
|
dataset_root: your_path
|
||||||
|
dataset_partition: ./datasets/CASIA-E/CASIA-E.json
|
||||||
|
num_workers: 1
|
||||||
|
remove_no_gallery: false
|
||||||
|
test_dataset_name: CASIA-E
|
||||||
|
|
||||||
|
evaluator_cfg:
|
||||||
|
enable_float16: false
|
||||||
|
restore_ckpt_strict: true
|
||||||
|
restore_hint: 60000
|
||||||
|
save_name: GaitSet
|
||||||
|
sampler:
|
||||||
|
batch_size: 16
|
||||||
|
sample_type: all_ordered
|
||||||
|
type: InferenceSampler
|
||||||
|
metric: euc # cos
|
||||||
|
|
||||||
|
loss_cfg:
|
||||||
|
loss_term_weight: 1.0
|
||||||
|
margin: 0.2
|
||||||
|
type: TripletLoss
|
||||||
|
log_prefix: triplet
|
||||||
|
|
||||||
|
model_cfg:
|
||||||
|
model: GaitSet
|
||||||
|
in_channels:
|
||||||
|
- 1
|
||||||
|
- 64
|
||||||
|
- 128
|
||||||
|
- 256
|
||||||
|
SeparateFCs:
|
||||||
|
in_channels: 256
|
||||||
|
out_channels: 256
|
||||||
|
parts_num: 62
|
||||||
|
bin_num:
|
||||||
|
- 16
|
||||||
|
- 8
|
||||||
|
- 4
|
||||||
|
- 2
|
||||||
|
- 1
|
||||||
|
|
||||||
|
optimizer_cfg:
|
||||||
|
lr: 0.1
|
||||||
|
momentum: 0.9
|
||||||
|
solver: SGD
|
||||||
|
weight_decay: 0.0005
|
||||||
|
|
||||||
|
scheduler_cfg:
|
||||||
|
gamma: 0.1
|
||||||
|
milestones:
|
||||||
|
- 20000
|
||||||
|
- 40000
|
||||||
|
- 50000
|
||||||
|
scheduler: MultiStepLR
|
||||||
|
|
||||||
|
trainer_cfg:
|
||||||
|
enable_float16: true
|
||||||
|
log_iter: 100
|
||||||
|
with_test: false
|
||||||
|
restore_ckpt_strict: true
|
||||||
|
restore_hint: 0
|
||||||
|
save_iter: 60000
|
||||||
|
save_name: GaitSet
|
||||||
|
sync_BN: false
|
||||||
|
total_iter: 60000
|
||||||
|
sampler:
|
||||||
|
batch_shuffle: false
|
||||||
|
batch_size:
|
||||||
|
- 8
|
||||||
|
- 32
|
||||||
|
frames_num_fixed: 30
|
||||||
|
frames_num_max: 50
|
||||||
|
frames_num_min: 25
|
||||||
|
sample_type: fixed_unordered
|
||||||
|
type: TripletSampler
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,43 @@
|
|||||||
|
# CASIA-E
|
||||||
|
Application URL: https://www.scidb.cn/en/detail?dataSetId=57be0e918db743279baf44a38d013a06
|
||||||
|
- Original
|
||||||
|
```
|
||||||
|
test615-1014.zip
|
||||||
|
train001-500.zip
|
||||||
|
val501-614.zip
|
||||||
|
```
|
||||||
|
- Run `python datasets/CASIA-E/extractor.py --input_path CASIA-E/ --output_path CASIA-E-processed/ -n 8 -s 64`. \
|
||||||
|
`n` is number of workers. `s` is the target image size.
|
||||||
|
- Processed
|
||||||
|
```
|
||||||
|
CASIA-E-processed
|
||||||
|
forTrain # raw images
|
||||||
|
001 (subject)
|
||||||
|
H (height)
|
||||||
|
scene1 (scene)
|
||||||
|
bg (walking condition)
|
||||||
|
000 (view)
|
||||||
|
1 (sequence number)
|
||||||
|
xxx.jpg (images)
|
||||||
|
......
|
||||||
|
......
|
||||||
|
......
|
||||||
|
......
|
||||||
|
......
|
||||||
|
......
|
||||||
|
......
|
||||||
|
|
||||||
|
opengait # pickle file
|
||||||
|
001 (subject)
|
||||||
|
H_scene1_bg_1 (type)
|
||||||
|
000 (view)
|
||||||
|
000.pkl (contains all frames)
|
||||||
|
......
|
||||||
|
......
|
||||||
|
......
|
||||||
|
```
|
||||||
|
|
||||||
|
## Evaluation
|
||||||
|
Compared with the settings in the original paper, we used only 200 subjects for training and kept the rest as the test set; our division into gallery and probe is also more practical and more challenging.
|
||||||
|
For specific experimental settings, please refer to configs/gaitbase/gaitbase_casiae.yaml.
|
||||||
|
For the specific division of the probe and gallery, please refer to opengait/evaluation/evaluator.py.
|
||||||
@@ -0,0 +1,98 @@
|
|||||||
|
import argparse
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
import tqdm
|
||||||
|
import cv2
|
||||||
|
import tarfile
|
||||||
|
import zipfile
|
||||||
|
from functools import partial
|
||||||
|
import numpy as np
|
||||||
|
import pickle
|
||||||
|
import multiprocessing as mp
|
||||||
|
|
||||||
|
|
||||||
|
def make_pkl_for_one_person(id_, output_path, img_size=64):
    """Extract one subject's tar archive and convert every sequence to a pickle.

    Args:
        id_ (str): File name of one subject's archive, e.g. ``"001.tar"``.
        output_path (str): Root directory that holds the extracted raw images
            (``forTrain/``) and receives the pickle tree (``opengait/``).
        img_size (int): Target square silhouette size; frames whose height
            differs are resized.
    """
    # Skip entries that are not tar archives, or archives already consumed
    # (the tar is deleted after extraction, so a re-run is a no-op).
    if id_.split(".")[-1] != "tar" or not os.path.exists(os.path.join(output_path, id_)):
        return
    tar_path = os.path.join(output_path, id_)
    with tarfile.open(tar_path) as f:
        f.extractall(output_path)
    os.remove(tar_path)  # reclaim disk space once the raw images are out

    id_path = id_.split(".")[0]
    input_path = os.path.join(output_path, "forTrain", id_path)
    base_pkl_path = os.path.join(output_path, "opengait", id_path)
    if not os.path.isdir(input_path):
        print("Path not found: " + input_path)
        # BUG FIX: the original fell through here and crashed on the
        # os.listdir(input_path) below; bail out instead.
        return

    # Layout: <subject>/<height>/<scene>/<condition>/<view>/<seq>/*.jpg
    for height in sorted(os.listdir(input_path)):
        height_path = os.path.join(input_path, height)
        for scene in sorted(os.listdir(height_path)):
            scene_path = os.path.join(height_path, scene)
            for type_ in sorted(os.listdir(scene_path)):
                type_path = os.path.join(scene_path, type_)
                for view in sorted(os.listdir(type_path)):
                    view_path = os.path.join(type_path, view)
                    for num in sorted(os.listdir(view_path)):
                        num_path = os.path.join(view_path, num)
                        imgs = []
                        for file_ in sorted(os.listdir(num_path)):
                            img = cv2.imread(os.path.join(
                                num_path, file_), cv2.IMREAD_GRAYSCALE)
                            if img_size != img.shape[0]:
                                img = cv2.resize(
                                    img, dsize=(img_size, img_size))
                            imgs.append(img)
                        # Sequences with <= 5 frames carry too little gait
                        # information to be useful; skip them.
                        if len(imgs) > 5:
                            pkl_path = os.path.join(
                                base_pkl_path, f"{height}-{scene}-{type_}-{num}", view)
                            os.makedirs(pkl_path, exist_ok=True)
                            # BUG FIX: use a context manager so the file
                            # handle is closed (original leaked it).
                            with open(os.path.join(pkl_path, f"{view}.pkl"), "wb") as pf:
                                pickle.dump(np.asarray(imgs), pf)
                        else:
                            print("Not enough imgs: " + num_path)
|
||||||
|
|
||||||
|
|
||||||
|
def extractall(base_path: Path, output_path: Path, workers=1, img_size=64) -> None:
    """Extract all CASIA-E archives in base_path and build pickle files.

    Args:
        base_path (Path): Directory containing the three CASIA-E zip files
            (train001-500.zip, val501-614.zip, test615-1014.zip).
        output_path (Path): Directory to extract into; pickle files are
            generated under ``output_path/opengait``.
        workers (int): Number of worker processes for per-subject conversion.
        img_size (int): Target silhouette size forwarded to the converter.
    """
    os.makedirs(output_path, exist_ok=True)

    # Each zip holds one split of per-subject tar archives; the three
    # copy-pasted stanzas of the original are folded into one loop.
    for split, zip_name in (("train", 'train001-500.zip'),
                            ("validation", 'val501-614.zip'),
                            ("test", 'test615-1014.zip')):
        print(f"Unzipping {split} set...")
        with zipfile.ZipFile(os.path.join(base_path, zip_name)) as z:
            z.extractall(output_path)

    print("Extracting tar file...")
    # BUG FIX: exist_ok=True so a re-run after a partial previous run does
    # not crash with FileExistsError (original used bare makedirs).
    os.makedirs(os.path.join(output_path, "forTrain"), exist_ok=True)
    os.makedirs(os.path.join(output_path, "opengait"), exist_ok=True)
    ids = os.listdir(os.path.join(output_path))
    progress = tqdm.tqdm(total=len(ids), desc='Pretreating', unit='id')

    with mp.Pool(workers) as pool:
        worker = partial(make_pkl_for_one_person,
                         output_path=output_path, img_size=img_size)
        # Non-tar entries (the forTrain/opengait dirs themselves) are
        # filtered out inside make_pkl_for_one_person.
        for _ in pool.imap_unordered(worker, ids):
            progress.update(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Command-line entry point: parse the four knobs and hand off to
    # extractall(), which does all the real work.
    arg_parser = argparse.ArgumentParser(description='CASIA-E extractor')
    arg_parser.add_argument(
        '-b', '--input_path', type=str, required=True,
        help='Base path to CASIA-E zip files')
    arg_parser.add_argument(
        '-o', '--output_path', type=str, required=True,
        help='Output path for extracted files. The pickle files are generated in ${output_path}/opengait/')
    arg_parser.add_argument(
        '-s', '--img_size', type=int, default=64,
        help='Image resizing size. Default 64')
    arg_parser.add_argument(
        '-n', '--num_workers', type=int, default=1,
        help='Number of workers')
    opts = arg_parser.parse_args()

    extractall(
        Path(opts.input_path),
        Path(opts.output_path),
        opts.num_workers,
        opts.img_size,
    )
|
||||||
@@ -56,6 +56,14 @@
|
|||||||
| [DeepGaitV2-P3D](https://arxiv.org/pdf/2303.03301.pdf) | 74.4 | - | 64x44 | - | - |
|
| [DeepGaitV2-P3D](https://arxiv.org/pdf/2303.03301.pdf) | 74.4 | - | 64x44 | - | - |
|
||||||
| [SwinGait(Transformer-based)](https://arxiv.org/pdf/2303.03301.pdf) | 75.0 | - | 64x44 | - | - |
|
| [SwinGait(Transformer-based)](https://arxiv.org/pdf/2303.03301.pdf) | 75.0 | - | 64x44 | - | - |
|
||||||
|
|
||||||
|
## [CASIA-E](https://www.scidb.cn/en/detail?dataSetId=57be0e918db743279baf44a38d013a06)
|
||||||
|
|
||||||
|
| Model | `Rank@1.NM` | `Rank@1.BG` | `Rank@1.CL` | Input size| Configuration |
|
||||||
|
| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :---------: | :---------:| :----: | :-------------: | :--------------------------------------------------------------: |
|
||||||
|
| GaitSet | 82.54 | 75.26 | 62.53 | 64x44 | configs/gaitset/gaitset_casiae.yaml
|
||||||
|
| GaitPart | 82.92 | 74.36 | 60.48 | 64x44 | configs/gaitpart/gaitpart_casiae.yaml
|
||||||
|
| GaitBase | 91.59 | 86.65 | 74.73 | 64x44 | configs/gaitbase/gaitbase_casiae.yaml
|
||||||
|
|
||||||
------------------------------------------
|
------------------------------------------
|
||||||
|
|
||||||
The results in the parentheses are mentioned in the papers.
|
The results in the parentheses are mentioned in the papers.
|
||||||
|
|||||||
@@ -70,12 +70,21 @@ def cross_view_gallery_evaluation(feature, label, seq_type, view, dataset, metri
|
|||||||
|
|
||||||
def single_view_gallery_evaluation(feature, label, seq_type, view, dataset, metric):
|
def single_view_gallery_evaluation(feature, label, seq_type, view, dataset, metric):
|
||||||
probe_seq_dict = {'CASIA-B': {'NM': ['nm-05', 'nm-06'], 'BG': ['bg-01', 'bg-02'], 'CL': ['cl-01', 'cl-02']},
|
probe_seq_dict = {'CASIA-B': {'NM': ['nm-05', 'nm-06'], 'BG': ['bg-01', 'bg-02'], 'CL': ['cl-01', 'cl-02']},
|
||||||
'OUMVLP': {'NM': ['00']}}
|
'OUMVLP': {'NM': ['00']},
|
||||||
|
'CASIA-E': {'NM': ['H-scene2-nm-1', 'H-scene2-nm-2', 'L-scene2-nm-1', 'L-scene2-nm-2', 'H-scene3-nm-1', 'H-scene3-nm-2', 'L-scene3-nm-1', 'L-scene3-nm-2', 'H-scene3_s-nm-1', 'H-scene3_s-nm-2', 'L-scene3_s-nm-1', 'L-scene3_s-nm-2',],
|
||||||
|
'BG': ['H-scene2-bg-1', 'H-scene2-bg-2', 'L-scene2-bg-1', 'L-scene2-bg-2', 'H-scene3-bg-1', 'H-scene3-bg-2', 'L-scene3-bg-1', 'L-scene3-bg-2', 'H-scene3_s-bg-1', 'H-scene3_s-bg-2', 'L-scene3_s-bg-1', 'L-scene3_s-bg-2'],
|
||||||
|
'CL': ['H-scene2-cl-1', 'H-scene2-cl-2', 'L-scene2-cl-1', 'L-scene2-cl-2', 'H-scene3-cl-1', 'H-scene3-cl-2', 'L-scene3-cl-1', 'L-scene3-cl-2', 'H-scene3_s-cl-1', 'H-scene3_s-cl-2', 'L-scene3_s-cl-1', 'L-scene3_s-cl-2']
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
gallery_seq_dict = {'CASIA-B': ['nm-01', 'nm-02', 'nm-03', 'nm-04'],
|
gallery_seq_dict = {'CASIA-B': ['nm-01', 'nm-02', 'nm-03', 'nm-04'],
|
||||||
'OUMVLP': ['01']}
|
'OUMVLP': ['01'],
|
||||||
|
'CASIA-E': ['H-scene1-nm-1', 'H-scene1-nm-2', 'L-scene1-nm-1', 'L-scene1-nm-2']}
|
||||||
msg_mgr = get_msg_mgr()
|
msg_mgr = get_msg_mgr()
|
||||||
acc = {}
|
acc = {}
|
||||||
view_list = sorted(np.unique(view))
|
view_list = sorted(np.unique(view))
|
||||||
|
if dataset == 'CASIA-E':
|
||||||
|
view_list.remove("270")
|
||||||
view_num = len(view_list)
|
view_num = len(view_list)
|
||||||
num_rank = 1
|
num_rank = 1
|
||||||
for (type_, probe_seq) in probe_seq_dict[dataset].items():
|
for (type_, probe_seq) in probe_seq_dict[dataset].items():
|
||||||
@@ -92,8 +101,8 @@ def single_view_gallery_evaluation(feature, label, seq_type, view, dataset, metr
|
|||||||
gallery_y = label[gseq_mask]
|
gallery_y = label[gseq_mask]
|
||||||
gallery_x = feature[gseq_mask, :]
|
gallery_x = feature[gseq_mask, :]
|
||||||
dist = cuda_dist(probe_x, gallery_x, metric)
|
dist = cuda_dist(probe_x, gallery_x, metric)
|
||||||
idx = dist.cpu().sort(1)[1].numpy()
|
idx = dist.topk(num_rank, largest=False)[1].cpu().numpy()
|
||||||
acc[type_][v1, v2] = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
|
acc[type_][v1, v2] = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx], 1) > 0,
|
||||||
0) * 100 / dist.shape[0], 2)
|
0) * 100 / dist.shape[0], 2)
|
||||||
|
|
||||||
result_dict = {}
|
result_dict = {}
|
||||||
@@ -113,7 +122,7 @@ def evaluate_indoor_dataset(data, dataset, metric='euc', cross_view_gallery=Fals
|
|||||||
label = np.array(label)
|
label = np.array(label)
|
||||||
view = np.array(view)
|
view = np.array(view)
|
||||||
|
|
||||||
if dataset not in ('CASIA-B', 'OUMVLP'):
|
if dataset not in ('CASIA-B', 'OUMVLP', 'CASIA-E'):
|
||||||
raise KeyError("DataSet %s hasn't been supported !" % dataset)
|
raise KeyError("DataSet %s hasn't been supported !" % dataset)
|
||||||
if cross_view_gallery:
|
if cross_view_gallery:
|
||||||
return cross_view_gallery_evaluation(
|
return cross_view_gallery_evaluation(
|
||||||
@@ -145,7 +154,7 @@ def evaluate_real_scene(data, dataset, metric='euc'):
|
|||||||
probe_y = label[pseq_mask]
|
probe_y = label[pseq_mask]
|
||||||
|
|
||||||
dist = cuda_dist(probe_x, gallery_x, metric)
|
dist = cuda_dist(probe_x, gallery_x, metric)
|
||||||
idx = dist.cpu().sort(1)[1].numpy()
|
idx = dist.topk(num_rank, largest=False)[1].cpu().numpy()
|
||||||
acc = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
|
acc = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
|
||||||
0) * 100 / dist.shape[0], 2)
|
0) * 100 / dist.shape[0], 2)
|
||||||
msg_mgr.log_info('==Rank-1==')
|
msg_mgr.log_info('==Rank-1==')
|
||||||
@@ -173,8 +182,9 @@ def GREW_submission(data, dataset, metric='euc'):
|
|||||||
probe_x = feature[pseq_mask, :]
|
probe_x = feature[pseq_mask, :]
|
||||||
probe_y = view[pseq_mask]
|
probe_y = view[pseq_mask]
|
||||||
|
|
||||||
|
num_rank = 20
|
||||||
dist = cuda_dist(probe_x, gallery_x, metric)
|
dist = cuda_dist(probe_x, gallery_x, metric)
|
||||||
idx = dist.cpu().sort(1)[1].numpy()
|
idx = dist.topk(num_rank, largest=False)[1].cpu().numpy()
|
||||||
|
|
||||||
save_path = os.path.join(
|
save_path = os.path.join(
|
||||||
"GREW_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
|
"GREW_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
|
||||||
@@ -182,8 +192,8 @@ def GREW_submission(data, dataset, metric='euc'):
|
|||||||
with open(save_path, "w") as f:
|
with open(save_path, "w") as f:
|
||||||
f.write("videoId,rank1,rank2,rank3,rank4,rank5,rank6,rank7,rank8,rank9,rank10,rank11,rank12,rank13,rank14,rank15,rank16,rank17,rank18,rank19,rank20\n")
|
f.write("videoId,rank1,rank2,rank3,rank4,rank5,rank6,rank7,rank8,rank9,rank10,rank11,rank12,rank13,rank14,rank15,rank16,rank17,rank18,rank19,rank20\n")
|
||||||
for i in range(len(idx)):
|
for i in range(len(idx)):
|
||||||
r_format = [int(idx) for idx in gallery_y[idx[i, 0:20]]]
|
r_format = [int(idx) for idx in gallery_y[idx[i, 0:num_rank]]]
|
||||||
output_row = '{}'+',{}'*20+'\n'
|
output_row = '{}'+',{}'*num_rank+'\n'
|
||||||
f.write(output_row.format(probe_y[i], *r_format))
|
f.write(output_row.format(probe_y[i], *r_format))
|
||||||
print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
|
print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
|
||||||
return
|
return
|
||||||
|
|||||||
Reference in New Issue
Block a user