Refactor evaluation and add cross-view gallery

This commit is contained in:
darkliang
2022-11-24 22:07:50 +08:00
parent c5b4fb0806
commit 793405ad7c
18 changed files with 388 additions and 309 deletions
-1
View File
@@ -11,7 +11,6 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 60000 restore_hint: 60000
save_name: Baseline save_name: Baseline
eval_func: identification
sampler: sampler:
batch_shuffle: false batch_shuffle: false
batch_size: 16 batch_size: 16
+1 -1
View File
@@ -11,7 +11,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 250000 restore_hint: 250000
save_name: Baseline save_name: Baseline
eval_func: identification_GREW_submission # identification_real_scene # identification_GREW_submission eval_func: GREW_submission # identification_real_scene # identification_GREW_submission
sampler: sampler:
batch_shuffle: false batch_shuffle: false
batch_size: 64 batch_size: 64
-1
View File
@@ -11,7 +11,6 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 150000 restore_hint: 150000
save_name: Baseline save_name: Baseline
eval_func: identification
sampler: sampler:
batch_shuffle: false batch_shuffle: false
batch_size: 4 batch_size: 4
+1 -1
View File
@@ -9,7 +9,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 60000 restore_hint: 60000
save_name: Baseline save_name: Baseline
eval_func: evaluate_HID eval_func: HID_submission
sampler: sampler:
batch_shuffle: false batch_shuffle: false
batch_size: 8 batch_size: 8
+2 -1
View File
@@ -12,7 +12,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 80000 restore_hint: 80000
save_name: tmp save_name: tmp
eval_func: identification eval_func: evaluate_indoor_dataset
sampler: sampler:
batch_size: 4 batch_size: 4
sample_type: all_ordered sample_type: all_ordered
@@ -21,6 +21,7 @@ evaluator_cfg:
- img_w: 64 - img_w: 64
type: BaseSilCuttingTransform type: BaseSilCuttingTransform
metric: euc # cos metric: euc # cos
cross_view_gallery: false
loss_cfg: loss_cfg:
loss_term_weight: 1.0 loss_term_weight: 1.0
+1 -1
View File
@@ -12,7 +12,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 250000 restore_hint: 250000
save_name: GaitGL save_name: GaitGL
eval_func: identification_GREW_submission # identification_real_scene # identification_GREW_submission eval_func: GREW_submission # identification_real_scene # identification_GREW_submission
sampler: sampler:
batch_size: 4 batch_size: 4
sample_type: all_ordered sample_type: all_ordered
+1 -1
View File
@@ -12,7 +12,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 250000 restore_hint: 250000
save_name: GaitGL_BNNeck save_name: GaitGL_BNNeck
eval_func: identification_GREW_submission # identification_real_scene # identification_GREW_submission eval_func: GREW_submission # identification_real_scene # identification_GREW_submission
sampler: sampler:
batch_size: 4 batch_size: 4
sample_type: all_ordered sample_type: all_ordered
+1 -1
View File
@@ -11,7 +11,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 250000 restore_hint: 250000
save_name: GaitPart save_name: GaitPart
eval_func: identification_GREW_submission # identification_real_scene # identification_GREW_submission eval_func: GREW_submission # identification_real_scene # identification_GREW_submission
sampler: sampler:
batch_size: 4 batch_size: 4
sample_type: all_ordered sample_type: all_ordered
+1 -1
View File
@@ -11,7 +11,7 @@ evaluator_cfg:
restore_ckpt_strict: true restore_ckpt_strict: true
restore_hint: 250000 restore_hint: 250000
save_name: GaitSet save_name: GaitSet
eval_func: identification_GREW_submission # identification_real_scene # identification_GREW_submission eval_func: GREW_submission # identification_real_scene # identification_GREW_submission
sampler: sampler:
batch_size: 4 batch_size: 4
sample_type: all_ordered sample_type: all_ordered
+3
View File
@@ -0,0 +1,3 @@
# Package-level exports for the evaluation module.
from .metric import mean_iou
from numpy import set_printoptions
# Global numpy print format: fixed-point with two decimals and no scientific
# notation, so the accuracy tables logged during evaluation stay readable.
set_printoptions(suppress=True, formatter={'float': '{:0.2f}'.format})
+222
View File
@@ -0,0 +1,222 @@
import os
from time import strftime, localtime
import numpy as np
from utils import get_msg_mgr, mkdir
from .metric import mean_iou, cuda_dist, compute_ACC_mAP
from .re_rank import re_ranking
def de_diag(acc, each_angle=False):
    """Average a (view x view) accuracy matrix, excluding identical-view cases.

    Zeroes the diagonal (probe view == gallery view) and averages the
    remaining entries of each row.  Returns the per-view vector when
    ``each_angle`` is True, otherwise a single overall mean.
    """
    n_views = acc.shape[1]
    off_diag = acc - np.diag(np.diag(acc))
    per_view = np.sum(off_diag, 1) / (n_views - 1.)
    return per_view if each_angle else np.mean(per_view)
def cross_view_gallery_evaluation(feature, label, seq_type, view, probe_seq_dict, gallery_seq_list, metric):
    """Evaluate with an all-view gallery.

    For each walking condition and each probe view, the gallery contains
    sequences from every view; identical-view gallery entries of the same
    subject are excluded inside compute_ACC_mAP (via the view arrays).

    Args:
        feature: (n, c, p) embedding array, row-indexable by boolean masks.
        label, seq_type, view: per-sample arrays of subject id, sequence
            type, and camera view.
        probe_seq_dict: condition name -> list of probe sequence types.
        gallery_seq_list: sequence types that form the gallery.
        metric: distance metric forwarded to cuda_dist ('euc' or 'cos').

    Returns:
        dict of scalar Rank@1 / mAP results keyed for the test logger.
    """
    msg_mgr = get_msg_mgr()
    acc = {}
    mean_ap = {}  # renamed from `map`, which shadowed the builtin
    view_list = sorted(np.unique(view))
    # The gallery is view-independent here, so build its mask once instead of
    # inside the per-view loop (it was loop-invariant).
    gseq_mask = np.isin(seq_type, gallery_seq_list)
    gallery_y = label[gseq_mask]
    gallery_x = feature[gseq_mask, :]
    gallery_views = np.asarray(view)[gseq_mask]
    for (type_, probe_seq) in probe_seq_dict.items():
        # -1 marks "not evaluated" slots.
        acc[type_] = np.zeros(len(view_list)) - 1.
        mean_ap[type_] = np.zeros(len(view_list)) - 1.
        for (v1, probe_view) in enumerate(view_list):
            pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
                view, [probe_view])
            probe_x = feature[pseq_mask, :]
            probe_y = label[pseq_mask]
            dist = cuda_dist(probe_x, gallery_x, metric)
            eval_results = compute_ACC_mAP(
                dist.cpu().numpy(), probe_y, gallery_y,
                np.asarray(view)[pseq_mask], gallery_views)
            acc[type_][v1] = np.round(eval_results[0] * 100, 2)
            mean_ap[type_][v1] = np.round(eval_results[1] * 100, 2)
    result_dict = {}
    msg_mgr.log_info(
        '===Cross View Gallery Evaluation (Excluded identical-view cases)===')
    out_acc_str = "========= Rank@1 Acc =========\n"
    out_map_str = "============= mAP ============\n"
    for type_ in probe_seq_dict.keys():
        avg_acc = np.mean(acc[type_])
        avg_map = np.mean(mean_ap[type_])
        result_dict[f'scalar/test_accuracy/{type_}-Rank@1'] = avg_acc
        result_dict[f'scalar/test_accuracy/{type_}-mAP'] = avg_map
        out_acc_str += f"{type_}:\t{acc[type_]}, mean: {avg_acc:.2f}%\n"
        out_map_str += f"{type_}:\t{mean_ap[type_]}, mean: {avg_map:.2f}%\n"
    msg_mgr.log_info(f'{out_acc_str}')
    msg_mgr.log_info(f'{out_map_str}')
    return result_dict
# Modified From https://github.com/AbnerHqC/GaitSet/blob/master/model/utils/evaluator.py
def single_view_gallery_evaluation(feature, label, seq_type, view, probe_seq_dict, gallery_seq_list, metric):
    """Classic single-view-gallery protocol (GaitSet-style).

    Computes Rank-1 accuracy for every (probe view, gallery view) pair and
    reports the per-condition average with identical-view pairs excluded
    via de_diag.  Args mirror cross_view_gallery_evaluation.

    Returns:
        dict of per-condition mean Rank-1 scalars for the test logger.
    """
    msg_mgr = get_msg_mgr()
    acc = {}
    # Dead `map` table removed: it was allocated per condition but never
    # filled or reported in this protocol (only Rank-1 is computed).
    view_list = sorted(np.unique(view))
    view_num = len(view_list)
    num_rank = 1
    for (type_, probe_seq) in probe_seq_dict.items():
        # -1 marks "not evaluated" (view, view) slots.
        acc[type_] = np.zeros((view_num, view_num)) - 1.
        for (v1, probe_view) in enumerate(view_list):
            pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
                view, [probe_view])
            probe_x = feature[pseq_mask, :]
            probe_y = label[pseq_mask]
            for (v2, gallery_view) in enumerate(view_list):
                gseq_mask = np.isin(seq_type, gallery_seq_list) & np.isin(
                    view, [gallery_view])
                gallery_y = label[gseq_mask]
                gallery_x = feature[gseq_mask, :]
                dist = cuda_dist(probe_x, gallery_x, metric)
                idx = dist.cpu().sort(1)[1].numpy()
                # Rank-1: percentage of probes whose nearest gallery sample
                # shares the subject label.
                acc[type_][v1, v2] = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
                                                     0) * 100 / dist.shape[0], 2)
    result_dict = {}
    msg_mgr.log_info('===Rank-1 (Exclude identical-view cases)===')
    out_str = ""
    for type_ in probe_seq_dict.keys():
        sub_acc = de_diag(acc[type_], each_angle=True)
        msg_mgr.log_info(f'{type_}: {sub_acc}')
        result_dict[f'scalar/test_accuracy/{type_}'] = np.mean(sub_acc)
        out_str += f"{type_}: {np.mean(sub_acc):.2f}%\t"
    msg_mgr.log_info(out_str)
    return result_dict
def evaluate_indoor_dataset(data, dataset, metric='euc', cross_view_gallery=False):
    """Dispatch indoor-dataset (CASIA-B / OUMVLP) evaluation.

    Args:
        data: dict with 'embeddings', 'labels', 'types', 'views' arrays.
        dataset: dataset name; must appear in both protocol tables below.
        metric: distance metric forwarded to the chosen evaluator.
        cross_view_gallery: if True, use the all-view-gallery protocol;
            otherwise the classic single-view-gallery protocol.

    Returns:
        The result dict produced by the selected evaluation function.

    Raises:
        KeyError: if `dataset` has no configured probe/gallery protocol.
    """
    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
    label = np.array(label)
    view = np.array(view)
    probe_seq_dict = {'CASIA-B': {'NM': ['nm-05', 'nm-06'], 'BG': ['bg-01', 'bg-02'], 'CL': ['cl-01', 'cl-02']},
                      'OUMVLP': {'NM': ['00']}}
    gallery_seq_dict = {'CASIA-B': ['nm-01', 'nm-02', 'nm-03', 'nm-04'],
                        'OUMVLP': ['01']}
    # Bug fix: the original `dataset not in (probe_seq_dict or gallery_seq_dict)`
    # only ever consulted probe_seq_dict, because `or` returns its first
    # truthy operand.  Check membership in both tables explicitly.
    if dataset not in probe_seq_dict or dataset not in gallery_seq_dict:
        raise KeyError("DataSet %s hasn't been supported !" % dataset)
    if cross_view_gallery:
        return cross_view_gallery_evaluation(
            feature, label, seq_type, view, probe_seq_dict[dataset], gallery_seq_dict[dataset], metric)
    return single_view_gallery_evaluation(
        feature, label, seq_type, view, probe_seq_dict[dataset], gallery_seq_dict[dataset], metric)
def evaluate_real_scene(data, dataset, metric='euc'):
    """Rank-based evaluation for real-scene datasets (GREW val, HID, TTG-200...).

    Ranks every probe against the gallery defined by the per-dataset
    sequence-type tables below and logs Rank-1/5/10/20 accuracy.

    Args:
        data: dict with 'embeddings', 'labels', 'types' arrays.
        dataset: key into the gallery/probe sequence-type tables.
        metric: distance metric forwarded to cuda_dist.

    Returns:
        dict with Rank-1 and Rank-5 scalars for the test logger.
    """
    msg_mgr = get_msg_mgr()
    feature, label, seq_type = data['embeddings'], data['labels'], data['types']
    label = np.array(label)
    gallery_seq_type = {'0001-1000': ['1', '2'],
                        "HID2021": ['0'], '0001-1000-test': ['0'],
                        'GREW': ['01'], 'TTG-200': ['1']}
    probe_seq_type = {'0001-1000': ['3', '4', '5', '6'],
                      "HID2021": ['1'], '0001-1000-test': ['1'],
                      'GREW': ['02'], 'TTG-200': ['2', '3', '4', '5', '6']}
    num_rank = 20
    gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
    gallery_x = feature[gseq_mask, :]
    gallery_y = label[gseq_mask]
    pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
    probe_x = feature[pseq_mask, :]
    probe_y = label[pseq_mask]
    dist = cuda_dist(probe_x, gallery_x, metric)
    idx = dist.cpu().sort(1)[1].numpy()
    # acc[k-1] = percentage of probes whose correct id appears in the top-k.
    # (Removed the dead pre-initialization `acc = np.zeros([num_rank]) - 1.`,
    # which was immediately overwritten.)
    acc = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
                          0) * 100 / dist.shape[0], 2)
    for k in (1, 5, 10, 20):
        msg_mgr.log_info('==Rank-%d==' % k)
        msg_mgr.log_info('%.3f' % (np.mean(acc[k - 1])))
    return {"scalar/test_accuracy/Rank-1": np.mean(acc[0]), "scalar/test_accuracy/Rank-5": np.mean(acc[4])}
def GREW_submission(data, dataset, metric='euc'):
    # Generate a GREW competition submission CSV: one row per probe, holding
    # its video id followed by the top-20 ranked gallery subject ids.
    # Returns None; the only output is the timestamped CSV file on disk.
    get_msg_mgr().log_info("Evaluating GREW")
    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
    label = np.array(label)
    view = np.array(view)
    # GREW packs gallery sequences as types '01'/'02' and probes as '03'.
    gallery_seq_type = {'GREW': ['01', '02']}
    probe_seq_type = {'GREW': ['03']}
    gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
    gallery_x = feature[gseq_mask, :]
    gallery_y = label[gseq_mask]
    pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
    probe_x = feature[pseq_mask, :]
    # For probes, the 'views' field carries the submission video id
    # (presumably set up that way by the GREW data loader — TODO confirm).
    probe_y = view[pseq_mask]
    dist = cuda_dist(probe_x, gallery_x, metric)
    # Gallery indices sorted by ascending distance, per probe.
    idx = dist.cpu().sort(1)[1].numpy()
    save_path = os.path.join(
        "GREW_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
    mkdir("GREW_result")
    with open(save_path, "w") as f:
        f.write("videoId,rank1,rank2,rank3,rank4,rank5,rank6,rank7,rank8,rank9,rank10,rank11,rank12,rank13,rank14,rank15,rank16,rank17,rank18,rank19,rank20\n")
        for i in range(len(idx)):
            # NOTE(review): the comprehension variable `idx` shadows the outer
            # `idx` array; harmless in Python 3 (comprehensions have their own
            # scope) but worth renaming for clarity.
            r_format = [int(idx) for idx in gallery_y[idx[i, 0:20]]]
            output_row = '{}'+',{}'*20+'\n'
            f.write(output_row.format(probe_y[i], *r_format))
    print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
    return
def HID_submission(data, dataset, metric='euc'):
    # Generate an HID competition submission CSV: each probe video id is
    # paired with the label of its best-matching gallery sample after
    # k-reciprocal re-ranking.  Returns None; output is the CSV on disk.
    msg_mgr = get_msg_mgr()
    msg_mgr.log_info("Evaluating HID")
    feature, label, seq_type = data['embeddings'], data['labels'], data['types']
    label = np.array(label)
    seq_type = np.array(seq_type)
    # HID marks probe samples with the literal label "probe"; everything
    # else is gallery.
    probe_mask = (label == "probe")
    gallery_mask = (label != "probe")
    gallery_x = feature[gallery_mask, :]
    gallery_y = label[gallery_mask]
    probe_x = feature[probe_mask, :]
    # For probes, the sequence-type field carries the submission video id.
    probe_y = seq_type[probe_mask]
    # Re-ranking needs the full (probe+gallery) x (probe+gallery) distance
    # matrix, with probes occupying the first probe_x.shape[0] rows.
    feat = np.concatenate([probe_x, gallery_x])
    dist = cuda_dist(feat, feat, metric).cpu().numpy()
    msg_mgr.log_info("Starting Re-ranking")
    re_rank = re_ranking(dist, probe_x.shape[0], k1=6, k2=6, lambda_value=0.3)
    # Rows index probes; columns are gallery samples sorted by re-ranked
    # distance, so column 0 is the best match.
    idx = np.argsort(re_rank, axis=1)
    save_path = os.path.join(
        "HID_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
    mkdir("HID_result")
    with open(save_path, "w") as f:
        f.write("videoID,label\n")
        for i in range(len(idx)):
            f.write("{},{}\n".format(probe_y[i], gallery_y[idx[i, 0]]))
    print("HID result saved to {}/{}".format(os.getcwd(), save_path))
    return
def evaluate_segmentation(data, dataset):
    """Report mean IoU between predicted masks and ground-truth masks."""
    miou = mean_iou(data['pred'], data['mask'])
    get_msg_mgr().log_info('mIOU: %.3f' % (miou.mean()))
    return {"scalar/test_accuracy/mIOU": miou}
+88
View File
@@ -0,0 +1,88 @@
import torch
import numpy as np
import torch.nn.functional as F
from utils import is_tensor
def cuda_dist(x, y, metric='euc'):
    """Pairwise distance between two sets of part-based embeddings on GPU.

    Args:
        x: numpy array, indexed as (n_x, c, p) — p horizontal parts ("bins")
           per sample (shape inferred from the size(2)/[:, :, i] usage).
        y: numpy array, same layout with n_y samples.
        metric: 'cos' for cosine distance; any other value gives Euclidean.

    Returns:
        (n_x, n_y) torch CUDA tensor of distances averaged over the p bins.
    """
    x = torch.from_numpy(x).cuda()
    y = torch.from_numpy(y).cuda()
    if metric == 'cos':
        # L2-normalize channels so the dot product below is cosine similarity.
        x = F.normalize(x, p=2, dim=1)  # n c p
        y = F.normalize(y, p=2, dim=1)  # n c p
    num_bin = x.size(2)
    n_x = x.size(0)
    n_y = y.size(0)
    dist = torch.zeros(n_x, n_y).cuda()
    # Accumulate one (n_x, n_y) distance matrix per part bin.
    for i in range(num_bin):
        _x = x[:, :, i]
        _y = y[:, :, i]
        if metric == 'cos':
            dist += torch.matmul(_x, _y.transpose(0, 1))
        else:
            # ||a-b||^2 = ||a||^2 + ||b||^2 - 2ab; relu guards tiny negatives
            # from floating-point error before the sqrt.
            _dist = torch.sum(_x ** 2, 1).unsqueeze(1) + torch.sum(_y ** 2, 1).unsqueeze(
                0) - 2 * torch.matmul(_x, _y.transpose(0, 1))
            dist += torch.sqrt(F.relu(_dist))
    # Cosine similarity is converted to a distance (1 - mean similarity).
    return 1 - dist/num_bin if metric == 'cos' else dist / num_bin
def mean_iou(msk1, msk2, eps=1.0e-9):
    """Per-sample IoU between two batches of binary masks of shape (n, ...).

    Numpy inputs are moved to the GPU; tensor inputs are used as-is.
    ``eps`` guards against division by zero for empty unions.
    """
    if not is_tensor(msk1):
        msk1 = torch.from_numpy(msk1).cuda()
    if not is_tensor(msk2):
        msk2 = torch.from_numpy(msk2).cuda()
    batch = msk1.size(0)
    intersection = (msk1 * msk2).view(batch, -1).sum(-1)
    union = ((msk1 + msk2) > 0.).float().view(batch, -1).sum(-1)
    return intersection / (union + eps)
def compute_ACC_mAP(distmat, q_pids, g_pids, q_views=None, g_views=None, rank=1):
    """Compute rank-``rank`` accuracy and mAP over a query/gallery split.

    Args:
        distmat: (num_q, num_g) distance matrix, smaller = more similar.
        q_pids, g_pids: subject ids for queries and gallery samples.
        q_views, g_views: optional view labels; when both are given, gallery
            samples sharing BOTH the query's view and its id are excluded
            (the standard identical-view exclusion).
        rank: which rank the accuracy is reported at (default Rank-1).

    Returns:
        (ACC, mAP) as floats averaged over queries (mAP over queries that
        have at least one relevant gallery sample).
    """
    num_queries = distmat.shape[0]
    hits = []
    average_precisions = []
    valid_queries = 0.  # queries with at least one relevant gallery sample
    for qi in range(num_queries):
        dists = distmat[qi]
        glabels = g_pids
        if q_views is not None and g_views is not None:
            # Keep gallery entries that differ in view OR in subject id.
            keep = np.isin(g_views, [q_views[qi]], invert=True) | np.isin(
                g_pids, [q_pids[qi]], invert=True)
            dists = dists[keep]
            glabels = glabels[keep]
            assert(len(glabels) >
                   0), "No gallery after excluding identical-view cases!"
        order = np.argsort(dists)
        # Binary vector: 1 where the ranked gallery sample matches the query.
        matches = (glabels[order] == q_pids[qi]).astype(np.int32)
        cmc = matches.cumsum()
        cmc[cmc > 1] = 1
        hits.append(cmc[rank - 1])
        # Average precision per the standard IR definition.
        num_rel = matches.sum()
        if num_rel > 0:
            valid_queries += 1.
            precision_at = matches.cumsum() / (np.arange(len(matches)) + 1.)
            average_precisions.append((precision_at * matches).sum() / num_rel)
    return np.mean(hits), np.mean(average_precisions)
+64
View File
@@ -0,0 +1,64 @@
import numpy as np
def re_ranking(original_dist, query_num, k1, k2, lambda_value):
    # Modified from https://github.com/michuanhaohao/reid-strong-baseline/blob/master/utils/re_ranking.py
    """k-reciprocal re-ranking of a query-vs-all distance matrix.

    Args:
        original_dist: (all_num, all_num) distance matrix; the first
            `query_num` rows/cols are queries, the rest gallery.
        query_num: number of query rows at the top of the matrix.
        k1, k2: neighborhood sizes for k-reciprocal sets and local query
            expansion, respectively.
        lambda_value: blend weight between the original distance and the
            Jaccard distance of the expanded neighbor sets.

    Returns:
        (query_num, gallery_num) re-ranked distance matrix.
    """
    all_num = original_dist.shape[0]
    # Normalize each column by its max, then transpose so rows are samples.
    original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
    # V holds each sample's soft neighbor-membership weights (float16 to
    # bound memory on large galleries).
    V = np.zeros_like(original_dist).astype(np.float16)
    initial_rank = np.argsort(original_dist).astype(np.int32)
    for i in range(all_num):
        # k-reciprocal neighbors
        forward_k_neigh_index = initial_rank[i, :k1 + 1]
        backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
        fi = np.where(backward_k_neigh_index == i)[0]
        k_reciprocal_index = forward_k_neigh_index[fi]
        k_reciprocal_expansion_index = k_reciprocal_index
        # Expand the reciprocal set with candidates whose own (half-size)
        # reciprocal sets overlap this one by more than 2/3.
        for j in range(len(k_reciprocal_index)):
            candidate = k_reciprocal_index[j]
            candidate_forward_k_neigh_index = initial_rank[candidate, :int(
                np.around(k1 / 2)) + 1]
            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
                                                            :int(np.around(k1 / 2)) + 1]
            fi_candidate = np.where(
                candidate_backward_k_neigh_index == candidate)[0]
            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
            if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
                    candidate_k_reciprocal_index):
                k_reciprocal_expansion_index = np.append(
                    k_reciprocal_expansion_index, candidate_k_reciprocal_index)
        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
        # Gaussian-weighted membership, normalized to sum to 1.
        weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
        V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
    original_dist = original_dist[:query_num, ]
    if k2 != 1:
        # Local query expansion: average each row's V over its k2 neighbors.
        V_qe = np.zeros_like(V, dtype=np.float16)
        for i in range(all_num):
            V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
        V = V_qe
        del V_qe
    del initial_rank
    # Inverted index: for each sample, which rows assign it nonzero weight.
    invIndex = []
    for i in range(all_num):
        invIndex.append(np.where(V[:, i] != 0)[0])
    jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
    for i in range(query_num):
        temp_min = np.zeros(shape=[1, all_num], dtype=np.float16)
        indNonZero = np.where(V[i, :] != 0)[0]
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(len(indNonZero)):
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
                                                                               V[indImages[j], indNonZero[j]])
        # Jaccard distance from the min/max set-overlap identity.
        jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
    # Blend Jaccard and original distances, then keep only query-vs-gallery.
    final_dist = jaccard_dist * (1 - lambda_value) + \
        original_dist * lambda_value
    del original_dist
    del V
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist
+1 -1
View File
@@ -30,7 +30,7 @@ from data.dataset import DataSet
import data.sampler as Samplers import data.sampler as Samplers
from utils import Odict, mkdir, ddp_all_gather from utils import Odict, mkdir, ddp_all_gather
from utils import get_valid_args, is_list, is_dict, np2var, ts2np, list2var, get_attr_from from utils import get_valid_args, is_list, is_dict, np2var, ts2np, list2var, get_attr_from
from utils import evaluation as eval_functions from evaluation import evaluator as eval_functions
from utils import NoOp from utils import NoOp
from utils import get_msg_mgr from utils import get_msg_mgr
+2 -2
View File
@@ -1,6 +1,6 @@
import torch import torch
from .base import BaseLoss from .base import BaseLoss
from utils import MeanIOU from evaluation import mean_iou
class BinaryCrossEntropyLoss(BaseLoss): class BinaryCrossEntropyLoss(BaseLoss):
@@ -24,7 +24,7 @@ class BinaryCrossEntropyLoss(BaseLoss):
loss = loss.view(n, -1) loss = loss.view(n, -1)
mean_loss = loss.mean() mean_loss = loss.mean()
hard_loss = loss.max() hard_loss = loss.max()
miou = MeanIOU((logits > 0.5).float(), labels) miou = mean_iou((logits > 0.5).float(), labels)
self.info.update({ self.info.update({
'loss': mean_loss.detach().clone(), 'loss': mean_loss.detach().clone(),
'hard_loss': hard_loss.detach().clone(), 'hard_loss': hard_loss.detach().clone(),
-1
View File
@@ -7,5 +7,4 @@ from .common import mkdir, clones
from .common import MergeCfgsDict from .common import MergeCfgsDict
from .common import get_attr_from from .common import get_attr_from
from .common import NoOp from .common import NoOp
from .common import MeanIOU
from .msg_manager import get_msg_mgr from .msg_manager import get_msg_mgr
-12
View File
@@ -203,15 +203,3 @@ def get_ddp_module(module, **kwargs):
def params_count(net): def params_count(net):
n_parameters = sum(p.numel() for p in net.parameters()) n_parameters = sum(p.numel() for p in net.parameters())
return 'Parameters Count: {:.5f}M'.format(n_parameters / 1e6) return 'Parameters Count: {:.5f}M'.format(n_parameters / 1e6)
def MeanIOU(msk1, msk2, eps=1.0e-9):
if not is_tensor(msk1):
msk1 = torch.from_numpy(msk1).cuda()
if not is_tensor(msk2):
msk2 = torch.from_numpy(msk2).cuda()
n = msk1.size(0)
inter = msk1 * msk2
union = ((msk1 + msk2) > 0.).float()
MeIOU = inter.view(n, -1).sum(-1) / (union.view(n, -1).sum(-1) + eps)
return MeIOU
-284
View File
@@ -1,284 +0,0 @@
import os
from time import strftime, localtime
import torch
import numpy as np
import torch.nn.functional as F
from utils import get_msg_mgr, mkdir, MeanIOU
def cuda_dist(x, y, metric='euc'):
x = torch.from_numpy(x).cuda()
y = torch.from_numpy(y).cuda()
if metric == 'cos':
x = F.normalize(x, p=2, dim=1) # n c p
y = F.normalize(y, p=2, dim=1) # n c p
num_bin = x.size(2)
n_x = x.size(0)
n_y = y.size(0)
dist = torch.zeros(n_x, n_y).cuda()
for i in range(num_bin):
_x = x[:, :, i]
_y = y[:, :, i]
if metric == 'cos':
dist += torch.matmul(_x, _y.transpose(0, 1))
else:
_dist = torch.sum(_x ** 2, 1).unsqueeze(1) + torch.sum(_y ** 2, 1).unsqueeze(
0) - 2 * torch.matmul(_x, _y.transpose(0, 1))
dist += torch.sqrt(F.relu(_dist))
return 1 - dist/num_bin if metric == 'cos' else dist / num_bin
# Exclude identical-view cases
def de_diag(acc, each_angle=False):
dividend = acc.shape[1] - 1.
result = np.sum(acc - np.diag(np.diag(acc)), 1) / dividend
if not each_angle:
result = np.mean(result)
return result
# Modified From https://github.com/AbnerHqC/GaitSet/blob/master/model/utils/evaluator.py
def identification(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr()
feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
label = np.array(label)
view_list = list(set(view))
view_list.sort()
view_num = len(view_list)
# sample_num = len(feature)
probe_seq_dict = {'CASIA-B': [['nm-05', 'nm-06'], ['bg-01', 'bg-02'], ['cl-01', 'cl-02']],
'OUMVLP': [['00']]}
gallery_seq_dict = {'CASIA-B': [['nm-01', 'nm-02', 'nm-03', 'nm-04']],
'OUMVLP': [['01']]}
if dataset not in (probe_seq_dict or gallery_seq_dict):
raise KeyError("DataSet %s hasn't been supported !" % dataset)
num_rank = 5
acc = np.zeros([len(probe_seq_dict[dataset]),
view_num, view_num, num_rank]) - 1.
for (p, probe_seq) in enumerate(probe_seq_dict[dataset]):
for gallery_seq in gallery_seq_dict[dataset]:
for (v1, probe_view) in enumerate(view_list):
for (v2, gallery_view) in enumerate(view_list):
gseq_mask = np.isin(seq_type, gallery_seq) & np.isin(
view, [gallery_view])
gallery_x = feature[gseq_mask, :]
gallery_y = label[gseq_mask]
pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
view, [probe_view])
probe_x = feature[pseq_mask, :]
probe_y = label[pseq_mask]
dist = cuda_dist(probe_x, gallery_x, metric)
idx = dist.sort(1)[1].cpu().numpy()
acc[p, v1, v2, :] = np.round(
np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
0) * 100 / dist.shape[0], 2)
result_dict = {}
np.set_printoptions(precision=3, suppress=True)
if 'OUMVLP' not in dataset:
for i in range(1):
msg_mgr.log_info(
'===Rank-%d (Include identical-view cases)===' % (i + 1))
msg_mgr.log_info('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
np.mean(acc[0, :, :, i]),
np.mean(acc[1, :, :, i]),
np.mean(acc[2, :, :, i])))
for i in range(1):
msg_mgr.log_info(
'===Rank-%d (Exclude identical-view cases)===' % (i + 1))
msg_mgr.log_info('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
de_diag(acc[0, :, :, i]),
de_diag(acc[1, :, :, i]),
de_diag(acc[2, :, :, i])))
result_dict["scalar/test_accuracy/NM"] = de_diag(acc[0, :, :, i])
result_dict["scalar/test_accuracy/BG"] = de_diag(acc[1, :, :, i])
result_dict["scalar/test_accuracy/CL"] = de_diag(acc[2, :, :, i])
np.set_printoptions(precision=2, floatmode='fixed')
for i in range(1):
msg_mgr.log_info(
'===Rank-%d of each angle (Exclude identical-view cases)===' % (i + 1))
msg_mgr.log_info('NM: {}'.format(de_diag(acc[0, :, :, i], True)))
msg_mgr.log_info('BG: {}'.format(de_diag(acc[1, :, :, i], True)))
msg_mgr.log_info('CL: {}'.format(de_diag(acc[2, :, :, i], True)))
else:
msg_mgr.log_info('===Rank-1 (Include identical-view cases)===')
msg_mgr.log_info('NM: %.3f ' % (np.mean(acc[0, :, :, 0])))
msg_mgr.log_info('===Rank-1 (Exclude identical-view cases)===')
msg_mgr.log_info('NM: %.3f ' % (de_diag(acc[0, :, :, 0])))
msg_mgr.log_info(
'===Rank-1 of each angle (Exclude identical-view cases)===')
msg_mgr.log_info('NM: {}'.format(de_diag(acc[0, :, :, 0], True)))
result_dict["scalar/test_accuracy/NM"] = de_diag(acc[0, :, :, 0])
return result_dict
def identification_real_scene(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr()
feature, label, seq_type = data['embeddings'], data['labels'], data['types']
label = np.array(label)
gallery_seq_type = {'0001-1000': ['1', '2'],
"HID2021": ['0'], '0001-1000-test': ['0'],
'GREW': ['01'], 'TTG-200': ['1']}
probe_seq_type = {'0001-1000': ['3', '4', '5', '6'],
"HID2021": ['1'], '0001-1000-test': ['1'],
'GREW': ['02'], 'TTG-200': ['2', '3', '4', '5', '6']}
num_rank = 20
acc = np.zeros([num_rank]) - 1.
gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
gallery_x = feature[gseq_mask, :]
gallery_y = label[gseq_mask]
pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
probe_x = feature[pseq_mask, :]
probe_y = label[pseq_mask]
dist = cuda_dist(probe_x, gallery_x, metric)
idx = dist.cpu().sort(1)[1].numpy()
acc = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
0) * 100 / dist.shape[0], 2)
msg_mgr.log_info('==Rank-1==')
msg_mgr.log_info('%.3f' % (np.mean(acc[0])))
msg_mgr.log_info('==Rank-5==')
msg_mgr.log_info('%.3f' % (np.mean(acc[4])))
msg_mgr.log_info('==Rank-10==')
msg_mgr.log_info('%.3f' % (np.mean(acc[9])))
msg_mgr.log_info('==Rank-20==')
msg_mgr.log_info('%.3f' % (np.mean(acc[19])))
return {"scalar/test_accuracy/Rank-1": np.mean(acc[0]), "scalar/test_accuracy/Rank-5": np.mean(acc[4])}
def identification_GREW_submission(data, dataset, metric='euc'):
get_msg_mgr().log_info("Evaluating GREW")
feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
label = np.array(label)
view = np.array(view)
gallery_seq_type = {'GREW': ['01', '02']}
probe_seq_type = {'GREW': ['03']}
gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
gallery_x = feature[gseq_mask, :]
gallery_y = label[gseq_mask]
pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
probe_x = feature[pseq_mask, :]
probe_y = view[pseq_mask]
dist = cuda_dist(probe_x, gallery_x, metric)
idx = dist.cpu().sort(1)[1].numpy()
save_path = os.path.join(
"GREW_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
mkdir("GREW_result")
with open(save_path, "w") as f:
f.write("videoId,rank1,rank2,rank3,rank4,rank5,rank6,rank7,rank8,rank9,rank10,rank11,rank12,rank13,rank14,rank15,rank16,rank17,rank18,rank19,rank20\n")
for i in range(len(idx)):
r_format = [int(idx) for idx in gallery_y[idx[i, 0:20]]]
output_row = '{}'+',{}'*20+'\n'
f.write(output_row.format(probe_y[i], *r_format))
print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
return
def evaluate_HID(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr()
msg_mgr.log_info("Evaluating HID")
feature, label, seq_type = data['embeddings'], data['labels'], data['types']
label = np.array(label)
seq_type = np.array(seq_type)
probe_mask = (label == "probe")
gallery_mask = (label != "probe")
gallery_x = feature[gallery_mask, :]
gallery_y = label[gallery_mask]
probe_x = feature[probe_mask, :]
probe_y = seq_type[probe_mask]
feat = np.concatenate([probe_x, gallery_x])
dist = cuda_dist(feat, feat, metric).cpu().numpy()
msg_mgr.log_info("Starting Re-ranking")
re_rank = re_ranking(dist, probe_x.shape[0], k1=6, k2=6, lambda_value=0.3)
idx = np.argsort(re_rank, axis=1)
save_path = os.path.join(
"HID_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
mkdir("HID_result")
with open(save_path, "w") as f:
f.write("videoID,label\n")
for i in range(len(idx)):
f.write("{},{}\n".format(probe_y[i], gallery_y[idx[i, 0]]))
print("HID result saved to {}/{}".format(os.getcwd(), save_path))
return
def re_ranking(original_dist, query_num, k1, k2, lambda_value):
# Modified from https://github.com/michuanhaohao/reid-strong-baseline/blob/master/utils/re_ranking.py
all_num = original_dist.shape[0]
original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
V = np.zeros_like(original_dist).astype(np.float16)
initial_rank = np.argsort(original_dist).astype(np.int32)
for i in range(all_num):
# k-reciprocal neighbors
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
k_reciprocal_index = forward_k_neigh_index[fi]
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_forward_k_neigh_index = initial_rank[candidate, :int(
np.around(k1 / 2)) + 1]
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
:int(np.around(k1 / 2)) + 1]
fi_candidate = np.where(
candidate_backward_k_neigh_index == candidate)[0]
candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(
k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
original_dist = original_dist[:query_num, ]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float16)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
V = V_qe
del V_qe
del initial_rank
invIndex = []
for i in range(all_num):
invIndex.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
for i in range(query_num):
temp_min = np.zeros(shape=[1, all_num], dtype=np.float16)
indNonZero = np.where(V[i, :] != 0)[0]
indImages = [invIndex[ind] for ind in indNonZero]
for j in range(len(indNonZero)):
temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
V[indImages[j], indNonZero[j]])
jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
final_dist = jaccard_dist * (1 - lambda_value) + \
original_dist * lambda_value
del original_dist
del V
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
return final_dist
def mean_iou(data, dataset):
labels = data['mask']
pred = data['pred']
miou = MeanIOU(pred, labels)
get_msg_mgr().log_info('mIOU: %.3f' % (miou.mean()))
return {"scalar/test_accuracy/mIOU": miou}