refactor evaluation and add cross-view gallery

This commit is contained in:
darkliang
2022-11-24 22:07:50 +08:00
parent c5b4fb0806
commit 793405ad7c
18 changed files with 388 additions and 309 deletions
+222
@@ -0,0 +1,222 @@
import os
from time import strftime, localtime

import numpy as np

from utils import get_msg_mgr, mkdir
from .metric import mean_iou, cuda_dist, compute_ACC_mAP
from .re_rank import re_ranking


def de_diag(acc, each_angle=False):
    # Exclude identical-view cases
    dividend = acc.shape[1] - 1.
    result = np.sum(acc - np.diag(np.diag(acc)), 1) / dividend
    if not each_angle:
        result = np.mean(result)
    return result
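
# A minimal sketch (not part of this commit) of what de_diag computes, on an
# assumed toy 3x3 view-by-view accuracy matrix:
#
#     acc = np.array([[90., 80., 70.],
#                     [60., 95., 85.],
#                     [75., 65., 99.]])
#     de_diag(acc, each_angle=True)  # -> [75. , 72.5, 70. ] (row means, diagonal excluded)
#     de_diag(acc)                   # -> 72.5 (scalar mean over views)

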
def cross_view_gallery_evaluation(feature, label, seq_type, view, probe_seq_dict, gallery_seq_list, metric):
    """Evaluate each probe view against a gallery that pools all views."""
    msg_mgr = get_msg_mgr()
    acc = {}
    mean_ap = {}  # renamed from `map` to avoid shadowing the built-in
    view_list = sorted(np.unique(view))
    view_arr = np.asarray(view)

    # The gallery does not depend on the probe view, so build it once.
    gseq_mask = np.isin(seq_type, gallery_seq_list)
    gallery_x = feature[gseq_mask, :]
    gallery_y = label[gseq_mask]
    gallery_view = view_arr[gseq_mask]

    for (type_, probe_seq) in probe_seq_dict.items():
        acc[type_] = np.zeros(len(view_list)) - 1.
        mean_ap[type_] = np.zeros(len(view_list)) - 1.
        for (v1, probe_view) in enumerate(view_list):
            pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
                view, [probe_view])
            probe_x = feature[pseq_mask, :]
            probe_y = label[pseq_mask]
            dist = cuda_dist(probe_x, gallery_x, metric)
            eval_results = compute_ACC_mAP(
                dist.cpu().numpy(), probe_y, gallery_y,
                view_arr[pseq_mask], gallery_view)
            acc[type_][v1] = np.round(eval_results[0] * 100, 2)
            mean_ap[type_][v1] = np.round(eval_results[1] * 100, 2)

    result_dict = {}
    msg_mgr.log_info(
        '===Cross View Gallery Evaluation (Excluded identical-view cases)===')
    out_acc_str = "========= Rank@1 Acc =========\n"
    out_map_str = "============= mAP ============\n"
    for type_ in probe_seq_dict.keys():
        avg_acc = np.mean(acc[type_])
        avg_map = np.mean(mean_ap[type_])
        result_dict[f'scalar/test_accuracy/{type_}-Rank@1'] = avg_acc
        result_dict[f'scalar/test_accuracy/{type_}-mAP'] = avg_map
        out_acc_str += f"{type_}:\t{acc[type_]}, mean: {avg_acc:.2f}%\n"
        out_map_str += f"{type_}:\t{mean_ap[type_]}, mean: {avg_map:.2f}%\n"
    msg_mgr.log_info(f'{out_acc_str}')
    msg_mgr.log_info(f'{out_map_str}')
    return result_dict
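
# For CASIA-B this yields, per walking condition, one Rank@1 and one mAP value
# for each of the 11 probe views, each measured against a gallery pooling all
# views. A hypothetical call (argument values taken from
# evaluate_indoor_dataset below):
#
#     cross_view_gallery_evaluation(
#         feature, label, seq_type, view,
#         probe_seq_dict={'NM': ['nm-05', 'nm-06'],
#                         'BG': ['bg-01', 'bg-02'],
#                         'CL': ['cl-01', 'cl-02']},
#         gallery_seq_list=['nm-01', 'nm-02', 'nm-03', 'nm-04'],
#         metric='euc')

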
# Modified from https://github.com/AbnerHqC/GaitSet/blob/master/model/utils/evaluator.py
def single_view_gallery_evaluation(feature, label, seq_type, view, probe_seq_dict, gallery_seq_list, metric):
    msg_mgr = get_msg_mgr()
    acc = {}
    view_list = sorted(np.unique(view))
    view_num = len(view_list)
    num_rank = 1
    for (type_, probe_seq) in probe_seq_dict.items():
        # acc[type_][v1, v2] holds the Rank-1 accuracy for probe view v1
        # against a gallery restricted to view v2.
        acc[type_] = np.zeros((view_num, view_num)) - 1.
        for (v1, probe_view) in enumerate(view_list):
            pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
                view, [probe_view])
            probe_x = feature[pseq_mask, :]
            probe_y = label[pseq_mask]
            for (v2, gallery_view) in enumerate(view_list):
                gseq_mask = np.isin(seq_type, gallery_seq_list) & np.isin(
                    view, [gallery_view])
                gallery_y = label[gseq_mask]
                gallery_x = feature[gseq_mask, :]
                dist = cuda_dist(probe_x, gallery_x, metric)
                idx = dist.cpu().sort(1)[1].numpy()
                # Top-`num_rank` hit rate; [-1] takes the final cumulative rank
                # and keeps the assignment scalar for recent NumPy versions.
                acc[type_][v1, v2] = np.round(
                    np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]],
                                     1) > 0, 0)[-1] * 100 / dist.shape[0], 2)

    result_dict = {}
    msg_mgr.log_info('===Rank-1 (Exclude identical-view cases)===')
    out_str = ""
    for type_ in probe_seq_dict.keys():
        sub_acc = de_diag(acc[type_], each_angle=True)
        msg_mgr.log_info(f'{type_}: {sub_acc}')
        result_dict[f'scalar/test_accuracy/{type_}'] = np.mean(sub_acc)
        out_str += f"{type_}: {np.mean(sub_acc):.2f}%\t"
    msg_mgr.log_info(out_str)
    return result_dict
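
# Unlike the cross-view variant above, every (probe view, gallery view) pair
# is scored separately, producing a view_num x view_num matrix per condition;
# de_diag then drops the identical-view diagonal before averaging.

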
def evaluate_indoor_dataset(data, dataset, metric='euc', cross_view_gallery=False):
    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
    label = np.array(label)
    view = np.array(view)

    probe_seq_dict = {'CASIA-B': {'NM': ['nm-05', 'nm-06'], 'BG': ['bg-01', 'bg-02'], 'CL': ['cl-01', 'cl-02']},
                      'OUMVLP': {'NM': ['00']}}
    gallery_seq_dict = {'CASIA-B': ['nm-01', 'nm-02', 'nm-03', 'nm-04'],
                        'OUMVLP': ['01']}
    # `dataset not in (a or b)` only ever checked the first dict; test both.
    if dataset not in probe_seq_dict or dataset not in gallery_seq_dict:
        raise KeyError("Dataset %s is not supported!" % dataset)
    if cross_view_gallery:
        return cross_view_gallery_evaluation(
            feature, label, seq_type, view, probe_seq_dict[dataset], gallery_seq_dict[dataset], metric)
    else:
        return single_view_gallery_evaluation(
            feature, label, seq_type, view, probe_seq_dict[dataset], gallery_seq_dict[dataset], metric)
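
# A hypothetical call, assuming `data` holds the stacked test-set outputs
# keyed as unpacked above (names illustrative only):
#
#     data = {'embeddings': embs, 'labels': ids, 'types': conds, 'views': views}
#     evaluate_indoor_dataset(data, 'CASIA-B', metric='euc',
#                             cross_view_gallery=True)

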
def evaluate_real_scene(data, dataset, metric='euc'):
    msg_mgr = get_msg_mgr()
    feature, label, seq_type = data['embeddings'], data['labels'], data['types']
    label = np.array(label)

    gallery_seq_type = {'0001-1000': ['1', '2'],
                        "HID2021": ['0'], '0001-1000-test': ['0'],
                        'GREW': ['01'], 'TTG-200': ['1']}
    probe_seq_type = {'0001-1000': ['3', '4', '5', '6'],
                      "HID2021": ['1'], '0001-1000-test': ['1'],
                      'GREW': ['02'], 'TTG-200': ['2', '3', '4', '5', '6']}
    num_rank = 20

    gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
    gallery_x = feature[gseq_mask, :]
    gallery_y = label[gseq_mask]
    pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
    probe_x = feature[pseq_mask, :]
    probe_y = label[pseq_mask]

    dist = cuda_dist(probe_x, gallery_x, metric)
    idx = dist.cpu().sort(1)[1].numpy()
    # Cumulative top-k accuracy for k = 1..num_rank (shape: (num_rank,)).
    acc = np.round(np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
                          0) * 100 / dist.shape[0], 2)
    msg_mgr.log_info('==Rank-1==')
    msg_mgr.log_info('%.3f' % acc[0])
    msg_mgr.log_info('==Rank-5==')
    msg_mgr.log_info('%.3f' % acc[4])
    msg_mgr.log_info('==Rank-10==')
    msg_mgr.log_info('%.3f' % acc[9])
    msg_mgr.log_info('==Rank-20==')
    msg_mgr.log_info('%.3f' % acc[19])
    return {"scalar/test_accuracy/Rank-1": acc[0], "scalar/test_accuracy/Rank-5": acc[4]}
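
# A toy illustration (assumed data, not from this commit) of the cumulative
# match trick above: if probe_y[i] == 7 and the 20 nearest gallery labels are
# [3, 7, 7, 1, ...], the boolean row cumsums to [0, 1, 2, 2, ...] and
# (cumsum > 0) marks every rank from the first hit onward, so summing over
# probes yields top-k hit counts for all k = 1..20 in one pass.

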
def GREW_submission(data, dataset, metric='euc'):
    get_msg_mgr().log_info("Evaluating GREW")
    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
    label = np.array(label)
    view = np.array(view)

    gallery_seq_type = {'GREW': ['01', '02']}
    probe_seq_type = {'GREW': ['03']}
    gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
    gallery_x = feature[gseq_mask, :]
    gallery_y = label[gseq_mask]
    pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
    probe_x = feature[pseq_mask, :]
    # For the GREW test split the probe's video id is carried in `view`.
    probe_y = view[pseq_mask]

    dist = cuda_dist(probe_x, gallery_x, metric)
    idx = dist.cpu().sort(1)[1].numpy()
    mkdir("GREW_result")
    save_path = os.path.join(
        "GREW_result", strftime('%Y-%m%d-%H%M%S', localtime()) + ".csv")
    with open(save_path, "w") as f:
        f.write("videoId," + ",".join(f"rank{k}" for k in range(1, 21)) + "\n")
        output_row = '{}' + ',{}' * 20 + '\n'
        for i in range(len(idx)):
            # Use a distinct loop variable so `idx` is not shadowed.
            r_format = [int(gid) for gid in gallery_y[idx[i, 0:20]]]
            f.write(output_row.format(probe_y[i], *r_format))
    print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
    return
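
# The submission CSV then looks like this (made-up ids):
#
#     videoId,rank1,rank2,...,rank20
#     00334,02101,00078,...,01990

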
def HID_submission(data, dataset, metric='euc'):
    msg_mgr = get_msg_mgr()
    msg_mgr.log_info("Evaluating HID")
    feature, label, seq_type = data['embeddings'], data['labels'], data['types']
    label = np.array(label)
    seq_type = np.array(seq_type)

    probe_mask = (label == "probe")
    gallery_mask = (label != "probe")
    gallery_x = feature[gallery_mask, :]
    gallery_y = label[gallery_mask]
    probe_x = feature[probe_mask, :]
    # For HID the probe's video id is stored in `seq_type`.
    probe_y = seq_type[probe_mask]

    # Re-ranking needs the full pairwise distance matrix, probes first.
    feat = np.concatenate([probe_x, gallery_x])
    dist = cuda_dist(feat, feat, metric).cpu().numpy()
    msg_mgr.log_info("Starting Re-ranking")
    re_rank = re_ranking(dist, probe_x.shape[0], k1=6, k2=6, lambda_value=0.3)
    idx = np.argsort(re_rank, axis=1)

    mkdir("HID_result")
    save_path = os.path.join(
        "HID_result", strftime('%Y-%m%d-%H%M%S', localtime()) + ".csv")
    with open(save_path, "w") as f:
        f.write("videoID,label\n")
        for i in range(len(idx)):
            f.write("{},{}\n".format(probe_y[i], gallery_y[idx[i, 0]]))
    print("HID result saved to {}/{}".format(os.getcwd(), save_path))
    return
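
# The re_ranking call appears to follow the k-reciprocal re-ranking of
# Zhong et al. (CVPR 2017): k1/k2 size the reciprocal-neighbour sets and
# lambda_value blends the original distance with the Jaccard distance.

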
def evaluate_segmentation(data, dataset):
    labels = data['mask']
    pred = data['pred']
    miou = mean_iou(pred, labels)
    get_msg_mgr().log_info('mIOU: %.3f' % (miou.mean()))
    # Return the scalar mean so the "scalar/" summary key holds a single value.
    return {"scalar/test_accuracy/mIOU": miou.mean()}
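
# mIoU sketch, assuming the usual definition behind mean_iou: per class c,
# IoU_c = |pred_c ∩ gt_c| / |pred_c ∪ gt_c|, averaged over classes, with
# miou.mean() reducing any remaining batch dimension to a scalar.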