Noah
2022-03-21 18:52:22 +08:00
5 changed files with 73 additions and 70 deletions
BIN
Binary file not shown. Before: 206 KiB

+6
@@ -8,9 +8,15 @@ class TripletSampler(tordata.sampler.Sampler):
     def __init__(self, dataset, batch_size, batch_shuffle=False):
         self.dataset = dataset
         self.batch_size = batch_size
+        if len(self.batch_size) != 2:
+            raise ValueError(
+                "batch_size should be (P x K) not {}".format(batch_size))
         self.batch_shuffle = batch_shuffle
         self.world_size = dist.get_world_size()
+        if (self.batch_size[0]*self.batch_size[1]) % self.world_size != 0:
+            raise ValueError("World size ({}) is not divisible by batch_size ({} x {})".format(
+                self.world_size, batch_size[0], batch_size[1]))
         self.rank = dist.get_rank()
 
     def __iter__(self):
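For context, `batch_size` here is the (P x K) pair used for triplet sampling: P identities per batch and K sequences per identity, so each batch holds P*K sequences that must split evenly across the distributed ranks. Below is a minimal sketch of the same validation in isolation; the helper name and example numbers are illustrative, not part of the commit:

```python
def check_triplet_batch_size(batch_size, world_size):
    # batch_size is a (P, K) pair, e.g. (8, 16): P identities per batch,
    # K sequences sampled per identity.
    if len(batch_size) != 2:
        raise ValueError(
            "batch_size should be (P x K) not {}".format(batch_size))
    # Each of the world_size ranks must get an equal share of the P*K sequences.
    if (batch_size[0] * batch_size[1]) % world_size != 0:
        raise ValueError("World size ({}) is not divisible by batch_size ({} x {})".format(
            world_size, batch_size[0], batch_size[1]))

check_triplet_batch_size((8, 16), world_size=4)   # fine: 8*16 = 128 sequences, 32 per rank
check_triplet_batch_size((7, 3), world_size=4)    # raises: 21 sequences cannot be split over 4 ranks
```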
+1 -1
@@ -58,7 +58,7 @@ if __name__ == '__main__':
     torch.distributed.init_process_group('nccl', init_method='env://')
     if torch.distributed.get_world_size() != torch.cuda.device_count():
         raise ValueError("Expect number of availuable GPUs({}) equals to the world size({}).".format(
-            torch.distributed.get_world_size(), torch.cuda.device_count()))
+            torch.cuda.device_count(), torch.distributed.get_world_size()))
     cfgs = config_loader(opt.cfgs)
     if opt.iter != 0:
         cfgs['evaluator_cfg']['restore_hint'] = int(opt.iter)
+64 -67
@@ -40,7 +40,6 @@ def de_diag(acc, each_angle=False):
 def identification(data, dataset, metric='euc'):
     msg_mgr = get_msg_mgr()
     feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
-
     label = np.array(label)
     view_list = list(set(view))
@@ -198,8 +197,12 @@ def evaluate_HID(data, dataset, metric='euc'):
     gallery_y = label[gallery_mask]
     probe_x = feature[probe_mask, :]
     probe_y = seq_type[probe_mask]
-    dist = cuda_dist(probe_x, gallery_x, metric)
-    idx = dist.cpu().sort(1)[1].numpy()
+    feat = np.concatenate([probe_x, gallery_x])
+    dist = cuda_dist(feat, feat, metric).cpu().numpy()
+    re_rank = re_ranking(dist, probe_x.shape[0], k1=6, k2=6, lambda_value=0.3)
+    idx = np.argsort(re_rank, axis=1)
     import os
     from time import strftime, localtime
     save_path = os.path.join(
@@ -213,71 +216,65 @@ def evaluate_HID(data, dataset, metric='euc'):
     return
 
-def evaluate_GREW(data, dataset, metric='euc'):
-    msg_mgr = get_msg_mgr()
-    msg_mgr.log_info("Evaluating GREW")
-    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
-    label = np.array(label)
-    if dataset not in (probe_seq_dict or gallery_seq_dict):
-        raise KeyError("DataSet %s hasn't been supported !" % dataset)
-    num_rank = 5
-    acc = np.zeros([len(probe_seq_dict[dataset]),
-                    view_num, view_num, num_rank]) - 1.
-    for (p, probe_seq) in enumerate(probe_seq_dict[dataset]):
-        for gallery_seq in gallery_seq_dict[dataset]:
-            for (v1, probe_view) in enumerate(view_list):
-                for (v2, gallery_view) in enumerate(view_list):
-                    gseq_mask = np.isin(seq_type, gallery_seq) & np.isin(
-                        view, [gallery_view])
-                    gallery_x = feature[gseq_mask, :]
-                    gallery_y = label[gseq_mask]
-                    pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
-                        view, [probe_view])
-                    probe_x = feature[pseq_mask, :]
-                    probe_y = label[pseq_mask]
-                    dist = cuda_dist(probe_x, gallery_x, metric)
-                    idx = dist.sort(1)[1].cpu().numpy()
-                    acc[p, v1, v2, :] = np.round(
-                        np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
-                               0) * 100 / dist.shape[0], 2)
-    result_dict = {}
-    np.set_printoptions(precision=3, suppress=True)
-    if 'OUMVLP' not in dataset:
-        for i in range(1):
-            msg_mgr.log_info(
-                '===Rank-%d (Include identical-view cases)===' % (i + 1))
-            msg_mgr.log_info('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
-                np.mean(acc[0, :, :, i]),
-                np.mean(acc[1, :, :, i]),
-                np.mean(acc[2, :, :, i])))
-        for i in range(1):
-            msg_mgr.log_info(
-                '===Rank-%d (Exclude identical-view cases)===' % (i + 1))
-            msg_mgr.log_info('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
-                de_diag(acc[0, :, :, i]),
-                de_diag(acc[1, :, :, i]),
-                de_diag(acc[2, :, :, i])))
-            result_dict["scalar/test_accuracy/NM"] = de_diag(acc[0, :, :, i])
-            result_dict["scalar/test_accuracy/BG"] = de_diag(acc[1, :, :, i])
-            result_dict["scalar/test_accuracy/CL"] = de_diag(acc[2, :, :, i])
-        np.set_printoptions(precision=2, floatmode='fixed')
-        for i in range(1):
-            msg_mgr.log_info(
-                '===Rank-%d of each angle (Exclude identical-view cases)===' % (i + 1))
-            msg_mgr.log_info('NM: {}'.format(de_diag(acc[0, :, :, i], True)))
-            msg_mgr.log_info('BG: {}'.format(de_diag(acc[1, :, :, i], True)))
-            msg_mgr.log_info('CL: {}'.format(de_diag(acc[2, :, :, i], True)))
-    else:
-        msg_mgr.log_info('===Rank-1 (Include identical-view cases)===')
-        msg_mgr.log_info('NM: %.3f ' % (np.mean(acc[0, :, :, 0])))
-        msg_mgr.log_info('===Rank-1 (Exclude identical-view cases)===')
-        msg_mgr.log_info('NM: %.3f ' % (de_diag(acc[0, :, :, 0])))
-        msg_mgr.log_info(
-            '===Rank-1 of each angle (Exclude identical-view cases)===')
-        msg_mgr.log_info('NM: {}'.format(de_diag(acc[0, :, :, 0], True)))
-        result_dict["scalar/test_accuracy/NM"] = de_diag(acc[0, :, :, 0])
-    return result_dict
+def re_ranking(original_dist, query_num, k1, k2, lambda_value):
+    # Modified from https://github.com/michuanhaohao/reid-strong-baseline/blob/master/utils/re_ranking.py
+    all_num = original_dist.shape[0]
+    original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
+    V = np.zeros_like(original_dist).astype(np.float16)
+    initial_rank = np.argsort(original_dist).astype(np.int32)
+    print('starting re_ranking')
+    for i in range(all_num):
+        # k-reciprocal neighbors
+        forward_k_neigh_index = initial_rank[i, :k1 + 1]
+        backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
+        fi = np.where(backward_k_neigh_index == i)[0]
+        k_reciprocal_index = forward_k_neigh_index[fi]
+        k_reciprocal_expansion_index = k_reciprocal_index
+        for j in range(len(k_reciprocal_index)):
+            candidate = k_reciprocal_index[j]
+            candidate_forward_k_neigh_index = initial_rank[candidate, :int(
+                np.around(k1 / 2)) + 1]
+            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,
+                                                            :int(np.around(k1 / 2)) + 1]
+            fi_candidate = np.where(
+                candidate_backward_k_neigh_index == candidate)[0]
+            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
+            if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 / 3 * len(
+                    candidate_k_reciprocal_index):
+                k_reciprocal_expansion_index = np.append(
+                    k_reciprocal_expansion_index, candidate_k_reciprocal_index)
+        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
+        weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
+        V[i, k_reciprocal_expansion_index] = weight / np.sum(weight)
+    original_dist = original_dist[:query_num, ]
+    if k2 != 1:
+        V_qe = np.zeros_like(V, dtype=np.float16)
+        for i in range(all_num):
+            V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
+        V = V_qe
+        del V_qe
+    del initial_rank
+    invIndex = []
+    for i in range(all_num):
+        invIndex.append(np.where(V[:, i] != 0)[0])
+    jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
+    for i in range(query_num):
+        temp_min = np.zeros(shape=[1, all_num], dtype=np.float16)
+        indNonZero = np.where(V[i, :] != 0)[0]
+        indImages = [invIndex[ind] for ind in indNonZero]
+        for j in range(len(indNonZero)):
+            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],
+                                                                               V[indImages[j], indNonZero[j]])
+        jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
+    final_dist = jaccard_dist * (1 - lambda_value) + \
+        original_dist * lambda_value
+    del original_dist
+    del V
+    del jaccard_dist
+    final_dist = final_dist[:query_num, query_num:]
+    return final_dist
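To make the new evaluation path concrete, the snippet below mirrors the call added in `evaluate_HID`: probe and gallery features are stacked, an all-pairs distance matrix is computed, and `re_ranking` returns a (num_probe x num_gallery) matrix whose rows are then argsorted. This is only a minimal sketch with random features; `euclidean_dist` is an illustrative CPU stand-in for `cuda_dist(..., metric='euc')`, and it assumes the `re_ranking` function above is in scope:

```python
import numpy as np

def euclidean_dist(x, y):
    # Illustrative CPU stand-in for cuda_dist(x, y, metric='euc').
    return np.sqrt(((x[:, None, :] - y[None, :, :]) ** 2).sum(-1))

rng = np.random.default_rng(0)
probe_x = rng.standard_normal((5, 256))     # 5 probe embeddings
gallery_x = rng.standard_normal((20, 256))  # 20 gallery embeddings

feat = np.concatenate([probe_x, gallery_x])       # (25, 256)
dist = euclidean_dist(feat, feat)                 # (25, 25) all-pairs distances
re_rank = re_ranking(dist, probe_x.shape[0], k1=6, k2=6, lambda_value=0.3)
idx = np.argsort(re_rank, axis=1)                 # per-probe gallery ranking, best match first
print(re_rank.shape)                              # (5, 20): probe x gallery
```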
+1 -1
@@ -1,6 +1,6 @@
 # HID Tutorial
 ![](http://hid2022.iapr-tc4.org/wp-content/uploads/sites/7/2022/03/%E5%9B%BE%E7%89%871-2.png)
-This is the official support for competition of [Human Identification at a Distance (HID)](http://hid2022.iapr-tc4.org/). We report our result is 68.7% using the baseline model. In order for participants to better start the first step, we provide a tutorial on how to use OpenGait for HID.
+This is the official support for the [Human Identification at a Distance (HID)](http://hid2022.iapr-tc4.org/) competition. We report a result of 68.7% using the baseline model and 80.0% with re-ranking. To help participants get started, we provide a tutorial on how to use OpenGait for HID.
 ## Preprocess the dataset
 Download the raw dataset from the [official link](http://hid2022.iapr-tc4.org/). You will get three compressed files, i.e. `train.tar`, `HID2022_test_gallery.zip` and `HID2022_test_probe.zip`.
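For reference, the three archives can be unpacked with Python's standard library. This is only a hedged sketch; the `HID2022/` output directory is an arbitrary name chosen here, not a path required by the tutorial:

```python
import tarfile
import zipfile

# Unpack the training set and the two test archives into a local folder.
with tarfile.open("train.tar") as tar:
    tar.extractall("HID2022")

for name in ("HID2022_test_gallery.zip", "HID2022_test_probe.zip"):
    with zipfile.ZipFile(name) as zf:
        zf.extractall("HID2022")
```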