Support new Dataset: GREW

This commit is contained in:
Noah
2022-03-21 18:49:09 +08:00
parent 9c1aed2c87
commit 0071833d3f
5 changed files with 322 additions and 11 deletions
+102
View File
@@ -0,0 +1,102 @@
data_cfg:
dataset_name: GREW
dataset_root: your_path
dataset_partition: ./misc/partitions/GREW.json
num_workers: 16
remove_no_gallery: false # Remove probe if no gallery for it
test_dataset_name: GREW
evaluator_cfg:
enable_float16: true
restore_ckpt_strict: true
restore_hint: 60000
save_name: Baseline
eval_func: identification_GREW_submission # identification_real_scene
sampler:
batch_shuffle: false
batch_size: 64
sample_type: all_ordered # all indicates whole sequence used to test, while ordered means input sequence by its natural order; Other options: fixed_unordered
frames_all_limit: 720 # limit the number of sampled frames to prevent out of memory
metric: euc # cos
transform:
- type: BaseSilCuttingTransform
img_w: 64
loss_cfg:
- loss_term_weight: 1.0
margin: 0.2
type: TripletLoss
log_prefix: triplet
- loss_term_weight: 0.1
scale: 16
type: CrossEntropyLoss
log_prefix: softmax
log_accuracy: true
model_cfg:
model: Baseline
backbone_cfg:
in_channels: 1
layers_cfg: # Layers configuration for automatically model construction
- BC-64
- BC-64
- M
- BC-128
- BC-128
- M
- BC-256
- BC-256
# - M
# - BC-512
# - BC-512
type: Plain
SeparateFCs:
in_channels: 256
out_channels: 256
parts_num: 16
SeparateBNNecks:
class_num: 20000
in_channels: 256
parts_num: 16
bin_num:
- 16
# - 8
# - 4
# - 2
# - 1
optimizer_cfg:
lr: 0.1
momentum: 0.9
solver: SGD
weight_decay: 0.0005
scheduler_cfg:
gamma: 0.1
milestones: # Learning Rate Reduction at each milestones
- 80000
- 100000
scheduler: MultiStepLR
trainer_cfg:
enable_float16: true # half-precision floats for memory reduction and speedup
fix_BN: false
log_iter: 100
restore_ckpt_strict: true
restore_hint: 0
save_iter: 5000
save_name: Baseline
sync_BN: true
total_iter: 120000
sampler:
batch_shuffle: true
batch_size:
- 16 # TripletSampler, batch_size[0] indicates Number of Identity
- 4 # batch_size[1] indicates Samples sequence for each Identity
frames_num_fixed: 30 # fixed frames number for training
frames_num_max: 50 # max frames number for unfixed training
frames_num_min: 25 # min frames number for unfixed training
sample_type: fixed_unordered # fixed control input frames number, unordered for controlling order of input tensor; Other options: unfixed_ordered or all_ordered
type: TripletSampler
transform:
- type: BaseSilCuttingTransform
img_w: 64
+115 -5
View File
@@ -122,11 +122,13 @@ def identification_real_scene(data, dataset, metric='euc'):
label = np.array(label)
gallery_seq_type = {'0001-1000': ['1', '2'],
"HID2021": ['0'], '0001-1000-test': ['0']}
"HID2021": ['0'], '0001-1000-test': ['0'],
'GREW': ['01']}
probe_seq_type = {'0001-1000': ['3', '4', '5', '6'],
"HID2021": ['1'], '0001-1000-test': ['1']}
"HID2021": ['1'], '0001-1000-test': ['1'],
'GREW': ['02']}
num_rank = 5
num_rank = 20
acc = np.zeros([num_rank]) - 1.
gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
gallery_x = feature[gseq_mask, :]
@@ -143,8 +145,46 @@ def identification_real_scene(data, dataset, metric='euc'):
msg_mgr.log_info('%.3f' % (np.mean(acc[0])))
msg_mgr.log_info('==Rank-5==')
msg_mgr.log_info('%.3f' % (np.mean(acc[4])))
msg_mgr.log_info('==Rank-10==')
msg_mgr.log_info('%.3f' % (np.mean(acc[9])))
msg_mgr.log_info('==Rank-20==')
msg_mgr.log_info('%.3f' % (np.mean(acc[19])))
return {"scalar/test_accuracy/Rank-1": np.mean(acc[0]), "scalar/test_accuracy/Rank-5": np.mean(acc[4])}
def identification_GREW_submission(data, dataset, metric='euc'):
    """Write a GREW-challenge submission CSV of the top-20 gallery ids per probe.

    Gallery sequences are types '01'/'02'; probe sequences are type '03'.
    For each probe, the 20 nearest gallery identities under `metric` are
    written as one CSV row keyed by the probe's view field (its video id).

    Args:
        data: dict with 'embeddings', 'labels', 'types' and 'views' arrays.
        dataset: dataset name; must be 'GREW' (keys the seq-type tables).
        metric: distance metric forwarded to cuda_dist ('euc' or 'cos').

    Returns:
        None. The CSV is written under ./GREW_result/.
    """
    import os
    from time import strftime, localtime

    msg_mgr = get_msg_mgr()
    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
    label = np.array(label)
    view = np.array(view)
    gallery_seq_type = {'GREW': ['01', '02']}
    probe_seq_type = {'GREW': ['03']}

    gseq_mask = np.isin(seq_type, gallery_seq_type[dataset])
    gallery_x = feature[gseq_mask, :]
    gallery_y = label[gseq_mask]
    pseq_mask = np.isin(seq_type, probe_seq_type[dataset])
    probe_x = feature[pseq_mask, :]
    # For the GREW test split the probe's "view" field carries the video id
    # that the submission format uses as the row key.
    probe_y = view[pseq_mask]

    dist = cuda_dist(probe_x, gallery_x, metric)
    # idx[i, k] is the gallery index of the k-th nearest match for probe i.
    idx = dist.cpu().sort(1)[1].numpy()

    os.makedirs("GREW_result", exist_ok=True)
    save_path = os.path.join(
        "GREW_result/"+strftime('%Y-%m%d-%H%M%S', localtime())+".csv")
    with open(save_path, "w") as f:
        f.write("videoId,rank1,rank2,rank3,rank4,rank5,rank6,rank7,rank8,rank9,rank10,rank11,rank12,rank13,rank14,rank15,rank16,rank17,rank18,rank19,rank20\n")
        for i in range(len(idx)):
            # Fix: the original comprehension variable shadowed the outer
            # `idx` ranking matrix; use a distinct name for the gallery ids.
            r_format = [int(gid) for gid in gallery_y[idx[i, 0:20]]]
            output_row = '{}'+',{}'*20+'\n'
            f.write(output_row.format(probe_y[i], *r_format))
    print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
    return
def evaluate_HID(data, dataset, metric='euc'):
msg_mgr = get_msg_mgr()
@@ -168,6 +208,76 @@ def evaluate_HID(data, dataset, metric='euc'):
with open(save_path, "w") as f:
f.write("videoID,label\n")
for i in range(len(idx)):
f.write("{},{}\n".format(probe_y[i], gallery_y[idx[i, 0]]))
print("HID result saved to {}/{}".format(os.getcwd(), save_path))
f.write("{},{},\n".format(probe_y[i], gallery_y[idx[i, 0]]))
print("GREW result saved to {}/{}".format(os.getcwd(), save_path))
return
def evaluate_GREW(data, dataset, metric='euc'):
    """Cross-view identification evaluation (CASIA-B/OUMVLP-style rank table).

    NOTE(review): this function reads `probe_seq_dict`, `gallery_seq_dict`,
    `view_list` and `view_num`, which are not defined inside the function —
    they are presumably module-level tables; confirm they exist and contain a
    'GREW' entry before calling, otherwise this raises NameError/KeyError.

    Args:
        data: dict with 'embeddings', 'labels', 'types' and 'views' arrays.
        dataset: dataset key into the probe/gallery sequence tables.
        metric: distance metric forwarded to cuda_dist ('euc' or 'cos').

    Returns:
        dict of scalar accuracies for logging dashboards.
    """
    msg_mgr = get_msg_mgr()
    msg_mgr.log_info("Evaluating GREW")
    feature, label, seq_type, view = data['embeddings'], data['labels'], data['types'], data['views']
    label = np.array(label)
    # Fix: `(probe_seq_dict or gallery_seq_dict)` evaluated to the first
    # truthy dict only, so membership in the second dict was never checked.
    if dataset not in probe_seq_dict or dataset not in gallery_seq_dict:
        raise KeyError("DataSet %s hasn't been supported !" % dataset)
    num_rank = 5
    # acc[p, v1, v2, k]: rank-(k+1) accuracy for probe condition p,
    # probe view v1, gallery view v2; -1 marks cells never filled.
    acc = np.zeros([len(probe_seq_dict[dataset]),
                    view_num, view_num, num_rank]) - 1.
    for (p, probe_seq) in enumerate(probe_seq_dict[dataset]):
        for gallery_seq in gallery_seq_dict[dataset]:
            for (v1, probe_view) in enumerate(view_list):
                for (v2, gallery_view) in enumerate(view_list):
                    gseq_mask = np.isin(seq_type, gallery_seq) & np.isin(
                        view, [gallery_view])
                    gallery_x = feature[gseq_mask, :]
                    gallery_y = label[gseq_mask]
                    pseq_mask = np.isin(seq_type, probe_seq) & np.isin(
                        view, [probe_view])
                    probe_x = feature[pseq_mask, :]
                    probe_y = label[pseq_mask]
                    dist = cuda_dist(probe_x, gallery_x, metric)
                    idx = dist.sort(1)[1].cpu().numpy()
                    # Cumulative match: a probe counts as correct at rank k if
                    # any of its top-k gallery labels equals its own label.
                    acc[p, v1, v2, :] = np.round(
                        np.sum(np.cumsum(np.reshape(probe_y, [-1, 1]) == gallery_y[idx[:, 0:num_rank]], 1) > 0,
                               0) * 100 / dist.shape[0], 2)
    result_dict = {}
    np.set_printoptions(precision=3, suppress=True)
    if 'OUMVLP' not in dataset:
        # Branch assumes three probe conditions (NM/BG/CL) on acc's first axis
        # — presumably CASIA-B-style data; verify for GREW before relying on it.
        for i in range(1):
            msg_mgr.log_info(
                '===Rank-%d (Include identical-view cases)===' % (i + 1))
            msg_mgr.log_info('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
                np.mean(acc[0, :, :, i]),
                np.mean(acc[1, :, :, i]),
                np.mean(acc[2, :, :, i])))
        for i in range(1):
            msg_mgr.log_info(
                '===Rank-%d (Exclude identical-view cases)===' % (i + 1))
            msg_mgr.log_info('NM: %.3f,\tBG: %.3f,\tCL: %.3f' % (
                de_diag(acc[0, :, :, i]),
                de_diag(acc[1, :, :, i]),
                de_diag(acc[2, :, :, i])))
            result_dict["scalar/test_accuracy/NM"] = de_diag(acc[0, :, :, i])
            result_dict["scalar/test_accuracy/BG"] = de_diag(acc[1, :, :, i])
            result_dict["scalar/test_accuracy/CL"] = de_diag(acc[2, :, :, i])
        np.set_printoptions(precision=2, floatmode='fixed')
        for i in range(1):
            msg_mgr.log_info(
                '===Rank-%d of each angle (Exclude identical-view cases)===' % (i + 1))
            msg_mgr.log_info('NM: {}'.format(de_diag(acc[0, :, :, i], True)))
            msg_mgr.log_info('BG: {}'.format(de_diag(acc[1, :, :, i], True)))
            msg_mgr.log_info('CL: {}'.format(de_diag(acc[2, :, :, i], True)))
    else:
        msg_mgr.log_info('===Rank-1 (Include identical-view cases)===')
        msg_mgr.log_info('NM: %.3f ' % (np.mean(acc[0, :, :, 0])))
        msg_mgr.log_info('===Rank-1 (Exclude identical-view cases)===')
        msg_mgr.log_info('NM: %.3f ' % (de_diag(acc[0, :, :, 0])))
        msg_mgr.log_info(
            '===Rank-1 of each angle (Exclude identical-view cases)===')
        msg_mgr.log_info('NM: {}'.format(de_diag(acc[0, :, :, 0], True)))
        result_dict["scalar/test_accuracy/NM"] = de_diag(acc[0, :, :, 0])
    return result_dict
File diff suppressed because one or more lines are too long
+15 -6
View File
@@ -14,7 +14,7 @@ import numpy as np
from tqdm import tqdm
def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbose: bool = False) -> None:
def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbose: bool = False, dataset='CASIAB') -> None:
"""Reads a group of images and saves the data in pickle format.
Args:
@@ -31,6 +31,10 @@ def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbos
logging.debug(f'Reading sid {sinfo[0]}, seq {sinfo[1]}, view {sinfo[2]} from {img_file}')
img = cv2.imread(str(img_file), cv2.IMREAD_GRAYSCALE)
if dataset == 'GREW':
to_pickle.append(img.astype('uint8'))
continue
if img.sum() <= 10000:
if verbose:
@@ -76,6 +80,8 @@ def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbos
if to_pickle:
to_pickle = np.asarray(to_pickle)
dst_path = os.path.join(output_path, *sinfo)
# print(img_paths[0].as_posix().split('/'),img_paths[0].as_posix().split('/')[-5])
# dst_path = os.path.join(output_path, img_paths[0].as_posix().split('/')[-5], *sinfo) if dataset == 'GREW' else dst
os.makedirs(dst_path, exist_ok=True)
pkl_path = os.path.join(dst_path, f'{sinfo[2]}.pkl')
if verbose:
@@ -89,7 +95,7 @@ def imgs2pickle(img_groups: Tuple, output_path: Path, img_size: int = 64, verbos
def pretreat(input_path: Path, output_path: Path, img_size: int = 64, workers: int = 4, verbose: bool = False) -> None:
def pretreat(input_path: Path, output_path: Path, img_size: int = 64, workers: int = 4, verbose: bool = False, dataset: str = 'CASIAB') -> None:
"""Reads a dataset and saves the data in pickle format.
Args:
@@ -103,6 +109,8 @@ def pretreat(input_path: Path, output_path: Path, img_size: int = 64, workers: i
logging.info(f'Listing {input_path}')
total_files = 0
for img_path in input_path.rglob('*.png'):
if 'gei.png' in img_path.as_posix():
continue
if verbose:
logging.debug(f'Adding {img_path}')
*_, sid, seq, view, _ = img_path.as_posix().split('/')
@@ -115,18 +123,19 @@ def pretreat(input_path: Path, output_path: Path, img_size: int = 64, workers: i
with mp.Pool(workers) as pool:
logging.info(f'Start pretreating {input_path}')
for _ in pool.imap_unordered(partial(imgs2pickle, output_path=output_path, img_size=img_size, verbose=verbose), img_groups.items()):
for _ in pool.imap_unordered(partial(imgs2pickle, output_path=output_path, img_size=img_size, verbose=verbose, dataset=dataset), img_groups.items()):
progress.update(1)
logging.info('Done')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenGait dataset pretreatment module.')
parser.add_argument('-r', '--input_path', default='', type=str, help='Root path of raw dataset.')
parser.add_argument('-i', '--input_path', default='', type=str, help='Root path of raw dataset.')
parser.add_argument('-o', '--output_path', default='', type=str, help='Output path of pickled dataset.')
parser.add_argument('-l', '--log_file', default='./pretreatment.log', type=str, help='Log file path. Default: ./pretreatment.log')
parser.add_argument('-n', '--n_workers', default=4, type=int, help='Number of thread workers. Default: 4')
parser.add_argument('-i', '--img_size', default=64, type=int, help='Image resizing size. Default 64')
parser.add_argument('-r', '--img_size', default=64, type=int, help='Image resizing size. Default 64')
parser.add_argument('-d', '--dataset', default='CASIAB', type=str, help='Dataset for pretreatment.')
parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Display debug info.')
args = parser.parse_args()
@@ -138,4 +147,4 @@ if __name__ == '__main__':
for k, v in args.__dict__.items():
logging.debug(f'{k}: {v}')
pretreat(input_path=Path(args.input_path), output_path=Path(args.output_path), img_size=args.img_size, workers=args.n_workers, verbose=args.verbose)
pretreat(input_path=Path(args.input_path), output_path=Path(args.output_path), img_size=args.img_size, workers=args.n_workers, verbose=args.verbose, dataset=args.dataset)
+89
View File
@@ -0,0 +1,89 @@
import argparse
import os
import shutil
from pathlib import Path
from tqdm import tqdm
TOTAL_Test = 24000
TOTAL_Train = 20000
def rearrange_train(train_path: Path, output_path: Path) -> None:
    """Symlink GREW train silhouettes into OpenGait's sid/type/seq layout.

    Input layout:  train/<sid>/<sub_seq>/*.png
    Output layout: <output>/<sid>train/00/<sub_seq>/*.png
    (the 'train' suffix keeps train ids distinct from test ids; '00' is a
    placeholder sequence type).

    Args:
        train_path: GREW 'train' split directory.
        output_path: root of the rearranged dataset.
    """
    progress = tqdm(total=TOTAL_Train)
    for sid in train_path.iterdir():
        if not sid.is_dir():
            continue
        for sub_seq in sid.iterdir():
            if not sub_seq.is_dir():
                continue
            # src/dst are invariant per sequence — build them once instead of
            # per frame; likewise avoid re-listing dst for every file
            # (the original `subfile not in os.listdir(dst)` was O(n^2)).
            src = os.path.join(train_path, sid.name, sub_seq.name)
            dst = os.path.join(output_path, sid.name+'train', '00', sub_seq.name)
            os.makedirs(dst, exist_ok=True)
            for subfile in os.listdir(sub_seq):
                link_path = os.path.join(dst, subfile)
                # lexists (not exists) also catches dangling symlinks left by
                # an earlier interrupted run.
                if subfile.endswith('.png') and not os.path.lexists(link_path):
                    os.symlink(os.path.join(src, subfile), link_path)
        # One tick per identity — TOTAL_Train presumably counts train ids;
        # confirm against the dataset release.
        progress.update(1)
def rearrange_test(test_path: Path, output_path: Path) -> None:
    """Symlink GREW test gallery/probe silhouettes into OpenGait's layout.

    Gallery: test/gallery/<sid>/<sub_seq>/*.png -> <output>/<sid>/<NN>/<sub_seq>/
    where NN counts the id's sequences starting at '01' (matching the
    gallery seq types '01'/'02' used at evaluation time).
    Probe:   test/probe/<sub_seq>/*.png -> <output>/probe/03/<sub_seq>/

    Args:
        test_path: GREW 'test' split directory (contains gallery/ and probe/).
        output_path: root of the rearranged dataset.
    """
    gallery = Path(os.path.join(test_path, 'gallery'))
    probe = Path(os.path.join(test_path, 'probe'))
    progress = tqdm(total=TOTAL_Test)
    # for gallery
    for sid in gallery.iterdir():
        if not sid.is_dir():
            continue
        cnt = 1
        for sub_seq in sid.iterdir():
            if not sub_seq.is_dir():
                continue
            # Hoist per-sequence paths out of the per-frame loop and replace
            # the O(n^2) `subfile not in os.listdir(dst)` scan with a direct
            # existence check (lexists also catches dangling symlinks).
            src = os.path.join(gallery, sid.name, sub_seq.name)
            dst = os.path.join(output_path, sid.name, '%02d' % cnt, sub_seq.name)
            os.makedirs(dst, exist_ok=True)
            for subfile in sorted(os.listdir(sub_seq)):
                link_path = os.path.join(dst, subfile)
                if subfile.endswith('.png') and not os.path.lexists(link_path):
                    os.symlink(os.path.join(src, subfile), link_path)
            cnt += 1
            progress.update(1)
    # for probe
    for sub_seq in probe.iterdir():
        if not sub_seq.is_dir():
            continue
        src = os.path.join(probe, sub_seq.name)
        # All probes share the synthetic id 'probe' and seq type '03'.
        dst = os.path.join(output_path, 'probe', '03', sub_seq.name)
        os.makedirs(dst, exist_ok=True)
        for subfile in os.listdir(sub_seq):
            link_path = os.path.join(dst, subfile)
            if subfile.endswith('.png') and not os.path.lexists(link_path):
                os.symlink(os.path.join(src, subfile), link_path)
        progress.update(1)
def rearrange_GREW(input_path: Path, output_path: Path) -> None:
    """Walk the raw GREW root and rearrange each known split into output_path.

    'train' and 'test' folders are dispatched to their rearrange routines;
    the 'distractor' folder is deliberately left untouched.
    """
    os.makedirs(output_path, exist_ok=True)
    split_handlers = {
        'train': rearrange_train,
        'test': rearrange_test,
    }
    for folder in input_path.iterdir():
        if not folder.is_dir():
            continue
        print(f'Rearranging {folder}')
        handler = split_handlers.get(folder.name)
        if handler is not None:
            handler(folder, output_path)
        # any other folder (e.g. 'distractor') is intentionally skipped
if __name__ == '__main__':
    # Command-line entry point: resolve both paths and kick off rearranging.
    parser = argparse.ArgumentParser(description='GREW rearrange tool')
    parser.add_argument('-i', '--input_path', required=True, type=str,
                        help='Root path of raw dataset.')
    parser.add_argument('-o', '--output_path', default='GREW_rearranged', type=str,
                        help='Root path for output.')
    cli_args = parser.parse_args()
    rearrange_GREW(Path(cli_args.input_path).resolve(),
                   Path(cli_args.output_path).resolve())