support HID

This commit is contained in:
darkliang
2022-03-11 19:32:23 +08:00
parent 7c33b33440
commit 7d7b81f48e
5 changed files with 1281 additions and 0 deletions
+27
@@ -140,3 +140,30 @@ def identification_real_scene(data, dataset, metric='euc'):
    msg_mgr.log_info('==Rank-5==')
    msg_mgr.log_info('%.3f' % (np.mean(acc[4])))
    return {"scalar/test_accuracy/Rank-1": np.mean(acc[0]), "scalar/test_accuracy/Rank-5": np.mean(acc[4])}


def evaluate_HID(data, dataset, metric='euc'):
    import os
    from time import strftime, localtime

    msg_mgr = get_msg_mgr()
    msg_mgr.log_info("Evaluating HID")
    feature, label, seq_type = data['embeddings'], data['labels'], data['types']
    label = np.array(label)
    seq_type = np.array(seq_type)

    # Split the embeddings: every sequence labeled "probe" is a query,
    # everything else belongs to the gallery.
    probe_mask = (label == "probe")
    gallery_mask = (label != "probe")
    gallery_x = feature[gallery_mask, :]
    gallery_y = label[gallery_mask]
    probe_x = feature[probe_mask, :]
    probe_y = seq_type[probe_mask]

    # Rank gallery samples by distance; the nearest one is the prediction.
    dist = cuda_dist(probe_x, gallery_x, metric)
    idx = dist.cpu().sort(1)[1].numpy()

    os.makedirs("HID_result", exist_ok=True)
    save_path = os.path.join(
        "HID_result", strftime('%Y-%m%d-%H%M%S', localtime()) + ".csv")
    with open(save_path, "w") as f:
        f.write("videoID,label\n")
        for i in range(len(idx)):
            f.write("{},{}\n".format(probe_y[i], gallery_y[idx[i, 0]]))
    print("HID result saved to {}/{}".format(os.getcwd(), save_path))
    return
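For intuition, here is a minimal NumPy sketch of what this evaluation does for `metric='euc'`: `cuda_dist` flattens the embeddings and computes pairwise Euclidean distances on the GPU, and the nearest (top-1) gallery label becomes the prediction for each probe. The toy shapes and labels below are illustrative only:

```python
import numpy as np

def euc_dist(probe_x, gallery_x):
    # Pairwise Euclidean distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b
    d2 = (np.sum(probe_x ** 2, axis=1, keepdims=True)
          + np.sum(gallery_x ** 2, axis=1)
          - 2.0 * probe_x @ gallery_x.T)
    return np.sqrt(np.clip(d2, 0.0, None))

probe_x = np.random.rand(4, 256)     # 4 probe embeddings (toy data)
gallery_x = np.random.rand(10, 256)  # 10 gallery embeddings
gallery_y = np.array(['%03d' % i for i in range(10)])  # gallery identity labels
idx = np.argsort(euc_dist(probe_x, gallery_x), axis=1)
print(gallery_y[idx[:, 0]])          # predicted identity for each probe
```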
+1012
File diff suppressed because it is too large
+22
@@ -0,0 +1,22 @@
# HID Tutorial
This is the official support for the Human Identification at a Distance (HID) competition. Our result with the baseline model is 68.7%. To help participants take the first step, we provide a tutorial on how to use OpenGait for HID.
## Preprocess the dataset
Download the raw dataset from the [official link](http://hid2022.iapr-tc4.org/). You will get three compressed files, i.e. `train.tar`, `HID2022_test_gallery.zip` and `HID2022_test_probe.zip`.
After unpacking these three files, run this command:
```shell
python misc/HID/pretreatment_HID.py --input_train_path="train" --input_gallery_path="HID2022_test_gallery" --input_probe_path="HID2022_test_probe" --output_path="HID-128-pkl"
```
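After preprocessing, each sequence is stored as a single pickle of stacked grayscale silhouettes at `<output_path>/<id>/<seq_type>/default/<seq_type>.pkl`, with all probe sequences grouped under the pseudo-identity `probe`. A quick sanity check (the `0001/0001` path below is a placeholder; substitute a real sequence from your output):

```python
import pickle

with open("HID-128-pkl/0001/0001/default/0001.pkl", "rb") as f:
    seq = pickle.load(f)
print(seq.shape)  # (num_frames, height, width) array of silhouettes
```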
## Train the model
Modify the `dataset_root` in `./misc/HID/baseline_hid.yaml`, and then run this command:
```shell
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 lib/main.py --cfgs ./misc/HID/baseline_hid.yaml --phase train
```
You can also download the [trained model](https://github.com/ShiqiYu/OpenGait/releases/download/v1.1/pretrained_hid_model.pt) and place it in `output` after unzipping.
## Get the submission file
```shell
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 lib/main.py --cfgs ./misc/HID/baseline_hid.yaml --phase test
```
The result will be generated under `HID_result/` in your working directory; rename and compress it according to the competition requirements before submitting.
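As a minimal sketch, assuming the submission is a zipped CSV (the names `submission.csv` and `submission.zip` below are placeholders; check the competition page for the exact naming rules):

```python
import os
import zipfile

result_csv = "HID_result/2022-0311-193223.csv"  # the path printed by evaluate_HID
os.rename(result_csv, "submission.csv")         # hypothetical required name
with zipfile.ZipFile("submission.zip", "w", zipfile.ZIP_DEFLATED) as zf:
    zf.write("submission.csv")
```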
+97
@@ -0,0 +1,97 @@
data_cfg:
  dataset_name: HID
  dataset_root: your_path
  dataset_partition: ./misc/HID/HID.json
  num_workers: 1
  remove_no_gallery: false # Remove probe if no gallery exists for it

evaluator_cfg:
  enable_float16: true
  restore_ckpt_strict: true
  restore_hint: 60000
  save_name: Baseline
  eval_func: evaluate_HID
  sampler:
    batch_shuffle: false
    batch_size: 8
    sample_type: all_ordered # 'all' means the whole sequence is used for testing; 'ordered' means frames are input in their natural order. Other option: fixed_unordered
    frames_all_limit: 720 # limit the number of sampled frames to prevent out-of-memory errors
  metric: euc # cos
  transform:
    - type: BaseSilCuttingTransform
      img_w: 128

loss_cfg:
  - loss_term_weight: 1.0
    margin: 0.2
    type: TripletLoss
    log_prefix: triplet
  - loss_term_weight: 0.1
    scale: 16
    type: CrossEntropyLoss
    log_prefix: softmax
    log_accuracy: true

model_cfg:
  model: Baseline
  backbone_cfg:
    in_channels: 1
    layers_cfg: # Layer configuration for automatic model construction
      - BC-64
      - BC-64
      - M
      - BC-128
      - BC-128
      - M
      - BC-256
      - BC-256
      - M
      - BC-512
      - BC-512
    type: Plain
  SeparateFCs:
    in_channels: 512
    out_channels: 256
    parts_num: 31
  SeparateBNNecks:
    class_num: 500
    in_channels: 256
    parts_num: 31
  bin_num:
    - 16
    - 8
    - 4
    - 2
    - 1

optimizer_cfg:
  lr: 0.1
  momentum: 0.9
  solver: SGD
  weight_decay: 0.0005

scheduler_cfg:
  gamma: 0.1
  milestones: # Learning rate is reduced at each milestone
    - 20000
    - 40000
  scheduler: MultiStepLR

trainer_cfg:
  enable_float16: true # half-precision floats for memory reduction and speedup
  fix_BN: false
  log_iter: 100
  restore_ckpt_strict: true
  restore_hint: 20000
  save_iter: 10000
  save_name: Baseline
  sync_BN: true
  total_iter: 60000
  sampler:
    batch_shuffle: true
    batch_size:
      - 16 # TripletSampler: batch_size[0] is the number of identities
      - 8  # batch_size[1] is the number of sequences sampled per identity
    frames_num_fixed: 30 # fixed number of frames per sequence for training
    sample_type: fixed_unordered # 'fixed' controls the number of input frames, 'unordered' randomizes their order. Other options: unfixed_ordered or all_ordered
    type: TripletSampler
  transform:
    - type: BaseSilCuttingTransform
      img_w: 128
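For intuition on `batch_size: [16, 8]`: every training batch draws 16 identities and 8 sequences per identity, i.e. 128 sequences per batch, which gives TripletLoss enough positives and negatives per identity to form triplets. A minimal sketch of this P×K sampling idea (not OpenGait's actual `TripletSampler` implementation):

```python
import random

def pk_batch(label_to_indices, p=16, k=8):
    # Draw p identities, then k sequence indices each -> p*k samples per batch.
    ids = random.sample(list(label_to_indices), p)
    batch = []
    for _id in ids:
        # Sample with replacement so identities with fewer than k sequences still work.
        batch.extend(random.choices(label_to_indices[_id], k=k))
    return batch

# Toy usage: 20 identities, each owning 10 dataset indices.
toy = {'%04d' % i: list(range(i * 10, i * 10 + 10)) for i in range(20)}
print(len(pk_batch(toy)))  # 128
```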
+123
@@ -0,0 +1,123 @@
import os
import cv2
import numpy as np
import argparse
import pickle
from tqdm import tqdm
parser = argparse.ArgumentParser(description='HID pretreatment')
parser.add_argument('--input_train_path', default='', type=str,
                    help='Root path of train.')
parser.add_argument('--input_gallery_path', default='', type=str,
                    help='Root path of gallery.')
parser.add_argument('--input_probe_path', default='', type=str,
                    help='Root path of probe.')
parser.add_argument('--output_path', default='', type=str,
                    help='Root path for output.')
opt = parser.parse_args()

OUTPUT_PATH = opt.output_path
print('Pretreatment Start.\n'
      'Input train path: {}\n'
      'Input gallery path: {}\n'
      'Input probe path: {}\n'
      'Output path: {}\n'.format(
          opt.input_train_path, opt.input_gallery_path, opt.input_probe_path, OUTPUT_PATH))


def dump_seq(seq_dir, out_dir, seq_name, id_label):
    """Read every grayscale frame in seq_dir and dump the stacked array to
    out_dir/<seq_name>.pkl; warn when a sequence has fewer than 5 valid frames."""
    all_imgs = []
    for _frame_name in sorted(os.listdir(seq_dir)):
        img = cv2.imread(os.path.join(seq_dir, _frame_name), cv2.IMREAD_GRAYSCALE)
        if img is not None:
            all_imgs.append(img)
    count_frame = len(all_imgs)
    if count_frame > 0:
        os.makedirs(out_dir, exist_ok=True)
        with open(os.path.join(out_dir, '{}.pkl'.format(seq_name)), 'wb') as f:
            pickle.dump(np.asarray(all_imgs), f)
    if count_frame < 5:
        print('Seq:{}-{}, less than 5 valid frames.'.format(id_label, seq_name))


# Train and gallery share the same <id>/<seq_type> directory layout.
for split, INPUT_PATH in (('train', opt.input_train_path),
                          ('gallery', opt.input_gallery_path)):
    print("Walk the input {} path".format(split))
    for _id in tqdm(sorted(os.listdir(INPUT_PATH))):
        for _seq_type in sorted(os.listdir(os.path.join(INPUT_PATH, _id))):
            dump_seq(os.path.join(INPUT_PATH, _id, _seq_type),
                     os.path.join(OUTPUT_PATH, _id, _seq_type, "default"),
                     _seq_type, _id)

# Probe sequences carry no identity labels, so group them all under the
# pseudo-identity "probe"; evaluate_HID splits them back out by this label.
print("Walk the input probe path")
INPUT_PATH = opt.input_probe_path
_id = "probe"
for _seq_type in tqdm(sorted(os.listdir(INPUT_PATH))):
    dump_seq(os.path.join(INPUT_PATH, _seq_type),
             os.path.join(OUTPUT_PATH, _id, _seq_type, "default"),
             _seq_type, _id)