support CMU Mocap

Iridoudou
2021-08-11 12:12:31 +08:00
parent 3bb691ee4d
commit dd425a66d3
13 changed files with 1311 additions and 41 deletions

3
.gitignore vendored
View File

@@ -12,4 +12,5 @@ image.png
 smplpytorch/native/models/*.pkl
 exp/
-output/
+output/
+make_gif.py

0
01_01_params.pkl Normal file
View File

View File

@@ -43,6 +43,7 @@ The SMPL human body layer for Pytorch is from the [smplpytorch](https://github.c
 Currently supported datasets:
 - [HumanAct12](https://ericguo5513.github.io/action-to-motion/)
+- [CMU Mocap](https://ericguo5513.github.io/action-to-motion/)
 - [UTD-MHAD](https://personal.utdallas.edu/~kehtar/UTD-MHAD.html)
 - Set the **DATASET.PATH** in the corresponding configuration file to the location of the dataset.
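
Since the new configs added below are plain JSON, pointing DATASET.PATH at a local copy of a dataset is a one-string edit. A small sketch; the filename fit/configs/CMU_Mocap.json is my assumption, since this diff does not show where the new config lives:

import json

with open("fit/configs/CMU_Mocap.json") as f:   # hypothetical config path
    cfg = json.load(f)
cfg["DATASET"]["PATH"] = "/data/cmu_mocap/mocap_3djoints/"  # point at your copy
print(cfg["DATASET"]["NAME"], "->", cfg["DATASET"]["PATH"])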

View File

@@ -0,0 +1,89 @@
{
"MODEL": {
"GENDER": "neutral"
},
"TRAIN": {
"LEARNING_RATE": 2e-2,
"MAX_EPOCH": 1000,
"WRITE": 1,
"OPTIMIZE_SCALE":1,
"OPTIMIZE_SHAPE":0
},
"USE_GPU": 1,
"DATASET": {
"NAME": "UTD-MHAD",
"PATH": "../Action2Motion/CMU Mocap/mocap/mocap_3djoints/",
"TARGET_PATH": "",
"DATA_MAP": [
[
0,
0
],
[
9,
1
],
[
12,
2
],
[
13,
4
],
[
18,
6
],
[
20,
7
],
[
14,
8
],
[
19,
10
],
[
21,
11
],
[
1,
12
],
[
4,
13
],
[
7,
14
],
[
10,
15
],
[
2,
16
],
[
5,
17
],
[
8,
18
],
[
11,
19
]
]
},
"DEBUG": 0
}
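
A note on DATA_MAP, which applies equally to the UTD-MHAD map below: each pair lines up one SMPL joint with one dataset joint, and train.py later turns the two columns into the tensors it passes to index_select. A minimal sketch of that unpacking, assuming the pair order is [smpl_joint, dataset_joint]; the helper build_index is mine, not the repo's:

import torch

def build_index(data_map):
    # Column 0 -> SMPL joint ids, column 1 -> dataset joint ids.
    smpl_index = torch.tensor([p[0] for p in data_map], dtype=torch.long)
    dataset_index = torch.tensor([p[1] for p in data_map], dtype=torch.long)
    return {"smpl_index": smpl_index, "dataset_index": dataset_index}

# First few CMU Mocap pairs from the config above:
index = build_index([[0, 0], [9, 1], [12, 2], [13, 4]])
print(index["smpl_index"])     # tensor([ 0,  9, 12, 13])
print(index["dataset_index"])  # tensor([0, 1, 2, 4])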

View File

@@ -0,0 +1,83 @@
{
"MODEL": {
"GENDER": "neutral"
},
"TRAIN": {
"LEARNING_RATE": 2e-2,
"MAX_EPOCH": 1000,
"WRITE": 1
},
"USE_GPU": 1,
"DATASET": {
"NAME": "UTD-MHAD",
"PATH": "../UTD-MHAD/Skeleton/Skeleton/",
"TARGET_PATH": "",
"DATA_MAP": [
[
12,
1
],
[
0,
3
],
[
16,
4
],
[
18,
5
],
[
20,
6
],
[
22,
7
],
[
17,
8
],
[
19,
9
],
[
21,
10
],
[
23,
11
],
[
1,
12
],
[
4,
13
],
[
7,
14
],
[
2,
16
],
[
5,
17
],
[
8,
18
]
]
},
"DEBUG": 0
}

View File

@@ -5,7 +5,9 @@
 "TRAIN": {
 "LEARNING_RATE": 2e-2,
 "MAX_EPOCH": 1000,
-"WRITE": 1
+"WRITE": 1,
+"OPTIMIZE_SCALE": 0,
+"OPTIMIZE_SHAPE": 1
 },
 "USE_GPU": 1,
 "DATASET": {
View File

@@ -5,7 +5,9 @@
 "TRAIN": {
 "LEARNING_RATE": 2e-2,
 "MAX_EPOCH": 1000,
-"WRITE": 1
+"WRITE": 1,
+"OPTIMIZE_SCALE": 0,
+"OPTIMIZE_SHAPE": 1
 },
 "USE_GPU": 1,
 "DATASET": {
File diff suppressed because it is too large

View File

@@ -14,3 +14,6 @@ def load(name, path):
         return new_arr
     elif name == 'HumanAct12':
         return np.load(path, allow_pickle=True)
+    elif name == "CMU_Mocap":
+        return np.load(path, allow_pickle=True)
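
The new branch treats CMU Mocap exactly like HumanAct12: one pickled NumPy array per clip. A hypothetical call for context; the clip name 01_01.npy and the (frames, joints, 3) layout are assumptions inferred from the config PATH and the fitter's inputs:

import numpy as np

path = "../Action2Motion/CMU Mocap/mocap/mocap_3djoints/01_01.npy"  # assumed name
joints = np.load(path, allow_pickle=True)
print(joints.shape)  # expected: (num_frames, num_joints, 3)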

View File

@@ -33,17 +33,17 @@ def save_pic(res, smpl_layer, file, logger, dataset_name, target):
             savepath=os.path.join(fit_path + "/frame_{}".format(i)),
             batch_idx=i,
             show=False,
             only_joint=False)
-        display_model(
-            {'verts': verts.cpu().detach(),
-             'joints': target.cpu().detach()},
-            model_faces=smpl_layer.th_faces,
-            with_joints=True,
-            kintree_table=smpl_layer.kintree_table,
-            savepath=os.path.join(gt_path + "/frame_{}".format(i)),
-            batch_idx=i,
-            show=False,
-            only_joint=True)
+        # display_model(
+        #     {'verts': verts.cpu().detach(),
+        #      'joints': target.cpu().detach()},
+        #     model_faces=smpl_layer.th_faces,
+        #     with_joints=True,
+        #     kintree_table=smpl_layer.kintree_table,
+        #     savepath=os.path.join(gt_path + "/frame_{}".format(i)),
+        #     batch_idx=i,
+        #     show=False,
+        #     only_joint=True)
     logger.info('Pictures saved')

View File

@@ -42,10 +42,10 @@ def init(smpl_layer, target, device, cfg):
     params["scale"] = params["scale"].to(device)
     params["pose_params"].requires_grad = True
-    params["shape_params"].requires_grad = True
-    params["scale"].requires_grad = False
+    params["shape_params"].requires_grad = bool(cfg.TRAIN.OPTIMIZE_SHAPE)
+    params["scale"].requires_grad = bool(cfg.TRAIN.OPTIMIZE_SCALE)
-    optimizer = optim.Adam([params["pose_params"], params["shape_params"]],
+    optimizer = optim.Adam([params["pose_params"], params["shape_params"], params["scale"]],
                            lr=cfg.TRAIN.LEARNING_RATE)
     index = {}
@@ -73,8 +73,9 @@ def train(smpl_layer, target,
     early_stop = Early_Stop()
     for epoch in tqdm(range(cfg.TRAIN.MAX_EPOCH)):
+    # for epoch in range(cfg.TRAIN.MAX_EPOCH):
         verts, Jtr = smpl_layer(pose_params, th_betas=shape_params)
-        loss = F.smooth_l1_loss(Jtr.index_select(1, index["smpl_index"]) * 100,
+        loss = F.smooth_l1_loss(Jtr.index_select(1, index["smpl_index"]) * 100 * scale,
                                 target.index_select(1, index["dataset_index"]) * 100)
         optimizer.zero_grad()
         loss.backward()
@@ -88,8 +89,8 @@
             break
         if epoch % cfg.TRAIN.WRITE == 0:
-            # logger.info("Epoch {}, lossPerBatch={:.6f}, EarlyStopSatis: {}".format(
-            #     epoch, float(loss), early_stop.satis_num))
+            # logger.info("Epoch {}, lossPerBatch={:.6f}, scale={:.4f} EarlyStopSatis: {}".format(
+            #     epoch, float(loss), float(scale), early_stop.satis_num))
             writer.add_scalar('loss', float(loss), epoch)
             writer.add_scalar('learning_rate', float(
                 optimizer.state_dict()['param_groups'][0]['lr']), epoch)
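
Taken together, the three hunks make the global scale a learnable parameter: its requires_grad now follows TRAIN.OPTIMIZE_SCALE, it joins the Adam parameter list, and it multiplies the SMPL joints before the smooth-L1 comparison. A self-contained sketch of that loop; the linear fake_smpl stand-in, the random 20-joint target, and the identity joint correspondences are mine (the real code uses the SMPL layer and the DATA_MAP indices):

import torch
import torch.nn.functional as F

torch.manual_seed(0)

proj = torch.randn(72 + 10, 24 * 3)          # stand-in for the SMPL layer
def fake_smpl(pose, betas):
    Jtr = (torch.cat([pose, betas], dim=1) @ proj).view(-1, 24, 3)
    return None, Jtr                         # (verts, joints), like the real layer

target = torch.randn(1, 20, 3)               # toy mocap frame with 20 joints
smpl_index = torch.arange(20)                # toy joint correspondences
dataset_index = torch.arange(20)

pose_params = torch.zeros(1, 72, requires_grad=True)
shape_params = torch.zeros(1, 10, requires_grad=True)
scale = torch.ones(1, requires_grad=True)    # the new learnable global scale

optimizer = torch.optim.Adam([pose_params, shape_params, scale], lr=2e-2)

for epoch in range(200):
    _, Jtr = fake_smpl(pose_params, shape_params)
    loss = F.smooth_l1_loss(
        Jtr.index_select(1, smpl_index) * 100 * scale,     # scaled SMPL joints
        target.index_select(1, dataset_index) * 100)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print(float(loss), float(scale))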

View File

@@ -1,23 +1,17 @@
 import numpy as np
+rotate = {
+    'HumanAct12': [1., -1., -1.],
+    'CMU_Mocap': [0.05, 0.05, 0.05],
+    'UTD_MHAD': [-1., 1., -1.]
+}
 def transform(name, arr: np.ndarray):
-    if name == 'HumanAct12':
-        rotate = [1., -1., -1.]
-        for i in range(arr.shape[0]):
-            origin = arr[i][0].copy()
-            for j in range(arr.shape[1]):
-                arr[i][j] -= origin
-                for k in range(3):
-                    arr[i][j][k] *= rotate[k]
-            arr[i][0] = [0.0, 0.0, 0.0]
-    elif name == 'UTD_MHAD':
-        rotate = [-1., 1., -1.]
-        for i in range(arr.shape[0]):
-            origin = arr[i][3].copy()
-            for j in range(arr.shape[1]):
-                arr[i][j] -= origin
-                for k in range(3):
-                    arr[i][j][k] *= rotate[k]
-            arr[i][3] = [0.0, 0.0, 0.0]
+    for i in range(arr.shape[0]):
+        origin = arr[i][0].copy()
+        for j in range(arr.shape[1]):
+            arr[i][j] -= origin
+            for k in range(3):
+                arr[i][j][k] *= rotate[name][k]
     return arr
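
The rewrite collapses the per-dataset branches into one loop: recenter every frame on joint 0, then multiply each axis by the dataset's factors (for CMU Mocap, 0.05 on every axis acts as a unit rescale rather than an axis flip). Note the old UTD_MHAD branch centered on joint 3, while the unified version centers all datasets on joint 0. An equivalent vectorized restatement (mine, not the commit's code):

import numpy as np

ROTATE = {'HumanAct12': [1., -1., -1.],
          'CMU_Mocap': [0.05, 0.05, 0.05],
          'UTD_MHAD': [-1., 1., -1.]}

def transform_vec(name, arr):
    arr = arr - arr[:, :1, :]                # subtract each frame's root joint
    return arr * np.asarray(ROTATE[name])    # per-axis flip / rescale

clip = np.random.rand(16, 21, 3)             # stand-in (frames, joints, xyz) clip
out = transform_vec('CMU_Mocap', clip)
assert np.allclose(out[:, 0], 0.0)           # root joint now at the origin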

View File

@@ -1,7 +1,7 @@
 import matplotlib.pyplot as plt
 import imageio, os
 images = []
-filenames = sorted(fn for fn in os.listdir('./fit/output/HumanAct12/picture/fit/P01G01R01F0001T0064A0101'))
+filenames = sorted(fn for fn in os.listdir('./fit/output/CMU_Mocap/picture/fit/01_01'))
 for filename in filenames:
-    images.append(imageio.imread('./fit/output/HumanAct12/picture/fit/P01G01R01F0001T0064A0101/' + filename))
-imageio.mimsave('./assets/fit.gif', images, duration=0.25)
+    images.append(imageio.imread('./fit/output/CMU_Mocap/picture/fit/01_01/' + filename))
+imageio.mimsave('fit.gif', images, duration=0.2)