support CMU Mocap
fit/tools/label.py (1096)
File diff suppressed because it is too large
@@ -14,3 +14,6 @@ def load(name, path):
         return new_arr
     elif name == 'HumanAct12':
         return np.load(path,allow_pickle=True)
+    elif name == "CMU_Mocap":
+        return np.load(path,allow_pickle=True)
+
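Note: a minimal sketch of how the extended loader is expected to behave, assuming (as the hunk above suggests) that both HumanAct12 and CMU_Mocap ship as pickled .npy joint arrays; the file path, the fallback error and the printed shape are illustrative only.

import numpy as np

def load(name, path):
    # Trimmed stand-in for the loader patched above: both datasets are read
    # with allow_pickle=True because the arrays are stored as Python objects.
    if name == 'HumanAct12':
        return np.load(path, allow_pickle=True)
    elif name == "CMU_Mocap":
        return np.load(path, allow_pickle=True)
    raise ValueError("unsupported dataset: " + name)

# Hypothetical usage; the actual file name depends on how the data was exported.
joints = load("CMU_Mocap", "data/cmu_mocap_joints.npy")
print(joints.shape)   # e.g. (num_frames, num_joints, 3)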
@@ -33,17 +33,17 @@ def save_pic(res, smpl_layer, file, logger, dataset_name,target):
             savepath=os.path.join(fit_path+"/frame_{}".format(i)),
             batch_idx=i,
             show=False,
             only_joint=False)
-        display_model(
-            {'verts': verts.cpu().detach(),
-             'joints': target.cpu().detach()},
-            model_faces=smpl_layer.th_faces,
-            with_joints=True,
-            kintree_table=smpl_layer.kintree_table,
-            savepath=os.path.join(gt_path+"/frame_{}".format(i)),
-            batch_idx=i,
-            show=False,
-            only_joint=True)
+        # display_model(
+        #     {'verts': verts.cpu().detach(),
+        #      'joints': target.cpu().detach()},
+        #     model_faces=smpl_layer.th_faces,
+        #     with_joints=True,
+        #     kintree_table=smpl_layer.kintree_table,
+        #     savepath=os.path.join(gt_path+"/frame_{}".format(i)),
+        #     batch_idx=i,
+        #     show=False,
+        #     only_joint=True)
     logger.info('Pictures saved')
 
 
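Note: display_model belongs to the repository's own visualisation utilities and is not reproduced here; the snippet below is a self-contained matplotlib stand-in for what the commented-out call did (save one image of the ground-truth joints per frame), with shapes and paths chosen purely for illustration.

import os
import matplotlib
matplotlib.use("Agg")                         # render off-screen, as in batch fitting
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D       # noqa: F401, registers the 3d projection on older matplotlib
import numpy as np

def save_gt_frames(target, gt_path):
    # target: (num_frames, num_joints, 3) ground-truth joint positions.
    os.makedirs(gt_path, exist_ok=True)
    for i in range(target.shape[0]):
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.scatter(target[i, :, 0], target[i, :, 1], target[i, :, 2])
        fig.savefig(os.path.join(gt_path, "frame_{}".format(i)))
        plt.close(fig)

save_gt_frames(np.random.rand(3, 20, 3), "debug_gt")   # fabricated data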
@@ -42,10 +42,10 @@ def init(smpl_layer, target, device, cfg):
     params["scale"] = params["scale"].to(device)
 
     params["pose_params"].requires_grad = True
-    params["shape_params"].requires_grad = True
-    params["scale"].requires_grad = False
+    params["shape_params"].requires_grad = bool(cfg.TRAIN.OPTIMIZE_SHAPE)
+    params["scale"].requires_grad = bool(cfg.TRAIN.OPTIMIZE_SCALE)
 
-    optimizer = optim.Adam([params["pose_params"], params["shape_params"]],
+    optimizer = optim.Adam([params["pose_params"], params["shape_params"], params["scale"]],
                            lr=cfg.TRAIN.LEARNING_RATE)
 
     index={}
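Note: a self-contained sketch of the new init logic, assuming cfg.TRAIN.OPTIMIZE_SHAPE and cfg.TRAIN.OPTIMIZE_SCALE are boolean config entries; tensor shapes follow standard SMPL (72 pose parameters, 10 betas), everything else is illustrative.

import torch
from torch import optim

OPTIMIZE_SHAPE = True      # stands in for cfg.TRAIN.OPTIMIZE_SHAPE
OPTIMIZE_SCALE = True      # stands in for cfg.TRAIN.OPTIMIZE_SCALE
LEARNING_RATE = 1e-2       # stands in for cfg.TRAIN.LEARNING_RATE

params = {
    "pose_params": torch.zeros(1, 72, requires_grad=True),   # SMPL axis-angle pose
    "shape_params": torch.zeros(1, 10),                       # SMPL betas
    "scale": torch.ones(1),                                   # global skeleton scale
}
params["shape_params"].requires_grad = bool(OPTIMIZE_SHAPE)
params["scale"].requires_grad = bool(OPTIMIZE_SCALE)

# The scale tensor is now handed to Adam as well, mirroring the diff above;
# if its flag is False it simply never receives gradients and stays fixed.
optimizer = optim.Adam(
    [params["pose_params"], params["shape_params"], params["scale"]],
    lr=LEARNING_RATE,
)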
@@ -73,8 +73,9 @@ def train(smpl_layer, target,
 
     early_stop = Early_Stop()
     for epoch in tqdm(range(cfg.TRAIN.MAX_EPOCH)):
+    # for epoch in range(cfg.TRAIN.MAX_EPOCH):
         verts, Jtr = smpl_layer(pose_params, th_betas=shape_params)
-        loss = F.smooth_l1_loss(Jtr.index_select(1, index["smpl_index"]) * 100,
+        loss = F.smooth_l1_loss(Jtr.index_select(1, index["smpl_index"]) * 100 * scale,
                                 target.index_select(1, index["dataset_index"]) * 100)
         optimizer.zero_grad()
         loss.backward()
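Note: the substantive change in this hunk is that the SMPL joints are multiplied by the learnable scale before the smooth-L1 comparison. A tiny numeric sketch with fabricated tensors and hypothetical joint index lists:

import torch
import torch.nn.functional as F

torch.manual_seed(0)

Jtr = torch.rand(1, 16, 3)                    # joints regressed by the SMPL layer (fabricated)
target = torch.rand(1, 16, 3)                 # dataset joints, e.g. CMU Mocap (fabricated)
scale = torch.ones(1, requires_grad=True)     # learnable global scale
smpl_index = torch.tensor([0, 1, 2, 3])       # hypothetical matched SMPL joint ids
dataset_index = torch.tensor([0, 1, 2, 3])    # hypothetical matched dataset joint ids

# Only the prediction is scaled; both sides keep the *100 from the original loss.
loss = F.smooth_l1_loss(Jtr.index_select(1, smpl_index) * 100 * scale,
                        target.index_select(1, dataset_index) * 100)
loss.backward()
print(float(loss), float(scale.grad))         # scale now receives a gradient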
@@ -88,8 +89,8 @@ def train(smpl_layer, target,
                 break
 
         if epoch % cfg.TRAIN.WRITE == 0:
-            # logger.info("Epoch {}, lossPerBatch={:.6f}, EarlyStopSatis: {}".format(
-            # epoch, float(loss), early_stop.satis_num))
+            # logger.info("Epoch {}, lossPerBatch={:.6f}, scale={:.4f} EarlyStopSatis: {}".format(
+            # epoch, float(loss),float(scale), early_stop.satis_num))
             writer.add_scalar('loss', float(loss), epoch)
             writer.add_scalar('learning_rate', float(
                 optimizer.state_dict()['param_groups'][0]['lr']), epoch)
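Note: writer here is presumably a torch.utils.tensorboard SummaryWriter; below is a standalone sketch of the same logging pattern (loss and current learning rate every cfg.TRAIN.WRITE epochs), with a toy objective and a made-up log directory.

import torch
from torch import optim
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("runs/fit_debug")      # hypothetical log directory
param = torch.zeros(3, requires_grad=True)
optimizer = optim.Adam([param], lr=1e-3)
WRITE = 10                                    # stands in for cfg.TRAIN.WRITE

for epoch in range(100):
    loss = ((param - 1.0) ** 2).sum()         # toy objective
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % WRITE == 0:
        writer.add_scalar('loss', float(loss), epoch)
        writer.add_scalar('learning_rate',
                          float(optimizer.state_dict()['param_groups'][0]['lr']), epoch)
writer.close()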
@@ -1,23 +1,17 @@
 import numpy as np
 
+rotate = {
+    'HumanAct12': [1., -1., -1.],
+    'CMU_Mocap': [0.05, 0.05, 0.05],
+    'UTD_MHAD': [-1., 1., -1.]
+}
+
 
 def transform(name, arr: np.ndarray):
-    if name == 'HumanAct12':
-        rotate = [1., -1., -1.]
-        for i in range(arr.shape[0]):
-            origin = arr[i][0].copy()
-            for j in range(arr.shape[1]):
-                arr[i][j] -= origin
-                for k in range(3):
-                    arr[i][j][k] *= rotate[k]
-            arr[i][0] = [0.0, 0.0, 0.0]
-    elif name == 'UTD_MHAD':
-        rotate = [-1., 1.,-1.]
-        for i in range(arr.shape[0]):
-            origin = arr[i][3].copy()
-            for j in range(arr.shape[1]):
-                arr[i][j] -= origin
-                for k in range(3):
-                    arr[i][j][k] *= rotate[k]
-            arr[i][3] = [0.0, 0.0, 0.0]
+    for i in range(arr.shape[0]):
+        origin = arr[i][0].copy()
+        for j in range(arr.shape[1]):
+            arr[i][j] -= origin
+            for k in range(3):
+                arr[i][j][k] *= rotate[name][k]
     return arr
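Note: to make the new transform concrete, the sketch below repeats the rewritten function and runs it on a fabricated two-frame, three-joint sequence; every frame is re-centred on its joint 0 and each axis is multiplied by the per-dataset factor, so CMU_Mocap data is rescaled by 0.05 rather than mirrored.

import numpy as np

rotate = {
    'HumanAct12': [1., -1., -1.],
    'CMU_Mocap': [0.05, 0.05, 0.05],
    'UTD_MHAD': [-1., 1., -1.]
}

def transform(name, arr: np.ndarray):
    # arr: (frames, joints, 3); modified in place and returned.
    for i in range(arr.shape[0]):
        origin = arr[i][0].copy()
        for j in range(arr.shape[1]):
            arr[i][j] -= origin
            for k in range(3):
                arr[i][j][k] *= rotate[name][k]
    return arr

seq = np.array([[[1.0, 2.0, 3.0], [1.5, 2.0, 3.0], [1.0, 2.5, 3.0]],
                [[2.0, 2.0, 3.0], [2.5, 2.0, 3.0], [2.0, 2.5, 3.0]]])
out = transform('CMU_Mocap', seq)
print(out[0])   # joint 0 at the origin, offsets scaled: [[0 0 0], [0.025 0 0], [0 0.025 0]]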