update code
This commit is contained in:
16
fit/tools/load.py
Normal file
16
fit/tools/load.py
Normal file
@ -0,0 +1,16 @@
|
||||
import scipy.io
|
||||
import numpy as np
|
||||
|
||||
|
||||
def load(name, path):
    """Load a skeleton-sequence dataset and return it as a numpy array.

    Parameters
    ----------
    name : str
        Dataset identifier: 'UTD_MHAD' (a MATLAB .mat file containing the
        key 'd_skel') or 'HumanAct12' (a .npy file).
    path : str
        Path to the data file.

    Returns
    -------
    np.ndarray
        For 'UTD_MHAD': array of shape (frames, joints, coords); the .mat
        data is stored (joints, coords, frames) and is transposed here.
        For 'HumanAct12': the array stored in the .npy file.
        NOTE(review): any other `name` silently returns None — confirm
        callers never pass other dataset names.
    """
    if name == 'UTD_MHAD':
        data = scipy.io.loadmat(path)
        arr = data['d_skel']
        # Move the frame axis to the front in one vectorized step:
        # (joints, coords, frames) -> (frames, joints, coords).
        # This replaces the original O(frames*joints*coords) Python loop.
        # astype(np.float64) preserves the original behavior of filling a
        # default-dtype np.zeros buffer (always float64) and also copies,
        # so the result does not alias the loadmat buffer.
        return np.transpose(arr, (2, 0, 1)).astype(np.float64)
    elif name == 'HumanAct12':
        return np.load(path)
|
||||
@ -27,6 +27,7 @@ from smplpytorch.pytorch.smpl_layer import SMPL_Layer
|
||||
from train import train
|
||||
from transform import transform
|
||||
from save import save_pic,save_params
|
||||
from load import load
|
||||
torch.backends.cudnn.benchmark=True
|
||||
|
||||
def parse_args():
|
||||
@ -96,22 +97,34 @@ if __name__ == "__main__":
|
||||
|
||||
smpl_layer = SMPL_Layer(
|
||||
center_idx = 0,
|
||||
gender='neutral',
|
||||
gender=cfg.MODEL.GENDER,
|
||||
model_root='smplpytorch/native/models')
|
||||
|
||||
for root,dirs,files in os.walk(cfg.DATASET_PATH):
|
||||
for file in files:
|
||||
logger.info('Processing file: {}'.format(file))
|
||||
target_path=os.path.join(root,file)
|
||||
|
||||
target = np.array(transform(np.load(target_path)))
|
||||
logger.info('File shape: {}'.format(target.shape))
|
||||
target = torch.from_numpy(target).float()
|
||||
|
||||
res = train(smpl_layer,target,
|
||||
logger,writer,device,
|
||||
args,cfg)
|
||||
|
||||
# save_pic(target,res,smpl_layer,file,logger)
|
||||
save_params(res,file,logger)
|
||||
if not cfg.DEBUG:
|
||||
for root,dirs,files in os.walk(cfg.DATASET_PATH):
|
||||
for file in files:
|
||||
logger.info('Processing file: {}'.format(file))
|
||||
target_path=os.path.join(root,file)
|
||||
|
||||
target = np.array(transform(np.load(target_path)))
|
||||
logger.info('File shape: {}'.format(target.shape))
|
||||
target = torch.from_numpy(target).float()
|
||||
|
||||
|
||||
res = train(smpl_layer,target,
|
||||
logger,writer,device,
|
||||
args,cfg)
|
||||
|
||||
# save_pic(target,res,smpl_layer,file,logger)
|
||||
save_params(res,file,logger)
|
||||
else:
|
||||
target = np.array(transform(load('UTD_MHAD',cfg.DATASET.TARGET_PATH),
|
||||
rotate=[-1,1,-1]))
|
||||
target = torch.from_numpy(target).float()
|
||||
data_map_dataset=torch.tensor(cfg.DATASET.DATA_MAP.UTD_MHAD[1])
|
||||
target = target.index_select(1, data_map_dataset)
|
||||
print(target.shape)
|
||||
res = train(smpl_layer,target,
|
||||
logger,writer,device,
|
||||
args,cfg)
|
||||
|
||||
14
fit/tools/map.py
Normal file
14
fit/tools/map.py
Normal file
@ -0,0 +1,14 @@
|
||||
import numpy as np
|
||||
|
||||
def mapping(Jtr, cfg):
    """Select/reorder the joint axis of `Jtr` per the dataset's joint map.

    Parameters
    ----------
    Jtr : ndarray of shape (frames, joints, coords)
        Joint positions produced by the SMPL layer.
    cfg : config object
        Reads cfg.DATASET.NAME and, for non-HumanAct12 datasets,
        cfg.DATASET.DATA_MAP.UTD_MHAD — a sequence of SMPL joint indices.

    Returns
    -------
    ndarray
        For 'HumanAct12' the input array is returned unchanged (same
        object). Otherwise a new float64 array of shape
        (frames, len(map), coords) with the mapped joints.
    """
    if cfg.DATASET.NAME == 'HumanAct12':
        # HumanAct12 joints already line up with the SMPL skeleton.
        return Jtr
    mapped_joint = cfg.DATASET.DATA_MAP.UTD_MHAD
    # Fancy indexing on the joint axis replaces the original triple loop;
    # astype(np.float64) matches the original np.zeros-based output dtype
    # and guarantees a copy.
    return np.asarray(Jtr)[:, list(mapped_joint), :].astype(np.float64)
|
||||
|
||||
@ -9,18 +9,20 @@ import json
|
||||
sys.path.append(os.getcwd())
|
||||
from display_utils import display_model
|
||||
|
||||
|
||||
def create_dir_not_exist(path):
    """Create directory `path`, including missing parents, if absent.

    os.makedirs with exist_ok=True is race-free (no TOCTOU gap between an
    existence check and the mkdir call) and, unlike the original
    os.path.exists + os.mkdir pair, also creates intermediate directories,
    so nested output paths work on a fresh checkout.
    """
    os.makedirs(path, exist_ok=True)
|
||||
|
||||
def save_pic(target, res, smpl_layer, file,logger):
|
||||
|
||||
|
||||
def save_pic(target, res, smpl_layer, file, logger):
|
||||
pose_params, shape_params, verts, Jtr = res
|
||||
name=re.split('[/.]',file)[-2]
|
||||
gt_path="fit/output/HumanAct12/picture/gt/{}".format(name)
|
||||
fit_path="fit/output/HumanAct12/picture/fit/{}".format(name)
|
||||
name = re.split('[/.]', file)[-2]
|
||||
gt_path = "fit/output/HumanAct12/picture/gt/{}".format(name)
|
||||
fit_path = "fit/output/HumanAct12/picture/fit/{}".format(name)
|
||||
create_dir_not_exist(gt_path)
|
||||
create_dir_not_exist(fit_path)
|
||||
logger.info('Saving pictures at {} and {}'.format(gt_path,fit_path))
|
||||
logger.info('Saving pictures at {} and {}'.format(gt_path, fit_path))
|
||||
for i in tqdm(range(target.shape[0])):
|
||||
display_model(
|
||||
{'verts': verts.cpu().detach(),
|
||||
@ -42,25 +44,25 @@ def save_pic(target, res, smpl_layer, file,logger):
|
||||
batch_idx=i,
|
||||
show=False)
|
||||
logger.info('Pictures saved')
|
||||
|
||||
def save_params(res, file, logger):
    """Serialize fitting results to <fit_path>/<name>_params.json.

    Parameters
    ----------
    res : tuple
        (pose_params, shape_params, verts, Jtr) torch tensors from the
        fitting loop; `verts` is not saved.
    file : str
        Source file name; the token between the last '/' and the extension
        becomes the output file stem.
    logger : logging.Logger
        Progress messages.
    """
    pose_params, shape_params, verts, Jtr = res
    name = re.split('[/.]', file)[-2]
    fit_path = "fit/output/HumanAct12/params/"
    create_dir_not_exist(fit_path)
    logger.info('Saving params at {}'.format(fit_path))

    # Detach from the autograd graph and move to CPU before converting to
    # plain nested lists for JSON.
    params = {
        "pose_params": pose_params.cpu().detach().numpy().tolist(),
        "shape_params": shape_params.cpu().detach().numpy().tolist(),
        "Jtr": Jtr.cpu().detach().numpy().tolist(),
    }

    # `with` guarantees the handle is closed and the JSON flushed; the
    # original opened the file and never closed it (resource leak).
    with open(os.path.join(fit_path,
                           "{}_params.json".format(name)), 'w') as f:
        json.dump(params, f)
    logger.info('Params saved')
||||
|
||||
|
||||
@ -10,6 +10,7 @@ from torch.utils.data import DataLoader
|
||||
import torchvision.datasets as dset
|
||||
import torchvision.transforms as T
|
||||
import numpy as np
|
||||
import scipy.io
|
||||
from tensorboardX import SummaryWriter
|
||||
from easydict import EasyDict as edict
|
||||
import time
|
||||
@ -24,6 +25,7 @@ from tqdm import tqdm
|
||||
sys.path.append(os.getcwd())
|
||||
from smplpytorch.pytorch.smpl_layer import SMPL_Layer
|
||||
from display_utils import display_model
|
||||
from map import mapping
|
||||
|
||||
def train(smpl_layer, target,
|
||||
logger, writer, device,
|
||||
@ -48,9 +50,11 @@ def train(smpl_layer, target,
|
||||
lr=cfg.TRAIN.LEARNING_RATE)
|
||||
|
||||
min_loss = float('inf')
|
||||
for epoch in tqdm(range(cfg.TRAIN.MAX_EPOCH)):
|
||||
data_map=torch.tensor(cfg.DATASET.DATA_MAP.UTD_MHAD)[0].to(device)
|
||||
# for epoch in tqdm(range(cfg.TRAIN.MAX_EPOCH)):
|
||||
for epoch in range(cfg.TRAIN.MAX_EPOCH):
|
||||
verts, Jtr = smpl_layer(pose_params, th_betas=shape_params)
|
||||
loss = F.smooth_l1_loss(Jtr * 100, target * 100)
|
||||
loss = F.smooth_l1_loss(Jtr.index_select(1, data_map) * 100, target * 100)
|
||||
optimizer.zero_grad()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
@ -58,10 +62,22 @@ def train(smpl_layer, target,
|
||||
min_loss = float(loss)
|
||||
res = [pose_params, shape_params, verts, Jtr]
|
||||
if epoch % cfg.TRAIN.WRITE == 0:
|
||||
# logger.info("Epoch {}, lossPerBatch={:.9f}, scale={:.6f}".format(
|
||||
# epoch, float(loss), float(scale)))
|
||||
logger.info("Epoch {}, lossPerBatch={:.9f}, scale={:.6f}".format(
|
||||
epoch, float(loss), float(scale)))
|
||||
writer.add_scalar('loss', float(loss), epoch)
|
||||
writer.add_scalar('learning_rate', float(
|
||||
optimizer.state_dict()['param_groups'][0]['lr']), epoch)
|
||||
if epoch % cfg.TRAIN.SAVE == 0 and epoch > 0:
|
||||
for i in tqdm(range(Jtr.shape[0])):
|
||||
display_model(
|
||||
{'verts': verts.cpu().detach(),
|
||||
'joints': Jtr.cpu().detach()},
|
||||
model_faces=smpl_layer.th_faces,
|
||||
with_joints=True,
|
||||
kintree_table=smpl_layer.kintree_table,
|
||||
savepath="fit/output/UTD_MHAD/picture/frame_{}".format(str(i).zfill(4)),
|
||||
batch_idx=i,
|
||||
show=True,
|
||||
only_joint=True)
|
||||
logger.info('Train ended, min_loss = {:.9f}'.format(float(min_loss)))
|
||||
return res
|
||||
|
||||
@ -1,12 +1,13 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
def transform(arr: np.ndarray, rotate=(1., -1., -1.)):
    """Normalize a skeleton sequence in place.

    Each frame is translated so joint 3 sits at the origin, then the first
    three coordinate components of every joint are scaled by `rotate`
    (the default flips the y and z axes). Joint 3 itself is zeroed.

    Parameters
    ----------
    arr : np.ndarray
        Float array of shape (frames, joints, coords); modified in place.
    rotate : sequence of 3 floats
        Per-axis scale factors. A tuple default replaces the original
        mutable-default-argument list.

    Returns
    -------
    np.ndarray
        The same array object, transformed.
    """
    factors = np.asarray(rotate, dtype=arr.dtype)
    # Copy the per-frame origin (joint 3) before the in-place subtraction
    # mutates it; broadcasting replaces the original per-joint loops.
    origin = arr[:, 3, :].copy()
    arr -= origin[:, None, :]
    arr[..., :3] *= factors
    # Zero the reference joint exactly (avoids -0.0 / rounding residue).
    arr[:, 3] = 0.0
    # Leftover debug `print(arr[0])` removed — it spammed stdout per call.
    return arr
|
||||
|
||||
Reference in New Issue
Block a user