# """Human3.6H validation samples."""
# h36m_val = BatchLoader(h36m, ['frame'],
#         ['pose_w', 'pose_uvd', 'afmat', 'camera', 'action'], VALID_MODE,
#         batch_size=h36m.get_length(VALID_MODE), shuffle=True)
# printcn(OKBLUE, 'Preloading Human3.6M validation samples...')
# [x_val], [pw_val, puvd_val, afmat_val, scam_val, action] = h36m_val[0]
#
# h36m_callback = H36MEvalCallback(x_val, pw_val, afmat_val,
#         puvd_val[:,0,2], scam_val, action, logdir=logdir)

# Stage 1: build the pose network from the config and train it on MPII.
model = spnet.build(cfg)

# Combined L1/L2 + binary cross-entropy pose-regression loss.
# The 0.01 is presumably a loss weighting factor -- confirm against
# pose_regression_loss's definition.
loss = pose_regression_loss('l1l2bincross', 0.01)
model.compile(loss=loss, optimizer=RMSprop(lr=start_lr))
model.summary()

callbacks = []
callbacks.append(SaveModel(weights_path))  # checkpoint weights during training
callbacks.append(mpii_callback)            # MPII evaluation callback
# callbacks.append(h36m_callback)

# One epoch sweeps the whole MPII training split once.
steps_per_epoch = mpii.get_length(TRAIN_MODE) // batch_size_mpii

model.fit_generator(data_tr,
        steps_per_epoch=steps_per_epoch,
        epochs=60,
        callbacks=callbacks,
        workers=8,
        initial_epoch=0)

# Re-compile for the next stage: same loss, but a fresh RMSprop at its
# default learning rate (the LR scheduler below takes over from here).
loss = pose_regression_loss('l1l2bincross', 0.01)
model.compile(loss=loss, optimizer=RMSprop())
model.summary()


def lr_scheduler(epoch, lr):
    """Step-decay schedule: scale the learning rate by 0.2 at epochs 80 and 100.

    Every other epoch passes the incoming rate through unchanged; each call
    logs the (possibly updated) rate via printcn.
    """
    if epoch not in (80, 100):
        printcn(OKBLUE, 'lr_scheduler: lr %g @ %d' % (lr, epoch))
        return lr

    newlr = 0.2 * lr
    printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d' % (lr, newlr, epoch))
    return newlr


# Stage 2: longer run with LR scheduling and an evaluation callback.
callbacks = []
callbacks.append(SaveModel(weights_file))  # NOTE(review): stage 1 used
                                           # `weights_path` -- confirm both
                                           # variables exist and differ on purpose
callbacks.append(LearningRateScheduler(lr_scheduler))
callbacks.append(eval_callback)

steps_per_epoch = mpii.get_length(TRAIN_MODE) // batch_size

model.fit_generator(data_tr,
                    steps_per_epoch=steps_per_epoch,
                    epochs=120,
                    callbacks=callbacks,
                    workers=4,
                    initial_epoch=0)
# Beispiel #3  (code-dump separator; commented out -- a bare name here raises NameError)
# 0
# Preloaded MPII validation tensors: images, poses, affine matrices, head sizes.
[mpii_x_val], [mpii_p_val, mpii_afmat_val, mpii_head_val] = mpii_val[0]
"""Human3.6H validation samples."""
# NOTE(review): "Human3.6H" above is likely a typo for "Human3.6M".
# h36m_val = BatchLoader(h36m, ['frame'],
# ['pose_w', 'pose_uvd', 'afmat', 'camera', 'action'], VALID_MODE,
# batch_size=h36m.get_length(VALID_MODE), shuffle=False)
# printcn(OKBLUE, 'Preloading Human3.6M validation samples...')
# [h36m_x_val], [h36m_pw_val, h36m_puvd_val, h36m_afmat_val, h36m_scam_val, \
# h36m_action] = h36m_val[0]
"""NTU subset of testing samples"""
# One-sample batches, unshuffled, so the evaluation order is deterministic.
ntu_te = BatchLoader(ntu_s1, ['frame'], ['ntuaction'],
                     TEST_MODE,
                     batch_size=1,
                     shuffle=False)
"""Save model callback."""
# Checkpoint callback for the full (pose + action) model, written under logdir.
save_model = SaveModel(os.path.join(logdir,
                                    'weights_3dp+ntu_ar_{epoch:03d}.hdf5'),
                       model_to_save=full_model)


def prepare_training(pose_trainable, lr):
    """Compile the split pose/action models with an SGD optimizer.

    Args:
        pose_trainable: whether the pose-branch weights remain trainable.
        lr: initial learning rate for the optimizer.
    """
    sgd = SGD(lr=lr, momentum=0.9, nesterov=True)
    # Alternative optimizer, kept for reference:
    # sgd = RMSprop(lr=lr)
    split_models = compile_split_models(
        full_model, cfg, sgd,
        pose_trainable=pose_trainable,
        ar_loss_weights=action_weight,
        copy_replica=cfg.pose_replica)
    # NOTE(review): split_models is never used -- presumably the call is kept
    # for its side effects on full_model; confirm and consider returning it.
    full_model.summary()
    # Create validation callbacks (currently disabled):
    # mpii_callback = MpiiEvalCallback(mpii_x_val, mpii_p_val, mpii_afmat_val,
# """
# Re-compile the model with a fresh default-LR RMSprop before the long run below.
loss = pose_regression_loss('l1l2bincross', 0.01)
model.compile(loss=loss, optimizer=RMSprop())
# model.summary()

def lr_scheduler(epoch, lr):
    """Mild step decay: multiply the rate by 0.99 at epochs 50, 60, 300 and 350.

    All other epochs pass the rate through unchanged; every call logs the rate
    via printcn.
    """
    decay_epochs = (50, 60, 300, 350)
    if epoch in decay_epochs:
        newlr = 0.99 * lr
        printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d' % (lr, newlr, epoch))
        return newlr

    printcn(OKBLUE, 'lr_scheduler: lr %g @ %d' % (lr, epoch))
    return lr

# Long training run on the MERL data with checkpointing and LR scheduling.
callbacks = []
# save_after_num_epoch=20: skip checkpoints for the earliest epochs.
callbacks.append(SaveModel(weights_file,save_after_num_epoch=20))
callbacks.append(LearningRateScheduler(lr_scheduler))

# NOTE(review): computed but not passed to fit_generator below (the
# steps_per_epoch argument is commented out) -- confirm this is intended.
steps_per_epoch = merl.get_length(TRAIN_MODE) // batch_size

model.fit_generator(data_tr,
        # steps_per_epoch=steps_per_epoch,
        epochs=1000,
        callbacks=callbacks,
        workers=8,
        initial_epoch=0)

# """
# Preloaded MPII validation tensors: images, poses, affine matrices, head sizes.
[mpii_x_val], [mpii_p_val, mpii_afmat_val, mpii_head_val] = mpii_val[0]
"""Human3.6H validation samples."""
# NOTE(review): "Human3.6H" above is likely a typo for "Human3.6M".
# h36m_val = BatchLoader(h36m, ['frame'],
# ['pose_w', 'pose_uvd', 'afmat', 'camera', 'action'], VALID_MODE,
# batch_size=h36m.get_length(VALID_MODE), shuffle=False)
# printcn(OKBLUE, 'Preloading Human3.6M validation samples...')
# [h36m_x_val], [h36m_pw_val, h36m_puvd_val, h36m_afmat_val, h36m_scam_val, \
# h36m_action] = h36m_val[0]
"""NTU subset of testing samples"""
# One-sample batches, unshuffled, so the evaluation order is deterministic.
ntu_te = BatchLoader(ntu_s1, ['frame'], ['ntuaction'],
                     TEST_MODE,
                     batch_size=1,
                     shuffle=False)
"""Save model callback."""
# NOTE(review): hard-coded absolute Windows path -- prefer a logdir/config
# value as used elsewhere in this file.
save_model = SaveModel(os.path.join(
    "C:\\networks\\deephar\\output\\spnet\\0429\\",
    'weights_3dp+ntu_ar_{epoch:03d}.hdf5'),
                       model_to_save=full_model)


def prepare_training(pose_trainable, lr):
    """Compile the split pose/action models with an SGD optimizer.

    Args:
        pose_trainable: whether the pose-branch weights remain trainable.
        lr: initial learning rate for the optimizer.
    """
    sgd = SGD(lr=lr, momentum=0.9, nesterov=True)
    # Alternative optimizer, kept for reference:
    # sgd = RMSprop(lr=lr)
    split_models = compile_split_models(
        full_model, cfg, sgd,
        pose_trainable=pose_trainable,
        ar_loss_weights=action_weight,
        copy_replica=cfg.pose_replica)
    # NOTE(review): split_models is never used -- presumably the call is kept
    # for its side effects on full_model; confirm and consider returning it.
    full_model.summary()
    # Create validation callbacks (currently disabled):
    # mpii_callback = MpiiEvalCallback(mpii_x_val, mpii_p_val, mpii_afmat_val,
# Beispiel #6  (code-dump separator; commented out -- a bare name here raises NameError)
# 0
# Hyper-parameters for the combined pose + action-recognition stage.
start_lr = 0.01
action_weight = 0.1  # weight of the action-recognition loss (ar_loss_weights)
#batch_size_mpii = 3
#batch_size_h36m = 4
#batch_size_ntu = 6 #1
#batch_clips = 1 # 8/4
"""Build the full model"""
full_model = spnet.build(cfg)
"""Load pre-trained weights from pose estimation and copy replica layers."""
# by_name=True loads only layers whose names match the checkpoint; the
# remaining (action) layers keep their fresh initialization.
full_model.load_weights(
    "E:\\Bachelorarbeit-SS20\\weights\\deephar\\output\\ntu_baseline\\0603\\weights_posebaseline_060.hdf5",
    by_name=True)
"""Save model callback."""

# Checkpoint callback for the full model.
# NOTE(review): hard-coded absolute Windows path.
save_model = SaveModel(os.path.join(
    "E:\\Bachelorarbeit-SS20\\weights\\deephar\\output\\spnet\\0625",
    'weights_3dp+benset_ar_{epoch:03d}.hdf5'),
                       model_to_save=full_model)

#with open("C:\\networks\\deephar\\output\\split_train_val_benset\\train_list.p", 'rb') as fp:
#    train_data_keys = pickle.load(fp)

#with open("C:\\networks\\deephar\\output\\split_train_val_benset\\val_list.p", 'rb') as fp:
#    val_data_keys = pickle.load(fp)

# Make the project-local `benset` package importable; this must run before
# the star imports below.
sys.path.append(os.path.join(os.getcwd(), 'benset'))

from benset_batchloader_benset import *
from benset_dataloader_ar import *

dataset_path_green = "E:\\Bachelorarbeit-SS20\\datasets\\Benset256_green"