def initialise_pred_cbs(logs_dirs, test_vis_dirs, smpl, X_cb, silh_cb, trainable_params, learning_rates):
    """ Create one epoch-end prediction callback per learning rate. """
    assert len(logs_dirs) == len(learning_rates)
    assert len(test_vis_dirs) == len(learning_rates)

    epoch_pred_cbs = []
    for lr_num, lr in enumerate(learning_rates):
        logs_dir = logs_dirs[lr_num]
        test_vis_dir = test_vis_dirs[lr_num]
        epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir, smpl, train_inputs=X_cb, train_silh=silh_cb, pred_path=test_vis_dir, period=1, trainable_params=trainable_params, visualise=False, testing=True)
        epoch_pred_cbs.append(epoch_pred_cb)

    return epoch_pred_cbs
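# A minimal usage sketch for initialise_pred_cbs; the directory names and
# learning rates below are hypothetical, not taken from the original script:
#
#   learning_rates = [0.1, 0.01]
#   logs_dirs = ["./logs/lr_0.1/", "./logs/lr_0.01/"]
#   test_vis_dirs = ["./vis/lr_0.1/", "./vis/lr_0.01/"]
#   epoch_pred_cbs = initialise_pred_cbs(logs_dirs, test_vis_dirs, smpl, X_cb,
#                                        silh_cb, trainable_params, learning_rates)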
    model_dir + "model.{epoch:02d}-{delta_d_hat_mse_loss:.4f}.hdf5",
    monitor='loss',
    verbose=1,
    save_best_only=False,
    mode='auto',
    period=MODEL_SAVE_PERIOD,
    save_weights_only=True)

# Predict on sample params at the end of every few epochs
if USE_GENERATOR:
    epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir,
                                             smpl,
                                             train_inputs=X_cb,
                                             train_silh=silh_cb,
                                             pred_path=train_vis_dir,
                                             period=PREDICTION_PERIOD,
                                             trainable_params=trainable_params,
                                             visualise=False,
                                             testing=False,
                                             RESET_PERIOD=RESET_PERIOD,
                                             data_samples=data_samples,
                                             train_gen="./cb_samples.npz")
else:
    epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir,
                                             smpl,
                                             train_inputs=X_cb,
                                             train_silh=silh_cb,
                                             pred_path=train_vis_dir,
                                             period=PREDICTION_PERIOD,
                                             trainable_params=trainable_params,
                                             visualise=False,
                                             testing=False,
                                             RESET_PERIOD=RESET_PERIOD,
                                             data_samples=data_samples)
test_sample_input = [np.array(test_sample_indices), np.array(test_sample_params), np.array(test_sample_pcs)]


""" Model set-up """

# Callback functions
# Create a model checkpoint after every few epochs
model_save_checkpoint = tf.keras.callbacks.ModelCheckpoint(
    model_dir + "model.{epoch:02d}-{loss:.2f}.hdf5",
    monitor='loss', verbose=1, save_best_only=False, mode='auto',
    period=save_period, save_weights_only=True)
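# Note: these examples target the older Keras API; in recent tf.keras releases
# the `period` argument of ModelCheckpoint is deprecated in favour of
# `save_freq`.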

# Predict on sample params at the end of every few epochs
#epoch_pred_cb = PredOnEpochEnd(logs_dir, smpl, x_train=train_sample_x, y_train=train_sample_pc, x_test=sample_x, y_test=sample_pc,
#                               pred_path=train_vis_dir, period=pred_period, visualise=False)
epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir, smpl, train_inputs=train_sample_input, train_silh=train_sample_silh, test_inputs=test_sample_input, test_silh=test_sample_silh,
        pred_path=train_vis_dir, period=pred_period, visualise=False)

# Make model entity
# Create initializer for the embedding layer
np.random.seed(10)
offset = np.zeros(85)
offset[42:45] = np.random.rand(3) # change left shoulder 1
offset[51:54] = np.random.rand(3) # change left shoulder 2
offset[57:60] = np.random.rand(3) # change left elbow
offset[63:66] = np.random.rand(3) # change left wrist
offset[69:72] = np.random.rand(3) # change left fingers
#offset[72:75] = np.random.rand(3) # change global translation
embedding_initializer = train_gen.yield_params(offset="right arm")
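# Sketch of how such an initializer is typically consumed; the layer sizes
# below are assumptions based on the 85-element SMPL parameter vector above:
#
#   from tensorflow.keras.layers import Embedding
#   emb_layer = Embedding(input_dim=data_samples, output_dim=85,
#                         embeddings_initializer=embedding_initializer)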

#optlearner_inputs, optlearner_outputs = OptLearnerArchitecture(embedding_initializer)
optlearner_inputs, optlearner_outputs = OptLearnerDistArchitecture(embedding_initializer)
Example #4
                                            #run_metadata=run_metadata
                                            )

# Print model summary
optlearner_model.summary()


# Visualisation callbacks
epoch_pred_cbs = []
epoch_pred_cbs_control = []
for lr_num, lr in enumerate(learning_rates):
    logs_dir = logs_dirs[lr_num]
    test_vis_dir = test_vis_dirs[lr_num]
    control_logs_dir = control_logs_dirs[lr_num]
    control_dir = control_dirs[lr_num]
    epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir, smpl, train_inputs=X_cb, train_silh=silh_cb, pred_path=test_vis_dir, period=1, trainable_params=trainable_params, visualise=False, testing=True)
    epoch_pred_cbs.append(epoch_pred_cb)
    epoch_pred_cb_control = OptLearnerPredOnEpochEnd(control_logs_dir, smpl, train_inputs=X_cb, train_silh=silh_cb, pred_path=control_dir, period=1, trainable_params=trainable_params, visualise=False, testing=True)
    epoch_pred_cbs_control.append(epoch_pred_cb_control)
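# Sketch of how these per-learning-rate callbacks are typically consumed; the
# loop below is an assumption based on the learned_optimizer signature that
# follows:
#
#   for lr, cb in zip(learning_rates, epoch_pred_cbs):
#       learned_optimizer(optlearner_model, epochs=50, lr=lr, cb=cb)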


def learned_optimizer(optlearner_model, epochs=50, lr=0.1, cb=epoch_pred_cb, mode="RODRIGUES"):
    metrics_names = optlearner_model.metrics_names
    print("metrics_names: " + str(metrics_names))
    named_scores = {}
    output_names = [output.op.name.split("/")[0] for output in optlearner_model.outputs]
    print("output_names: " + str(output_names))
    pred_index = output_names.index("delta_d_hat")
    #print("prediction index: " + str(pred_index))

    cb.set_model(optlearner_model)
    # (assumed) recompile with updated loss weights: only the point-cloud loss
    # is active
    optlearner_model.compile(optimizer=optlearner_model.optimizer,
                             loss=optlearner_model.loss,
                             loss_weights=[
                                 0.0,
                                 1.0,  # pc loss weight
                                 0.0,  # smpl loss weight
                                 0.0,
                                 0.0,
                                 0.0,
                                 0.0
                             ])
# Print model summary
optlearner_model.summary()

# Visualisation callback
epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir,
                                         smpl,
                                         train_inputs=X_cb,
                                         train_silh=silh_cb,
                                         pred_path=test_vis_dir,
                                         period=1,
                                         visualise=False)
epoch_pred_cb_control = OptLearnerPredOnEpochEnd(logs_dir,
                                                 smpl,
                                                 train_inputs=X_cb,
                                                 train_silh=silh_cb,
                                                 pred_path=control_dir,
                                                 period=1,
                                                 visualise=False)


def learned_optimizer(epochs=50, lr=0.9):
    epoch_pred_cb.set_model(optlearner_model)
    for i in range(epochs):
Example #6
# Create a model checkpoint after every few epochs; the file-name pattern is
# an assumption following the other examples on this page
model_save_checkpoint = ModelCheckpoint(
    model_dir + "model.{epoch:02d}-{loss:.2f}.hdf5",
    monitor='loss',
    verbose=1,
    save_best_only=False,
    mode='auto',
    period=50,
    save_weights_only=True)

# Predict on sample params at the end of every few epochs
#epoch_pred_cb = PredOnEpochEnd(logs_dir, smpl, x_train=train_sample_x, y_train=train_sample_pc, x_test=sample_x, y_test=sample_pc,
#                               pred_path=train_vis_dir, period=pred_period, visualise=False)
#epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir, smpl, train_inputs=train_sample_input, train_silh=train_sample_silh, test_inputs=test_sample_input, test_silh=test_sample_silh,
#        pred_path=train_vis_dir, period=pred_period, visualise=False)
PERIOD = 10
epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir,
                                         smpl,
                                         train_inputs=X_cb,
                                         train_silh=silh_cb,
                                         pred_path=train_vis_dir,
                                         period=PERIOD,
                                         visualise=False)


def emb_init_weights(emb_params):
    def emb_init_wrapper(param, offset=False):
        def emb_init(shape, dtype="float32"):
            """ Initializer for the embedding layer """
            emb_params_ = emb_params[:, param]

            if offset:
                #N = 8
                #k = 2 * np.pi / N
                k = np.pi
Example #7
def setup_train_cb(model_dir, MODEL_SAVE_PERIOD, USE_GENERATOR, logs_dir, smpl,
                   X_cb, silh_cb, train_vis_dir, PREDICTION_PERIOD,
                   trainable_params, RESET_PERIOD, data_samples, DISTRACTOR,
                   optlearner_model, update_generator, OFFSET_NT, POSE_OFFSET,
                   exp_dir, DIST, RESET_PRED_TO_ZERO, ARCHITECTURE):
    """ Assemble the training callbacks: model checkpointing, epoch-end
    prediction, distractor weight updates, and loss plotting. """
    # Callback functions
    # Create a model checkpoint after every few epochs
    model_save_checkpoint = ModelCheckpoint(
        model_dir + "model.{epoch:02d}-{delta_d_hat_mse_loss:.4f}.hdf5",
        monitor='loss',
        verbose=1,
        save_best_only=False,
        mode='auto',
        period=MODEL_SAVE_PERIOD,
        save_weights_only=True)

    # Predict on sample params at the end of every few epochs
    if USE_GENERATOR:
        epoch_pred_cb = OptLearnerPredOnEpochEnd(
            logs_dir,
            smpl,
            train_inputs=X_cb,
            train_silh=silh_cb,
            pred_path=train_vis_dir,
            period=PREDICTION_PERIOD,
            trainable_params=trainable_params,
            visualise=False,
            testing=False,
            RESET_PERIOD=RESET_PERIOD,
            data_samples=data_samples,
            train_gen=train_vis_dir,
            ARCHITECTURE=ARCHITECTURE)
    else:
        epoch_pred_cb = OptLearnerPredOnEpochEnd(
            logs_dir,
            smpl,
            train_inputs=X_cb,
            train_silh=silh_cb,
            pred_path=train_vis_dir,
            period=PREDICTION_PERIOD,
            trainable_params=trainable_params,
            visualise=False,
            testing=False,
            RESET_PERIOD=RESET_PERIOD,
            data_samples=data_samples,
            ARCHITECTURE=ARCHITECTURE)
        #epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir, smpl, train_inputs=X_cb, train_silh=silh_cb, pred_path=train_vis_dir, period=PREDICTION_PERIOD, trainable_params=trainable_params, visualise=False, testing=True, RESET_PERIOD=RESET_PERIOD, data_samples=data_samples)

    # Callback for distractor unit
    if USE_GENERATOR:
        weight_cb_wrapper = update_weights_wrapper(
            DISTRACTOR,
            data_samples,
            RESET_PERIOD,
            trainable_params,
            optlearner_model,
            generator=update_generator,
            offset_nt=OFFSET_NT,
            pose_offset=POSE_OFFSET,
            dist=DIST,
            reset_to_zero=RESET_PRED_TO_ZERO)
    else:
        weight_cb_wrapper = update_weights_wrapper(
            DISTRACTOR,
            data_samples,
            RESET_PERIOD,
            trainable_params,
            optlearner_model,
            offset_nt=OFFSET_NT,
            pose_offset=POSE_OFFSET,
            dist=DIST,
            reset_to_zero=RESET_PRED_TO_ZERO)
    #weight_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: weight_cb_wrapper(epoch, logs))
    weight_cb = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: weight_cb_wrapper(epoch, logs))
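    # Firing at on_epoch_begin (rather than on_epoch_end, as in the
    # commented-out alternative above) applies the updated distractor weights
    # before the epoch's batches are drawn.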

    # Callback for loss plotting during training
    plotting_cb = OptLearnerLossGraphCallback(exp_dir, graphing_period=100)

    # Collect callbacks
    callbacks = [model_save_checkpoint, epoch_pred_cb, weight_cb, plotting_cb]

    # Parameter error callback
    if USE_GENERATOR:
        param_error_cb = GeneratorParamErrorCallback(exp_dir, update_generator,
                                                     PREDICTION_PERIOD,
                                                     ARCHITECTURE)
        callbacks.append(param_error_cb)

    return callbacks
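# A minimal usage sketch for setup_train_cb; the fit_generator call and the
# EPOCHS constant are illustrative assumptions, not from the original script:
#
#   callbacks = setup_train_cb(model_dir, MODEL_SAVE_PERIOD, USE_GENERATOR,
#                              logs_dir, smpl, X_cb, silh_cb, train_vis_dir,
#                              PREDICTION_PERIOD, trainable_params, RESET_PERIOD,
#                              data_samples, DISTRACTOR, optlearner_model,
#                              update_generator, OFFSET_NT, POSE_OFFSET, exp_dir,
#                              DIST, RESET_PRED_TO_ZERO, ARCHITECTURE)
#   optlearner_model.fit_generator(update_generator, epochs=EPOCHS,
#                                  callbacks=callbacks)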
Example #8
        # (assumed) recompile with updated loss weights: only the point-cloud
        # loss is active
        optlearner_model.compile(optimizer=optlearner_model.optimizer,
                                 loss=optlearner_model.loss,
                                 loss_weights=[
                                     0.0,
                                     1.0,  # pc loss weight
                                     0.0,  # smpl loss weight
                                     0.0,
                                     0.0,
                                     0.0,
                                     0.0
                                 ])
        # Print model summary
        optlearner_model.summary()

        # Visualisation callback
        #epoch_pred_cb = OptLearnerPredOnEpochEnd(logs_dir, smpl, train_inputs=X_cb, train_silh=silh_cb, pred_path=test_vis_dir, period=1, trainable_params=trainable_params, visualise=False)
        epoch_pred_cb_control = OptLearnerPredOnEpochEnd(
            logs_dir_lrs[i],
            smpl,
            train_inputs=X_cb,
            train_silh=silh_cb,
            pred_path=control_dir_lrs[i],
            period=1,
            trainable_params=trainable_params,
            visualise=False)
        regular_optimizer(optlearner_model, epochs=50)

    exit(1)

# Evaluate the model performance
eval_log = optlearner_model.evaluate_generator(test_gen)

eval_string = ""
for i in range(len(eval_log)):
    eval_string += str(optlearner_model.metrics_names[i]) + ": " + str(
        eval_log[i]) + "  "