def convert_keras_models_to_tf(model_root):
    import keras
    from keras.models import load_model
    from keras import backend as K

    keras_models_dir = os.path.join(model_root, 'keras_models')

    tf_models_dir = os.path.join(model_root, 'tf_models')
    my_utils.create_dir_if_not_exist(tf_models_dir)

    model_names = [
        'autoencoder', 'decoder', 'encoder', 'energy_model',
        'l1_discrete_energy_model', 'direct_energy_model'
    ]

    K.clear_session()
    for model_name in model_names:
        model_path = os.path.join(keras_models_dir, model_name + '.hdf5')
        if os.path.exists(model_path):

            # Register the custom loss/regularizer/activation so load_model
            # can deserialize the trained models.
            keras.losses.energy_loss = lambda x, y: x
            keras.regularizers.reg = lambda: (lambda x: x)
            keras.activations.my_elu = my_utils.create_my_elu()

            model = load_model(model_path)
            k2tf.save_keras_model_as_tf(
                model, os.path.join(tf_models_dir, model_name + '.pb'))
            K.clear_session()
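
The module-level assignments above appear to work because Keras resolves custom names through the namespaces of keras.losses, keras.regularizers, and keras.activations during deserialization. An equivalent, more explicit variant passes the same stand-ins to load_model via its documented custom_objects argument (a sketch; my_utils.create_my_elu is the project helper used above):

model = load_model(model_path, custom_objects={
    'energy_loss': lambda x, y: x,       # stand-in loss; inference ignores it
    'reg': lambda: (lambda x: x),        # stand-in regularizer factory
    'my_elu': my_utils.create_my_elu(),  # custom activation used in the layers
})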
Example #2
def setup_submodel(model_path, unified_config,
                   type_str):  # type_str = 'low_res' or 'high_res'
    submodel_path = os.path.join(model_path,
                                 'unified_generate/' + type_str + '/')
    my_utils.create_dir_if_not_exist(submodel_path)

    # Copy in the mesh
    shutil.copy(unified_config[type_str + '_mesh'],
                os.path.join(submodel_path, 'tets.mesh'))

    sim_config, training_data_output_path = create_sim_config(
        submodel_path, unified_config, type_str)
    sim_config_path = os.path.join(submodel_path, 'sim_config.json')
    with open(sim_config_path, 'w') as f:
        json.dump(sim_config, f, indent=2)

    return submodel_path, training_data_output_path
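
A minimal usage sketch; the 'low_res'/'high_res' keys follow the type_str comment above, but the concrete paths are hypothetical:

# Hypothetical config; only the key read directly by setup_submodel is shown.
unified_config = {
    'low_res_mesh': 'meshes/coarse.mesh',   # copied to <submodel>/tets.mesh
    'high_res_mesh': 'meshes/fine.mesh',
    # ...plus whatever create_sim_config expects...
}
for type_str in ('low_res', 'high_res'):
    submodel_path, data_path = setup_submodel(
        'models/unified', unified_config, type_str)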
Example #3
def convert_keras_models_to_tf(model_root):
    import keras
    from keras.models import load_model, model_from_json
    from keras import backend as K

    keras_models_dir = os.path.join(model_root, 'keras_models')

    tf_models_dir = os.path.join(model_root, 'tf_models')
    my_utils.create_dir_if_not_exist(tf_models_dir)

    model_names = [
        'autoencoder', 'decoder', 'encoder', 'energy_model',
        'l1_discrete_energy_model', 'direct_energy_model'
    ]

    K.clear_session()
    for model_name in model_names:
        model_path = os.path.join(keras_models_dir, model_name + '.hdf5')
        if os.path.exists(model_path):

            # Register the custom losses/regularizer/activation so Keras can
            # deserialize the trained models.
            keras.losses.energy_loss = lambda x, y: x
            keras.losses.UTMU_loss = lambda x, y: x
            # keras.losses.contractive_loss = lambda x,y: x
            keras.regularizers.reg = lambda: (lambda x: x)
            keras.activations.my_elu = my_utils.create_my_elu()

            # def make_UTMU_loss():
            #     K_UTMU = K.constant(value=numpy.random.random((30,30)))
            #     def UTMU_loss(y_true, y_pred):
            #         u = y_true - y_pred
            #         return K.mean(K.dot(u, K.dot(K_UTMU, K.transpose(u))), axis=-1) # TODO should mean be over an axis?

            #     return UTMU_loss
            # keras.losses.UTMU_loss = make_UTMU_loss()

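            # Rebuilding from architecture JSON + weights H5 skips compiling,
            # so the training-time losses above never need to be deserialized
            # from a full-model HDF5 file.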
            model = model_from_json(
                open(os.path.join(keras_models_dir,
                                  model_name + '.json')).read())
            model.load_weights(
                os.path.join(keras_models_dir, model_name + '.h5'))
            # model = load_model(model_path,  custom_objects={'contractive_loss': lambda x,y: x, 'lam':1e-4})
            k2tf.save_keras_model_as_tf(
                model, os.path.join(tf_models_dir, model_name + '.pb'))
            K.clear_session()
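
This variant expects each model as an architecture/weights pair rather than a single .hdf5. The save side would use the standard Keras calls (a sketch, assuming a compiled Keras model and the same naming scheme):

# Sketch: write the .json/.h5 pair that the loop above reads back.
with open(os.path.join(keras_models_dir, model_name + '.json'), 'w') as f:
    f.write(model.to_json())                 # architecture only
model.save_weights(os.path.join(keras_models_dir, model_name + '.h5'))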
Example #4
def convert_keras_models_to_tf(model_root):
    from keras.models import load_model
    from keras import backend as K

    keras_models_dir = os.path.join(model_root, 'keras_models')

    tf_models_dir = os.path.join(model_root, 'tf_models')
    my_utils.create_dir_if_not_exist(tf_models_dir)

    model_names = [
        'autoencoder', 'decoder', 'encoder', 'energy_model',
        'discrete_energy_model'
    ]

    K.clear_session()
    for model_name in model_names:
        model_path = os.path.join(keras_models_dir, model_name + '.hdf5')
        if os.path.exists(model_path):
            import keras.losses
            keras.losses.energy_loss = lambda x, y: x
            model = load_model(model_path)
            k2tf.save_keras_model_as_tf(
                model, os.path.join(tf_models_dir, model_name + '.pb'))
            K.clear_session()
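
k2tf.save_keras_model_as_tf is not shown in these examples. Under TF1-style Keras, a typical implementation freezes the session graph to constants and writes a .pb; this is only a sketch of that common pattern, not necessarily the project's helper:

# Sketch of a Keras -> frozen-graph .pb conversion (TF1 API).
import os
import tensorflow as tf
from keras import backend as K
from tensorflow.python.framework import graph_util

def save_keras_model_as_tf(model, pb_path):
    sess = K.get_session()
    # Bake variables into constants so the graph file is self-contained.
    frozen = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(),
        [out.op.name for out in model.outputs])
    tf.train.write_graph(frozen, os.path.dirname(pb_path),
                         os.path.basename(pb_path), as_text=False)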
Example #5
def fit_nn_direct_energy(
    reduced_space_samples,
    energy_samples,
    reduced_space_validation_samples,
    energy_validation_samples,
    activation='relu',
    epochs=100,
    batch_size=100,
    layers=[32, 16],
    do_fine_tuning=False,
    model_root=None,
    autoencoder_config=None,
    energy_model_config=None
):  # TODO should probably just pass in both configs here
    """
    Returns and encoder and decoder for going into and out of the reduced latent space.
    If pca_weights is given, then do a weighted mse.
    If pca_object is given, then the first and final layers will do a pca transformation of the reduced_space_samples.
    """

    import keras
    import keras.backend as K
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.callbacks import History

    flatten_data, unflatten_data = my_utils.get_flattners(
        reduced_space_samples)

    # TODO: Do I need to shuffle?
    reduced_space_samples = flatten_data(reduced_space_samples)

    input = Input(shape=(len(reduced_space_samples[0]), ),
                  name="energy_model_input")
    output = input

    for i, layer_width in enumerate(layers):
        output = Dense(layer_width,
                       activation=activation,
                       name="dense_decode_layer_" + str(i))(output)

    output = Dense(
        len(energy_samples[0]),
        activation='relu',
        name="output_layer" + str(i),
    )(output)

    model = Model(input, output)

    optimizer = keras.optimizers.Adam(lr=0.1,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      epsilon=1e-08,
                                      decay=0)
    model.compile(
        optimizer=optimizer,
        loss='mean_absolute_percentage_error',
    )

    def mean_absolute_percentage_error(y_true, y_pred):

        ## Note: does not handle mix 1d representation
        #if _is_1d(y_true):
        #    y_true, y_pred = _check_1d_array(y_true, y_pred)

        return numpy.mean(numpy.abs((y_true - y_pred) / y_true)) * 100

    hist = History()
    model_start_time = time.time()
    from keras.callbacks import Callback
    idx = numpy.random.randint(len(reduced_space_samples), size=10)

    class MyHistoryCallback(Callback):
        """Callback that records events into a `History` object.
        This callback is automatically applied to
        every Keras model. The `History` object
        gets returned by the `fit` method of models.
        """
        def on_train_begin(self, logs=None):
            self.epoch = []
            self.history = {}

        def on_epoch_end(self, epoch, logs=None):
            actual = energy_samples[idx, :]
            pred = self.model.predict(reduced_space_samples[idx, :])
            mse = mean_absolute_percentage_error(actual, pred)
            print("Actual energy: ", actual)
            print("Predicted energy: ", pred)

            self.history.setdefault('mse', []).append(mse)
            # self.history.setdefault('val_mse', []).append(val_mse)
            print()
            print("Percentage error: ", mse)
            # print("Val Mean squared error: ", val_mse)

            logs = logs or {}
            self.epoch.append(epoch)
            for k, v in logs.items():
                self.history.setdefault(k, []).append(v)

    my_hist_callback = MyHistoryCallback()

    model.fit(
        reduced_space_samples,
        energy_samples,
        epochs=epochs,
        batch_size=batch_size,
        shuffle=True,
        # validation_data=(reduced_space_validation_samples, energy_validation_samples),
        callbacks=[hist, my_hist_callback])

    if model_root:
        print("Saving model...")
        models_with_names = [
            (model, "direct_energy_model"),
        ]

        keras_models_dir = os.path.join(model_root, 'keras_models')
        my_utils.create_dir_if_not_exist(keras_models_dir)

        for keras_model, name in models_with_names:
            keras_model_file = os.path.join(keras_models_dir, name + ".hdf5")
            keras_model.save(keras_model_file)  # Save the keras model

        print("Finished saving model.")

    print("Total training time: ", time.time() - model_start_time)

    # model.save('discrete_energy_model.hdf5')
    def decode(encoded_data):
        return unflatten_data(model.predict(encoded_data))

    return decode, hist
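
A toy invocation showing the expected shapes; the data here is random and purely illustrative, and the two validation arguments are unused by this function, so None is safe:

# Hypothetical call: 1000 reduced-space samples of dim 3, one energy each.
import numpy
q = numpy.random.rand(1000, 3)
E = numpy.random.rand(1000, 1) + 0.1   # keep energies away from zero for MAPE
predict_energy, hist = fit_nn_direct_energy(
    q, E, None, None, epochs=10, batch_size=32)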
Example #6
def binary_discrete_nn_analysis(
    reduced_space_samples,
    energy_samples,
    reduced_space_validation_samples,
    energy_validation_samples,
    activation='relu',
    epochs=100,
    batch_size=100,
    layers=[32, 16],
    do_fine_tuning=False,
    model_root=None,
    autoencoder_config=None,
    energy_model_config=None
):  # TODO should probably just pass in both configs here
    """
    Returns and encoder and decoder for going into and out of the reduced latent space.
    If pca_weights is given, then do a weighted mse.
    If pca_object is given, then the first and final layers will do a pca transformation of the reduced_space_samples.
    """

    import keras
    import keras.backend as K
    from keras.layers import Input, Dense, multiply, ActivityRegularization
    from keras.models import Model
    from keras.callbacks import History

    flatten_data, unflatten_data = my_utils.get_flattners(
        reduced_space_samples)

    # TODO: Do I need to shuffle?
    reduced_space_samples = flatten_data(reduced_space_samples)
    energy_samples = flatten_data(energy_samples)
    reduced_space_validation_samples = flatten_data(
        reduced_space_samples[:10] if reduced_space_validation_samples is None
        else reduced_space_validation_samples)
    energy_validation_samples = flatten_data(
        energy_samples[:10]
        if energy_validation_samples is None else energy_validation_samples)

    input = Input(shape=(len(reduced_space_samples[0]), ),
                  name="energy_model_input")
    output = input

    for i, layer_width in enumerate(layers):
        output = Dense(layer_width,
                       activation=activation,
                       name="dense_decode_layer_" + str(i))(output)

    fixed_input = Input(tensor=K.variable([[1.0]]))
    weights = Dense(len(energy_samples[0]), activation=None,
                    use_bias=False)(fixed_input)

    # hard_sigmoid?
    output = Dense(
        len(energy_samples[0]),
        activation='hard_sigmoid',
        name="output_layer" + str(i),
        # activity_regularizer=keras.regularizers.l1(0.0001), # TODO what val? default was 0.01
    )(output)

    output = multiply([output, weights])
    output = ActivityRegularization(l1=0.001, l2=0.0)(output)

    model = Model([input, fixed_input], output)

    def energy_loss(y_true, y_pred):
        return K.mean(
            K.square(
                K.sum(y_pred * y_true, axis=-1) -
                K.sum(y_true, axis=-1)))  # TODO should mean be over an axis?

    def energy_loss_numpy(y_true, y_pred):
        return numpy.mean(
            numpy.square(
                numpy.sum(y_pred * y_true, axis=-1) -
                numpy.sum(y_true, axis=-1)))
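    # Worked check: with y_true = [[2., 3.]] and y_pred = [[1., 1.]],
    # sum(y_pred * y_true) = 5 and sum(y_true) = 5, so the loss is 0:
    # the predicted weights reproduce the total energy exactly.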

    optimizer = keras.optimizers.Adam(lr=0.01,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      epsilon=1e-08,
                                      decay=0)
    model.compile(
        optimizer=optimizer,
        loss=energy_loss  #'mean_squared_error',
    )

    hist = History()
    model_start_time = time.time()
    from keras.callbacks import Callback
    idx = numpy.random.randint(len(reduced_space_samples), size=10)

    class MyHistoryCallback(Callback):
        """Callback that records events into a `History` object.
        This callback is automatically applied to
        every Keras model. The `History` object
        gets returned by the `fit` method of models.
        """
        def on_train_begin(self, logs=None):
            self.epoch = []
            self.history = {}

        def on_epoch_end(self, epoch, logs=None):
            actual = energy_samples[idx, :]
            pred = self.model.predict(reduced_space_samples[idx, :])
            mse = energy_loss_numpy(actual, pred)
            print("Actual energy: ", (actual).sum(axis=1))
            print("Predicted energy: ", numpy.sum(pred * actual, axis=-1))
            nonzeros = [p[numpy.nonzero(p)] for p in pred]
            # print("non zero weights: ", nonzeros)
            print("len(nonzero):", [len(nz) for nz in nonzeros])
            #val_mse = mean_squared_error(high_dim_pca_decode(self.model.predict(encoded_high_dim_pca_test_displacements)), test_displacements)
            self.history.setdefault('mse', []).append(mse)
            # self.history.setdefault('val_mse', []).append(val_mse)
            print()
            print("Mean squared error: ", mse)
            # print("Val Mean squared error: ", val_mse)

            logs = logs or {}
            self.epoch.append(epoch)
            for k, v in logs.items():
                self.history.setdefault(k, []).append(v)

    my_hist_callback = MyHistoryCallback()

    model.fit(
        reduced_space_samples,
        energy_samples,
        epochs=epochs,
        batch_size=batch_size,
        shuffle=True,
        # validation_data=(reduced_space_validation_samples, energy_validation_samples),
        callbacks=[hist, my_hist_callback])

    if model_root:
        print("Saving model...")
        models_with_names = [
            (model, "l1_discrete_energy_model"),
        ]

        keras_models_dir = os.path.join(model_root, 'keras_models')
        my_utils.create_dir_if_not_exist(keras_models_dir)

        for keras_model, name in models_with_names:
            keras_model_file = os.path.join(keras_models_dir, name + ".hdf5")
            keras_model.save(keras_model_file)  # Save the keras model

        print("Finished saving model.")

    print("Total training time: ", time.time() - model_start_time)

    # model.save('discrete_energy_model.hdf5')
    def decode(encoded_data):
        return unflatten_data(model.predict(encoded_data))

    return decode, hist
Example #7
def autoencoder_analysis(
    data,
    test_data,
    activation='relu',
    latent_dim=3,
    epochs=100,
    batch_size=100,
    learning_rate=0.001,
    layers=[32, 16],
    pca_weights=None,
    pca_basis=None,
    do_fine_tuning=False,
    model_root=None,
    autoencoder_config=None,
    callback=None,
):  # TODO should probably just pass in both configs here
    """
    Returns an encoder and decoder for going into and out of the reduced latent space.
    If pca_weights is given, then do a weighted mse.
    """
    assert not (
        (pca_weights is not None) and
        (pca_basis is not None))  # pca_weights is incompatible with pca_basis

    import keras
    import keras.backend as K
    from keras.layers import Input, Dense, Lambda
    from keras.models import Model
    from keras.callbacks import History

    flatten_data, unflatten_data = my_utils.get_flattners(data)

    # TODO: Do I need to shuffle?
    train_data = data
    test_data = test_data if test_data is not None else data[:10]

    ## Preprocess the data
    # mean = numpy.mean(train_data, axis=0)
    # std = numpy.std(train_data, axis=0)

    mean = numpy.mean(train_data)
    std = numpy.std(train_data)
    s_min = numpy.min(train_data)
    s_max = numpy.max(train_data)
    print("train mean:", mean)
    print("train std:", std)

    # TODO dig into this. Why does it mess up?
    def normalize(data):
        return data
        #return (data - mean) / std
        # return numpy.nan_to_num((train_data - s_min) / (s_max - s_min))
    def denormalize(data):
        return data
        #return data * std + mean
        # return data * (s_max - s_min) + s_mi

    # My elu fixes the jvp problem
    if activation == "my_elu":
        activation = my_utils.create_my_elu()
    ## Set up the network

    input = Input(shape=(len(train_data[0]), ), name="encoder_input")
    output = input

    if pca_basis is not None:
        # output = Lambda(pca_transform_layer)(output)
        # output = PCALayer(pca_object, is_inv=False, fine_tune=do_fine_tuning)(output)

        W = pca_basis
        b = numpy.zeros(pca_basis.shape[1])
        output = Dense(pca_basis.shape[1],
                       activation='linear',
                       weights=[W, b],
                       trainable=do_fine_tuning,
                       name="pca_encode_layer")(output)

    for i, layer_width in enumerate(layers):
        act = activation
        # if i == len(layers) - 1:
        #     act = 'sigmoid'
        output = Dense(layer_width,
                       activation=act,
                       name="dense_encode_layer_" + str(i))(output)

    # -- Encoded layer
    output = Dense(latent_dim, activation=activation, name="encoded_layer")(
        output)  # TODO Tanh into encoded layer to bound vars?

    for i, layer_width in enumerate(reversed(layers)):
        output = Dense(layer_width,
                       activation=activation,
                       name="dense_decode_layer_" + str(i))(output)

    if pca_basis is not None:
        output = Dense(
            pca_basis.shape[1],
            activation='linear',
            name="to_pca_decode_layer"
        )(
            output
        )  ## TODO is this right?? Possibly should just change earlier layer width
        # output = Lambda(pca_inv_transform_layer)(output)
        # output = PCALayer(pca_object, is_inv=True, fine_tune=do_fine_tuning)(output)

        W = pca_basis.T
        b = numpy.zeros(len(train_data[0]))
        output = Dense(len(train_data[0]),
                       activation='linear',
                       weights=[W, b],
                       trainable=do_fine_tuning,
                       name="pca_decode_layer")(output)
    else:
        output = Dense(
            len(train_data[0]),
            activation='linear',
            name="decoder_output_layer"
        )(
            output
        )  #'linear',)(output) # First test seems to indicate no change on output with linear

    autoencoder = Model(input, output)
    ## Set the optimization parameters

    optimizer = keras.optimizers.Adam(lr=learning_rate,
                                      beta_1=0.9,
                                      beta_2=0.999,
                                      epsilon=1e-08,
                                      decay=0)
    autoencoder.compile(
        optimizer=optimizer,
        loss='mean_squared_error',
    )

    hist = History()
    model_start_time = time.time()

    autoencoder.fit(train_data,
                    train_data,
                    epochs=epochs,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_data=(test_data, test_data),
                    callbacks=[hist] + ([callback] if callback else []))

    training_time = time.time() - model_start_time
    print("Total training time: ", training_time)

    autoencoder, encoder, decoder = my_utils.decompose_ae(autoencoder,
                                                          do_energy=False)

    # Save the models in both tensorflow and keras formats
    if model_root:
        print("Saving model...")
        models_with_names = [
            (autoencoder, "autoencoder"),
            (encoder, "encoder"),
            (decoder, "decoder"),
        ]

        keras_models_dir = os.path.join(model_root, 'keras_models')
        my_utils.create_dir_if_not_exist(keras_models_dir)

        for keras_model, name in models_with_names:
            keras_model_file = os.path.join(keras_models_dir, name + ".hdf5")
            keras_model.save(keras_model_file)  # Save the keras model

        print("Finished saving model.")

    def encode(decoded_data):
        return encoder.predict(normalize(flatten_data(decoded_data)))

    def decode(encoded_data):
        return unflatten_data(denormalize(decoder.predict(encoded_data)))

    return encode, decode, training_time
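
Example #8 below calls a pca_no_centering helper that is not shown here. Based on how its return values are used (a basis U with x @ U encoding and z @ U.T decoding), a plausible SVD-based sketch is:

# Sketch of pca_no_centering (assumed behavior; the real helper may differ).
import numpy

def pca_no_centering(samples, dim):
    # SVD on the raw samples, with no mean subtraction (hence the name).
    _, s, Vt = numpy.linalg.svd(samples, full_matrices=False)
    U = Vt[:dim].T                                # (n_features, dim) basis
    explained_var = (s[:dim] ** 2) / numpy.sum(s ** 2)
    encode = lambda x: x @ U
    decode = lambda z: z @ U.T
    return U, explained_var, encode, decode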
Example #8
def generate_model(
        model_root,  # This is the root of the standard formatted directory created by build-model.py
        learning_config,  # Comes from the training config file
):
    autoencoder_config = learning_config['autoencoder_config']
    energy_model_config = learning_config['energy_model_config']
    save_objs = learning_config['save_objs']
    train_in_full_space = False
    record_full_mse_each_epoch = False

    training_data_path = os.path.join(model_root, 'training_data/training')
    validation_data_path = os.path.join(model_root, 'training_data/validation')
    # Loading the rest pose
    base_verts, face_indices = my_utils.load_base_vert_and_face_dmat_to_numpy(
        training_data_path)
    base_verts_eig = p2e(base_verts)
    face_indices_eig = p2e(face_indices)

    # Loading displacements for training data
    displacements = my_utils.load_displacement_dmats_to_numpy(
        training_data_path)
    if os.path.exists(validation_data_path):
        test_displacements = my_utils.load_displacement_dmats_to_numpy(
            validation_data_path)
    else:
        test_displacements = None
    flatten_data, unflatten_data = my_utils.get_flattners(displacements)
    displacements = flatten_data(displacements)
    test_displacements = flatten_data(test_displacements)

    # Do the training
    pca_ae_train_dim = autoencoder_config['pca_layer_dim']
    pca_compare_dims = autoencoder_config['pca_compare_dims']
    ae_dim = autoencoder_config['ae_encoded_dim']
    ae_epochs = autoencoder_config['training_epochs']
    batch_size = autoencoder_config['batch_size'] if autoencoder_config[
        'batch_size'] > 0 else len(displacements)
    learning_rate = autoencoder_config['learning_rate']
    layers = autoencoder_config['non_pca_layer_sizes']
    do_fine_tuning = autoencoder_config['do_fine_tuning']
    activation = autoencoder_config['activation']
    save_pca_components = True
    save_autoencoder = True

    training_results = {
        'autoencoder': {},
        'pca': {},
    }

    # Normal low dim pca first
    for pca_dim in pca_compare_dims:
        U, explained_var, pca_encode, pca_decode = pca_no_centering(
            displacements, pca_dim)

        encoded_pca_displacements = pca_encode(displacements)
        decoded_pca_displacements = pca_decode(encoded_pca_displacements)
        if test_displacements is not None:
            encoded_pca_test_displacements = pca_encode(test_displacements)
            decoded_pca_test_displacements = pca_decode(
                encoded_pca_test_displacements)

        if save_pca_components:
            pca_results_filename = os.path.join(
                model_root,
                'pca_results/pca_components_' + str(pca_dim) + '.dmat')
            print('Saving pca results to', pca_results_filename)
            my_utils.create_dir_if_not_exist(
                os.path.dirname(pca_results_filename))
            my_utils.save_numpy_mat_to_dmat(pca_results_filename,
                                            numpy.ascontiguousarray(U))

            training_mse = mean_squared_error(
                flatten_data(decoded_pca_displacements),
                flatten_data(displacements))
            if test_displacements is not None:
                validation_mse = mean_squared_error(
                    flatten_data(decoded_pca_test_displacements),
                    flatten_data(test_displacements))
            pca_key = str(pca_dim) + '-components'
            training_results['pca'][pca_key] = {
                'training-mse': training_mse,
                'explained_var': numpy.sum(explained_var),
            }
            if test_displacements is not None:
                training_results['pca'][pca_key]['validation-mse'] = validation_mse

            print(str(pca_dim) + ' training MSE: ', training_mse)
            print(
                str(pca_dim) + ' explained variance: ',
                numpy.sum(explained_var))
            print()

    # High dim pca to train autoencoder
    pca_start_time = time.time()
    U_ae, explained_var, high_dim_pca_encode, high_dim_pca_decode = pca_no_centering(
        displacements, pca_ae_train_dim)
    pca_train_time = time.time() - pca_start_time

    if train_in_full_space:
        high_dim_pca_encode = lambda x: x
        high_dim_pca_decode = lambda x: x
    encoded_high_dim_pca_displacements = high_dim_pca_encode(displacements)
    if test_displacements is not None:
        encoded_high_dim_pca_test_displacements = high_dim_pca_encode(
            test_displacements)
    else:
        encoded_high_dim_pca_test_displacements = None

    from keras.callbacks import Callback

    class MyHistoryCallback(Callback):
        """Callback that records events into a `History` object.
        This callback is automatically applied to
        every Keras model. The `History` object
        gets returned by the `fit` method of models.
        """
        def on_train_begin(self, logs=None):
            self.epoch = []
            self.history = {}

        def on_epoch_end(self, epoch, logs=None):
            if record_full_mse_each_epoch:
                mse = mean_squared_error(
                    high_dim_pca_decode(
                        self.model.predict(
                            encoded_high_dim_pca_displacements)),
                    displacements)
                val_mse = mean_squared_error(
                    high_dim_pca_decode(
                        self.model.predict(
                            encoded_high_dim_pca_test_displacements)),
                    test_displacements)
                self.history.setdefault('mse', []).append(mse)
                self.history.setdefault('val_mse', []).append(val_mse)
                print()
                print("Mean squared error: ", mse)
                print("Val Mean squared error: ", val_mse)

            logs = logs or {}
            self.epoch.append(epoch)
            for k, v in logs.items():
                self.history.setdefault(k, []).append(v)

    my_hist_callback = MyHistoryCallback()

    ae_pca_basis_path = os.path.join(model_root,
                                     'pca_results/ae_pca_components.dmat')
    print('Saving pca results to', ae_pca_basis_path)
    my_utils.save_numpy_mat_to_dmat(ae_pca_basis_path,
                                    numpy.ascontiguousarray(U_ae))
    ae_encode, ae_decode, ae_train_time = autoencoder_analysis(
        # displacements, # Uncomment to train in full space
        # test_displacements,
        encoded_high_dim_pca_displacements,
        encoded_high_dim_pca_test_displacements,
        activation=activation,
        latent_dim=ae_dim,
        epochs=ae_epochs,
        batch_size=batch_size,
        learning_rate=learning_rate,
        layers=layers,  # [200, 200, 50] First two layers being wide seems best so far. maybe an additional narrow third 0.0055 see
        # pca_basis=U_ae,
        do_fine_tuning=do_fine_tuning,
        model_root=model_root,
        autoencoder_config=autoencoder_config,
        callback=my_hist_callback,
    )

    # decoded_autoencoder_displacements = ae_decode(ae_encode(displacements))
    # decoded_autoencoder_test_displacements = ae_decode(ae_encode(test_displacements))
    decoded_autoencoder_displacements = high_dim_pca_decode(
        ae_decode(ae_encode(high_dim_pca_encode(displacements))))
    if test_displacements is not None:
        decoded_autoencoder_test_displacements = high_dim_pca_decode(
            ae_decode(ae_encode(high_dim_pca_encode(test_displacements))))
    ae_training_mse = mean_squared_error(
        flatten_data(decoded_autoencoder_displacements),
        flatten_data(displacements))
    training_results['autoencoder']['training-mse'] = ae_training_mse
    if test_displacements is not None:
        training_results['autoencoder']['validation-mse'] = mean_squared_error(
            flatten_data(decoded_autoencoder_test_displacements),
            flatten_data(test_displacements))

    print('Final AE training MSE:', ae_training_mse)
    training_results['autoencoder']['ae_training_time_s'] = ae_train_time
    training_results['autoencoder']['pca_training_time_s'] = pca_train_time
    # TODO output energy loss as well
    with open(os.path.join(model_root, 'training_results.json'), 'w') as f:
        json.dump(training_results, f, indent=2)

    history_path = os.path.join(model_root, "training_history.json")
    with open(history_path, 'w') as f:
        json.dump(my_hist_callback.history, f, indent=2)

    if save_objs:
        print("Saving objs of decoded training data...")
        obj_dir = os.path.join(model_root, 'decoded_training_objs/')
        if not os.path.exists(obj_dir):
            os.makedirs(obj_dir)

        for i, dec_displ in enumerate(decoded_autoencoder_displacements):
            decoded_verts = base_verts + dec_displ.reshape(
                (len(base_verts), 3))
            decoded_verts_eig = p2e(decoded_verts)

            obj_path = os.path.join(obj_dir, "decoded_" + str(i) + ".obj")
            igl.writeOBJ(obj_path, decoded_verts_eig, face_indices_eig)
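
The save_mat_with_prefix helper used in the next function is not shown; from its call sites (it returns the path it wrote, which is later passed to a subprocess), a sketch could be:

# Hypothetical sketch of save_mat_with_prefix; the real helper may differ.
def save_mat_with_prefix(dir_path, prefix, mat):
    path = os.path.join(dir_path, prefix + '.dmat')
    my_utils.save_numpy_mat_to_dmat(path, numpy.ascontiguousarray(mat))
    return path
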
def reencode_and_augment_training_data(model_root, num_extra_per_poses=0):
    """ 
    Loads existing traing data and generates new encoding / energy vector pairs for 
    1. Energy evalutated on decoded displacements of training data.
    2. Energy evaluated on poses sampled around the encoded training poses.
    """
    training_data_path = os.path.join(model_root, 'training_data/training')
    U = igl.eigen.MatrixXd()
    igl.readDMAT(
        os.path.join(model_root, 'pca_results/ae_pca_components.dmat'), U)

    displacements = my_utils.load_displacement_dmats_to_numpy(
        training_data_path)
    flatten_displ, unflatten_displ = my_utils.get_flattners(displacements)

    from keras.models import Model, load_model
    encoder = load_model(os.path.join(model_root, 'keras_models/encoder.hdf5'))
    decoder = load_model(os.path.join(model_root, 'keras_models/decoder.hdf5'))

    encoded_displacements = encoder.predict(flatten_displ(displacements) @ U)
    decoded_displacements = decoder.predict(
        encoded_displacements) @ U.transpose()

    print('Generating extra samples...')
    extra_encoded_displacements = sample_more_encoded_displacements(
        encoded_displacements, num_extra_per_poses)
    extra_decoded_displacements = decoder.predict(
        extra_encoded_displacements) @ U.transpose()

    sampled_training_data_path = os.path.join(
        model_root, 'augmented_training_data/sampled/')
    reencoded_training_data_path = os.path.join(
        model_root, 'augmented_training_data/reencoded/')
    my_utils.create_dir_if_not_exist(sampled_training_data_path)
    my_utils.create_dir_if_not_exist(reencoded_training_data_path)

    extra_displacements_path = save_mat_with_prefix(
        sampled_training_data_path, 'displacements',
        extra_decoded_displacements)
    save_mat_with_prefix(sampled_training_data_path, 'enc_displacements',
                         extra_encoded_displacements)
    reencoded_displacements_path = save_mat_with_prefix(
        reencoded_training_data_path, 'displacements', decoded_displacements)
    save_mat_with_prefix(reencoded_training_data_path, 'enc_displacements',
                         encoded_displacements)

    tet_mesh_path = os.path.join(model_root, 'tets.mesh')
    parameters_path = os.path.join(model_root,
                                   'training_data/training/parameters.json')

    print('Computing energies for reencoded poses...')
    subprocess.call([
        './generate_data_for_pose/build/bin/GenerateDataForPose',
        reencoded_displacements_path, tet_mesh_path, parameters_path
    ])

    print('Computing energies for samples...')
    subprocess.call([
        './generate_data_for_pose/build/bin/GenerateDataForPose',
        extra_displacements_path, tet_mesh_path, parameters_path
    ])