def plot_and_save_predictions(hyperp, run_options, file_paths, fig_size):
    """Plot and save the true/predicted parameter and state of the thermal fin.

    Loads the test parameter and the network's parameter and state predictions
    from CSV files, recomputes the true state by solving the forward problem,
    plots each field (2D or 3D according to run_options) and writes the
    figures to the paths in file_paths. Relative prediction errors are
    printed to stdout.

    Args:
        hyperp: hyperparameters; hyperp.data_type is 'full' or 'bnd'.
        run_options: run options; flags select fin dimension (2D/3D) and
            parameter type (nine-parameter vs varying).
        file_paths: container of all load/save file paths.
        fig_size: figure size passed through to plot_2D.
    """
    ###############################################################################
    #                     Form Fenics Domain and Load Predictions                 #
    ###############################################################################
    #=== Form Fenics Domain ===#
    if run_options.fin_dimensions_2D == 1:
        V, _ = get_space_2D(40)
    if run_options.fin_dimensions_3D == 1:
        V, mesh = get_space_3D(40)

    solver = Fin(V)

    def _backfill_interpolation_zeros(parameter_dl):
        # Interpolation in 3D sometimes produces spurious zero entries;
        # replace each zero with the value of its left neighbour.
        parameter_values = parameter_dl.vector().get_local()
        zero_indices = np.where(parameter_values == 0)[0]
        for ind in zero_indices:
            parameter_values[ind] = parameter_values[ind - 1]
        return convert_array_to_dolfin_function(V, parameter_values)

    #=== Load Observation Indices, Test and Predicted Parameters and State ===#
    df_obs_indices = pd.read_csv(file_paths.observation_indices_savefilepath +
                                 '.csv')
    obs_indices = df_obs_indices.to_numpy()

    df_parameter_test = pd.read_csv(file_paths.savefile_name_parameter_test +
                                    '.csv')
    parameter_test = df_parameter_test.to_numpy()
    df_parameter_pred = pd.read_csv(file_paths.savefile_name_parameter_pred +
                                    '.csv')
    parameter_pred = df_parameter_pred.to_numpy()

    df_state_pred = pd.read_csv(file_paths.savefile_name_state_pred + '.csv')
    state_pred = df_state_pred.to_numpy()

    ###############################################################################
    #                             Plotting Predictions                            #
    ###############################################################################
    #=== Converting Test Parameter Into Dolfin Object and Computed State Observation ===#
    if run_options.data_thermal_fin_nine == 1:
        parameter_test_dl = solver.nine_param_to_function(parameter_test)
        if run_options.fin_dimensions_3D == 1:
            parameter_test_dl = _backfill_interpolation_zeros(parameter_test_dl)
    if run_options.data_thermal_fin_vary == 1:
        parameter_test_dl = convert_array_to_dolfin_function(V, parameter_test)

    state_test_dl, _ = solver.forward(
        parameter_test_dl)  # generate true state for comparison
    state_test = state_test_dl.vector().get_local()
    if hyperp.data_type == 'bnd':
        state_test = state_test[obs_indices].flatten()

    #=== Plotting Test Parameter and Test State ===#
    if run_options.fin_dimensions_2D == 1:
        p_test_fig, ax = plot_2D(parameter_test_dl, 'True Parameter', fig_size)
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(p_test_fig, cax=cax)
    if run_options.fin_dimensions_3D == 1:
        p_test_fig, ax = plot_3D(parameter_test_dl,
                                 'True Parameter',
                                 angle_1=90,
                                 angle_2=270)
        plt.colorbar(p_test_fig)
    plt.savefig(file_paths.figures_savefile_name_parameter_test,
                dpi=300,
                bbox_inches='tight',
                pad_inches=0)
    print('Figure saved to ' + file_paths.figures_savefile_name_parameter_test)
    plt.show()

    if hyperp.data_type == 'full':  # No state prediction for bnd only data
        if run_options.fin_dimensions_2D == 1:
            s_test_fig, ax = plot_2D(state_test_dl, 'True State', fig_size)
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            plt.colorbar(s_test_fig, cax=cax)
        if run_options.fin_dimensions_3D == 1:
            s_test_fig, ax = plot_3D(state_test_dl,
                                     'True State',
                                     angle_1=90,
                                     angle_2=270)
            plt.colorbar(s_test_fig)
        plt.savefig(file_paths.figures_savefile_name_state_test,
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0)
        print('Figure saved to ' + file_paths.figures_savefile_name_state_test)
        plt.show()

    #=== Converting Predicted Parameter into Dolfin Object ===#
    if run_options.data_thermal_fin_nine == 1:
        parameter_pred_dl = solver.nine_param_to_function(parameter_pred)
        if run_options.fin_dimensions_3D == 1:
            parameter_pred_dl = _backfill_interpolation_zeros(parameter_pred_dl)
    if run_options.data_thermal_fin_vary == 1:
        parameter_pred_dl = convert_array_to_dolfin_function(V, parameter_pred)

    #=== Plotting Predicted Parameter and State ===#
    if run_options.fin_dimensions_2D == 1:
        p_pred_fig, ax = plot_2D(parameter_pred_dl,
                                 'Decoder Estimation of True Parameter',
                                 fig_size)
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        # BUG FIX: colorbar was previously drawn from p_test_fig, giving the
        # prediction figure the *test* parameter's color scale.
        plt.colorbar(p_pred_fig, cax=cax)
    if run_options.fin_dimensions_3D == 1:
        p_pred_fig, ax = plot_3D(parameter_pred_dl,
                                 'Decoder Estimation of True Parameter',
                                 angle_1=90,
                                 angle_2=270)
        plt.colorbar(p_pred_fig)  # BUG FIX: was p_test_fig
    plt.savefig(file_paths.figures_savefile_name_parameter_pred,
                dpi=300,
                bbox_inches='tight',
                pad_inches=0)
    print('Figure saved to ' + file_paths.figures_savefile_name_parameter_pred)
    plt.show()
    parameter_pred_error = np.linalg.norm(parameter_pred - parameter_test,
                                          2) / np.linalg.norm(
                                              parameter_test, 2)
    print('Parameter prediction relative error: %.7f' % parameter_pred_error)

    if hyperp.data_type == 'full':  # No visualization of state prediction if the truncation layer only consists of the boundary observations
        state_pred_dl = convert_array_to_dolfin_function(V, state_pred)
        if run_options.fin_dimensions_2D == 1:
            s_pred_fig, ax = plot_2D(state_pred_dl,
                                     'Encoder Estimation of True State',
                                     fig_size)
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.05)
            plt.colorbar(s_pred_fig, cax=cax)  # BUG FIX: was s_test_fig
        if run_options.fin_dimensions_3D == 1:
            s_pred_fig, ax = plot_3D(state_pred_dl,
                                     'Encoder Estimation of True State',
                                     angle_1=90,
                                     angle_2=270)
            plt.colorbar(s_pred_fig)  # BUG FIX: was s_test_fig
        plt.savefig(file_paths.figures_savefile_name_state_pred,
                    dpi=300,
                    bbox_inches='tight',
                    pad_inches=0)
        print('Figure saved to ' + file_paths.figures_savefile_name_state_pred)
        plt.show()
    state_pred_error = np.linalg.norm(state_pred - state_test,
                                      2) / np.linalg.norm(state_test, 2)
    print('State observation prediction relative error: %.7f' %
          state_pred_error)
# Example #2 (score: 0) — pasted-page separator, not valid Python; commented out.
def plot_and_save_predictions_vtkfiles(hyperp, run_options, file_paths):
    """Write the test and predicted parameter/state fields to Paraview files.

    Loads saved CSV data, reconstructs the dolfin functions, solves the
    forward problem for the true state, and exports each available field
    as a .pvd file for visualization in Paraview.

    Args:
        hyperp: hyperparameters; hyperp.data_type is 'full' or 'bnd'.
        run_options: run options; flags select fin dimension, parameter type
            and which mappings (forward/inverse) were trained.
        file_paths: container of all load/save file paths.
    """
    #=== Fenics function space for the thermal fin ===#
    if run_options.fin_dimensions_2D == 1:
        V, _ = get_space_2D(40)
    if run_options.fin_dimensions_3D == 1:
        V, mesh = get_space_3D(40)

    solver = Fin(V)

    #=== Load observation indices, test parameter and any predictions ===#
    obs_indices = pd.read_csv(
        file_paths.observation_indices_savefilepath + '.csv').to_numpy()
    parameter_test = pd.read_csv(
        file_paths.savefile_name_parameter_test + '.csv').to_numpy()

    if run_options.forward_mapping == 1:
        state_pred = pd.read_csv(
            file_paths.savefile_name_state_pred + '.csv').to_numpy()

    if run_options.inverse_mapping == 1:
        parameter_pred = pd.read_csv(
            file_paths.savefile_name_parameter_pred + '.csv').to_numpy()

    #=== Test parameter as a dolfin function ===#
    if run_options.data_thermal_fin_nine == 1:
        parameter_test_dl = solver.nine_param_to_function(parameter_test)
        if run_options.fin_dimensions_3D == 1:
            # 3D interpolation occasionally yields spurious zero entries;
            # backfill each zero from its left neighbour.
            vals = parameter_test_dl.vector().get_local()
            for idx in np.where(vals == 0)[0]:
                vals[idx] = vals[idx - 1]
            parameter_test_dl = convert_array_to_dolfin_function(V, vals)
    if run_options.data_thermal_fin_vary == 1:
        parameter_test_dl = convert_array_to_dolfin_function(V, parameter_test)

    #=== True state via the forward solve (for comparison) ===#
    state_test_dl, _ = solver.forward(parameter_test_dl)
    state_test = state_test_dl.vector().get_local()
    if hyperp.data_type == 'bnd':
        state_test = state_test[obs_indices].flatten()

    #=== Export test quantities as .pvd ===#
    File(file_paths.figures_savefile_name_parameter_test +
         '.pvd') << parameter_test_dl
    File(file_paths.figures_savefile_name_state_test +
         '.pvd') << state_test_dl

    #=== Predicted parameter as a dolfin function, then export ===#
    if run_options.inverse_mapping == 1:
        if run_options.data_thermal_fin_nine == 1:
            parameter_pred_dl = solver.nine_param_to_function(parameter_pred)
            if run_options.fin_dimensions_3D == 1:
                # Same zero-backfill workaround as for the test parameter.
                vals = parameter_pred_dl.vector().get_local()
                for idx in np.where(vals == 0)[0]:
                    vals[idx] = vals[idx - 1]
                parameter_pred_dl = convert_array_to_dolfin_function(V, vals)
        if run_options.data_thermal_fin_vary == 1:
            parameter_pred_dl = convert_array_to_dolfin_function(
                V, parameter_pred)
        File(file_paths.figures_savefile_name_parameter_pred +
             '.pvd') << parameter_pred_dl

    #=== Predicted state (full-domain data only), then export ===#
    # Boundary-only observations cannot be visualized as a field.
    if run_options.forward_mapping == 1 and hyperp.data_type == 'full':
        state_pred_dl = convert_array_to_dolfin_function(V, state_pred)
        File(file_paths.figures_savefile_name_state_pred +
             '.pvd') << state_pred_dl
# Example #3 (score: 0) — pasted-page separator, not valid Python; commented out.
def optimize(hyperp, run_options, file_paths, NN, obs_indices,
             loss_autoencoder, loss_encoder, loss_model_augmented,
             relative_error, data_and_latent_train, data_and_latent_val,
             data_and_latent_test, data_dimension, num_batches_train):
    """Train the model-augmented autoencoder and log per-epoch metrics.

    Runs hyperp.num_epochs epochs of per-batch gradient updates on the
    training set, then full validation and test passes each epoch. Metrics
    are written to TensorBoard and accumulated into per-epoch storage
    arrays; final weights are saved to file_paths.NN_savefile_name.

    Args:
        hyperp: hyperparameters (num_epochs, batch_size, penalty,
            penalty_aug, ...).
        run_options: run options (fin dimension flags, which_gpu, ...).
        file_paths: save/log paths plus the autoencoder_type tag ('rev_'
            selects the reversed autoencoder data flow in the loss terms).
        NN: the autoencoder network, exposing .encoder and .decoder.
        obs_indices: observation indices forwarded to loss_model_augmented.
        loss_autoencoder: callable(pred, target) -> scalar loss.
        loss_encoder: callable(pred, target, penalty) -> scalar loss.
        loss_model_augmented: callable(hyperp, run_options, V, solver,
            obs_indices, state_obs, parameter_pred, penalty_aug) -> scalar.
        relative_error: callable(pred, target) -> scalar relative error.
        data_and_latent_train/val/test: iterables of (data, latent) batches.
        data_dimension: not referenced in this function body; kept for the
            caller's interface.
        num_batches_train: number of training batches (progress display only).

    Returns:
        Fifteen numpy arrays of per-epoch metrics: train/val/test losses
        (full, autoencoder, encoder, model-augmented) and relative errors
        (data autoencoder, latent encoder, data decoder).
    """
    #=== Generate Dolfin Function Space and Mesh ===#
    if run_options.fin_dimensions_2D == 1:
        V, mesh = get_space_2D(40)
    if run_options.fin_dimensions_3D == 1:
        V, mesh = get_space_3D(40)
    solver = Fin(V)
    print(V.dim())

    #=== Optimizer ===#
    optimizer = tf.keras.optimizers.Adam()

    #=== Define Metrics ===#
    # tf.keras.metrics.Mean accumulators; reset at the end of every epoch.
    mean_loss_train = tf.keras.metrics.Mean()
    mean_loss_train_autoencoder = tf.keras.metrics.Mean()
    mean_loss_train_encoder = tf.keras.metrics.Mean()
    mean_loss_train_model_augmented = tf.keras.metrics.Mean()

    mean_loss_val = tf.keras.metrics.Mean()
    mean_loss_val_autoencoder = tf.keras.metrics.Mean()
    mean_loss_val_encoder = tf.keras.metrics.Mean()
    mean_loss_val_model_augmented = tf.keras.metrics.Mean()

    mean_loss_test = tf.keras.metrics.Mean()
    mean_loss_test_autoencoder = tf.keras.metrics.Mean()
    mean_loss_test_encoder = tf.keras.metrics.Mean()
    mean_loss_test_model_augmented = tf.keras.metrics.Mean()

    mean_relative_error_data_autoencoder = tf.keras.metrics.Mean()
    mean_relative_error_latent_encoder = tf.keras.metrics.Mean()
    mean_relative_error_data_decoder = tf.keras.metrics.Mean()

    #=== Initialize Metric Storage Arrays ===#
    # One entry appended per epoch; these are the function's return values.
    storage_array_loss_train = np.array([])
    storage_array_loss_train_autoencoder = np.array([])
    storage_array_loss_train_encoder = np.array([])
    storage_array_loss_train_model_augmented = np.array([])

    storage_array_loss_val = np.array([])
    storage_array_loss_val_autoencoder = np.array([])
    storage_array_loss_val_encoder = np.array([])
    storage_array_loss_val_model_augmented = np.array([])

    storage_array_loss_test = np.array([])
    storage_array_loss_test_autoencoder = np.array([])
    storage_array_loss_test_encoder = np.array([])
    storage_array_loss_test_model_augmented = np.array([])

    storage_array_relative_error_data_autoencoder = np.array([])
    storage_array_relative_error_latent_encoder = np.array([])
    storage_array_relative_error_data_decoder = np.array([])

    #=== Creating Directory for Trained Neural Network ===#
    if not os.path.exists(file_paths.NN_savefile_directory):
        os.makedirs(file_paths.NN_savefile_directory)

    #=== Tensorboard ===# Tensorboard: type "tensorboard --logdir=Tensorboard" into terminal and click the link
    if os.path.exists(
            file_paths.tensorboard_directory
    ):  # Remove existing directory because Tensorboard graphs mess up of you write over it
        shutil.rmtree(file_paths.tensorboard_directory)
    summary_writer = tf.summary.create_file_writer(
        file_paths.tensorboard_directory)

    ###############################################################################
    #                   Training, Validation and Testing Step                     #
    ###############################################################################
    #=== Train Step ===#
    # NOTE(review): @tf.function is left commented out on all three steps —
    # presumably for eager-mode debugging; confirm before re-enabling.
    #@tf.function
    def train_step(batch_data_train, batch_latent_train):
        # One gradient update: total loss = autoencoder + encoder + model-augmented.
        with tf.GradientTape() as tape:
            batch_data_pred_train_AE = NN(batch_data_train)
            batch_latent_pred_train = NN.encoder(batch_data_train)
            batch_loss_train_autoencoder = loss_autoencoder(
                batch_data_pred_train_AE, batch_data_train)
            batch_loss_train_encoder = loss_encoder(batch_latent_pred_train,
                                                    batch_latent_train,
                                                    hyperp.penalty)
            # 'rev_' autoencoders map state->parameter, so the roles of data
            # and latent swap in the model-augmented loss.
            if file_paths.autoencoder_type == 'rev_':
                batch_state_obs_train = batch_data_train
                batch_parameter_pred = NN.encoder(batch_data_train)
            else:
                batch_state_obs_train = batch_latent_train
                batch_parameter_pred = batch_data_pred_train_AE
            batch_loss_train_model_augmented = loss_model_augmented(
                hyperp, run_options, V, solver, obs_indices,
                batch_state_obs_train, batch_parameter_pred,
                hyperp.penalty_aug)
            batch_loss_train = batch_loss_train_autoencoder + batch_loss_train_encoder + batch_loss_train_model_augmented
        gradients = tape.gradient(batch_loss_train, NN.trainable_variables)
        optimizer.apply_gradients(zip(gradients, NN.trainable_variables))
        mean_loss_train(batch_loss_train)
        mean_loss_train_autoencoder(batch_loss_train_autoencoder)
        mean_loss_train_encoder(batch_loss_train_encoder)
        mean_loss_train_model_augmented(batch_loss_train_model_augmented)
        # Gradients are returned so the epoch loop can log their norms.
        return gradients

    #=== Validation Step ===#
    #@tf.function
    def val_step(batch_data_val, batch_latent_val):
        # Same loss computation as train_step but without a gradient update.
        batch_data_pred_val_AE = NN(batch_data_val)
        batch_latent_pred_val = NN.encoder(batch_data_val)
        batch_loss_val_autoencoder = loss_autoencoder(batch_data_pred_val_AE,
                                                      batch_data_val)
        batch_loss_val_encoder = loss_encoder(batch_latent_pred_val,
                                              batch_latent_val, hyperp.penalty)
        if file_paths.autoencoder_type == 'rev_':
            batch_state_obs_val = batch_data_val
            batch_parameter_pred = NN.encoder(batch_data_val)
        else:
            batch_state_obs_val = batch_latent_val
            batch_parameter_pred = batch_data_pred_val_AE
        batch_loss_val_model_augmented = loss_model_augmented(
            hyperp, run_options, V, solver, obs_indices, batch_state_obs_val,
            batch_parameter_pred, hyperp.penalty_aug)
        batch_loss_val = batch_loss_val_autoencoder + batch_loss_val_encoder + batch_loss_val_model_augmented
        mean_loss_val_autoencoder(batch_loss_val_autoencoder)
        mean_loss_val_encoder(batch_loss_val_encoder)
        mean_loss_val_model_augmented(batch_loss_val_model_augmented)
        mean_loss_val(batch_loss_val)

    #=== Test Step ===#
    #@tf.function
    def test_step(batch_data_test, batch_latent_test):
        # As val_step, plus relative-error metrics including the decoder-only path.
        batch_data_pred_test_AE = NN(batch_data_test)
        batch_data_pred_test_decoder = NN.decoder(batch_latent_test)
        batch_latent_pred_test = NN.encoder(batch_data_test)
        batch_loss_test_autoencoder = loss_autoencoder(batch_data_pred_test_AE,
                                                       batch_data_test)
        batch_loss_test_encoder = loss_encoder(batch_latent_pred_test,
                                               batch_latent_test,
                                               hyperp.penalty)
        if file_paths.autoencoder_type == 'rev_':
            batch_state_obs_test = batch_data_test
            batch_parameter_pred = NN.encoder(batch_data_test)
        else:
            batch_state_obs_test = batch_latent_test
            batch_parameter_pred = batch_data_pred_test_AE
        batch_loss_test_model_augmented = loss_model_augmented(
            hyperp, run_options, V, solver, obs_indices, batch_state_obs_test,
            batch_parameter_pred, hyperp.penalty_aug)
        batch_loss_test = batch_loss_test_autoencoder + batch_loss_test_encoder + batch_loss_test_model_augmented
        mean_loss_test_autoencoder(batch_loss_test_autoencoder)
        mean_loss_test_encoder(batch_loss_test_encoder)
        mean_loss_test_model_augmented(batch_loss_test_model_augmented)
        mean_loss_test(batch_loss_test)
        mean_relative_error_data_autoencoder(
            relative_error(batch_data_pred_test_AE, batch_data_test))
        mean_relative_error_latent_encoder(
            relative_error(batch_latent_pred_test, batch_latent_test))
        mean_relative_error_data_decoder(
            relative_error(batch_data_pred_test_decoder, batch_data_test))

###############################################################################
#                             Train Neural Network                            #
###############################################################################

    print('Beginning Training')
    for epoch in range(hyperp.num_epochs):
        print('================================')
        print('            Epoch %d            ' % (epoch))
        print('================================')
        print(file_paths.filename)
        print('GPU: ' + run_options.which_gpu + '\n')
        print('Optimizing %d batches of size %d:' %
              (num_batches_train, hyperp.batch_size))
        start_time_epoch = time.time()
        for batch_num, (
                batch_data_train,
                batch_latent_train) in data_and_latent_train.enumerate():
            start_time_batch = time.time()
            gradients = train_step(batch_data_train, batch_latent_train)
            elapsed_time_batch = time.time() - start_time_batch
            #=== Display Model Summary ===#
            if batch_num == 0 and epoch == 0:
                NN.summary()
            if batch_num == 0:
                print('Time per Batch: %.4f' % (elapsed_time_batch))

        #=== Computing Relative Errors Validation ===#
        for batch_data_val, batch_latent_val in data_and_latent_val:
            val_step(batch_data_val, batch_latent_val)

        #=== Computing Relative Errors Test ===#
        for batch_data_test, batch_latent_test in data_and_latent_test:
            test_step(batch_data_test, batch_latent_test)

        #=== Track Training Metrics, Weights and Gradients ===#
        with summary_writer.as_default():
            tf.summary.scalar('loss_training',
                              mean_loss_train.result(),
                              step=epoch)
            tf.summary.scalar('loss_training_autoencoder',
                              mean_loss_train_autoencoder.result(),
                              step=epoch)
            tf.summary.scalar('loss_training_encoder',
                              mean_loss_train_encoder.result(),
                              step=epoch)
            tf.summary.scalar('loss_training_model_augmented',
                              mean_loss_train_model_augmented.result(),
                              step=epoch)
            tf.summary.scalar('loss_val', mean_loss_val.result(), step=epoch)
            tf.summary.scalar('loss_val_autoencoder',
                              mean_loss_val_autoencoder.result(),
                              step=epoch)
            tf.summary.scalar('loss_val_encoder',
                              mean_loss_val_encoder.result(),
                              step=epoch)
            tf.summary.scalar('loss_val_model_augmented',
                              mean_loss_val_model_augmented.result(),
                              step=epoch)
            tf.summary.scalar('loss_test', mean_loss_test.result(), step=epoch)
            tf.summary.scalar('loss_test_autoencoder',
                              mean_loss_test_autoencoder.result(),
                              step=epoch)
            tf.summary.scalar('loss_test_encoder',
                              mean_loss_test_encoder.result(),
                              step=epoch)
            tf.summary.scalar('loss_test_model_augmented',
                              mean_loss_test_model_augmented.result(),
                              step=epoch)
            tf.summary.scalar('relative_error_data_autoencoder',
                              mean_relative_error_data_autoencoder.result(),
                              step=epoch)
            tf.summary.scalar('relative_error_data_decoder',
                              mean_relative_error_data_decoder.result(),
                              step=epoch)
            tf.summary.scalar('relative_error_latent_encoder',
                              mean_relative_error_latent_encoder.result(),
                              step=epoch)
            for w in NN.weights:
                tf.summary.histogram(w.name, w, step=epoch)
            l2_norm = lambda t: tf.sqrt(tf.reduce_sum(tf.pow(t, 2)))
            # 'gradients' here are those of the LAST training batch of the
            # epoch (leftover from the batch loop above), not an epoch average.
            for gradient, variable in zip(gradients, NN.trainable_variables):
                tf.summary.histogram("gradients_norm/" + variable.name,
                                     l2_norm(gradient),
                                     step=epoch)

        #=== Update Storage Arrays ===#
        storage_array_loss_train = np.append(storage_array_loss_train,
                                             mean_loss_train.result())
        storage_array_loss_train_autoencoder = np.append(
            storage_array_loss_train_autoencoder,
            mean_loss_train_autoencoder.result())
        storage_array_loss_train_encoder = np.append(
            storage_array_loss_train_encoder, mean_loss_train_encoder.result())
        storage_array_loss_train_model_augmented = np.append(
            storage_array_loss_train_model_augmented,
            mean_loss_train_model_augmented.result())
        storage_array_loss_val = np.append(storage_array_loss_val,
                                           mean_loss_val.result())
        storage_array_loss_val_autoencoder = np.append(
            storage_array_loss_val_autoencoder,
            mean_loss_val_autoencoder.result())
        storage_array_loss_val_encoder = np.append(
            storage_array_loss_val_encoder, mean_loss_val_encoder.result())
        storage_array_loss_val_model_augmented = np.append(
            storage_array_loss_val_model_augmented,
            mean_loss_val_model_augmented.result())
        storage_array_loss_test = np.append(storage_array_loss_test,
                                            mean_loss_test.result())
        storage_array_loss_test_autoencoder = np.append(
            storage_array_loss_test_autoencoder,
            mean_loss_test_autoencoder.result())
        storage_array_loss_test_encoder = np.append(
            storage_array_loss_test_encoder, mean_loss_test_encoder.result())
        storage_array_loss_test_model_augmented = np.append(
            storage_array_loss_test_model_augmented,
            mean_loss_test_model_augmented.result())
        storage_array_relative_error_data_autoencoder = np.append(
            storage_array_relative_error_data_autoencoder,
            mean_relative_error_data_autoencoder.result())
        storage_array_relative_error_latent_encoder = np.append(
            storage_array_relative_error_latent_encoder,
            mean_relative_error_latent_encoder.result())
        storage_array_relative_error_data_decoder = np.append(
            storage_array_relative_error_data_decoder,
            mean_relative_error_data_decoder.result())

        #=== Display Epoch Iteration Information ===#
        elapsed_time_epoch = time.time() - start_time_epoch
        print('Time per Epoch: %.4f\n' % (elapsed_time_epoch))
        print('Train Loss: Full: %.3e, AE: %.3e, Encoder: %.3e, Aug: %.3e' %
              (mean_loss_train.result(), mean_loss_train_autoencoder.result(),
               mean_loss_train_encoder.result(),
               mean_loss_train_model_augmented.result()))
        print('Val Loss: Full: %.3e, AE: %.3e, Encoder: %.3e, Aug: %.3e' %
              (mean_loss_val.result(), mean_loss_val_autoencoder.result(),
               mean_loss_val_encoder.result(),
               mean_loss_val_model_augmented.result()))
        print('Test Loss: Full: %.3e, AE: %.3e, Encoder: %.3e, Aug: %.3e' %
              (mean_loss_test.result(), mean_loss_test_autoencoder.result(),
               mean_loss_test_encoder.result(),
               mean_loss_test_model_augmented.result()))
        print('Rel Errors: AE: %.3e, Encoder: %.3e, Decoder: %.3e\n' %
              (mean_relative_error_data_autoencoder.result(),
               mean_relative_error_latent_encoder.result(),
               mean_relative_error_data_decoder.result()))
        start_time_epoch = time.time()

        #=== Resetting Metrics ===#
        mean_loss_train.reset_states()
        mean_loss_train_autoencoder.reset_states()
        mean_loss_train_encoder.reset_states()
        mean_loss_train_model_augmented.reset_states()
        mean_loss_val.reset_states()
        mean_loss_val_autoencoder.reset_states()
        mean_loss_val_encoder.reset_states()
        mean_loss_val_model_augmented.reset_states()
        mean_loss_test.reset_states()
        mean_loss_test_autoencoder.reset_states()
        mean_loss_test_encoder.reset_states()
        mean_loss_test_model_augmented.reset_states()
        mean_relative_error_data_autoencoder.reset_states()
        mean_relative_error_latent_encoder.reset_states()
        mean_relative_error_data_decoder.reset_states()

    #=== Save Final Model ===#
    NN.save_weights(file_paths.NN_savefile_name)
    print('Final Model Saved')

    return storage_array_loss_train, storage_array_loss_train_autoencoder, storage_array_loss_train_encoder, storage_array_loss_train_model_augmented, storage_array_loss_val, storage_array_loss_val_autoencoder, storage_array_loss_val_encoder, storage_array_loss_val_model_augmented, storage_array_loss_test, storage_array_loss_test_autoencoder, storage_array_loss_test_encoder, storage_array_loss_test_model_augmented, storage_array_relative_error_data_autoencoder, storage_array_relative_error_latent_encoder, storage_array_relative_error_data_decoder
# Example #4 (score: 0) — pasted-page separator, not valid Python; commented out.
def plot_and_save(hyperp, run_options, file_paths, fig_size):
    """Plot the true and predicted parameter/state fields and the training
    metric curves, saving every figure to disk.

    Args:
        hyperp: hyperparameter object; uses .data_type ('full' or 'bnd')
            and .num_epochs.
        run_options: option flags; uses .fin_dimensions_2D/.fin_dimensions_3D
            and .data_thermal_fin_nine/.data_thermal_fin_vary.
        file_paths: object holding every load/save path used below.
        fig_size: figure size forwarded to plot_2D.

    Bug fixes vs. previous version: the predicted-parameter and
    predicted-state colorbars were attached to the *test* figures
    (p_test_fig / s_test_fig); they now use their own mappables. The 3D
    branches also no longer reference an unassigned 2D axes handle.
    """
    ###########################################################################
    #                   Form Fenics Domain and Load Predictions               #
    ###########################################################################
    #=== Form Fenics Domain ===#
    if run_options.fin_dimensions_2D == 1:
        V, _ = get_space_2D(40)
    if run_options.fin_dimensions_3D == 1:
        V, mesh = get_space_3D(40)

    solver = Fin(V)

    #=== Load Observation Indices, Test and Predicted Parameters and State ===#
    obs_indices = _load_csv_as_array(
        file_paths.observation_indices_savefilepath)
    parameter_test = _load_csv_as_array(file_paths.savefile_name_parameter_test)
    parameter_pred = _load_csv_as_array(file_paths.savefile_name_parameter_pred)
    state_pred = _load_csv_as_array(file_paths.savefile_name_state_pred)

    ###########################################################################
    #                           Plotting Predictions                          #
    ###########################################################################
    #=== Converting Test Parameter Into Dolfin Object and True State ===#
    parameter_test_dl = _parameter_to_dolfin(run_options, solver, V,
                                             parameter_test)

    state_test_dl, _ = solver.forward(
        parameter_test_dl)  # generate true state for comparison
    state_test = state_test_dl.vector().get_local()
    if hyperp.data_type == 'bnd':
        state_test = state_test[obs_indices].flatten()

    #=== Plotting Test Parameter and Test State ===#
    _plot_field(run_options, parameter_test_dl, 'True Parameter', fig_size,
                file_paths.figures_savefile_name_parameter_test)

    if hyperp.data_type == 'full':  # No state prediction for bnd only data
        _plot_field(run_options, state_test_dl, 'True State', fig_size,
                    file_paths.figures_savefile_name_state_test)

    #=== Converting Predicted Parameter into Dolfin Object ===#
    parameter_pred_dl = _parameter_to_dolfin(run_options, solver, V,
                                             parameter_pred)

    #=== Plotting Predicted Parameter and State ===#
    _plot_field(run_options, parameter_pred_dl,
                'Decoder Estimation of True Parameter', fig_size,
                file_paths.figures_savefile_name_parameter_pred)
    parameter_pred_error = np.linalg.norm(parameter_pred - parameter_test,
                                          2) / np.linalg.norm(
                                              parameter_test, 2)
    print('Parameter prediction relative error: %.7f' % parameter_pred_error)

    if hyperp.data_type == 'full':  # No visualization of state prediction if the truncation layer only consists of the boundary observations
        state_pred_dl = convert_array_to_dolfin_function(V, state_pred)
        _plot_field(run_options, state_pred_dl,
                    'Encoder Estimation of True State', fig_size,
                    file_paths.figures_savefile_name_state_pred)
    state_pred_error = np.linalg.norm(state_pred - state_test,
                                      2) / np.linalg.norm(state_test, 2)
    print('State observation prediction relative error: %.7f' %
          state_pred_error)

    ###########################################################################
    #                             Plotting Metrics                            #
    ###########################################################################
    print('Loading Metrics')
    df_metrics = pd.read_csv(file_paths.NN_savefile_name + "_metrics" + '.csv')
    array_metrics = df_metrics.to_numpy()
    # One point per recorded epoch; row 0 of the metrics table is skipped
    # below, hence num_epochs - 1 points.
    x_axis = np.linspace(1,
                         hyperp.num_epochs - 1,
                         hyperp.num_epochs - 1,
                         endpoint=True)
    figures_dir = file_paths.figures_savefile_directory

    #=== Training Losses (log scale) ===#
    _plot_metric(x_axis, np.log(array_metrics[1:, 0]),
                 'Training Log-Loss of Autoencoder', 'Log-Loss', 'Log-Loss',
                 figures_dir + '/' + 'loss' + '_autoencoder_' +
                 file_paths.filename + '.png')
    _plot_metric(x_axis, np.log(array_metrics[1:, 1]),
                 'Training Log-Loss of Parameter Data', 'Log-Loss', 'Log-Loss',
                 figures_dir + '/' + 'loss' + '_parameter_data_' +
                 file_paths.filename + '.png')
    _plot_metric(x_axis, np.log(array_metrics[1:, 2]),
                 'Training Log-Loss of State Data', 'Log-Loss', 'Log-Loss',
                 figures_dir + '/' + 'loss' + '_state_data_' +
                 file_paths.filename + '.png')

    #=== Relative Errors ===#
    _plot_metric(x_axis, array_metrics[1:, 7],
                 'Relative Error of Parameter Prediction', 'Relative Error',
                 'Relative Error',
                 figures_dir + '/' + 'relative_error' + '_parameter_' +
                 file_paths.filename + '.png')
    _plot_metric(x_axis, array_metrics[1:, 8],
                 'Relative Error of State Prediction', 'Relative Error',
                 'Relative Error',
                 figures_dir + '/' + 'relative_error' + '_state_' +
                 file_paths.filename + '.png')


def _load_csv_as_array(filepath_no_ext):
    """Read '<filepath_no_ext>.csv' into a NumPy array via pandas."""
    return pd.read_csv(filepath_no_ext + '.csv').to_numpy()


def _parameter_to_dolfin(run_options, solver, V, parameter_array):
    """Convert a flat parameter array into a dolfin function on V.

    Handles both the nine-parameter thermal-fin case and the fully-varying
    case; run_options flags select the branch.
    """
    if run_options.data_thermal_fin_nine == 1:
        parameter_dl = solver.nine_param_to_function(parameter_array)
        if run_options.fin_dimensions_3D == 1:
            # Interpolation messes up sometimes and makes some values equal
            # 0; backfill each zero with its left neighbour.
            parameter_values = parameter_dl.vector().get_local()
            zero_indices = np.where(parameter_values == 0)[0]
            for ind in zero_indices:
                parameter_values[ind] = parameter_values[ind - 1]
            parameter_dl = convert_array_to_dolfin_function(
                V, parameter_values)
    if run_options.data_thermal_fin_vary == 1:
        parameter_dl = convert_array_to_dolfin_function(V, parameter_array)
    return parameter_dl


def _plot_field(run_options, field_dl, title, fig_size, savefile_name):
    """Render a dolfin field in 2D or 3D, attach a colorbar and save it.

    The colorbar is always attached to the figure's own mappable (previous
    code reused the test figure's mappable for the prediction plots).
    """
    ax = None
    if run_options.fin_dimensions_2D == 1:
        mappable, ax = plot_2D(field_dl, title, fig_size)
    if run_options.fin_dimensions_3D == 1:
        mappable = plot_3D(field_dl, title, angle_1=90, angle_2=270)
    if ax is not None:
        # 2D: steal 5% of the axes width for a size-matched colorbar.
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(mappable, cax=cax)
    else:
        # 3D: plot_3D returns no axes handle, so use the default placement.
        plt.colorbar(mappable)
    plt.savefig(savefile_name, dpi=300, bbox_inches='tight', pad_inches=0)
    print('Figure saved to ' + savefile_name)
    plt.show()


def _plot_metric(x_axis, values, title, ylabel, label, savefile_name):
    """Plot one training-metric curve against epochs and save the figure."""
    fig = plt.figure()
    plt.plot(x_axis, values, label=label)
    plt.title(title)
    plt.xlabel('Epochs')
    plt.ylabel(ylabel)
    plt.legend()
    plt.savefig(savefile_name, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
def plot_and_save_figures(hyperp, run_options, file_paths):
    """Plot true/predicted fields and per-hidden-layer training metrics.

    Args:
        hyperp: hyperparameter object; uses .data_type ('full' or 'bnd'),
            .num_epochs and .max_hidden_layers.
        run_options: option flags; uses .fin_dimensions_2D/.fin_dimensions_3D
            and .data_thermal_fin_nine/.data_thermal_fin_vary.
        file_paths: object holding every load/save path used below.

    Bug fixes vs. previous version: the predicted-state colorbar was
    attached to s_test_fig instead of s_pred_fig; the bare ``except:``
    around the rel-zeros CSV load is narrowed to the missing-file case;
    the ``locals()`` existence hack is replaced by a None sentinel.
    """
    ###########################################################################
    #                   Form Fenics Domain and Load Predictions               #
    ###########################################################################
    #=== Form Fenics Domain ===#
    if run_options.fin_dimensions_2D == 1:
        V, _ = get_space_2D(40)
    if run_options.fin_dimensions_3D == 1:
        V, mesh = get_space_3D(40)

    solver = Fin(V)

    #=== Load Observation Indices, Test and Predicted State ===#
    df_obs_indices = pd.read_csv(file_paths.observation_indices_savefilepath +
                                 '.csv')
    obs_indices = df_obs_indices.to_numpy()

    df_parameter_test = pd.read_csv(file_paths.savefile_name_parameter_test +
                                    '.csv')
    parameter_test = df_parameter_test.to_numpy()

    df_state_pred = pd.read_csv(file_paths.savefile_name_state_pred + '.csv')
    state_pred = df_state_pred.to_numpy()

    ###########################################################################
    #                           Plotting Predictions                          #
    ###########################################################################
    #=== Converting Test Parameter Into Dolfin Object ===#
    if run_options.data_thermal_fin_nine == 1:
        parameter_test_dl = solver.nine_param_to_function(parameter_test)
        if run_options.fin_dimensions_3D == 1:
            # Interpolation messes up sometimes and makes some values equal
            # 0; backfill each zero with its left neighbour.
            parameter_values = parameter_test_dl.vector().get_local()
            zero_indices = np.where(parameter_values == 0)[0]
            for ind in zero_indices:
                parameter_values[ind] = parameter_values[ind - 1]
            parameter_test_dl = convert_array_to_dolfin_function(
                V, parameter_values)
    if run_options.data_thermal_fin_vary == 1:
        parameter_test_dl = convert_array_to_dolfin_function(V, parameter_test)

    #=== Computing True State Observation for Comparison ===#
    state_test_dl, _ = solver.forward(parameter_test_dl)
    state_test = state_test_dl.vector().get_local()
    if hyperp.data_type == 'bnd':
        state_test = state_test[obs_indices].flatten()

    #=== Plotting Test Parameter and Test State ===#
    if run_options.fin_dimensions_2D == 1:
        p_test_fig = dl.plot(parameter_test_dl)
        p_test_fig.ax.set_title('True Parameter', fontsize=13)
    if run_options.fin_dimensions_3D == 1:
        p_test_fig = plot_3D(parameter_test_dl, 'True Parameter',
                             angle_1=90, angle_2=270)
    plt.colorbar(p_test_fig)
    plt.savefig(file_paths.figures_savefile_name_parameter_test, dpi=300)
    print('Figure saved to ' + file_paths.figures_savefile_name_parameter_test)
    plt.show()

    if hyperp.data_type == 'full':  # No state prediction for bnd only data
        if run_options.fin_dimensions_2D == 1:
            s_test_fig = dl.plot(state_test_dl)
            s_test_fig.ax.set_title('True State', fontsize=13)
        if run_options.fin_dimensions_3D == 1:
            s_test_fig = plot_3D(state_test_dl, 'True State',
                                 angle_1=90, angle_2=270)
        plt.colorbar(s_test_fig)
        plt.savefig(file_paths.figures_savefile_name_state_test, dpi=300)
        print('Figure saved to ' + file_paths.figures_savefile_name_state_test)
        plt.show()

    #=== Plotting Predicted State ===#
    if hyperp.data_type == 'full':  # No visualization of state prediction if the truncation layer only consists of the boundary observations
        state_pred_dl = convert_array_to_dolfin_function(V, state_pred)
        if run_options.fin_dimensions_2D == 1:
            s_pred_fig = dl.plot(state_pred_dl)
            s_pred_fig.ax.set_title('Encoder Estimation of True State',
                                    fontsize=13)
        if run_options.fin_dimensions_3D == 1:
            s_pred_fig = plot_3D(state_pred_dl,
                                 'Encoder Estimation of True State',
                                 angle_1=90, angle_2=270)
        # BUGFIX: previously attached the colorbar to s_test_fig.
        plt.colorbar(s_pred_fig)
        plt.savefig(file_paths.figures_savefile_name_state_pred, dpi=300)
        print('Figure saved to ' + file_paths.figures_savefile_name_state_pred)
        plt.show()
    state_pred_error = np.linalg.norm(state_pred - state_test,
                                      2) / np.linalg.norm(state_test, 2)
    print('State observation prediction relative error: %.7f' %
          state_pred_error)

    ###########################################################################
    #                             Plotting Metrics                            #
    ###########################################################################
    plt.ioff()  # Turn interactive plotting off
    first_trainable_hidden_layer_index = 2
    marker_list = ['+', '*', 'x', 'D', 'o', '.', 'h']
    x_axis = np.linspace(1, hyperp.num_epochs - 1, hyperp.num_epochs - 1,
                         endpoint=True)

    #=== Plot and Save Losses ===#
    fig_loss = plt.figure()
    for l in range(first_trainable_hidden_layer_index,
                   hyperp.max_hidden_layers):
        print('Loading Metrics for Hidden Layer %d' % (l))
        df_metrics = pd.read_csv(file_paths.NN_savefile_name + "_metrics_hl" +
                                 str(l) + '.csv')
        array_metrics = df_metrics.to_numpy()
        storage_loss_array = array_metrics[2:, 0]
        plt.plot(x_axis, np.log(storage_loss_array), label='hl' + str(l),
                 marker=marker_list[l - 2])
    plt.title('Training Log-Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Log-Loss')
    plt.legend()
    figures_savefile_name = (file_paths.figures_savefile_directory + '/' +
                             'loss' + '_all_layers_' + file_paths.filename +
                             '.png')
    plt.savefig(figures_savefile_name)
    plt.close(fig_loss)

    #=== Plot and Save Accuracies ===#
    fig_accuracy = plt.figure()
    for l in range(first_trainable_hidden_layer_index,
                   hyperp.max_hidden_layers):
        print('Loading Metrics for Hidden Layer %d' % (l))
        df_metrics = pd.read_csv(file_paths.NN_savefile_name + "_metrics_hl" +
                                 str(l) + '.csv')
        array_metrics = df_metrics.to_numpy()
        storage_accuracy_array = array_metrics[2:, 1]
        plt.plot(x_axis, storage_accuracy_array, label='hl' + str(l),
                 marker=marker_list[l - 2])
    plt.title('Testing Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    figures_savefile_name = (file_paths.figures_savefile_directory + '/' +
                             'accuracy' + '_all_layers_' +
                             file_paths.filename + '.png')
    plt.savefig(figures_savefile_name)
    plt.close(fig_accuracy)

    #=== Plot and Save Relative Number of Zeros ===#
    print('Loading relative number of zeros .csv file')
    rel_zeros_array = None  # sentinel instead of the old locals() lookup
    try:
        df_rel_zeros = pd.read_csv(file_paths.NN_savefile_name + "_relzeros" +
                                   '.csv')
        rel_zeros_array = df_rel_zeros.to_numpy().flatten()
    except FileNotFoundError:
        # This file is optional; skip the plot when it was never written.
        print('No relative number of zeros .csv file!')

    if rel_zeros_array is not None:
        fig_accuracy = plt.figure()
        x_axis = np.linspace(2, hyperp.max_hidden_layers - 1,
                             hyperp.max_hidden_layers - 2, endpoint=True)
        plt.plot(x_axis, rel_zeros_array, label='relative # of 0s')
        plt.title(file_paths.filename)
        plt.xlabel('Layer Number')
        plt.ylabel('Number of Zeros')
        plt.legend()
        figures_savefile_name = (file_paths.figures_savefile_directory + '/' +
                                 'rel_num_zeros_' + file_paths.filename +
                                 '.png')
        plt.savefig(figures_savefile_name)
        plt.close(fig_accuracy)