import numpy as np
import matplotlib.pyplot as plt

from src.networks.cnn import CNN


def eval_metric(name, version, metric_name):
    '''
    Evaluate a trained model on a chosen metric.
    The training and validation metrics are plotted in a
    line chart, one point per epoch.

    Parameters:
        name : name of the model
        version : version of the model
        metric_name : 'loss' or 'accuracy'
    Output:
        line chart with epochs on the x-axis and the metric on the
        y-axis
    '''
    cnn = CNN(load_models=name, version=version)
    history = cnn.load_losses(fname=name)

    metric = history[metric_name]
    val_metric = history['val_' + metric_name]
    print(np.shape(metric)[0])  # number of epochs recorded in the history
    #e = range(1, NB_EPOCHS + 1)

    plt.plot(metric, 'bo', label='Train ' + metric_name)
    plt.plot(val_metric, 'b', label='Validation ' + metric_name)
    plt.xlabel('Epoch number')
    plt.ylabel(metric_name)
    plt.title('Comparing training and validation ' + metric_name + ' for ' + name)
    plt.legend()
    plt.show()
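

# Example usage (minimal sketch): plot the loss curves of a saved model.
# 'cnn-Model1' and version 0 are placeholder values borrowed from the training
# scripts further down; they are not guaranteed to exist on disk.
#   eval_metric(name='cnn-Model1', version=0, metric_name='loss')
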
def main():
    parser = argparse.ArgumentParser("MNIST - TensorFlow 2")
    parser.add_argument(
        "--pickle",
        action="store_true",
        help="Loads / stores data to pickle for faster loading")
    args = parser.parse_args()

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    for physical_device in physical_devices:
        # Allocate GPU memory on demand instead of reserving it all upfront
        tf.config.experimental.set_memory_growth(physical_device, True)

    if not DataConfig.KEEP_TB:
        shutil.rmtree(DataConfig.TB_DIR, ignore_errors=True)
        # Wait until the directory is actually gone before recreating it
        # (directory removal can lag on some filesystems)
        while os.path.exists(DataConfig.TB_DIR):
            pass
    os.makedirs(DataConfig.TB_DIR, exist_ok=True)

    if DataConfig.USE_CHECKPOINT:
        if not DataConfig.KEEP_CHECKPOINTS:
            shutil.rmtree(DataConfig.CHECKPOINT_DIR, ignore_errors=True)
            while os.path.exists(DataConfig.CHECKPOINT_DIR):
                pass
        os.makedirs(DataConfig.CHECKPOINT_DIR, exist_ok=True)

        # Makes a copy of all the code (and config) so that the checkpoints are easy to load and use
        output_folder = os.path.join(DataConfig.CHECKPOINT_DIR,
                                     "MNIST-TensorFlow")
        for filepath in glob.glob(os.path.join("**", "*.py"), recursive=True):
            destination_path = os.path.join(output_folder, filepath)
            os.makedirs(os.path.dirname(destination_path), exist_ok=True)
            shutil.copy(filepath, destination_path)
        misc_files = ["README.md", "requirements.txt", "setup.cfg"]
        for misc_file in misc_files:
            shutil.copy(misc_file, os.path.join(output_folder, misc_file))
        print("Finished copying files")

    dataset = DatasetCreator(DataConfig.DATA_PATH,
                             DataConfig.DATASET,
                             batch_size=ModelConfig.BATCH_SIZE,
                             cache=False,
                             pickle=args.pickle)

    if ModelConfig.NETWORK_NAME == "CNN":
        model = CNN(dataset.input_shape, dataset.classes_nb)
    elif ModelConfig.NETWORK_NAME == "SmallMobileNet":
        model = SmallMobileNet(dataset.input_shape, dataset.classes_nb)
    elif ModelConfig.NETWORK_NAME == "MobileNetV2":
        model = MobileNetV2(dataset.input_shape, dataset.classes_nb)
    else:
        raise ValueError(f"Unknown network name: {ModelConfig.NETWORK_NAME}")
    model.build((None, *dataset.input_shape))

    model.summary()  # summary() prints the table itself and returns None

    train(model, dataset)
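
# Standard entry point; assumed to sit at the end of this script (not shown in
# the original fragment).
if __name__ == '__main__':
    main()
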
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import context

from src.models import cnn
from src.networks.cnn import CNN

if __name__ == '__main__':
    # Create and train the CNN that processes the data. The pre-processing of
    # the data uses the previously encoded dataset.
    cnn1 = CNN(model=cnn.Model1((100, 200, 1), 200),
               batch_size=4,
               dataset_path='.')  # Adapt to your path
    cnn1.compile()
    hcnn1 = cnn1.fit(epochs=10, repeat=1, fname='cnn-Model1')

    # Save the computed network metrics (CNN part only)
    cnn1.save_losses(hcnn1, 'cnn-Model1')
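
    # Minimal sketch (assumption): the saved metrics can be read back later
    # with load_losses(), as the plotting helper eval_metric() above does.
    history = cnn1.load_losses(fname='cnn-Model1')
    print('final training loss:', history['loss'][-1])
    print('final validation loss:', history['val_loss'][-1])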
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

######################################################################
# Train a new CNN on encoded or raw TS                               #
# prediction with cnn                                                #
######################################################################

import context
import tensorflow as tf
from src.models import cnn
from src.networks.cnn import CNN

if __name__ == '__main__':
    # Create and train the CNN that processes the data. The pre-processing of
    # the data uses the previously encoded dataset.
    name = 'Model2_test15'
    cnn1 = CNN(model=cnn.Model2((130, 480, 1), 480),
               batch_size=64,
               dataset_path='../GREGOIRE/dataset_new',  # Adapt to your path
               encoded=True)
    # run_opts reports tensor allocations on OOM, but it is never passed to
    # compile()/fit() in this snippet, so it has no effect as written
    run_opts = tf.RunOptions(report_tensor_allocations_upon_oom=True)
    cnn1.compile()
    hcnn1 = cnn1.fit(epochs=50, repeat=1, fname='cnn-' + name)

    # Save the computed network metrics (CNN part only)
    cnn1.save_losses(hcnn1, 'cnn-'+name)

# Example #5

import glob

import numpy as np
import matplotlib.pyplot as plt

from src.networks.cnn import CNN
from src.networks.autoencoder import AutoEncoder

if __name__ == '__main__':

    # Variables
    dataset = 'datatest2'
    model = 'Model2_test15'
    version_model = 0

    # Load an already trained autoencoder and cnn model
    ae1 = AutoEncoder(load_models=('encoder-' + 'Model1_test15'), version=0)
    cnn1 = CNN(load_models=('cnn-' + model), version=version_model)

    # Predict with the cnn: 20 predictions
    ts_names = sorted(glob.glob(dataset + '/train_TS/*.npy'))
    bathy_names = sorted(glob.glob(dataset + '/train_GT/*.npy'))

    x = np.arange(0, 480, 1)  # cross-shore

    shift = 40
    for i in range(20):
        plt.subplot(4, 5, i + 1)

        ts_origi = np.load(ts_names[i + shift])[200:720, :]  # cropping
        width, height = ts_origi.shape
        ts_origi = np.array([ts_origi])
        real_bathy = np.load(bathy_names[i + shift])
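
        # Remaining loop body (assumed; the original snippet stops above):
        # encode the timestack, predict the bathymetry, plot real vs predicted,
        # following the analogous prediction code elsewhere in this file.
        ts_enc = ae1.predict(ts_origi.reshape(len(ts_origi), width, height, 1),
                             batch_size=1)
        a, width, height = ts_enc.shape
        ts_enc = np.array([ts_enc])
        bathy_pred = cnn1.predict(ts_enc.reshape(len(ts_enc), width, height, 1),
                                  batch_size=1, smooth=True).flatten()
        plt.plot(real_bathy, label='real bathymetry')
        plt.plot(bathy_pred, label='predicted bathymetry')
    plt.legend()
    plt.show()
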
# Example #6

    def browse_m(self, ae=False):
        """Action of the Choose model button.
		
		Description: browse the selected encoder model and the selected cnn model and then
					 update the predicted bathymetry in consequence.
		
		"""
        # Get the encoder filename
        encoder_filename = QFileDialog.getOpenFileName(None,
                                                       'Find trained encoder',
                                                       'saves/weights/',
                                                       '(*.h5)')[0]

        # Get the cnn model filename
        model_filename = QFileDialog.getOpenFileName(None,
                                                     'Find trained model',
                                                     'saves/weights/',
                                                     '(*.h5)')[0]

        if encoder_filename and model_filename:
            # Get the encoder model name and version
            # (filenames are expected to follow the '<name>.<version>.h5' pattern)
            encoder_path = encoder_filename.split('.')[0]
            encoder_name = encoder_path.split('/')[-1]
            encoder_version = int(encoder_filename.split('.')[1])

            # Get the cnn model name and version
            model_path = model_filename.split('.')[0]
            model_name = model_path.split('/')[-1]
            model_version = int(model_filename.split('.')[1])

            # Create the encoder and the cnn
            ae1 = AutoEncoder(load_models=encoder_name,
                              version=encoder_version)
            cnn1 = CNN(load_models=model_name, version=model_version)

            # Adjust the timestack shape
            ts_origi = self.b_canvas.ts  # [200:]  # cropping
            width, height = ts_origi.shape
            ts_origi = np.array([ts_origi])

            # Predict the encoded timestack (i.e. encode the timestack)
            ts_enc = ae1.predict(ts_origi.reshape(len(ts_origi), width, height,
                                                  1),
                                 batch_size=1)
            a, width, height = ts_enc.shape
            ts_enc = np.array([ts_enc])

            # Predict the bathymetry
            self.b_canvas.bath_pred = cnn1.predict(
                ts_enc.reshape(len(ts_enc), width, height, 1),
                batch_size=1,
                smooth=True).flatten()

            # Update the display
            self.b_canvas.pred = True
            self.b_canvas.plot(bath_path=self.bath)

            max_height_error = max(
                abs(self.b_canvas.bath - self.b_canvas.bath_pred))
            self.text1.selectAll()
            self.text1.textCursor().clearSelection()
            self.text1.textCursor().insertText(
                'max height error : ' + str(round(max_height_error, 2)) + ' m')

            mean_height_error = np.mean(
                abs(self.b_canvas.bath - self.b_canvas.bath_pred))
            self.text2.selectAll()
            self.text2.textCursor().clearSelection()
            self.text2.textCursor().insertText(
                'mean height error : ' + str(round(mean_height_error, 2)) +
                ' m')

            corr_coef = np.corrcoef(self.b_canvas.bath,
                                    self.b_canvas.bath_pred)
            self.text3.selectAll()
            self.text3.textCursor().clearSelection()
            self.text3.textCursor().insertText('correlation coef : ' +
                                               str(round(corr_coef[0, 1], 5)))
import numpy as np
import matplotlib.pyplot as plt

from src.networks.cnn import CNN
from src.networks.autoencoder import AutoEncoder


if __name__ == '__main__':
    # Variables
    dataset = '../../dataset_new'
    model = 'Model2_test17'
    model_encoder = 'Model1_test15'

    # Load an already trained autoencoder and cnn model
    ae1 = AutoEncoder(load_models=('encoder-' + model_encoder), version=0)
    cnn0 = CNN(load_models=('cnn-' + model), version=0)
    # Other saved versions of the same model can be loaded in the same way,
    # e.g. cnn1 = CNN(load_models=('cnn-' + model), version=1)

    x = np.arange(0, 480, 1)  # cross-shore

    # Predict with the cnn
    n = '15353'
    ts_origi = np.load(dataset + '/train_TS/TS_' + n + '.npy')  # [200:]  # cropping

######################################################################
# Plot 1 figure with 3 different bathy on 8 different wave condition #
# prediction with cnn                                                #
######################################################################

import glob
import context
import numpy as np
import matplotlib.pyplot as plt

from src.networks.cnn import CNN
from src.networks.autoencoder import AutoEncoder

if __name__ == '__main__':
    # Load an already trained autoencoder and cnn model
    ae1 = AutoEncoder(load_models='encoder-Model1_test6', version=0)
    cnn1 = CNN(load_models='cnn-Model1_test6', version=2)

    # Predict with the cnn: 24 predictions (3 bathymetries x 8 wave conditions)
    ts_names = sorted(glob.glob('../dataset_test/train_TS/*.npy'))
    bathy_names = sorted(glob.glob('../dataset_test/train_GT/*.npy'))

    x = np.arange(0, 480, 1)  # cross-shore

    shift = 48
    for i in range(24):
        plt.subplot(3, 8, i + 1)

        ts_origi = np.load(ts_names[i + shift])  # no cropping applied here
        width, height = ts_origi.shape
        ts_origi = np.array([ts_origi])
        real_bathy = np.load(bathy_names[i + shift])

# Example #9

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import context
import numpy as np
import matplotlib.pyplot as plt

from src.models import cnn
from src.networks.cnn import CNN

if __name__ == '__main__':
    # load an already trained cnn model
    cnn1 = CNN(load_models='cnn-Model1', version=0)

    # predict cnn
    ts_enc = np.load('dataset/train_encoded_TS/TS_00001.npy')
    width, height = ts_enc.shape
    ts_enc = np.array([ts_enc])
    bathy = cnn1.predict(ts_enc.reshape(len(ts_enc), width, height, 1),
                         batch_size=1)
    plt.subplot(1, 2, 1)
    plt.imshow(ts_enc[0])
    plt.subplot(1, 2, 2)
    plt.plot(bathy[0])
    plt.show()

# Example #10

    x = np.arange(-99, 600 - 99, 1)  # cross-shore
    y = np.arange(0, 100, 1)  # long-shore
    X, Y = np.meshgrid(x, y)

    # bathy_nc = netCDFFile('dataset_2D/dep.nc')
    # bathy = bathy_nc.variables['depth']
    # bathy = -np.array(bathy)
    bathy = np.load('../../')
    print(bathy.shape)
    ts_nc = netCDFFile('dataset_2D/eta.nc')
    ts = ts_nc.variables['eta']
    ts = np.array(ts)
    print(ts.shape)

    ae1 = AutoEncoder(load_models='encoder-Model1_test13', version=0)
    cnn1 = CNN(load_models='cnn-Model2_test14', version=0)

    bathy_pred = np.zeros([100, 480])
    for i in range(bathy.shape[0]):
        bath1D = bathy[i, 21:501]
        ts1D = ts[200:720, i, 21:501]
        width, height = ts1D.shape
        ts1D = np.array([ts1D])

        ts1D_enc = ae1.predict(ts1D.reshape(len(ts1D), width, height, 1),
                               batch_size=1)
        a, width, height = ts1D_enc.shape
        ts1D_enc = np.array([ts1D_enc])

        # The call is truncated in the original snippet; completed here
        # assuming the same options as the 1D prediction code above
        # (batch_size=1, smoothed, flattened to a 480-point profile)
        bathy_pred[i, :] = cnn1.predict(
            ts1D_enc.reshape(len(ts1D_enc), width, height, 1),
            batch_size=1, smooth=True).flatten()