Example #1
def normalize(X, normalize_type, input_shape=None):

    X_norm = X.copy()
    norm_params = None

    if normalize_type is not None:

        # Min-max rescaling
        if normalize_type == 'min_max':
            x_min = np.min(X_norm, axis=1).reshape(-1, 1)
            x_max = np.max(X_norm, axis=1).reshape(-1, 1)
            X_norm = (X_norm - x_min) / (x_max - x_min)
            norm_params = {'min': x_min, 'max': x_max}

        # Avg_std rescaling
        elif normalize_type == 'avg_std':
            avg = np.mean(X_norm, axis=1).reshape(-1, 1)
            std = np.std(X_norm, axis=1).reshape(-1, 1)
            std[std == 0] = 1  # guard against zero variance, as in the band-wise variants
            X_norm = (X_norm - avg) / std
            norm_params = {'avg': avg, 'std': std}

        # Avg_std rescaling (f-band-wise split)
        elif normalize_type == 'avg_std_fband_split':
            assert input_shape is not None
            X_norm = X_norm.reshape(X_norm.shape[0], *input_shape)
            avg = np.mean(X_norm, axis=2)
            std = np.std(X_norm, axis=2)
            std[std == 0] = 1
            avg = avg.reshape(*avg.shape, 1)
            std = std.reshape(*std.shape, 1)
            X_norm = (X_norm - avg) / std
            X_norm = X_norm.reshape(X_norm.shape[0], -1)
            norm_params = {'avg': avg, 'std': std}

        # Avg_std rescaling (t-band-wise split)
        elif normalize_type == 'avg_std_tband_split':
            assert input_shape is not None
            X_norm = X_norm.reshape(X_norm.shape[0], *input_shape)
            avg = np.mean(X_norm, axis=1)
            std = np.std(X_norm, axis=1)
            std[std == 0] = 1
            avg = avg.reshape(avg.shape[0], 1, avg.shape[1])
            std = std.reshape(std.shape[0], 1, std.shape[1])
            X_norm = (X_norm - avg) / std
            X_norm = X_norm.reshape(X_norm.shape[0], -1)
            norm_params = {'avg': avg, 'std': std}

        else:
            raise utils.UserError(
                f'Parameter normalize_type={normalize_type} is not allowed.')

    return X_norm, norm_params
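
A minimal usage sketch for normalize, assuming numpy is imported as np; the data and shapes are hypothetical:

import numpy as np

# Hypothetical batch: 4 flattened frames of length 6.
X = np.random.rand(4, 6)

# Row-wise min-max rescaling to [0, 1].
X_norm, norm_params = normalize(X, 'min_max')

# Band-wise standardization needs the original 2-D shape, e.g. (2, 3).
X_norm, norm_params = normalize(X, 'avg_std_fband_split', input_shape=(2, 3))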
Example #2
def custom_loss(yTrue, yPred):
    # loss_type is a free variable here, captured from the enclosing scope.
    x = K.flatten(yTrue)
    z_decoded = K.flatten(yPred)
    # Reconstruction loss (MSE loss)
    if loss_type == 'mse':
        loss = keras.metrics.mean_squared_error(x, z_decoded)
    # Reconstruction loss (MAE loss)
    elif loss_type == 'mae':
        loss = keras.metrics.mean_absolute_error(x, z_decoded)
    else:
        raise utils.UserError(f'Parameter loss_type={loss_type} is not allowed.')
    return K.mean(loss)
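
Because loss_type is captured from an enclosing scope, custom_loss is presumably defined inside a factory or training routine. A hedged sketch of that pattern (the factory name make_reconstruction_loss is hypothetical, not from the source):

import keras
from keras import backend as K

def make_reconstruction_loss(loss_type):
    # Return a Keras-compatible loss function closing over loss_type.
    def custom_loss(yTrue, yPred):
        x = K.flatten(yTrue)
        z_decoded = K.flatten(yPred)
        if loss_type == 'mse':
            loss = keras.metrics.mean_squared_error(x, z_decoded)
        else:
            loss = keras.metrics.mean_absolute_error(x, z_decoded)
        return K.mean(loss)
    return custom_loss

# Usage: model.compile(optimizer='adam', loss=make_reconstruction_loss('mse'))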
Example #3
def inv_normalize(X_norm, normalize_type, norm_params, input_shape=None):

    X = X_norm.copy()

    if normalize_type is not None:

        # Min-max denormalization
        if normalize_type == 'min_max':
            assert list(norm_params.keys()) == ['min', 'max']
            min, max = norm_params['min'], norm_params['max']
            X = min + X * (max - min)

        # Avg_std denormalization
        elif normalize_type == 'avg_std':
            assert list(norm_params.keys()) == ['avg', 'std']
            avg, std = norm_params['avg'], norm_params['std']
            X = avg + X * std

        # Avg_std denormalization (f-band-wise split)
        elif normalize_type == 'avg_std_fband_split':
            assert input_shape is not None
            assert list(norm_params.keys()) == ['avg', 'std']
            avg, std = norm_params['avg'], norm_params['std']
            X = X.reshape(X.shape[0], *input_shape)
            X = avg + X * std
            X = X.reshape(X.shape[0], -1)

        # Avg_std denormalization (t-band-wise split)
        elif normalize_type == 'avg_std_tband_split':
            assert input_shape is not None
            assert list(norm_params.keys()) == ['avg', 'std']
            avg, std = norm_params['avg'], norm_params['std']
            X = X.reshape(X.shape[0], *input_shape)
            X = avg + X * std
            X = X.reshape(X.shape[0], -1)

        else:
            raise utils.UserError(
                f'Parameter normalize_type={normalize_type} is not allowed.')

    return X
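
normalize and inv_normalize should be mutual inverses, so a round trip recovers the input up to floating-point error. A quick sanity check, assuming numpy as np and both functions in scope:

import numpy as np

X = np.random.rand(4, 6)  # hypothetical batch of 4 flattened frames

for norm_type in ('min_max', 'avg_std'):
    X_norm, norm_params = normalize(X, norm_type)
    X_rec = inv_normalize(X_norm, norm_type, norm_params)
    assert np.allclose(X, X_rec)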
Example #4
import json
import os
import shutil

import numpy as np

import utils.utils as utils
import utils.data_preprocessing as data_preprocessing

if __name__ == '__main__':

    import sys

    if len(sys.argv) != 2:
        raise utils.UserError(
            "Illegal command. Please type '>> python <python_script.py> <params_file.json>'."
        )

    # Load parameters from json file
    with open(sys.argv[1]) as f:
        PREPROCESSING_PARAMS = json.load(f)

    # Retrieve paths from parameters
    source_dir = PREPROCESSING_PARAMS['source_dir']
    target_dir = PREPROCESSING_PARAMS['target_dir']

    # Check source directory.
    if not os.path.exists(source_dir):
        raise utils.UserError(
            f"Parameter source_dir={source_dir} not allowed. The path does not exist."
        )
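
A minimal params_file.json matching the keys the script reads above; the paths are placeholders, not from the source:

{
    "source_dir": "data/raw",
    "target_dir": "data/preprocessed"
}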
Example #5
def build_decoder(decoder_type, decoder_output, input_shape, latent_dim):

    # decoder_input = layers.Input(K.int_shape(z)[1:])
    decoder_input = layers.Input(shape=(latent_dim, ), name='decoder_input')

    if decoder_type == 'fc_1':
        x = layers.Dense(32,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_5')(decoder_input)
        x = layers.Dense(128,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_4')(x)
        x = layers.Dense(512,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_3')(x)
        x = layers.Dense(1024,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_2')(x)
        x = layers.Dense(4000,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_1')(x)

    elif decoder_type == 'fc_2':
        x = layers.Dense(32,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_5')(decoder_input)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.5)(x)
        x = layers.Dense(128,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_4')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.25)(x)
        x = layers.Dense(512,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_3')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.5)(x)
        x = layers.Dense(2048,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_2')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.25)(x)
        x = layers.Dense(8192,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_1')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.5)(x)

    elif decoder_type == 'fc_3':
        x = layers.Dense(32, activation=tf.nn.leaky_relu)(decoder_input)
        x = layers.Dense(128, activation=tf.nn.leaky_relu)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.25)(x)
        x = layers.Dense(512, activation=tf.nn.leaky_relu)(x)
        x = layers.Dense(1024, activation=tf.nn.leaky_relu)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.5)(x)
        x = layers.Dense(2048, activation=tf.nn.leaky_relu)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.5)(x)
        x = layers.Dense(4000, activation=tf.nn.leaky_relu)(x)

    elif decoder_type == 'fc_4':
        x = layers.Dense(256, activation=tf.nn.leaky_relu)(decoder_input)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.25)(x)
        x = layers.Dense(512, activation=tf.nn.leaky_relu)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.5)(x)
        x = layers.Dense(1024, activation=tf.nn.leaky_relu)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(.25)(x)
        x = layers.Dense(2048, activation=tf.nn.leaky_relu)(x)

    else:
        raise utils.UserError(
            f'Parameter decoder_type={decoder_type} is not allowed.')

    input_length = np.prod(input_shape)

    if decoder_output == 'leaky_relu':
        x = layers.Dense(input_length,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_0')(x)
    elif decoder_output == 'tanh':
        x = layers.Dense(input_length,
                         activation=tf.nn.tanh,
                         name='dense_tanh')(x)
    elif decoder_output == 'linear':
        x = layers.Dense(input_length,
                         activation='linear',
                         name='dense_linear')(x)
    elif decoder_output == 'sigmoid':
        x = layers.Dense(input_length,
                         activation='sigmoid',
                         name='dense_sigmoid')(x)
    else:
        raise utils.UserError(
            f'Parameter decoder_output={decoder_output} is not allowed.')

    # decoder model statement
    decoder = Model(decoder_input, x, name='decoder_{}'.format(decoder_type))

    return decoder
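
Instantiating a decoder; a minimal sketch assuming the usual imports (numpy as np, tensorflow as tf, keras layers and Model) and illustrative shapes:

# Map an 8-D latent code back to flattened (40, 100) frames (4000 values).
decoder = build_decoder(decoder_type='fc_1',
                        decoder_output='linear',
                        input_shape=(40, 100),
                        latent_dim=8)
decoder.summary()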
Example #6
def build_encoder(encoder_type, encoder_output, input_shape, latent_dim):

    encoder = keras.Sequential(name=f'encoder_{encoder_type}')

    # FC encoder 1
    if encoder_type == 'fc_1':
        encoder.add(
            layers.Dense(4000,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_1'))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(
            layers.Dense(1024,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_2'))
        encoder.add(
            layers.Dense(512,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_3'))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(
            layers.Dense(128,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_4'))
        encoder.add(
            layers.Dense(32,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_5'))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(
            layers.Dense(latent_dim,
                         activation=tf.nn.leaky_relu,
                         name='dense_leaky_relu_0'))

    # FC encoder 2
    elif encoder_type == 'fc_2':
        encoder.add(layers.Dense(1024, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(512, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(256, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(latent_dim, activation=tf.nn.leaky_relu))

    # FC encoder 3
    elif encoder_type == 'fc_3':
        encoder.add(layers.Dense(2048, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(1024, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(512, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(256, activation=tf.nn.leaky_relu))
        encoder.add(layers.BatchNormalization())
        encoder.add(layers.Dropout(.25))
        encoder.add(layers.Dense(latent_dim, activation=tf.nn.leaky_relu))

    # CNN encoders: reuse all but the output layer of a classifier model
    elif encoder_type.startswith('cnn'):

        encoder.add(layers.Reshape((*input_shape, 1)))
        loaded_classifier = get_classifier_model((*input_shape, 1), latent_dim,
                                                 encoder_type)

        for layer in loaded_classifier.layers[:-1]:
            encoder.add(layer)

    else:
        raise utils.UserError(
            f'Parameter encoder_type={encoder_type} is not allowed.')

    if encoder_output == 'sin':
        encoder.add(layers.Dense(2, activation='linear', name='dense_linear'))
        encoder.add(layers.Lambda(K.sin, name='lambda_sin'))

    elif encoder_output == 'tanh':
        encoder.add(
            layers.Dense(latent_dim, activation=tf.nn.tanh, name='dense_tanh'))

    elif encoder_output == 'leaky_relu':
        encoder.add(
            layers.Dense(latent_dim,
                         activation=tf.nn.leaky_relu,
                         name='dense_leakyReLU_00'))

    elif encoder_output == 'linear':
        encoder.add(
            layers.Dense(latent_dim, activation='linear', name='dense_linear'))

    else:
        raise utils.UserError(
            f'Parameter encoder_output={encoder_output} is not allowed.')

    encoder = flatten_model(encoder)

    return encoder
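
Chaining build_encoder and build_decoder into a plain autoencoder; a hedged sketch under the same import assumptions, with illustrative shapes:

import numpy as np
from keras import layers, Model

input_shape, latent_dim = (40, 100), 8
encoder = build_encoder('fc_2', 'linear', input_shape, latent_dim)
decoder = build_decoder('fc_1', 'linear', input_shape, latent_dim)

inputs = layers.Input(shape=(np.prod(input_shape), ))
autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')
autoencoder.compile(optimizer='adam', loss='mse')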
Example #7
def get_classifier_model(input_shape, num_classes, classifier_type):

    model = keras.Sequential()

    if classifier_type == 'cnn_1':

        model.add(
            layers.Conv2D(32,
                          kernel_size=(2, 2),
                          activation='relu',
                          input_shape=input_shape))
        model.add(layers.BatchNormalization())

        model.add(layers.Conv2D(48, kernel_size=(2, 2), activation='relu'))
        model.add(layers.BatchNormalization())

        model.add(layers.Conv2D(120, kernel_size=(2, 2), activation='relu'))
        model.add(layers.BatchNormalization())

        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.25))

        model.add(layers.Flatten())

        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.BatchNormalization())
        model.add(layers.Dropout(0.25))
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.BatchNormalization())
        model.add(layers.Dropout(0.4))
        model.add(layers.Dense(num_classes, activation='softmax'))
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])

    elif classifier_type == 'cnn_2':

        model.add(
            layers.Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
        model.add(layers.Activation('relu'))

        model.add(layers.Conv2D(64, (3, 3)))
        model.add(layers.Activation('relu'))

        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.25))

        model.add(layers.Conv2D(64, (3, 3), padding='same'))
        model.add(layers.Activation('relu'))

        model.add(layers.Conv2D(64, (3, 3)))
        model.add(layers.Activation('relu'))

        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.5))

        model.add(layers.Conv2D(128, (3, 3), padding='same'))
        model.add(layers.Activation('relu'))

        # model.add(layers.Conv2D(128, (3, 3)))
        # model.add(layers.Activation('relu'))

        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.5))

        model.add(layers.Flatten())

        model.add(layers.Dense(512))
        model.add(layers.Activation('relu'))
        model.add(layers.Dropout(0.5))

        model.add(layers.Dense(num_classes, activation='softmax'))

        optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

    else:

        raise utils.UserError(
            f'Parameter classifier_type={classifier_type} not allowed.')

    return model
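
Building and fitting the classifier on one-hot labels; a minimal sketch with hypothetical shapes and random data:

import numpy as np
import keras

# Hypothetical data: 32 single-channel (40, 100) spectrograms, 10 classes.
model = get_classifier_model(input_shape=(40, 100, 1),
                             num_classes=10,
                             classifier_type='cnn_1')
X = np.random.rand(32, 40, 100, 1)
y = keras.utils.to_categorical(np.random.randint(0, 10, size=32), num_classes=10)
model.fit(X, y, epochs=1, batch_size=8)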
Example #8
import json
import os

import numpy as np
import math as M

import keras
from keras import backend as K

import utils.utils as utils
import utils.model_training as model_training

import matplotlib.pyplot as plt

if __name__ == "__main__":

    import sys

    if len(sys.argv) != 2:
        raise utils.UserError(
            "Illegal command. Please type '>> python <python_script.py> <params_file.json>'."
        )

    # Load parameters from json file
    with open(sys.argv[1]) as f:
        TRAINING_PARAMS = json.load(f)

    # Retrieve paths from parameters
    data_dir = TRAINING_PARAMS['data_dir']
    save_model_to = TRAINING_PARAMS['save_model_to']

    # Check source directory.
    if not os.path.exists(data_dir):
        raise utils.UserError(
            f"Parameter data_dir={data_dir} not allowed. The path does not exist.")

    # Check target directory and confirm overwriting if it exists.