Example 1
# Assumed imports for this snippet (not shown in the original):
from keras.models import Sequential
from keras.layers import Dense, Dropout
from talos.utils import lr_normalizer

def iris_model(x_train, y_train, x_val, y_val, params):

    model = Sequential()
    model.add(
        Dense(params['first_neuron'],
              input_dim=x_train.shape[1],
              activation='relu'))

    model.add(Dropout(params['dropout']))
    model.add(Dense(y_train.shape[1],
                    activation=params['last_activation']))

    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['loss'],
                  metrics=['acc'])

    out = model.fit(x_train,
                    y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    validation_data=(x_val, y_val))

    return out, model
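For context, a model function like iris_model is meant to be driven by talos.Scan together with a parameter dictionary whose keys match the params[...] lookups above. A minimal sketch, assuming a recent Talos version and that x and y already hold the iris features and one-hot labels:

import talos
from keras.optimizers import Adam, Nadam

# Hypothetical search space; every key matches a params[...] lookup in iris_model.
p = {'first_neuron': [8, 16, 32],
     'dropout': [0.0, 0.25],
     'last_activation': ['softmax'],
     'optimizer': [Adam, Nadam],
     'lr': [0.5, 1, 5],  # raw values that lr_normalizer rescales per optimizer
     'loss': ['categorical_crossentropy'],
     'batch_size': [16, 32],
     'epochs': [50]}

scan_object = talos.Scan(x=x, y=y, params=p, model=iris_model,
                         experiment_name='iris')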
Example 2
# Assumed imports for this snippet (not shown in the original):
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from talos.utils import lr_normalizer

def create_model(params):
    # The LSTM layer expects 3D input shaped [samples, timesteps, features]
    print('Creating the model.')
    model = Sequential()
    model.add(LSTM(params['first_layer'], activation='tanh',
                   recurrent_activation='sigmoid', recurrent_dropout=0,
                   unroll=False, use_bias=True))
    model.add(Dropout(params['dropout']))
    model.add(Dense(1, activation=params['last_activation']))

    print('Compiling the model.')
    model.compile(loss=params['losses'],
                  optimizer=params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer'])),
                  metrics=['acc'])
    return model
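The comment above is the key constraint: before this model is fed, 2D data has to be reshaped into the [samples, timesteps, features] layout the LSTM expects. A quick sketch with made-up dimensions:

import numpy as np

x = np.random.rand(1000, 250 * 136)   # hypothetical flat data
x = x.reshape(1000, 250, 136)         # [samples, timesteps, features]
print(x.shape)                        # (1000, 250, 136)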
Example 3
# This function is defined inside a class method, so `self` (carrying the
# image data, the parameter dictionary, and the seed) is captured from the
# enclosing scope rather than passed in.
def model(x_train, y_train, x_val, y_val, params):
    rcnn = RCNN(ImageData=self.ImageData,
                loss=self.params['loss'],
                opt=self.params['opt'],
                lr=lr_normalizer(self.params['lr'],
                                 self.params['opt']),
                seed=self.seed,
                verbose=0)

    return rcnn.train(epochs=self.params['epochs'],
                      batch_size=self.params['batch_size'],
                      split_size=self.params['split_size'],
                      checkpoint_path=None,
                      early_stopping=False,
                      verbose=0)
Example 4
# Assumed imports for this snippet (not shown in the original):
from keras.applications.densenet import DenseNet121
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from talos.utils import lr_normalizer

# num_classes, loss_fx, metrics, class_weight and imageLoader are assumed
# to be defined at module level.
def talos_model(sub_train_images, y_train, sub_val_images, y_val, params):
    print(f"parameters: {params}")
    print(f"y_train.shape: {y_train.shape}")
    # input_tensor = Input(shape=(224, 224, 3))  # assumes K.image_data_format() == 'channels_last'
    base_model = DenseNet121(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    es = EarlyStopping(monitor='val_loss',
                       min_delta=0,
                       patience=5,
                       verbose=0,
                       mode='auto',
                       baseline=None,
                       restore_best_weights=True)
    # mode must be 'min', not 'max': lower val_loss is better, so 'max'
    # would keep the worst checkpoint instead of the best one.
    mc = ModelCheckpoint('best_model.h5',
                         monitor='val_loss',
                         mode='min',
                         verbose=1,
                         save_best_only=True)

    for layer in base_model.layers:
        layer.trainable = True
    # class_weight is a fit-time argument, not a compile-time one; it has
    # been moved to the fit_generator call below.
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=loss_fx,
                  metrics=metrics)

    out = model.fit_generator(
        imageLoader(sub_train_images, y_train, params['batch_size']),
        steps_per_epoch=sub_train_images.shape[0] // params['batch_size'],
        epochs=20,
        validation_data=imageLoader(sub_val_images, y_val,
                                    params['batch_size']),
        validation_steps=sub_val_images.shape[0] // params['batch_size'],
        class_weight=class_weight,
        callbacks=[es, mc],
        verbose=2)

    # print(f"out: {out.history.keys()}")
    return out, model
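imageLoader is a project-specific generator consumed by fit_generator; its implementation is not shown here. A minimal sketch of what such a batch generator usually looks like (the name, arguments, and shapes are assumptions):

import numpy as np

def imageLoader(images, labels, batch_size):
    """Yield (batch_images, batch_labels) tuples indefinitely."""
    n = images.shape[0]
    while True:  # fit_generator expects a generator that never exhausts
        for start in range(0, n, batch_size):
            end = min(start + batch_size, n)
            yield images[start:end], labels[start:end]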
Example 5
# Assumed imports for this snippet (not shown in the original);
# data_treatment is a project-specific helper defined elsewhere.
from pathlib import Path
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from sklearn.model_selection import train_test_split
from talos.utils import lr_normalizer

def best_model_train():
    from keras.optimizers import Adam
    # The LSTM layer expects 3D input shaped [samples, timesteps, features]
    print('Creating the model.')
    model = Sequential()
    model.add(LSTM(150, activation='tanh', recurrent_activation='sigmoid',
                   recurrent_dropout=0, unroll=False, use_bias=True,
                   input_shape=(250, 136)))
    model.add(Dropout(0.35))
    model.add(Dense(1, activation='sigmoid'))

    print('Compiling the model.')
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=lr_normalizer(6.04, Adam)),
                  metrics=['acc'])
    data, labels = data_treatment(Path('./landmarks'))
    x_train, x_val, y_train, y_val = train_test_split(data, labels, test_size=0.2)
    return model.fit(x_train, y_train,
                     epochs=100,
                     batch_size=400,
                     validation_data=(x_val, y_val),
                     verbose=1, shuffle=False)
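Why pass 6.04 as a learning rate? lr_normalizer rescales the raw search value by a per-optimizer factor so that one parameter range can serve several optimizers, landing near each optimizer's usual default. A quick sanity check (the exact scaling depends on the Talos version):

from keras.optimizers import Adam
from talos.utils import lr_normalizer

# If Talos scales Adam learning rates by Adam's 0.001 default,
# this prints roughly 0.006.
print(lr_normalizer(6.04, Adam))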
Example 6
# Assumed imports for this snippet (not shown in the original):
import keras
from keras.models import Sequential
from keras.layers import Dense
from talos.utils import lr_normalizer

def create_model(trainX, trainY, testX, testY, params):
    model = Sequential([
        Dense(params['first_neuron'],
              input_shape=(len(trainX[1]), ),
              activation='relu'),
        Dense(1)
    ])

    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss='mean_squared_error',
                  metrics=['mean_squared_error'])

    # History is recorded by fit automatically; listing the callback
    # explicitly is redundant but harmless.
    model_out = model.fit(trainX,
                          trainY,
                          validation_data=(testX, testY),
                          batch_size=params['batch_size'],
                          callbacks=[keras.callbacks.History()],
                          epochs=params['epochs'],
                          verbose=0)

    return model_out, model
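Note the input-shape trick: len(trainX[1]) measures the width of one training row, which for a 2D array equals trainX.shape[1]. A quick check with hypothetical data:

import numpy as np

trainX = np.zeros((100, 7))  # hypothetical 100 samples, 7 features
assert len(trainX[1]) == trainX.shape[1] == 7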
Example 7
# Assumed imports for this snippet (not shown in the original);
# plot_loss is a project-specific helper defined elsewhere.
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.callbacks import EarlyStopping
from talos.utils import lr_normalizer

def the_network(X_train, y_train, X_test, y_test, params):

    n_network = Sequential()
    n_network.add(
        Dense(params['first_neuron'],
              input_dim=X_train.shape[1],
              activation='linear'))
    n_network.add(BatchNormalization())
    n_network.add(
        Dense(params['second_layer'], activation=params['activation']))
    n_network.add(Dropout(params['dropout']))
    n_network.add(Dense(1, activation=params['activation']))

    # Compiling the network

    n_network.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                      loss=params['loss_func'],
                      metrics=['mse'])

    # Adding callback - early stopping

    cb = EarlyStopping(monitor='mse', patience=params['patience'])

    # Fitting the network and creating fitting plots

    history = n_network.fit(X_train,
                            y_train,
                            validation_data=(X_test, y_test),
                            epochs=params['epochs'],
                            verbose=0,
                            batch_size=params['batch_size'],
                            callbacks=[cb])

    # In the original this call sat after the return statement, where it
    # could never execute; it has to come before the return to take effect.
    plot_loss(history.history['loss'], history.history['val_loss'])

    return history, n_network
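plot_loss is not a Keras or Talos function but a project helper. A minimal sketch of what it presumably does, using matplotlib:

import matplotlib.pyplot as plt

def plot_loss(loss, val_loss):
    """Plot training and validation loss curves over epochs."""
    plt.plot(loss, label='loss')
    plt.plot(val_loss, label='val_loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()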
Example 8
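The analyze_object below is a Talos analysis object built from a finished scan; plot_corr then plots the correlation between hyperparameters and the chosen metric, with the listed columns excluded. A sketch of how it would be created, assuming a completed scan_object and a recent Talos version:

import talos

# scan_object is the result of a finished talos.Scan(...) run.
analyze_object = talos.Analyze(scan_object)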
analyze_object.plot_corr(metric='loss', exclude=['val_loss', 'mse', 'val_mse'])

# Re-run the network with the best parameters (chosen manually for now; selection to be automated)

# Assumed imports for this snippet (not shown in the original):
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from talos.utils import lr_normalizer

n_network = Sequential()

n_network.add(Dense(32, input_dim=X_train.shape[1], activation='linear'))
n_network.add(BatchNormalization())
n_network.add(Dense(32, activation='relu'))
n_network.add(Dropout(0.25))
n_network.add(Dense(1, activation='relu'))

# Compiling the network

n_network.compile(optimizer=Adam(lr=lr_normalizer(0.1, Adam)),
                  loss='mse',
                  metrics=['mse'])

# Adding callback - early stopping

cb = EarlyStopping(monitor='mse', patience=75)

# Fitting the network and creating fitting plots

history = n_network.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        epochs=1000,
                        verbose=1,
                        batch_size=512,
                        callbacks=[cb])  # the original snippet was cut off here; closed with the callback list as in Example 7
Example 9
# Assumed imports for this snippet (not shown in the original); L1_reg,
# L2_reg and L1L2_reg are taken to be aliases for the Keras regularizers.
import keras
from keras.regularizers import l1 as L1_reg, l2 as L2_reg, l1_l2 as L1L2_reg
from talos.utils import lr_normalizer

def model(x_train, y_train, x_val, y_val, p):
    # Initialise the model
    model = keras.models.Sequential()

    #Grab the optimised parameters
    nPercent = p["nPercent"]
    nCount = nPercent * 50
    nShrink = p["nShrink"]
    maxLayer = p["maxLayer"]
    minNeuron = p["minNeuron"]
    firstactivation = p["first_activation"]
    hiddenlayeractivation = p["hidden_activation"]
    lastactivation = p["last_activation"]
    dropout = p["dropOut"]
    bSize = p["batchSize"]
    epochs = p["epochs"]
    
    #************************** Initialise the optimiser
    opt = p["optimiser"](
        lr=lr_normalizer(
            p["lr"],
            p["optimiser"]
        )
    )

    #************************** Grab the loss function
    loss = p["loss"]
    
    #********************* Get the regulariser with its expected factor
    kernel_regulariser = p["kernel_reg"]
    if kernel_regulariser == L1_reg:
        regulariser = kernel_regulariser(l1=p["alpha_l1"])
    elif kernel_regulariser == L2_reg:
        regulariser = kernel_regulariser(l2=p["alpha_l2"])
    elif kernel_regulariser == L1L2_reg:
        regulariser = kernel_regulariser(l1=p["alpha_l1"], l2=p["alpha_l2"])
    else:
        regulariser = None  # fall back to no regularisation for unknown values


    # Start the loop; `layer` counts the hidden layers and must start at zero
    layer = 0
    while nCount > minNeuron and layer < maxLayer:
        #************************* The first (0th) hidden layer needs an explicit input_dim
        if layer == 0:
            model.add(
                keras.layers.Dense(
                    nCount,
                    name="Input",
                    input_dim = x_train.shape[1],
                    activation = firstactivation,
                    use_bias= True,
                    kernel_initializer="he_uniform",
                    kernel_regularizer= regulariser
                )
            )
        else:
            model.add(
                keras.layers.Dense(
                    nCount, 
                    name = "Layer_%s"%(layer + 1)
                    activation=hiddenlayeractivation,
                    kernel_initializer ="he_uniform",
                    use_bias=True,
                    kernel_regularizer = regulariser
                )
            ) 
        layer += 1

        #*************************** Add dropout after each hidden layer
        # (`constant` is assumed to be a module-level RNG seed defined elsewhere)
        model.add(
            keras.layers.Dropout(
                dropout, seed=constant
            )
        )

        #*************************** Shrink the neuron count for the next layer
        nCount = int(nCount * nShrink)  # Dense needs an integer unit count
    
    #*************************** Output layer
    model.add(
        keras.layers.Dense(
            y_train.shape[1],
            name="Output",
            activation=lastactivation,
            kernel_regularizer=regulariser
        )
    )
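The original snippet breaks off here, before any compile or fit step. To make the shrinking-width loop concrete, here is a standalone sketch of the layer widths it produces under hypothetical parameter values:

# Width schedule of the while loop above, with made-up values.
nCount, nShrink, minNeuron, maxLayer = 50, 0.5, 5, 10
layer, widths = 0, []
while nCount > minNeuron and layer < maxLayer:
    widths.append(int(nCount))
    layer += 1
    nCount = int(nCount * nShrink)
print(widths)  # [50, 25, 12, 6]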