Example #1
def iris_model(x_train, y_train, x_val, y_val, params):

    # note how instead of passing the value, we pass a dictionary entry
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))

    # same here, just passing a dictionary entry
    model.add(Dropout(params['dropout']))

    # with this call we can create any number of hidden layers
    hidden_layers(model, params, y_train.shape[1])

    # again, instead of the activation name, we have a dictionary entry
    model.add(Dense(y_train.shape[1],
                    activation=params['last_activation']))

    # here we normalize the learning rate for the chosen optimizer
    model.compile(optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['losses'],
                  metrics=['acc'])

    # here we are also using the early_stopper function for a callback
    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    validation_data=[x_val, y_val],
                    callbacks=[early_stopper(params['epochs'], mode=[1, 1])])

    return out, model
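A function like iris_model is not called directly; it is handed to a Talos scan together with a parameter dictionary whose keys cover everything the function reads from params. The following is a minimal sketch, not from the source: the values are illustrative, x and y are assumed to be the prepared iris features and one-hot labels, and the exact keys required by the hidden_layers helper (e.g. 'shapes', 'activation') vary by Talos version, as may the Scan signature.

import talos as ta
from keras.optimizers import Adam, Nadam

# x and y are assumed to be loaded beforehand, e.g. via talos's bundled
# loader (API varies by version):
# x, y = ta.templates.datasets.iris()

# illustrative parameter space; every key iris_model reads must be present
p = {'first_neuron': [8, 16, 32],
     'hidden_layers': [0, 1, 2],
     'shapes': ['brick'],            # consumed by the hidden_layers helper
     'activation': ['relu'],         # consumed by the hidden_layers helper
     'dropout': [0.0, 0.25],
     'optimizer': [Adam, Nadam],
     'lr': [0.5, 1.0, 2.0],          # rescaled by lr_normalizer at compile time
     'losses': ['categorical_crossentropy'],
     'last_activation': ['softmax'],
     'batch_size': [16, 32],
     'epochs': [100]}

# one training run per sampled permutation of p (an experiment name may be
# required depending on the Talos version)
scan = ta.Scan(x=x, y=y, params=p, model=iris_model)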
Example #2
def cervix_model(x_train, y_train, x_val, y_val, params):

    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))

    model.add(Dropout(params['dropout']))

    hidden_layers(model, params, 1)

    model.add(Dense(1, activation=params['last_activation']))

    model.compile(optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['loss'],
                  metrics=['acc',
                           fmeasure,
                           recall,
                           precision,
                           matthews_correlation])

    results = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val],
                        callbacks=[early_stopper(params['epochs'],
                                                 mode='moderate',
                                                 monitor='val_fmeasure')])

    return results, model
Example #3
def get_model(m_input_shape, m_params=None):
    global OUTPUT_RES

    if m_params is None:
        m_params = {'input_shape': m_input_shape,
                    'optimizer': Adam,
                    'lr': 2.2,
                    'batch_size': 100,
                    'second_layer': 0,
                    'dense_size_1': 3000,
                    'dense_size_2': 0,
                    'dropout': 0.005,
                    'dense_activation': 'relu',
                    'output_activation': 'linear'}

    m_model = Sequential()

    m_model.add(Conv2D(8, (3, 3), input_shape=m_input_shape, activation="relu", data_format="channels_first"))
    m_model.add(Conv2D(32, (3, 3), activation="relu", dim_ordering="th"))
    m_model.add(MaxPooling2D((2, 2)))
    m_model.add(Conv2D(32, (3, 3), activation="relu", dim_ordering="th"))
    m_model.add(Conv2D(64, (3, 3), activation="relu", dim_ordering="th"))
    m_model.add(MaxPooling2D((2, 2)))
    m_model.add(Flatten())
    m_model.add(Dense(m_params['dense_size_1'], activation=m_params['dense_activation']))
    m_model.add(Dropout(m_params['dropout']))
    if m_params['second_layer'] == 1:
        m_model.add(Dense(m_params['dense_size_2'], activation=m_params['dense_activation']))
        m_model.add(Dropout(m_params['dropout']))
    m_model.add(Dense(OUTPUT_RES * OUTPUT_RES, activation=m_params['output_activation']))

    m_model.compile(m_params['optimizer'](lr=lr_normalizer(m_params['lr'], m_params['optimizer'])),
                    'mean_squared_error', metrics=['mean_absolute_error'])

    return m_model
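Because get_model falls back to built-in defaults when m_params is None, it can be exercised on its own. A hypothetical call, not from the source: the 3x64x64 channels-first shape and the OUTPUT_RES value are assumptions for illustration.

OUTPUT_RES = 16                      # the function reads this global
model = get_model((3, 64, 64))       # channels-first: (channels, height, width)
model.summary()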
Example #4
def cactus_model(x_train, y_train, x_test, y_test, p):

    global model
    model = resnet_model(p)

    model.compile(
        loss=p['losses'],
        optimizer=p['optimizer'](lr=lr_normalizer(p['lr'], p['optimizer'])),
        metrics=['accuracy'])
    model.summary()

    # LOGS
    tb_callback = TensorBoard(log_dir=logs_name(p))

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=p['batch_size'],
                        epochs=p['epochs'],
                        callbacks=[
                            tb_callback,
                            LambdaCallback(on_train_begin=logs_refresh_mlf_run,
                                           on_epoch_end=logs_on_epoch_end)
                        ],
                        verbose=0)

    return history, model
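logs_name, logs_refresh_mlf_run and logs_on_epoch_end are project helpers that are not shown in this snippet. A hypothetical sketch of what they might look like, matching the signatures LambdaCallback expects (on_train_begin receives logs; on_epoch_end receives epoch and logs):

def logs_name(p):
    # e.g. encode the key hyperparameters into the TensorBoard run directory
    return 'logs/lr{}-bs{}-ep{}'.format(p['lr'], p['batch_size'], p['epochs'])

def logs_refresh_mlf_run(logs):
    # called once when training starts, e.g. to open a fresh tracking run
    print('starting a new run')

def logs_on_epoch_end(epoch, logs):
    # called after every epoch with the current metric values
    print('epoch {}: val_loss={:.4f}'.format(epoch, logs['val_loss']))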
Example #5
def mlp_main(x, y, x_val, y_val, params):

    model_mlp = Sequential()
    nSNP = x.shape[1]
    try:
        out_c = y.shape[1]
    except IndexError:
        out_c = 1

    model_mlp.add(
        Dense(params['first_neuron'],
              input_dim=nSNP,
              activation=params['activation'],
              kernel_initializer='normal',
              kernel_regularizer=regularizers.l2(params['reg1'])))

    model_mlp.add(Dropout(params['dropout_1']))
    # if we also want to test the number of hidden layers, that's possible:
    # range(0) is empty, so hidden_layers == 0 simply adds none
    # (alternatively, talos's hidden_layers(model, params, 1) helper could build these)
    for _ in range(params['hidden_layers']):
        model_mlp.add(
            Dense(params['hidden_neurons'],
                  activation=params['activation'],
                  kernel_regularizer=regularizers.l2(params['reg1'])))
        model_mlp.add(Dropout(params['dropout_2']))

    model_mlp.add(
        Dense(out_c,
              activation=params['last_activation'],
              kernel_regularizer=regularizers.l2(params['reg2'])))
    # map the optimizer name to the corresponding Keras class
    if params['optimizer'] == 'Adam':
        params['optimizer'] = Adam
    elif params['optimizer'] == 'Nadam':
        params['optimizer'] = Nadam
    elif params['optimizer'] == 'sgd':
        params['optimizer'] = sgd

    model_mlp.compile(loss=mean_squared_error,
                      optimizer=params['optimizer'](
                          lr=lr_normalizer(params['lr'], params['optimizer'])),
                      metrics=[acc_pearson_r])
    #es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)

    # callbacks=[live()] to watch training output live
    # callbacks=[es] to enable early stopping

    out_mlp = model_mlp.fit(x,
                            y,
                            validation_split=0.2,
                            verbose=0,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'])

    return out_mlp, model_mlp
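mlp_main accepts the optimizer as a string and maps it to the Keras class itself, and it validates with validation_split rather than with the x_val/y_val arguments. A hypothetical direct call, not from the source: the values are illustrative, and x and y are assumed to be the genotype matrix and phenotype vector.

p = {'first_neuron': 64,
     'hidden_neurons': 32,
     'hidden_layers': 2,
     'activation': 'relu',
     'last_activation': 'linear',   # regression output
     'dropout_1': 0.2,
     'dropout_2': 0.1,
     'reg1': 1e-4,
     'reg2': 1e-4,
     'optimizer': 'Adam',           # mapped to the Adam class inside the function
     'lr': 1.0,                     # rescaled by lr_normalizer
     'batch_size': 32,
     'epochs': 50}

# x_val/y_val are ignored here because fit() uses validation_split=0.2
history, model = mlp_main(x, y, None, None, p)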
Example #6
def mlp_main_cat(x, y, x_val, y_val, params):

    model_mlp = Sequential()
    nSNP = x.shape[1]
    last_layer = y.shape[1]

    model_mlp.add(
        Dense(params['first_neuron'],
              input_dim=nSNP,
              activation=params['activation'],
              kernel_initializer='normal',
              activity_regularizer=regularizers.l1(params['reg1'])))

    model_mlp.add(Dropout(params['dropout_1']))
    # if we also want to test the number of hidden layers, that's possible:
    # range(0) is empty, so hidden_layers == 0 simply adds none
    # (alternatively, talos's hidden_layers(model, params, 1) helper could build these)
    for _ in range(params['hidden_layers']):
        model_mlp.add(
            Dense(params['hidden_neurons'],
                  activation=params['activation'],
                  activity_regularizer=regularizers.l2(params['reg1'])))
        model_mlp.add(Dropout(params['dropout_2']))
    model_mlp.add(Dense(last_layer, activation='softmax'))
    # map the optimizer name to the corresponding Keras class
    if params['optimizer'] == 'Adam':
        params['optimizer'] = Adam
    elif params['optimizer'] == 'Nadam':
        params['optimizer'] = Nadam
    elif params['optimizer'] == 'sgd':
        params['optimizer'] = sgd

    model_mlp.compile(loss='categorical_crossentropy',
                      optimizer=params['optimizer'](
                          lr=lr_normalizer(params['lr'], params['optimizer'])),
                      metrics=['accuracy'])

    # 'acc' or mean_squared_error can be used in metrics
    # simple early stopping:
    # if the monitored quantity is an accuracy-style metric, choose
    # mode='max'; for a loss-style metric use mode='min'
    #es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)

    # callbacks=[live()] to watch training output live
    # callbacks=[es] to enable early stopping

    out_mlp = model_mlp.fit(x,
                            y,
                            validation_split=0.2,
                            verbose=0,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'])

    return out_mlp, model_mlp
Example #7
def AmazonModel(x_train_throwaway, y_train_throwaway, x_val_throwaway,
                y_val_throwaway, params):
    # Pull quantities from params here for convenience
    # Create our training data inside the model
    batch_size = params['batch_size']
    max_length = params['max_length']
    vocab_size = params['vocab_size']
    loss = params['loss']
    num_data_points = params['num_data_points']
    num_units = params['num_units']
    embedding_size = params['embedding_size']
    multiple_LSTM_layers = params['multiple_LSTM_layers']

    x_train, y_train, x_valid, y_valid = train_data[(vocab_size, max_length,
                                                     num_data_points)]

    earlyStopper = EarlyStopping(patience=3,
                                 verbose=0,
                                 restore_best_weights=True,
                                 monitor="val_acc")

    model = Sequential()
    model.add(
        Embedding(input_dim=vocab_size + 1,
                  output_dim=embedding_size,
                  input_length=max_length))
    model.add(CuDNNLSTM(num_units, return_sequences=multiple_LSTM_layers))
    if multiple_LSTM_layers:
        model.add(CuDNNLSTM(num_units))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss=loss,
                  optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])),
                  metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
                        validation_data=[x_valid, y_valid],
                        batch_size=batch_size,
                        epochs=30,
                        callbacks=[earlyStopper],
                        verbose=2)

    return history, model
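AmazonModel deliberately ignores the data Talos passes in (the *_throwaway arguments) and instead looks up pre-built datasets in a global train_data dict keyed by (vocab_size, max_length, num_data_points) tuples, so tokenization happens once per variant rather than once per permutation. A hypothetical sketch of building that cache; prepare_amazon_data is an assumed helper, not from the source:

train_data = {}
for vocab_size in (5000, 10000):
    for max_length in (100, 200):
        for num_data_points in (50000,):
            key = (vocab_size, max_length, num_data_points)
            # assumed helper: tokenize, pad and split the review corpus
            train_data[key] = prepare_amazon_data(vocab_size, max_length,
                                                  num_data_points)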
Example #8
def cervical_cancer(x_train, y_train, x_val, y_val, params):

    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers

    from talos.metrics.keras_metrics import matthews_correlation_acc, precision_acc
    from talos.metrics.keras_metrics import recall_acc, fmeasure_acc

    model = Sequential()
    model.add(
        Dense(params['first_neuron'],
              input_dim=x_train.shape[1],
              activation='relu'))

    model.add(Dropout(params['dropout']))

    hidden_layers(model, params, 1)

    model.add(Dense(1, activation=params['last_activation']))

    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['losses'],
                  metrics=[
                      'acc', fmeasure_acc, recall_acc, precision_acc,
                      matthews_correlation_acc
                  ])

    results = model.fit(x_train,
                        y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val],
                        callbacks=[
                            early_stopper(params['epochs'],
                                          mode='moderate',
                                          # Keras logs each metric under its
                                          # function name, here fmeasure_acc
                                          monitor='val_fmeasure_acc')
                        ])

    return results, model
Example #9
def cactus_model(x_train, y_train, x_test, y_test, p):

    # create and train model
    global model
    model = Sequential()
    model.add(Flatten(input_shape=(32, 32, 3)))

    model.add(
        Dense(p['first_neuron'],
              activation=p['activation'],
              kernel_initializer='normal'))
    model.add(Dropout(p['dropout']))
    hidden_layers(model, p, 1)
    model.add(
        Dense(NUM_CLASSES,
              activation=p['last_activation'],
              kernel_initializer='normal'))

    model.compile(
        loss=p['losses'],
        optimizer=p['optimizer'](lr=lr_normalizer(p['lr'], p['optimizer'])),
        metrics=['accuracy'])
    model.summary()

    # LOGS
    tb_callback = TensorBoard(log_dir=logs_name(p))

    history = model.fit(x_train,
                        y_train,
                        validation_data=(x_test, y_test),
                        batch_size=p['batch_size'],
                        epochs=p['epochs'],
                        callbacks=[
                            tb_callback,
                            LambdaCallback(on_train_begin=logs_refresh_mlf_run,
                                           on_epoch_end=logs_on_epoch_end)
                        ],
                        verbose=0)

    return history, model
Example #10
def fp_model(x_train, y_train, x_val, y_val, params):
    # To train
    model = Sequential()
    # one Dense+Dropout block per entry in the neuron tuple
    for num_neurons in params['neuron_tuple']:
        model.add(Dense(num_neurons, activation=params['activation']))
        model.add(Dropout(params['dropout']))
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['losses'],
                  metrics=['accuracy'])

    # train the model, iterating on the data in batches of params['batch_size']
    history = model.fit(x_train,
                        y_train,
                        epochs=params['epochs'],
                        batch_size=params['batch_size'],
                        verbose=1,
                        validation_data=[x_val, y_val])
    return history, model
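In fp_model the whole hidden topology is a single tuple-valued hyperparameter, so one scan can compare depth and width together. A hypothetical parameter space, not from the source (values illustrative):

from keras.optimizers import Adam

# each candidate tuple lists the hidden layer widths in order
p = {'neuron_tuple': [(64,), (64, 32), (128, 64, 32)],
     'activation': ['relu'],
     'last_activation': ['sigmoid'],
     'dropout': [0.2, 0.5],
     'optimizer': [Adam],
     'lr': [1.0, 2.0],
     'losses': ['binary_crossentropy'],
     'batch_size': [32],
     'epochs': [20]}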
Example #11
    def fake_news_model(self, x_train, y_train, x_val, y_val, params):
        model = Sequential()
        model.add(Dense(10, input_dim=(len(self.HEADERS) - 1),
                        activation=params['activation'],
                        kernel_initializer='normal'))

        model.add(Dropout(params['dropout']))

        hidden_layers(model, params, 1)

        model.add(Dense(1, activation=params['last_activation'],
                        kernel_initializer='normal'))

        model.compile(loss=params['losses'],
                      optimizer=params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer'])),
                      metrics=['acc'])

        history = model.fit(x_train, y_train,
                            validation_data=[x_val, y_val],
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            verbose=0)

        return history, model
Example #12
    # fragment: the tail of a model-loading helper; json_file was opened
    # earlier on the saved architecture for `filename`
    model_json = json_file.read()
    json_file.close()

    # load weights
    new_model = model_from_json(model_json)
    new_model.load_weights(BACKUP_PATH + "/" + filename + ".h5")

    return new_model


dataset, filenames = imagesToDataset(PROJECT_PATH + '/data/test/')


model = build_model()
model.summary()
# `params` comes from the tuning run and must be in scope here (fragment)
model.compile(loss=params['losses'],
              optimizer=params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer'])),
              metrics=['accuracy'])

results = model.predict(x=dataset)

# clean up the predictions: round the has_cactus probabilities to 0/1
hascactus_column_results = np.rint(results[:, [1]]).astype(int)
clean_results = np.column_stack([filenames, hascactus_column_results])

# export
np.savetxt(
    PREDICTIONS_PATH + "/" + filename + ".csv", clean_results,
    header="id,has_cactus",
    delimiter=",", fmt="%s", comments='')
Example #13
def cnn_main(x, y, x_val, y_val, params):
    # next we can build the model exactly like we would normally do it
    # Instantiate
    model_cnn = Sequential()
    nSNP = x.shape[1]
    try:
        out_c = y.shape[1]
    except IndexError:
        out_c = 1
    x = np.expand_dims(x, axis=2)
    x_val = np.expand_dims(x_val, axis=2)
    # add the convolutional layer(s); only the first one needs input_shape
    for i in range(params['nconv']):
        conv_kwargs = dict(kernel_size=params['kernel_size'],
                           strides=params['nStride'],
                           kernel_regularizer=regularizers.l2(params['reg2']),
                           kernel_initializer='normal',
                           activity_regularizer=regularizers.l1(params['reg1']),
                           activation=params['activation_1'])
        if i == 0:
            conv_kwargs['input_shape'] = (nSNP, 1)
        model_cnn.add(Conv1D(params['nFilter'], **conv_kwargs))
        model_cnn.add(MaxPooling1D(pool_size=params['pool']))

    # the feature maps are linearized (flattened) to feed the standard dense layers
    model_cnn.add(Flatten())

    # if we also want to test the number of hidden layers, that's possible
    for _ in range(params['hidden_layers']):
        model_cnn.add(
            Dense(params['hidden_neurons'],
                  activation=params['activation_2'],
                  kernel_regularizer=regularizers.l2(params['reg2'])))
        model_cnn.add(Dropout(params['dropout_2']))

    model_cnn.add(
        Dense(out_c,
              activation=params['last_activation'],
              kernel_regularizer=regularizers.l2(params['reg3'])))
    # map the optimizer name to the corresponding Keras class
    if params['optimizer'] == 'Adam':
        params['optimizer'] = Adam
    elif params['optimizer'] == 'Nadam':
        params['optimizer'] = Nadam
    elif params['optimizer'] == 'sgd':
        params['optimizer'] = sgd
    model_cnn.compile(loss=mean_squared_error,
                      optimizer=params['optimizer'](
                          lr=lr_normalizer(params['lr'], params['optimizer'])),
                      metrics=[acc_pearson_r])

    # simple early stopping:
    # if the monitored quantity is an accuracy-style metric (Pearson here),
    # choose mode='max'; for a loss-style metric use mode='min'
    # 7/08/2019: changed mean_squared_error to acc_pearson_r and mode='min' to mode='max'
    #es = EarlyStopping(monitor='val_acc_pearson_r', mode='max', verbose=1)

    # callbacks=[live()] to watch training output live
    # callbacks=[es] to enable early stopping

    out_cnn = model_cnn.fit(x,
                            y,
                            validation_split=0.2,
                            verbose=0,
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            callbacks=[live()])

    return out_cnn, model_cnn
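cnn_main reshapes the inputs with np.expand_dims because Conv1D expects a trailing channel axis; a quick check of the shape change:

import numpy as np

x = np.zeros((128, 500))         # 128 samples, 500 SNPs
x = np.expand_dims(x, axis=2)    # add a single 'channel' axis for Conv1D
print(x.shape)                   # (128, 500, 1), matching input_shape=(nSNP, 1)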