Example #1
def cnn_model(x_train, y_train, x_val, y_val, params):
    embedding_layer = Embedding(MAX_NUM_WORDS,
                                EMBEDDING_DIM,
                                embeddings_initializer=Constant(embedding_matrix),
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    print('Training model.')
    # train a 1D convnet with global maxpooling
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu', padding='same')(x)
    x = MaxPooling1D(5)(x)
    # x = BatchNormalization()(x)
    x = Conv1D(128, 5, activation='relu', padding='same')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(params['dropout'])(x)
    preds = Dense(len(labels_index), activation='softmax')(x)

    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])),
                  metrics=['acc'])

    history = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        validation_data=(x_val, y_val))
    return history, model
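All of these functions follow the Talos model-function contract: they take (x_train, y_train, x_val, y_val, params) and return the fit history together with the model. As a point of reference, here is a minimal sketch (not from the source) of how such a function is handed to a scan; the parameter ranges, the inputs x_train/y_train (assumed already tokenized and padded, as the globals above imply), and the experiment_name argument are illustrative and assume a recent Talos version.

# Minimal, illustrative Talos scan over cnn_model; all values are placeholders.
import talos
from keras.optimizers import Adam, Nadam

p = {'lr': [0.5, 1, 5],          # scaled per optimizer by lr_normalizer
     'optimizer': [Adam, Nadam],
     'dropout': [0.25, 0.5],
     'batch_size': [32, 64],
     'epochs': [10]}

scan = talos.Scan(x=x_train, y=y_train,
                  params=p,
                  model=cnn_model,
                  experiment_name='cnn_scan')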
Example #2
def higgs_nn(X_train, Y_train, X_valid, Y_valid, params):
    model = Sequential()
    model.add(
        Dense(75,
              input_dim=X_train.shape[1],
              activation=params['activation'],
              kernel_initializer='normal'))
    model.add(Dropout(params['dropout']))

    hidden_layers(model, params, 2)

    model.add(
        Dense(2,
              activation=params['last_activation'],
              kernel_initializer='normal'))

    model.compile(loss=params['losses'],
                  optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])),
                  metrics=['acc'])

    history = model.fit(X_train,
                        Y_train,
                        validation_data=[X_valid, Y_valid],
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=2)

    # finally we have to make sure that the history object and model are returned
    return history, model
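Several of these examples call hidden_layers(model, params, n), which Talos provides in talos.model.hidden_layers (see the import in Example #23 below). It inserts the requested number of Dense/Dropout blocks based on extra keys in the params dict; the exact keys depend on the Talos version, so the set below is an assumption to verify against the installed release.

# Assumed extra keys consumed by talos.model.hidden_layers; placeholders only.
p = {'hidden_layers': [1, 2, 3],   # how many Dense/Dropout blocks to add
     'shapes': ['brick'],          # layer-width scheme (assumed key)
     'first_neuron': [32, 64],
     'activation': ['relu'],
     'dropout': [0.0, 0.25]}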
Example #3
def iris_model(x_train, y_train, x_val, y_val, params):

    model = Sequential()
    model.add(
        Dense(params['first_neuron'],
              input_dim=x_train.shape[1],
              activation='relu'))

    model.add(Dropout(params['dropout']))
    model.add(Dense(y_train.shape[1], activation=params['last_activation']))

    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['loss'],
                  metrics=['acc'])

    out = model.fit(x_train,
                    y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    validation_data=[x_val, y_val],
                    callbacks=early_stopper(params['epochs'], mode='strict'))

    return out, model
Example #4
def fake_news_model(x_train, y_train, x_val, y_val, params):

    t = Tokenizer(num_words=params['max_words'])
    t.fit_on_texts(x_train)
    train_sequence = t.texts_to_sequences(x_train)
    train_padded = pad_sequences(train_sequence, maxlen=params['max_length'])

    model = Sequential()
    model.add(
        Embedding(params['max_words'], 64, input_length=params['max_length']))
    model.add(Dropout(params['dropout']))
    model.add(LSTM(64))
    model.add(Dense(256, activation=params['activation']))
    model.add(Dropout(params['dropout']))
    model.add(Dense(1, activation=params['last_activation']))

    model.compile(loss=params['losses'],
                  optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])),
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(
        train_padded,
        y_train,
        batch_size=params['batch_size'],
        epochs=150,
        validation_split=0.1,
        callbacks=[EarlyStopping(monitor='val_loss', min_delta=0.00001)],
        verbose=2)

    return history, model
Example #5
def build_model(X_train, Y_train, X_val, Y_val, params):
    model = Sequential()
    model.add(
        Embedding(params['vocab_size'],
                  params['e_size'],
                  input_length=params['seq_len']))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(params['dropout']))
    hidden_layers(model, params, 1)
    model.add(Dense(1, activation=params['last_activation']))

    ## COMPILE
    model.compile(optimizer=params['optimizer'](lr_normalizer(
        params['lr'], params['optimizer'])),
                  loss='binary_crossentropy',
                  metrics=['acc'])

    out = model.fit(X_train,
                    Y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    validation_data=[X_val, Y_val],
                    verbose=2)

    return out, model
Example #6
def dl_model(x_train, y_train, x_val, y_val, params):

    model = Sequential()
    model.add(Conv2D(params['filters_first_conv'],
                     kernel_size=(12, 7),
                     strides=(7, 4),
                     activation='relu',
                     input_shape=(num_frames, num_feats, 1)))
    model.add(Dropout(params['dropout']))
    # input_shape is only meaningful on the first layer, so it is omitted here
    model.add(Conv2D(params['filters_second_conv'],
                     kernel_size=5,
                     strides=(2, 2),
                     activation='relu'))
    model.add(Dropout(params['dropout']))
    model.add(Flatten())
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['loss'],
                  metrics=['acc'])

    # Class weights to compensate for class imbalance
    class_weight = {0: 2,
                    1: 1.5,
                    2: 2,
                    3: 1.0}

    out = model.fit(x_train, y_train,
                    validation_data=[x_val, y_val],
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    class_weight=class_weight)

    return out, model
Example #7
    def get_model(self, x_train, y_train, x_val, y_val, params):

        model = models.Sequential()

        # Input layer with dropout
        model.add(
            layers.Dense(params['first_neuron'],
                         activation=params['activation'],
                         input_shape=(self.n_features_all, )))
        model.add(layers.Dropout(params['dropout']))

        # Hidden layers with dropout
        for i in range(params['hidden_layers']):
            model.add(
                layers.Dense(params['hidden_neuron'],
                             activation=params['activation']))
            model.add(layers.Dropout(params['dropout']))

        # Output layer
        model.add(layers.Dense(5, activation=params['last_activation']))

        # Build model
        model.compile(params['optimizer'](
            lr=lr_normalizer(params['lr'], params['optimizer'])),
                      loss='categorical_crossentropy',
                      metrics=['accuracy', self.dr, self.far])

        history = model.fit(x_train,
                            y_train,
                            validation_data=(x_val, y_val),
                            batch_size=params['batch_size'],
                            epochs=params['epochs'],
                            verbose=0)

        return history, model
Example #8
def hbb_model(train_data, train_y, valid_data, valid_y, param):
    model = Sequential()
    model.add(Dense(param['first_neuron'],
                    input_dim=train_data.shape[1],
                    activation=param['activation'],
                    kernel_initializer='normal'))
    model.add(Dropout(param['dropout']))
    hidden_layers(model, param, 1)
    model.add(Dense(train_y.shape[1],
                    activation=param['last_activation'],
                    kernel_initializer='normal'))
    model.compile(optimizer=param['optimizer'](
        lr=lr_normalizer(param['lr'], param['optimizer'])),
                  loss=param['losses'],
                  metrics=['acc'])

    out = model.fit(train_data, train_y,
                    batch_size=param['batch_size'],
                    epochs=param['epochs'],
                    verbose=0,
                    validation_data=[valid_data, valid_y])
    return out, model
Example #9
def test_lr_normalizer():
    '''Test learning rate normalizer to confirm an invalid type is
    recognized and throws TalosModelError.'''

    from talos.model.normalizers import lr_normalizer
    from talos.utils.exceptions import TalosModelError

    print('Testing lr_normalizer() and invalid optimizer type...')

    # Using a string as a proxy for any invalid class
    # (e.g., a tensorflow-sourced optimizer)
    bad_optimizer = 'test'

    try:
        lr_normalizer(1, bad_optimizer)
    except TalosModelError:
        print('Invalid model optimizer caught successfully!')
    else:
        print('Invalid (string) model optimizer type not caught.')
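The test above only exercises the failure path. For reference, a sketch of the success path, as used throughout these examples: pass a Keras optimizer class and get back a learning rate scaled to that optimizer's usual range. The test's comment implies that in this Talos version a tensorflow-sourced optimizer class would be rejected, so a keras-sourced class is used here; verify against the installed versions.

# Success path of lr_normalizer(), mirroring the model functions above.
from talos.model.normalizers import lr_normalizer
from keras.optimizers import Adam

opt = Adam(lr=lr_normalizer(1, Adam))  # raw search value scaled for Adam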
Example #10
def make_opt_instance(opt, lr_scale):
    # Turn the optimizer into a class if necessary
    opt_trans = {
        'sgd': SGD,
        'rmsprop': RMSprop,
        'adagrad': Adagrad,
        'adadelta': Adadelta,
        'adam': Adam,
        'adamax': Adamax,
        'nadam': Nadam,
    }
    _opt = opt_trans[opt.lower()] if isinstance(opt, str) else opt
    return _opt(lr=lr_normalizer(float(lr_scale), _opt))
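A hypothetical usage sketch, assuming the keras optimizer classes (SGD through Nadam) are imported at module level as the lookup table implies:

opt_a = make_opt_instance('adam', 1.0)   # string names go through opt_trans
opt_b = make_opt_instance(Adam, 0.5)     # optimizer classes are used as-is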
Example #11
def cnn_model2(x_train, y_trainHot, x_val, y_val, hyperP):
    INPUT_SHAPE = (80, 80, 3)
    num_kernels = hyperP['num_kernels']
    batch_size = hyperP['batch_size']
    cnn_layers = hyperP['cnn_layers']
    drop_out = hyperP['drop_out']
    optimizer = hyperP['optimizer']
    learning_rate = hyperP['learning_rate']
    OPTIMIZER = optimizer(lr=lr_normalizer(learning_rate, optimizer))

    kernel_initializer = hyperP['kernel_initializer']
    activation = hyperP['activation']

    model = Sequential()
    model.add(
        Conv2D(num_kernels,
               kernel_size=(3, 3),
               kernel_initializer=kernel_initializer,
               input_shape=INPUT_SHAPE,
               data_format='channels_last'))
    model.add(Activation(activation))
    model.add(MaxPool2D(pool_size=(2, 2)))
    #model.add(Conv2D(num_kernels, kernel_size=(3,3), kernel_initializer= kernel_initializer, input_shape=INPUT_SHAPE, data_format='channels_last'))
    #model.add(Activation(activation))
    for i in range(0, cnn_layers):
        model.add(
            Conv2D(num_kernels,
                   kernel_size=(3, 3),
                   kernel_initializer=kernel_initializer))
        model.add(Activation(activation))

    model.add(BatchNormalization())
    model.add(Dropout(drop_out))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(drop_out))
    model.add(Dense(5, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=OPTIMIZER,
                  metrics=['accuracy'])

    early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=5)
    out = model.fit(x_train,
                    y_trainHot,
                    batch_size=batch_size,
                    epochs=10,
                    verbose=0,
                    validation_split=0.2,
                    callbacks=[early_stopping_monitor])
    return out, model
Example #12
def telos_baseline_model(x_train, y_train, x_val, y_val, params):
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, GaussianNoise, BatchNormalization
    from keras.constraints import max_norm

    model = Sequential()
    model.add(
        Dropout(params['first_dropout'],
                input_shape=(Options.InputFeatureSize, )))
    model.add(
        Dense(params['first_neuron'],
              kernel_initializer='normal',
              activation=params['activation']))
    model.add(Dropout(params['second_dropout']))
    #kernel_constraint=max_norm(3)
    #model.add(BatchNormalization())
    #rms = RMSprop(lr = 0.00050)
    model.add(
        Dense(1,
              kernel_initializer='normal',
              activation=params['last_activation']))

    model.compile(
        loss=params['losses'],
        # here we apply the learning-rate normalizer function from Talos
        optimizer=params['optimizer'](
            lr=lr_normalizer(params['lr'], params['optimizer'])),
        metrics=['mae', 'mse'])

    callbacks = []

    if Options.KerasEarlyStopping:
        early_stop = keras.callbacks.EarlyStopping(
            monitor='loss',
            min_delta=0,
            patience=Options.KerasEarlyStoppingPatience,
            verbose=Options.KerasVerbose,
            mode='auto')
        callbacks.append(early_stop)

    out = model.fit(x_train,
                    y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=Options.KerasVerbose,
                    callbacks=callbacks
                    #,validation_data=[x_val, y_val]
                    )

    return out, model
Example #13
def dae_model_hl(x_train, y_train, x_val, y_val, params):
    #print(params['x_train_noise'].shape)
    print(x_train.shape)
    print("masking training")
    x_train_noise = mask_function(dataframe=x_train,
                                  noise=float(params['noise']),
                                  batch_sizes=300)  #masking training
    print("masking validation")
    x_val_noise = mask_function(dataframe=x_val,
                                noise=float(params['noise']),
                                batch_sizes=300)  #masking validation

    print("building autoencoder network")
    model = Sequential()
    model.add(
        Dense(params['first_neuron'],
              activation=params['activation'],
              input_shape=(x_train.shape[1], )))
    model.add(Dropout(params['dropout']))

    #m.add(Dense(128,  activation='elu'))
    hidden_layers(model, params, 1)
    model.add(
        Dense(params['embedding_size'],
              activation=params['activation'],
              name="bottleneck"))
    hidden_layers(model, params, 1)
    model.add(Dense(params['first_neuron'], activation=params['activation']))
    #m.add(Dense(512,  activation='elu'))
    model.add(Dropout(params['dropout']))

    model.add(Dense(x_train.shape[1], activation=params['last_activation']))
    # m.compile(loss='mean_squared_error', optimizer=params['optimizer'])
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['loss'],
                  metrics=['accuracy'])
    print("training neural network")
    out = model.fit(x_train,
                    x_train_noise,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    verbose=0,
                    validation_data=[x_val, x_val_noise],
                    callbacks=early_stopper(params['epochs'], mode='moderate'))
    # callbacks=early_stopper(params['epochs'], mode='strict')
    return out, model
Example #14
def iris_model(x_train, y_train, x_val, y_val, params):

    model = Sequential()
    model.add(Dense(32, input_dim=4, activation=params['activation']))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['losses'])
    # metrics=['acc']

    out = model.fit(x_train,
                    y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    validation_data=[x_val, y_val],
                    verbose=0)

    return out, model
Example #15
def deletion_model_aug(x_train, y_train, x_val, y_val, params):
    if model_type == "VGG16":
        base_model = VGG16(weights='imagenet', include_top=False)
    elif model_type == "Res50":
        base_model = ResNet50(weights='imagenet', include_top=False)
    else:
        base_model = InceptionV3(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='sigmoid')(x)
    x = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=base_model.input, outputs=x)
    # train the newly added top layers first, keeping the base model frozen;
    # this restriction can be relaxed once the top layers are trained
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=binary_crossentropy,
                  metrics=['accuracy', f1score, precision, recall])
    # Perform Data Augmentation while training
    batch_size = params['batch_size']
    out = model.fit_generator(train_generator.flow(x_train,
                                                   y_train,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                              steps_per_epoch=x_train.shape[0] // batch_size,
                              epochs=params['epochs'],
                              validation_data=val_generator.flow(
                                  x_val,
                                  y_val,
                                  batch_size=batch_size,
                                  shuffle=False),
                              validation_steps=x_val.shape[0] // batch_size,
                              class_weight=get_class_weights(y_train)
                              if params['class_weights'] else None,
                              callbacks=[
                                  EarlyStopping(monitor="val_f1score",
                                                patience=7,
                                                min_delta=0.001,
                                                mode='max')
                              ])
    return out, model
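This example assumes module-level train_generator and val_generator objects. A minimal sketch of what they might look like with Keras's ImageDataGenerator follows; the augmentation settings are placeholders, not the original values.

from keras.preprocessing.image import ImageDataGenerator

# Placeholder augmentation; only the training generator augments.
train_generator = ImageDataGenerator(rotation_range=15,
                                     horizontal_flip=True,
                                     rescale=1. / 255)
val_generator = ImageDataGenerator(rescale=1. / 255)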
Example #16
def import_model(X_train, y_train, X_val, y_val, params):

    #Load model architecture
    model = get_model(X_train,
                      y_train,
                      params['dense_one'],
                      params['dense_two'],
                      params['dense_three'],
                      params['activation'],
                      params['last_activation'],
                      initialize=True)

    #Add multi-gpu support if available
    if get_available_gpus() > 1:
        model = multi_gpu(model)

    #Compile model
    model.compile(loss=params['losses'],
                  metrics=[
                      'mean_squared_error', 'mean_absolute_error',
                      'mean_absolute_percentage_error'
                  ],
                  optimizer=params['optimizer'](
                      lr=lr_normalizer(params['lr'], params['optimizer'])))

    #Set early stopping on a metric that is actually computed during training
    #(the original monitored 'categorical_accuracy', which is never logged here)
    earlystop = EarlyStopping(monitor='val_loss',
                              min_delta=0.1,
                              patience=9,
                              mode='auto')

    #Train model
    out = model.fit(X_train,
                    y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    validation_data=[X_val, y_val],
                    verbose=0,
                    callbacks=[earlystop])

    return out, model
Example #17
def emotions_model(dummyXtrain, dummyYtrain, dummyXval, dummyYval, params):

    model = Sequential()
    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               kernel_initializer=params['kernel_initializer']))
    model.add(Activation(params['activation_1']))
    model.add(Conv2D(params['neurons_layer_2'], (3, 3)))
    model.add(Activation(params['activation_2']))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(params['dropout']))
    model.add(Conv2D(params['neurons_layer_3'], (3, 3), padding='same'))
    model.add(Activation(params['activation_3']))
    model.add(Conv2D(params['neurons_layer_4'], (3, 3)))
    model.add(Activation(params['activation_4']))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(params['dropout']))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation(params['activation_5']))
    model.add(Dropout(params['dropout']))
    model.add(Dense(7, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer'](
        lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['loss'],
                  metrics=['accuracy'])

    history = model.fit(dummyXtrain,
                        dummyYtrain,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        validation_data=(dummyXval, dummyYval),
                        callbacks=[
                            ModelCheckpoint("conv2d_mwilchek.hdf5",
                                            monitor="val_loss",
                                            save_best_only=True),
                            early_stopper(params['epochs'], mode='strict')
                        ])

    return history, model
Example #18
    def hyperparameter_Scan(self, normed_train_dataset, train_labels,
                            test_data, test_labels, param):
        # previously: patience_model, nOutput, nNodes=16, actFn='relu',
        # EPOCHS=100, learnRate=0.001, momentumN=0.0, lastActFn='linear',
        # regCons=0.0, doRate=0.0
        input_layer = Input(shape=(len(normed_train_dataset.keys()), ))
        dense_1 = Dense(param['nNodes'],
                        activation=param['activation'],
                        activity_regularizer=regularizers.l2(
                            param['regCons']))(input_layer)
        # dense_1 = Dropout(param['dropRate'])(dense_1)
        dense_2 = Dense(param['nNodes'],
                        activation=param['activation'],
                        activity_regularizer=regularizers.l2(
                            param['regCons']))(dense_1)
        dense_2 = Dropout(param['dropRate'])(dense_2)
        dense_3 = Dense(param['nNodes'],
                        activation=param['activation'],
                        activity_regularizer=regularizers.l2(
                            param['regCons']))(dense_2)
        # dense_3 = Dropout(param['dropRate'])(dense_3)
        dense_4 = Dense(param['nNodes'],
                        activation=param['activation'],
                        activity_regularizer=regularizers.l2(
                            param['regCons']))(dense_3)
        dense_4 = Dropout(param['dropRate'])(dense_4)
        # separate outputs for density and GSD for the custom loss function
        output_1 = Dense(1,
                         activation=param['last_activation'],
                         name='Density',
                         activity_regularizer=regularizers.l2(
                             param['regCons']))(dense_4)
        # output_2 = Dense(8, activation='sigmoid', name='out2',
        #                  activity_regularizer=regularizers.l1(regCons))(dense_4)
        output_2 = Dense(8,
                         activation=param['last_activation'],
                         name='GSD',
                         activity_regularizer=regularizers.l2(
                             param['regCons']))(dense_4)
        
        train_labels_copy = pd.DataFrame.copy(train_labels)
        label1 = train_labels['Granule_density']
        labels = ['Bin1','Bin2','Bin3','Bin4','Bin5','Bin6','Bin7','Coarse']
        
        # label2 = ['Bin1','Bin2','Bin3','Bin4','Bin5','Bin6','Bin7','Coarse']
        label2 = pd.DataFrame([train_labels_copy.pop(i) for i in labels]).T
        model = Model(inputs=[input_layer], outputs=[output_1,output_2])
        
        model.compile(optimizer=param['optimizer'](
            lr=lr_normalizer(param['learningRate'], param['optimizer'])),
                      loss=self.lossFunc_DensityStde(output_1),
                      metrics=['mae', 'mse'])
        # model.compile(optimizer=Adam(learning_rate=0.0001, beta_1=0.1,
        #                              beta_2=0.2, epsilon=0.001, amsgrad=True),
        #               loss=self.lossFunc_DensityStde(output_1),
        #               metrics=['mae', 'mse'])
        
        w1 = np.full(len(self.normed_train_dataset),1)
        w2 = np.full(len(self.normed_train_dataset),1)
        # print(model.summary())
        test_label_copy = pd.DataFrame.copy(test_labels)
        tlabel1 = test_labels['Granule_density']
        tlabel2 = pd.DataFrame([test_label_copy.pop(i) for i in labels]).T

        out = model.fit(normed_train_dataset, [label1, label2],
                        epochs=param['epochs'],
                        verbose=0,
                        validation_data=[test_data, [tlabel1, tlabel2]],
                        sample_weight={'Density': w1, 'GSD': w2},
                        use_multiprocessing=True)
        # for layer in model.layers: print(layer.get_config(), layer.get_weights())
        return out, model
Example #19
def autoFNN(X_train, Y_train, X_val, Y_val, params):
    model = Sequential()
    model.add(
        Embedding(params['vocab_size'],
                  params['e_size'],
                  input_length=params['seq_len']))
    model.add(Flatten())
    hidden_layers(model, params, 1)
    model.add(Dense(1, activation=params['last_activation']))

    model.compile(optimizer=params['optimizer'](lr_normalizer(
        params['lr'], params['optimizer'])),
                  loss='binary_crossentropy',
                  metrics=['acc'])

    out = model.fit(X_train,
                    Y_train,
                    batch_size=params['batch_size'],
                    epochs=params['epochs'],
                    validation_data=[X_val, Y_val],
                    verbose=2)

    return out, model
Example #20
def build_model(x_train, y_train, x_val, y_val, params):

    model = keras.Sequential()
    model.add(keras.layers.Dense(10, activation=params['activation'],
                                 input_dim=x_train.shape[1],
                                 use_bias=True,
                                 kernel_initializer='glorot_uniform',
                                 bias_initializer='zeros',
                                 kernel_regularizer=keras.regularizers.l1_l2(l1=params['l1'], l2=params['l2']),
                                 bias_regularizer=None))

    model.add(keras.layers.Dropout(params['dropout']))

    # If we want to also test for number of layers and shapes, that's possible
    hidden_layers(model, params, 1)

    # Then we finish again with completely standard Keras way
    model.add(keras.layers.Dense(1, activation=params['activation'], use_bias=True,
                                 kernel_initializer='glorot_uniform',
                                 bias_initializer='zeros',
                                 kernel_regularizer=keras.regularizers.l1_l2(l1=params['l1'], l2=params['l2']),
                                 bias_regularizer=None))

    model.compile(optimizer=params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer'])),
                  loss=params['losses'],
                  metrics=['mse'])

    history = model.fit(x_train, y_train,
                        validation_data=[x_val, y_val],
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        callbacks=[early_stopper(epochs=params['epochs'], mode='moderate')],
                        #callbacks=[early_stopper(epochs=params['epochs'], mode='strict')],
                        verbose=0)

    # Finally we have to make sure that history object and model are returned
    return history, model
Example #21
def smallRNN(x_train, y_train, x_test, y_test, params):

    #HIDDEN_UNITS = 5

    model = Sequential()

    model.add(
        Bidirectional(LSTM(params["hidden_unit"],
                           activation='tanh',
                           inner_activation='sigmoid',
                           kernel_constraint=maxnorm(2),
                           kernel_initializer=KERNEL_INITIAL,
                           return_sequences=True,
                           dropout=0.3),
                      input_shape=x_train.shape[1:],
                      merge_mode='concat'))

    #model.add(AveragePooling1D(pool_size=400,strides=400))

    #model.add(Bidirectional(LSTM(HIDDEN_UNITS,activation='tanh',inner_activation='sigmoid',kernel_constraint = maxnorm(2),kernel_initializer=KERNEL_INITIAL,return_sequences=True),input_shape=x_train.shape[1:],merge_mode='concat'))

    #model.add(AveragePooling1D(pool_size=400,strides=400))

    model.add(Flatten())

    model.add(Dense(1))

    # a sigmoid classifier

    model.add(Activation("sigmoid"))

    #model.compile(loss=LOSS, optimizer = params["optimizer"](lr_normalizer(params["lr"], params['optimizer'])), metrics =METRICS)

    filepath = "SmallRNN" + str(params["hidden_unit"]) + "best_smallRNN.hdf5"

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True)

    early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=8)

    model.compile(loss=LOSS,
                  optimizer=params["optimizer"](lr_normalizer(
                      params["lr"], params['optimizer'])),
                  metrics=METRICS)

    print(model.summary())

    out = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        epochs=NB_EPOCH,
        verbose=VERBOSE,
        validation_split=0.2,
        callbacks=[checkpoint, early_stopping_monitor]
    )  #,roc_callback(training_data=(x_train, y_train),validation_data=(x_test, y_test))]) #,callbacks=callbacks_list)

    ## if you want early stopping

    #Tuning = model.fit(Train_Predictors,Train_class,batch_size=BATCH_SIZE, epochs = NB_EPOCH, verbose = VERBOSE,

    #validation_split = VALIDATION_SPLIT,callbacks=[early_stopping_monitor,checkpoint])

    #finalModel = load_model(filepath)

    #deepPredict(finalModel,Test_Predictors,Test_class)

    return out, model  #,roc_train,roc_test,acc_train,acc_test
Example #22
def miniCNN_RNN(x_train, y_train, x_test, y_test, params):

    model = Sequential()

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2),
               input_shape=x_train.shape[1:3]))

    model.add(Activation("relu"))

    model.add(MaxPooling1D(pool_size=10, strides=10))

    #model.add(GlobalMaxPooling1D())

    #model.add(Flatten())

    #model.add(Reshape((50,1))) # shape becomes (batch_size,200,1)

    model.add(
        Bidirectional(
            LSTM(params["hidden_unit"],
                 activation='tanh',
                 inner_activation='sigmoid',
                 kernel_constraint=maxnorm(2),
                 kernel_initializer=KERNEL_INITIAL,
                 return_sequences=True,
                 dropout=0.3)))

    #model.add(GlobalMaxPooling1D())

    model.add(Flatten())

    #model.add(Dense(25))

    #model.add(Activation("relu"))

    #model.add(GlobalMaxPooling1D())

    model.add(Dense(1))

    # a sigmoid classifier

    model.add(Activation("sigmoid"))

    model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=METRICS)

    #filepath="largeRNN_dropout_"+str(DROP_OUT)+"_{epoch:02d}_{val_acc:.2f}.hdf5"

    filepath = "best_smallCNN_RNN_dropout_" + str(
        params["hidden_unit"]) + ".hdf5"

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')  # val_loss must be minimized

    early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=8)

    #callbacks_list = [checkpoint]

    model.compile(loss=LOSS,
                  optimizer=params["optimizer"](lr_normalizer(
                      params["lr"], params['optimizer'])),
                  metrics=METRICS)

    print(model.summary())

    #Tuning = model.fit(x_train,y_train,batch_size=BATCH_SIZE, epochs = NB_EPOCH, verbose = VERBOSE,

    #                        validation_split = 0.2) #(x_test,y_test),callbacks=[checkpoint,early_stopping_monitor,roc_callback(training_data=(x_train, y_train),validation_data=(x_test, y_test))]) #,callbacks=callbacks_list)

    out = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        epochs=NB_EPOCH,
        verbose=VERBOSE,
        validation_split=VALIDATION_SPLIT,
        callbacks=[checkpoint, early_stopping_monitor]
    )  #,roc_callback(training_data=(x_train, y_train),validation_data=(x_test, y_test))]) #,callbacks=callbacks_list)

    return out, model
Example #23
    def _create_input_model(self, x_train, y_train, x_val, y_val, params):

        import wrangle as wr

        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Dropout, Flatten
        from tensorflow.keras.layers import LSTM, Conv1D, SimpleRNN, Dense, Bidirectional

        model = Sequential()

        if params['network'] != 'dense':
            x_train = wr.array_reshape_conv1d(x_train)
            x_val = wr.array_reshape_conv1d(x_val)

        if params['network'] == 'conv1d':
            model.add(Conv1D(params['first_neuron'], x_train.shape[1]))
            model.add(Flatten())

        elif params['network'] == 'lstm':
            model.add(LSTM(params['first_neuron']))

        if params['network'] == 'bidirectional_lstm':
            model.add(Bidirectional(LSTM(params['first_neuron'])))

        elif params['network'] == 'simplernn':
            model.add(SimpleRNN(params['first_neuron']))

        elif params['network'] == 'dense':
            model.add(
                Dense(params['first_neuron'],
                      input_dim=x_train.shape[1],
                      activation='relu',
                      kernel_initializer=params['kernel_initializer']))

        model.add(Dropout(params['dropout']))

        # add hidden layers to the model
        from talos.model.hidden_layers import hidden_layers
        hidden_layers(model, params, 1)

        # get the right activation and last_neuron based on task
        from talos.model.output_layer import output_layer
        activation, last_neuron = output_layer(self.task,
                                               params['last_activation'],
                                               y_train, y_val)

        model.add(
            Dense(last_neuron,
                  activation=activation,
                  kernel_initializer=params['kernel_initializer']))

        # bundle the optimizer with learning rate changes
        from talos.model.normalizers import lr_normalizer
        optimizer = params['optimizer'](
            lr=lr_normalizer(params['lr'], params['optimizer']))

        # compile the model
        model.compile(optimizer=optimizer,
                      loss=params['losses'],
                      metrics=self.metrics)

        # fit the model
        out = model.fit(
            x_train,
            y_train,
            batch_size=params['batch_size'],
            epochs=params['epochs'],
            verbose=0,
            callbacks=[self.callback(self.experiment_name, params)],
            validation_data=[x_val, y_val])

        # pass the output to Talos
        return out, model
Example #24
def largeRNN(x_train, y_train, x_test, y_test, params):

    # HIDDEN_UNITS = 20 (unused; params["hidden_unit"] is used instead)

    model = Sequential()

    model.add(
        LSTM(params["hidden_unit"],
             activation='tanh',
             inner_activation='sigmoid',
             kernel_constraint=maxnorm(2),
             kernel_initializer=KERNEL_INITIAL,
             return_sequences=True,
             dropout=0.3,
             input_shape=x_train.shape[1:]))

    model.add(MaxPooling1D(pool_size=10, strides=5))

    model.add(
        LSTM(params["hidden_unit"],
             activation='tanh',
             inner_activation='sigmoid',
             kernel_constraint=maxnorm(2),
             kernel_initializer=KERNEL_INITIAL,
             return_sequences=True,
             dropout=0.3))

    model.add(
        LSTM(params["hidden_unit"],
             activation='tanh',
             inner_activation='sigmoid',
             kernel_constraint=maxnorm(2),
             kernel_initializer=KERNEL_INITIAL,
             return_sequences=True))

    model.add(MaxPooling1D(pool_size=10, strides=5))

    model.add(
        LSTM(params["hidden_unit"],
             activation='tanh',
             inner_activation='sigmoid',
             kernel_constraint=maxnorm(2),
             kernel_initializer=KERNEL_INITIAL,
             return_sequences=True))

    model.add(Flatten())

    model.add(Dense(1))

    # a sigmoid classifier

    model.add(Activation("sigmoid"))

    model.compile(loss=LOSS,
                  optimizer=params["optimizer"](lr_normalizer(
                      params["lr"], params['optimizer'])),
                  metrics=METRICS)

    #filepath="largeRNN_dropout_"+str(DROP_OUT)+"_{epoch:02d}_{val_acc:.2f}.hdf5"

    filepath = "largeRNN" + "best_largeRNN_dropout_" + str(
        params["hidden_unit"]) + ".hdf5"

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')  # val_loss must be minimized

    early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=8)

    #callbacks_list = [checkpoint]

    #model.compile(loss=LOSS, optimizer = 'Adam', metrics =METRICS)

    print(model.summary())

    #Tuning = model.fit(x_train,y_train,batch_size=BATCH_SIZE, epochs = NB_EPOCH, verbose = VERBOSE,

    #                        validation_split = 0.2) #(x_test,y_test),callbacks=[checkpoint,early_stopping_monitor,roc_callback(training_data=(x_train, y_train),validation_data=(x_test, y_test))]) #,callbacks=callbacks_list)

    out = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        epochs=NB_EPOCH,
        verbose=VERBOSE,
        validation_data=(x_test, y_test),
        callbacks=[checkpoint, early_stopping_monitor]
    )  #,roc_callback(training_data=(x_train, y_train),validation_data=(x_test, y_test))]) #,callbacks=callbacks_list)

    return out, model
Example #25
def verylargeCNN(x_train, y_train, x_test, y_test, params):

    model = Sequential()

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2),
               input_shape=x_train.shape[1:3]))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.3))

    model.add(MaxPooling1D())

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(MaxPooling1D())

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(MaxPooling1D())

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(
        Conv1D(params["num_kernel"],
               kernel_size=params["kernel_size"],
               kernel_initializer=KERNEL_INITIAL,
               kernel_constraint=maxnorm(2)))

    model.add(Activation("relu"))

    model.add(BatchNormalization())

    model.add(Dropout(0.5))

    model.add(GlobalMaxPooling1D())

    model.add(Dense(1))

    model.add(Activation("sigmoid"))

    filepath = "VerylargeCNN" + "best_veryLargeCNN_dropout_" + str(
        params["kernel_size"]) + ".hdf5"

    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='auto')

    early_stopping_monitor = EarlyStopping(monitor='val_loss', patience=8)

    model.compile(loss=LOSS,
                  optimizer=params["optimizer"](lr_normalizer(
                      params["lr"], params['optimizer'])),
                  metrics=METRICS)

    print(model.summary())

    out = model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        epochs=NB_EPOCH,
        verbose=VERBOSE,
        validation_split=0.2,
        callbacks=[checkpoint, early_stopping_monitor]
    )  #,roc_callback(training_data=(x_train, y_train),validation_data=(x_test, y_test))]) #,callbacks=callbacks_list)

    #finalmodel = load_model(filepath)

    return out, model  #,roc_train,roc_test,acc_train,acc_test
Example #26
    def _create_input_model(self, x_train, y_train, x_val, y_val, params):

        model = Sequential()

        if params['network'] != 'dense':
            x_train = array_reshape_conv1d(x_train)
            x_val = array_reshape_conv1d(x_val)

        if params['network'] == 'conv1d':
            model.add(Conv1D(params['first_neuron'], x_train.shape[1]))
            model.add(Flatten())

        elif params['network'] == 'lstm':
            model.add(LSTM(params['first_neuron']))

        if params['network'] == 'bidirectional_lstm':
            model.add(Bidirectional(LSTM(params['first_neuron'])))

        elif params['network'] == 'simplernn':
            model.add(SimpleRNN(params['first_neuron']))

        elif params['network'] == 'dense':
            model.add(Dense(params['first_neuron'],
                            input_dim=x_train.shape[1],
                            activation='relu'))

        model.add(Dropout(params['dropout']))

        # add hidden layers to the model
        hidden_layers(model, params, 1)

        # output layer (this is sketchy)
        try:
            last_neuron = y_train.shape[1]
        except IndexError:
            if len(np.unique(y_train)) == 2:
                last_neuron = 1
            else:
                last_neuron = len(np.unique(y_train))

        model.add(Dense(last_neuron,
                        activation=params['last_activation']))

        # bundle the optimizer with learning rate changes
        optimizer = params['optimizer'](lr=lr_normalizer(params['lr'],
                                                         params['optimizer']))

        # compile the model
        model.compile(optimizer=optimizer,
                      loss=params['losses'],
                      metrics=['acc'])

        # fit the model
        out = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val])

        # pass the output to Talos
        return out, model
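Both _create_input_model variants branch on params['network'], so a scan dictionary has to cover that key alongside the usual compile and fit settings. An illustrative search space follows; all values are placeholders, and 'shapes' is an assumed key for hidden_layers as noted earlier.

# Illustrative search space for the generic input model above.
from keras.optimizers import Adam

p = {'network': ['dense', 'conv1d', 'lstm', 'bidirectional_lstm', 'simplernn'],
     'first_neuron': [16, 32],
     'hidden_layers': [0, 1, 2],
     'shapes': ['brick'],            # assumed key for hidden_layers
     'activation': ['relu'],
     'dropout': [0.0, 0.25],
     'last_activation': ['sigmoid'],
     'losses': ['binary_crossentropy'],
     'optimizer': [Adam],
     'lr': [0.5, 1],
     'batch_size': [32],
     'epochs': [10]}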