Example #1
def create_model(train_gen, val_gen):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    base_model = Xception(include_top=False, weights='imagenet')

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024,
              activation='relu',
              kernel_regularizer=regularizers.l2({{uniform(0, 0.1)}}))(x)
    x = Dropout({{uniform(0, 1)}})(x)
    predictions = Dense(2, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer='adam')

    model.fit_generator(train_gen,
                        steps_per_epoch=1,
                        epochs=1,
                        verbose=2,
                        validation_data=val_gen)
    score, acc = model.evaluate_generator(val_gen, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
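For context: hyperas consumes a model-providing function like the one above, swaps in the {{uniform(...)}} and {{choice(...)}} templates, and lets hyperopt search the resulting space. A minimal driver sketch (the data function here is an assumption, not shown in these examples):

from hyperas import optim
from hyperas.distributions import choice, uniform
from hyperopt import Trials, tpe

# 'data' is a zero-argument function you define that returns the
# arguments create_model expects (here: train_gen, val_gen)
best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print('Best run:', best_run)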
Example #2
def create_model(X_train, Y_train, X_valid, Y_valid, X_test, Y_test):
    # hyperparams of the model
    n_layer1 = {{choice([128, 256, 512])}}
    n_layer2 = {{choice([128, 256, 512])}}
    dropout_1 = {{uniform(0, 0.5)}}
    dropout_2 = {{uniform(0, 0.5)}}
    optim = {{choice(['rmsprop', 'adam'])}}
    n_batch = {{choice([64, 128, 256])}}

    # an LSTM model that can learn character sequences
    model = Sequential()
    model.add(
        LSTM(n_layer1,
             input_shape=(X_train.shape[1], X_train.shape[2]),
             return_sequences=True))
    model.add(Dropout(dropout_1))
    model.add(LSTM(n_layer2))
    model.add(Dropout(dropout_2))
    model.add(Dense(Y_train.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=optim)

    # training the model
    result = model.fit(X_train,
                       Y_train,
                       batch_size=n_batch,
                       epochs=10,
                       verbose=2,
                       validation_data=(X_valid, Y_valid),
                       shuffle=True)

    # take the lowest validation loss across epochs as the objective
    validation_loss = np.amin(result.history['val_loss'])
    return {'loss': validation_loss, 'status': STATUS_OK, 'model': model}
Example #3
def create_model(x_train, y_train, x_test, y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(16384, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([16, 32, 64, 128, 256, 512])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if {{choice(['three', 'four'])}} == 'four':
        model.add(Dense(100))
        # We can also choose between complete sets of layers
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(2))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    result = model.fit(x_train,
                       y_train,
                       batch_size={{choice([30, 48, 64, 128])}},
                       epochs=8,
                       verbose=2,
                       validation_split=0.3)

    #get the highest validation accuracy of the training epochs
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
Example #4
def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 125
    BATCH_SIZE = 128
    
    inputs = Input(shape=(304, ))
    x = Dropout({{uniform(0.1, 0.5)}})(inputs)
    
    x = Dense({{choice([64, 128, 256])}})(x)
    x = Activation("relu")(x)
    x = Dropout({{uniform(0.1, 0.5)}})(x)
    
    x = Dense({{choice([64, 128, 256])}})(x)
    x = Activation("relu")(x)
    x = Dropout({{uniform(0.1, 0.5)}})(x)
    
    x = Dense({{choice([64, 128, 256])}})(x)
    x = Activation("relu")(x)
    x = Dropout({{uniform(0.1, 0.5)}})(x)
        
    predictions = Dense(1)(x)

    model = Model(inputs=[inputs], outputs=[predictions])

    model.compile(loss="mse", optimizer={{choice(["adam", "RMSprop"])}})

    model.fit(X_train, y_train,
              batch_size=BATCH_SIZE, epochs=NUM_EPOCHS,
              verbose=2,
              validation_data=(X_test, y_test))

    score = model.evaluate(X_test, y_test, verbose=0)
    # score is the MSE here, so minimize it directly rather than its negative
    return {'loss': score, 'status': STATUS_OK, 'model': model}
Example #5
def model(X_train, Y_train, X_test, Y_test):

    model = Sequential()
    model.add(Dense({{choice([256, 512, 1024])}}, input_shape=(7, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if {{choice(['three', 'four'])}} == 'four':
        model.add(Dense({{choice([256, 512, 1024])}}))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['accuracy'])

    model.fit(X_train,
              Y_train,
              batch_size={{choice([5, 10, 15])}},
              epochs=20,
              verbose=2,
              validation_split=0.3)
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #6
def keras_model():
    from keras.regularizers import l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from keras.layers.recurrent import LSTM
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.embeddings import Embedding
    from keras.regularizers import l1, activity_l1
    import pickle
    embeddings = pickle.load( open( "/data/dpappas/personality/emb.p", "rb" ) )
    train = pickle.load( open( "/data/dpappas/personality/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train, no_of_folds)
    train_data = join_folds(folds, list(folds.keys())[:-1])
    validation_data = folds[list(folds.keys())[-1]]
    max_input_length = validation_data['features'].shape[1]
    LSTM_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    is_trainable = {{choice([ True, False ])}}
    D = embeddings.shape[-1]
    out_dim = 5
    model = Sequential()
    model.add(Embedding(input_dim = embeddings.shape[0], output_dim=D, weights=[embeddings], trainable=is_trainable, input_length = max_input_length))
    model.add(LSTM(LSTM_size, activation = 'sigmoid'))
    model.add(Dense(Dense_size, activation='sigmoid',
                    W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(out_dim, activation='linear',
                    W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    # fchollet reportedly recommends rmsprop here, while another source suggests adam
    model.compile(loss='mse', optimizer=opt)
    model.fit(train_data['features'], train_data['labels'], nb_epoch=50, verbose=2)
    score = model.evaluate( validation_data['features'], validation_data['labels'])
    #score = model.evaluate( train_data['features'], train_data['labels'])
    return {'loss': score, 'status': STATUS_OK}
Example #7
def tuned_model(x_t, y_t, x_v, y_v):
    model = Sequential()
    model.add(
        Dense(72, input_dim=72, kernel_initializer='normal',
              activation='relu'))
    model.add(Dropout({{uniform(0.15, 0.3)}}))

    for i in range(1, {{choice([8, 10, 12])}}):
        model.add(
            Dense({{choice([36, 72])}},
                  kernel_initializer='normal',
                  activation='relu'))
        model.add(Dropout({{uniform(0.15, 0.3)}}))

    model.add(Dense(1, activation='relu'))

    model.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer='adam')

    model.fit(x_t,
              y_t,
              batch_size={{choice([8, 10, 15])}},
              epochs=100,
              verbose=2)
    score, acc = model.evaluate(x_v, y_v, verbose=0)
    print("Test accuracy: ", acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #8
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(1, 64, 64, 64)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Flatten())

    model.add(Dense(2))
    model.add(Activation('softmax'))

    #rms = RMSprop()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    nb_epoch = 1
    batch_size = 50

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #9
def model(trainX, trainY, valX, valY, vocab_size, GRU_config, report_folder_GRU, window_size):
    logger = logging.getLogger(__name__)

    contextEmbedding = Embedding(input_dim=vocab_size, output_dim={{choice([64, 128, 256])}},
                                 input_length=window_size)

    tensor = Input(shape=(window_size,))
    c = contextEmbedding(tensor)
    c = Dropout({{uniform(0, 0.5)}})(c)
    c = GRU({{choice([50, 100, 200])}}, recurrent_dropout={{uniform(0, 0.5)}})(c)
    c = Dropout({{uniform(0, 0.5)}})(c)
    c = Dense({{choice([70, 100, 200])}}, activation={{choice(['relu', 'elu', 'selu'])}})(c)
    c = Dropout({{uniform(0, 0.5)}})(c)

    answer = Dense(vocab_size, activation='softmax')(c)

    model = Model(tensor, answer)
    optimizer = Adam(lr={{choice([0.001, 3e-4])}})
    model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=['acc'])

    early_stopping = EarlyStopping(monitor='val_loss',
                                   mode='min',
                                   patience=5)

    model.fit(trainX, trainY,
              batch_size={{choice([64, 128])}},
              epochs={{choice([10, 15, 20, 30])}},
              verbose=2,
              validation_data=(valX, valY),
              callbacks=[early_stopping]
              )
    score, acc = model.evaluate(valX, valY, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #10
def create_model(x_train, y_train, x_valid, y_valid):
    model = Sequential()
    model.add(Dense(512, input_shape=(69, ), activation='relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(
        Dense({{choice([256, 512, 1024])}},
              activation={{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if {{choice(['three', 'four'])}} == 'four':
        model.add(
            Dense({{choice([128, 256])}},
                  activation={{choice(['relu', 'sigmoid'])}}))

    model.add(Dense(1))

    model.compile(optimizer={{choice(['rmsprop', 'adam', 'sgd'])}}, loss='mse')

    history = model.fit(x_train,
                        y_train,
                        batch_size={{choice([64, 128])}},
                        epochs={{choice([10, 15, 20])}},
                        validation_split=.1)

    # take the lowest validation loss across epochs as the objective
    validation_loss = np.amin(history.history['val_loss'])
    print('Best validation loss of epochs:', validation_loss)
    return {'loss': validation_loss, 'status': STATUS_OK, 'model': model}
Example #11
def model(x_train, y_train, x_val, y_val, embedding_dim, maxlen, max_words):

    model = Sequential()
    model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
    model.add(Flatten())
    model.add(Dense({{choice([8, 16, 32, 64, 128])}}, activation='relu'))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([4, 8, 16, 32, 64])}}, activation='relu'))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([2, 4, 8, 16, 32])}}, activation='relu'))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))

    #model.summary()

    model.compile(optimizer='adam',
        loss='binary_crossentropy',
        metrics=['accuracy'])

    model.fit(x_train, y_train,
        epochs=50,
        batch_size=32,
        verbose=2,
        validation_data=(x_val, y_val))
    score, acc = model.evaluate(x_val, y_val, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #12
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
                  metrics=['accuracy'])

    model.fit(X_train,
              Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK}
Example #13
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #14
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop
    import pickle  # needed below to serialize the trained weights

    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy',
                  optimizer=rms,
                  metrics=['acc'])

    model.fit(X_train,
              Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {
        'loss': -acc,
        'status': STATUS_OK,
        'model': model.to_yaml(),
        'weights': pickle.dumps(model.get_weights())
    }
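Note that Example #14 returns a YAML architecture string plus pickled weights instead of a live model, so the caller has to rebuild the network. A short sketch (assuming result is the dict returned above, and a Keras version that still ships model_from_yaml):

import pickle
from keras.models import model_from_yaml

# rebuild the architecture, then restore the trained weights
restored = model_from_yaml(result['model'])
restored.set_weights(pickle.loads(result['weights']))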
Example #15
def keras_model():
    from keras.models import Sequential
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, myRMSE, MaxMinFit
    import pickle
    train = pickle.load( open( "/data/dpappas/personality/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train, no_of_folds)
    train_data = join_folds(folds, list(folds.keys())[:-1])
    validation_data = folds[list(folds.keys())[-1]]
    mins, maxs = MaxMin(train_data['AV'])
    T_AV =  MaxMinFit(train_data['AV'], mins, maxs)
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size3 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    out_dim = 5
    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid',
                    W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}}),
                    input_dim=train_data['AV'].shape[-1]))
    model.add(Dense(Dense_size2, activation='sigmoid',
                    W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(Dense_size3, activation='sigmoid',
                    W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(out_dim, activation='linear',
                    W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    # Keras has no built-in 'rmse' loss; 'mse' is the closest equivalent and has the same minimizer
    model.compile(loss='mse', optimizer=opt)
    model.fit(T_AV, train_data['labels'], nb_epoch=500, verbose=2)
    #score = model.evaluate( validation_data['features'], validation_data['labels'])
    score = model.evaluate( T_AV, train_data['labels'])
    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
Example #16
def model(x_train, y_train, x_test, y_test):

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([128, 256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))

        # We can also choose between complete sets of layers

        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train, y_train,
              batch_size={{choice([128, 256])}},
              epochs=5,
              verbose=2,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #17
def create_model(x_train, y_train, x_val, y_val, x_test, y_test):
    ACTIONS_FILE = "sc.txt"
    # read action names
    ACTIONS = []
    for l in codecs.open(ACTIONS_FILE, "r").readlines():
        ACTIONS.append(l.split("\t")[1].replace("\n", "").replace("\r", ""))

    model = Sequential()
    # have several parameters to choose with meta-parametrization optimization
    model.add(
        LSTM({{choice([128, 256, 512])}}, input_shape=(None, len(ACTIONS))))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(len(ACTIONS)))
    model.add(Activation('softmax'))
    optimizer = Adam(lr={{uniform(0.001, 0.01)}})

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimizer)

    result = model.fit(x_train,
                       y_train,
                       batch_size={{choice([16, 32, 64])}},
                       epochs=10,
                       verbose=2,
                       shuffle=True,
                       validation_data=(x_val, y_val))
    # get the highest validation accuracy of the training epochs
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
Example #18
def create_model(train_X, train_y, val_X, val_y):
    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true)))

    # design network
    model = Sequential()
    model.add(
        LSTM(units=128,
             return_sequences=True,
             input_shape=(train_X.shape[1], train_X.shape[2]),
             bias_regularizer=L1L2(l1=0.1, l2=0.05)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(LSTM(units=64))
    model.add(Dropout({{uniform(0, 1)}}))

    # model.add(Dense(16,init='uniform',activation='relu'))
    model.add(Dense({{choice([64, 32, 16, 8])}}))

    model.add(Dense(units=1))
    model.compile(optimizer={{choice(['adam'])}}, loss=root_mean_squared_error)
    # fit network
    history = model.fit(train_X,
                        train_y,
                        epochs=500,
                        batch_size={{choice([25])}},
                        verbose=2,
                        shuffle=False,
                        validation_split=0.1)
    # get the lowest validation loss of the training epochs
    validation_loss = np.amin(history.history['val_loss'])
    print('Best validation loss of epoch:', validation_loss)
    return {'loss': validation_loss, 'status': STATUS_OK, 'model': model}
Example #19
def cnn_model():
    model = Sequential()

    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               input_shape=(3, 48, 48),
               activation='relu'))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(43, activation='softmax'))
    return model
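Unlike the other examples, cnn_model() above only builds the network; it neither compiles nor trains it, and it does not produce the hyperopt result dict. A wrapper sketch in the style of the surrounding examples (the data shapes and training settings here are assumptions):

def create_model(x_train, y_train, x_test, y_test):
    # assumes channels-first inputs of shape (n, 3, 48, 48) and one-hot labels over 43 classes
    model = cnn_model()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    result = model.fit(x_train, y_train,
                       batch_size=32,
                       epochs=10,
                       verbose=2,
                       validation_data=(x_test, y_test))
    validation_acc = np.amax(result.history['val_acc'])
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}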
Example #20
def create_model(X_trn, y_trn, X_val, y_val):
    model = Sequential()
    model.add(
        Dense({{choice([np.power(2, 5),
                        np.power(2, 6),
                        np.power(2, 7)])}},
              input_dim=X_trn.shape[1]))
    model.add(LeakyReLU(alpha={{uniform(0, 0.5)}}))
    model.add(Dropout({{uniform(0.5, 1)}}))
    # input_dim is only meaningful on the first layer, so it is omitted here
    model.add(
        Dense({{choice([np.power(2, 5),
                        np.power(2, 6),
                        np.power(2, 7)])}}))
    model.add(LeakyReLU(alpha={{uniform(0, 0.5)}}))
    model.add(Dropout({{uniform(0.5, 1)}}))
    model.add(Dense(1, activation='sigmoid'))
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_acc',
                                            factor=0.2,
                                            patience=5,
                                            min_lr=0.0001)
    model.compile(optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_trn,
              y_trn,
              epochs={{choice([25, 50, 75, 100])}},
              batch_size={{choice([16, 32, 64])}},
              validation_data=(X_val, y_val),
              verbose=1,
              callbacks=[reduce_lr])
    score, acc = model.evaluate(X_val, y_val, verbose=0)
    print('Test accuracy:', acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #21
def model_creation(x_train, y_train, x_test, y_test):

    model = Sequential()
    model.add(
        Embedding(input_dim=210338,
                  output_dim={{choice([100, 150, 300, 500])}},
                  embeddings_initializer='uniform',
                  input_length=100))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(LSTM({{choice([150, 256, 512, 1024])}}, return_sequences=True))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(LSTM({{choice([150, 256, 512, 1024])}}, return_sequences=False))
    model.add(Dropout({{uniform(0, 1)}}))

    #if conditional({{choice(['three', 'four'])}}) == 'three':
    #    model.add(LSTM(1024, return_sequences=False))
    #    model.add(Dropout(0.32))

    model.add(Dense(6, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
    model.summary()  # summary() already prints; no need to wrap it in print()

    model.fit(x_train,
              y_train,
              epochs=1,
              batch_size={{choice([32, 64, 128, 256, 512])}})
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy: ', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #22
def model(X_train, Y_train, X_test, Y_test):
	model = models.Sequential()
	f = {{choice([[16, 16, 32, 32, 32], [32, 32, 64, 64, 64], [16, 32, 32, 64, 64]])}}
	model.add(layers.Conv2D(f[0], {{choice([(3, 3), (5, 5)])}}, padding='same', activation='relu', input_shape=(28, 28, 1)))
	model.add(layers.Conv2D(f[1], {{choice([(3, 3), (5, 5)])}}, padding='same', activation='relu'))
	model.add(layers.MaxPooling2D((2, 2)))
	model.add(layers.Dropout({{uniform(0, 1)}}, seed=7382))
	model.add(layers.Conv2D(f[2], {{choice([(3, 3), (5, 5)])}}, padding='same', activation='relu'))
	model.add(layers.Conv2D(f[3], {{choice([(3, 3), (5, 5)])}}, padding='same', activation='relu'))
	model.add(layers.Conv2D(f[4], {{choice([(3, 3), (5, 5)])}}, padding='same', activation='relu'))
	model.add(layers.BatchNormalization())
	model.add(layers.MaxPooling2D((2, 2)))
	model.add(layers.Dropout({{uniform(0, 1)}}, seed=7382))

	model.add(layers.Flatten())
	model.add(layers.Dense({{choice([64, 84, 128])}}, activation='relu'))
	model.add(layers.BatchNormalization())
	model.add(layers.Dropout({{uniform(0, 1)}}))
	model.add(layers.Dense(10, activation='softmax'))

	model.compile(optimizer='adam',
				  loss='sparse_categorical_crossentropy',
				  metrics=['accuracy'])

	model.fit(X_train, Y_train,
			  batch_size={{choice([None, 64, 128])}},
			  epochs=10,
			  verbose=2,
			  validation_data=(X_test, Y_test),
			  callbacks=[WandbCallback()])

	score, acc = model.evaluate(X_test, Y_test, verbose=0)
	print('Test accuracy:', acc)
	return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #23
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
                  metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK}
Example #24
def create_model(X_train, y_train, X_val, y_val):
    model = Sequential()
    embedding_vector_length = 500
    model.add(
        layers.Embedding(vocab_size,
                         embedding_vector_length,
                         input_length=max_sequence_length))
    model.add(
        layers.LSTM(
            units={{choice([np.power(2, 5),
                            np.power(2, 6),
                            np.power(2, 7)])}},
            dropout={{uniform(0.5, 1)}},
            recurrent_dropout={{uniform(0.5, 1)}}))
    model.add(
        Dense(units=10, activation="softmax", kernel_initializer="uniform"))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                            factor=0.2,
                                            patience=5,
                                            min_lr=0.001)
    model.fit(X_train,
              y_train,
              epochs={{choice([25, 50, 75, 100])}},
              batch_size={{choice([16, 32, 64])}},
              validation_data=(X_val, y_val),
              callbacks=[reduce_lr])
    score, acc = model.evaluate(X_val, y_val, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #25
def create_model(train_X, test_X, train_y, test_y):
    model = Sequential()
    model.add(Dense(500, input_shape=(60,),kernel_initializer= {{choice(['glorot_uniform','random_uniform'])}}))
    model.add(BatchNormalization(epsilon=1e-06, momentum=0.9))  # 'mode' and 'weights' were removed in Keras 2
    model.add(Activation({{choice(['relu','sigmoid','tanh'])}}))
    model.add(Dropout({{uniform(0, 0.3)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 0.4)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation({{choice(['relu','tanh'])}}))
    model.add(Dropout(0.3))

    model.add(Dense(41))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer={{choice(['rmsprop', 'adam'])}})
    model.summary()
    early_stops = EarlyStopping(patience=3, monitor='val_acc')
    ckpt_callback = ModelCheckpoint('keras_model', 
                                 monitor='val_loss', 
                                 verbose=1, 
                                 save_best_only=True, 
                                 mode='auto')

    model.fit(train_X, train_y,
              batch_size={{choice([128, 264])}},
              epochs={{choice([10, 20])}},
              validation_data=(test_X, test_y),
              callbacks=[early_stops, ckpt_callback])
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #26
def create_model(x_train, y_train, x_test, y_test):
    model = Sequential()
    model.add(
        LSTM(units={{choice([16, 32, 64, 128, 256])}},
             input_shape=(x_train.shape[1], x_train.shape[2])))

    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(units={{choice([2, 4, 8, 16, 32, 64, 128, 256])}}))
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(1))

    model.compile(
        loss='mae',  # mse sometimes yields NaNs here
        optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train,
              y_train,
              batch_size={{choice([8, 16, 32, 64, 128])}},
              epochs=50,
              verbose=0,
              shuffle=False,
              validation_data=(x_test, y_test))
    mae = model.evaluate(x_test, y_test, verbose=0)
    print('MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model}
Example #27
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy',
                  optimizer=rms,
                  metrics=['accuracy'])

    epochs = 10
    batch_size = 128

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #28
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(50, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy',
                  optimizer=rms,
                  metrics=['accuracy'])

    model.fit(X_train,
              Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #29
def model(x_train, y_train, x_test, y_test):
    # 25 > 10 (relu) dropout > 10 (relu) dropout > 1 (relu)
    model = Sequential()
    model.add(
        Dense({{choice([5, 10, 15, 20, 25, 30, 50, 75, 100, 200, 500])}},
              input_dim=x_train.shape[1]))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh', 'linear'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    # If we choose 'extra-layer', add an additional hidden layer
    if conditional({{choice(['extra-layer', 'no'])}}) == 'extra-layer':
        model.add(
            Dense({{choice([5, 10, 15, 20, 25, 30, 50, 75, 100, 200, 500])}}))
        model.add(Activation({{choice(['relu', 'sigmoid', 'tanh',
                                       'linear'])}}))
        model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh', 'linear'])}}))

    model.compile(loss='mean_squared_error',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size={{choice([32, 64, 128])}},
              epochs=10,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #30
def create_model(train_X, test_X, train_y, test_y):
    model = Sequential()
    model.add(Dense(500, input_shape=(238,),kernel_initializer= {{choice(['glorot_uniform','random_uniform'])}}))
    model.add(BatchNormalization(epsilon=1e-06, momentum=0.9))  # 'mode' and 'weights' were removed in Keras 2
    model.add(Activation({{choice(['relu','sigmoid','tanh'])}}))
    model.add(Dropout({{uniform(0, 0.3)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 0.4)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation({{choice(['relu','tanh'])}}))
    model.add(Dropout(0.3))

    model.add(Dense(41))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer={{choice(['rmsprop', 'adam'])}})
    model.summary()
    early_stops = EarlyStopping(patience=3, monitor='val_acc')
    ckpt_callback = ModelCheckpoint('keras_model', 
                                 monitor='val_loss', 
                                 verbose=1, 
                                 save_best_only=True, 
                                 mode='auto')

    model.fit(train_X, train_y,
              batch_size={{choice([128, 264])}},
              epochs={{choice([10, 20])}},
              validation_data=(test_X, test_y),
              callbacks=[early_stops, ckpt_callback])
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #31
def new_model(bendata, benlabel, benmir, benslabel):

    n = len(bendata[0])
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    model = Sequential()
    model.add(Dense({{choice([32, 64, 128, 256, 512])}}, input_shape=(n, )))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([32, 64, 128, 256, 512])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    #model.add({{choice([Dropout(0.1), Dropout(0.2), Dropout(0.3)])}})
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'three', add an additional third layer
    if {{choice(['two', 'three'])}} == 'three':
        model.add(Dense({{choice([32, 64, 128, 256, 512])}}))
        model.add(Activation({{choice(['relu', 'sigmoid'])}}))
        #model.add({{choice([Dropout(0.1), Dropout(0.2), Dropout(0.3)])}})
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(1))
    model.add(Activation({{choice(['sigmoid', 'softmax', 'linear'])}}))

    adam = Adam(lr={{choice([10**-4, 10**-3, 10**-2, 10**-1])}})
    rmsprop = RMSprop(lr={{choice([10**-4, 10**-3, 10**-2, 10**-1])}})
    sgd = SGD(lr={{choice([10**-4, 10**-3, 10**-2, 10**-1])}})

    chosen_par = {{choice(['adam', 'rmsprop', 'sgd'])}}
    if chosen_par == 'adam':
        optm = adam
    elif chosen_par == 'rmsprop':
        optm = rmsprop
    else:
        optm = sgd

    model.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optm)

    result = model.fit(
        bendata,
        benlabel,
        batch_size={{choice([32, 64, 128])}},
        epochs={{choice([2, 4, 10, 20])}},
        #epochs=2,
        verbose=2,
        validation_split=0.1)
    # get the highest validation accuracy of the training epochs
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation accuracy of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
Example #32
def model(train, test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(666,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(666))
    model.add(Activation('linear'))

    rms = RMSprop()
    model.compile(loss='mse', optimizer=rms, metrics=['mse'])

    model.fit(train, train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2
              )
    score, mse = model.evaluate(test, test, verbose=0)
    print('Test MSE:', mse)
    return {'loss': mse, 'status': STATUS_OK, 'model': model}
Example #33
def model(X_train, X_test, y_train, y_test):
    model = Sequential()
    model.add(con_model())
    model.add(Conv2D({{choice([64, 128, 256, 512])}}, (3, 3), activation='relu', padding='same', name="block_converge_2"))  # ,input_shape=(IMG_SIZE, IMG_SIZE, 3)))
    model.add(Dropout({{uniform(0,0.5)}}))
    model.add(Conv2D({{choice([64, 128, 256, 512])}}, (3, 3), activation='relu', padding='same', name="block_converge_3"))
    model.add(Dropout({{uniform(0, 0.5)}}))
    model.add(Conv2D({{choice([64, 128, 256, 512])}}, (3, 3), activation='relu', padding='same', name="block_converge_4"))
    model.add(Dropout({{uniform(0, 0.5)}}))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(2, activation='softmax', name="block_converge_5k"))

    sgd = SGD(lr=1e-4, decay={{choice([1e-4, 1e-5, 1e-6])}}, momentum={{uniform(0, 0.9)}}, nesterov=True)

    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    model.fit(
        [X_train[0], X_train[1]],
        y_train,
        batch_size=16,
        epochs=30,
        validation_data=( [X_test[0], X_test[1]], y_test))

    score, acc = model.evaluate([X_test[0], X_test[1]], y_test, verbose=0)
    print('Test score:', score)
    print('Test accuracy:', acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #34
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    model = Sequential()
    model.add(Dense({{choice([8, 16, 32])}}, input_shape=(19,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([8, 16, 32])}}))
    model.add(Activation({{choice(['relu', 'linear'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mae', optimizer="adam", metrics=['mae'])

    result = model.fit(x_train, y_train,
                       batch_size={{choice([4, 8, 16, 32, 64, 128])}},
                       epochs=50,
                       verbose=1,
                       validation_data=(x_test, y_test))

    # with loss='mae' and metrics=['mae'], both returned values are the MAE
    score, mae = model.evaluate(x_test, y_test, verbose=0)
    print('Test MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model}
Example #35
def nn(X, Y):
    model = Sequential()
    M = Masking(mask_value=0, input_shape=(54, 1))
    model.add(M)
    model.add(LSTM(500, activation="tanh"))  # input shape is already set by the Masking layer
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'tanh', 'softmax'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'tanh', 'softmax'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer={{choice(['rmsprop', 'adam', 'adadelta'])}},
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # Debugging: Predict training examples themselves (since no validation split)...
    num_examples = 1
    model.fit(X[:num_examples, :, :], Y[:num_examples], epochs=1, batch_size=32)

    # for i in range(NUM_EXAMPLES):
    #     print(model.predict_classes(X[i].reshape(1, 54, 1)))

    score, acc = model.evaluate(X[:num_examples, :, :], Y[:num_examples], verbose=0)
    print('Test score:', score)
    print('Test accuracy:', acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #36
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    epochs = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, epochs=epochs,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #37
def model(data, labels, val_data, val_labels):

    model = Sequential()
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10))

    # Prior precision lower and upper bounds are from appendix I.1 tests

    optimizer = Vadam(lr={{uniform(1e-4, 1e-2)}},
                      prior_prec={{uniform(1e-2, 25)}},
                      train_set_size=1000)

    model.compile(loss='kld',
                  metrics=['mae'],
                  optimizer=optimizer)

    result = model.fit(data,
                       labels,
                       epochs=10,
                       batch_size=32,
                       validation_data=(val_data, val_labels))

    # get the lowest validation loss of each training epoch
    validation_loss = np.amin(result.history['val_loss'])
    print('Best loss of epoch:', validation_loss)

    return {'loss': validation_loss, 'status': STATUS_OK, 'model': model}
Example #38
def model(depnet_feat_dev1, depnet_feat_dev2, depnet_feat_val, img_feat_dev1, img_feat_dev2, img_feat_val, q_dev1, q_dev2, q_val, a_dev1, a_dev2, a_val, qdict_dev1, adict_dev1):
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.core import Lambda, Dense, Activation, Merge, Dropout, Reshape
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    import keras.backend as K
    import os

    path2outdir = os.environ.get('OUTDIR', 'no')

    vocab_size = len(qdict_dev1)
    nb_ans = len(adict_dev1) - 1

    nb_epoch = 1000

    quest_model = Sequential()
    quest_model.add(Embedding(input_dim=vocab_size, output_dim={{choice([100, 200, 300, 500])}},
                              init={{choice(['uniform', 'normal', 'glorot_uniform', 'glorot_normal', 'he_normal', 'he_uniform'])}},
                              mask_zero=False, dropout={{uniform(0,1)}}
                              )
                    )
    quest_model.add(Lambda(function=lambda x: K.sum(x, axis=1), output_shape=lambda shape: (shape[0], ) + shape[2:]))

    nb_img_feat = img_feat_dev1.shape[1]
    img_model = Sequential()
    img_model.add(Reshape((nb_img_feat, ), input_shape=(nb_img_feat, )))

    nb_depnet_feat = depnet_feat_dev1.shape[1]
    depnet_model = Sequential()
    depnet_model.add(Reshape((nb_depnet_feat, ), input_shape=(nb_depnet_feat, )))

    multimodal = Sequential()
    multimodal.add(Merge([img_model, depnet_model, quest_model], mode='concat', concat_axis=1))
    multimodal.add(Dropout({{uniform(0, 1)}}))
    multimodal.add(Dense(nb_ans))
    multimodal.add(Activation('softmax'))

    multimodal.compile(loss='categorical_crossentropy',
                       optimizer={{choice(['sgd', 'adam', 'rmsprop', 'adagrad', 'adadelta', 'adamax'])}},
                       metrics=['accuracy'])

    print('##################################')
    print('Train...')
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath=os.path.join(path2outdir, 'cnn_bow_weights.hdf5'), verbose=1, save_best_only=True)
    multimodal.fit([img_feat_dev1, depnet_feat_dev1, q_dev1], a_dev1, batch_size={{choice([32, 64, 100])}}, nb_epoch=nb_epoch,
                   validation_data=([img_feat_dev2, depnet_feat_dev2, q_dev2], a_dev2),
                   callbacks=[early_stopping, checkpointer])
    multimodal.load_weights(os.path.join(path2outdir, 'cnn_bow_weights.hdf5'))
    score, acc = multimodal.evaluate([img_feat_val, depnet_feat_val, q_val], a_val, verbose=1)

    print('##################################')
    print('Test accuracy:%.4f' % acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': multimodal}
Example #39
def model(train_X, train_Y, test_X, test_Y):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    
    model = Sequential()
    model.add(Dense(500,input_shape=(train_X.shape[1],)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense({{choice([512, 1024])}}))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense({{choice([512, 1024])}}))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))
    

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(500))
        # We can also choose between complete sets of layers
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(train_Y.shape[1]))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(train_X, train_Y,
              batch_size={{choice([128, 256])}},
              epochs=1,
              verbose=2,
              validation_data=(test_X, test_Y))
    score, acc = model.evaluate(test_X, test_Y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #40
def model(datagen, X_train, Y_train, X_test, Y_test):
    batch_size = 32
    nb_epoch = 200

    # input image dimensions
    img_rows, img_cols = 32, 32
    # the CIFAR10 images are RGB
    img_channels = 3

    model = Sequential()

    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    # fit the model on the batches generated by datagen.flow()
    model.fit_generator(datagen.flow(X_train, Y_train,
                        batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch,
                        validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #41
    def build_model(self, window_size, word_vector_size, activation_function, dense_layer_sizes,
                    hidden_dropout_rate, dropout, k_output, name, hyperparameter_search=False):

        self.model_info = {}

        self.model_info['window_size'] = window_size
        self.model_info['activation_function'] = activation_function
        self.model_info['k_output'] = k_output
        self.model_info['dropout'] = dropout
        self.model_info['hidden_dropout_rate'] = hidden_dropout_rate
        self.model_info['dense_layer_sizes'] = dense_layer_sizes
        self.model_info['name'] = name
        self.model_info['word_vector_size'] = word_vector_size
        self.model_info['hyperparameter_search'] = hyperparameter_search

        model = Sequential()
        model.add(Dense(100, input_dim=(window_size * 2 + 1) * word_vector_size))
        model.add(Activation(activation_function))

        for layer_size in dense_layer_sizes:
            model.add(Dense(layer_size))
            if dropout:
                if hyperparameter_search:
                    hidden_dropout_rate = {{uniform(0, 1)}}
                model.add(Dropout(hidden_dropout_rate))

        model.add(Dense(k_output))
        model.add(Activation('softmax'))

        self.model = model

        return model
Example #42
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #43
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later;
    # tempfile.mkstemp avoids relying on the private tempfile._get_candidate_names() API
    import os
    fd, temp_name = tempfile.mkstemp(suffix='.h5')
    os.close(fd)
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
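For completeness, a hedged sketch of how the 'model_serial' bytes returned above could be turned back into a Keras model once fetched from the database; restore_model is a hypothetical helper, not part of the original example:

import os
import tempfile
from keras.models import load_model

def restore_model(model_bytes):
    # write the raw HDF5 bytes back to disk, then let Keras reload the model
    fd, path = tempfile.mkstemp(suffix='.h5')
    os.close(fd)
    with open(path, 'wb') as outfile:
        outfile.write(model_bytes)
    return load_model(path)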
Example #44
0
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #45
0
def keras_model():
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop
    from keras.utils import np_utils

    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test,
                           show_accuracy=True, verbose=0)
    print('Test accuracy:', score[1])
    return {'loss': -score[1], 'status': STATUS_OK}
Example #46
0
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
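The docstring above spells out the contract hyperas expects from a model-providing function. For context, a minimal driver sketch that would run this search; the data() loader here is a hypothetical placeholder, everything else is the standard hyperas entry point:

from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # hypothetical loader; must return X_train, Y_train, X_test, Y_test
    # in the order the model function expects
    ...

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model,
                                          data=data,
                                          algo=tpe.suggest,
                                          max_evals=5,
                                          trials=Trials())
    print('Best run:', best_run)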
Example #47
0
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(50, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #48
0
def create_model(x_train, y_train, x_test, y_test):
    
    batch_size = 256
    epochs = 1
    learning_rate = 0.8713270582626444
    momentum = 0.8671876498073315
    decay = 0.0
    early_stop_th = 10**-5
    input_dim = (784,)

    dropout_1 = 0.026079803111884514
    dropout_2 = 0.4844455237320119

    # Stop the training if the accuracy is not moving more than a delta
    # keras.callbacks.History is by default added to all keras model
    # callbacks = [EarlyStopping(monitor='acc', min_delta=early_stop_th, patience=5, verbose=0, mode='auto')]

    # Code up the network
    x_input = Input(input_dim)
    x = Dropout(dropout_1)(x_input)
    x = Dense(1024, activation='relu', name='dense1',
              kernel_constraint=max_norm({{uniform(0.9, 5)}}))(x)
    x = Dropout(dropout_2)(x)
    x = Dense(1024, activation='relu', name='dense2',
              kernel_constraint=max_norm({{uniform(0.9, 5)}}))(x)
    predictions = Dense(10, activation='softmax')(x)

    # Optimizer
    sgd = optimizers.SGD(lr=learning_rate, momentum=momentum, decay=decay, nesterov=False)

    # Create and train model
    model = Model(inputs=x_input, outputs=predictions)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, validation_split=0.1,
              batch_size=batch_size, epochs=epochs, verbose=1)
    metrics = model.evaluate(x=x_test, y=y_test, batch_size=batch_size, verbose=0)

    accuracy = metrics[1]
    return {'loss': 1-accuracy, 'status': STATUS_OK, 'model': model}
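Had the commented-out early stopping above been enabled, the callback list would be passed to fit; a sketch using the same names defined in this function:

from keras.callbacks import EarlyStopping

callbacks = [EarlyStopping(monitor='acc', min_delta=early_stop_th,
                           patience=5, verbose=0, mode='auto')]
model.fit(x=x_train, y=y_train, validation_split=0.1,
          batch_size=batch_size, epochs=epochs, verbose=1,
          callbacks=callbacks)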
Example #49
0
def keras_model():
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    import pickle
    embeddings = pickle.load(open("/data/dpappas/personality/emb.p", "rb"))
    train = pickle.load(open("/data/dpappas/personality/train.p", "rb"))
    no_of_folds = 10
    folds = get_the_folds(train, no_of_folds)
    train_data = join_folds(folds, list(folds.keys())[:-1])
    validation_data = folds[list(folds.keys())[-1]]
    max_input_length = validation_data['features'].shape[1]
    CNN_filters = {{choice([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95])}}
    CNN_rows = {{choice([1, 2, 3, 4, 5, 6])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    is_trainable = {{choice([True, False])}}
    D = embeddings.shape[-1]
    cols = D
    out_dim = 5
    model = Sequential()
    model.add(Embedding(input_dim=embeddings.shape[0], output_dim=D, weights=[embeddings],
                        trainable=is_trainable, input_length=max_input_length))
    model.add(Reshape((1, max_input_length, D)))
    model.add(Convolution2D(CNN_filters, CNN_rows, cols, dim_ordering='th', activation='sigmoid'))
    sh = model.layers[-1].output_shape
    model.add(MaxPooling2D(pool_size=(sh[-2], sh[-1]), dim_ordering='th'))
    model.add(Flatten())
    model.add(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(out_dim, activation='linear', W_regularizer=l2({{uniform(0, 1)}}),
                    activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.compile(loss='mse', optimizer=opt)
    model.fit(train_data['features'], train_data['labels'], nb_epoch=50, show_accuracy=False, verbose=2)
    #score = model.evaluate( validation_data['features'], validation_data['labels'])
    score = model.evaluate(train_data['features'], train_data['labels'])
    return {'loss': score, 'status': STATUS_OK}
Example #50
0
def create_model(tr_pairs, tr_y, te_pairs, te_y, input_shape):
    epochs = 20
    dropout1 = {{uniform(0, 1)}}
    dropout2 = {{uniform(0, 1)}}
    dense_filter1 = {{choice([64, 128, 256])}}
    dense_filter2 = {{choice([64, 128, 256])}}
    dense_filter3 = {{choice([64, 128, 256])}}
    # network definition
    base_network = create_base_network(input_shape, dense_filter1, dense_filter2,
                                       dense_filter3, dropout1, dropout2)

    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)

    processed_a = base_network(input_a)
    processed_b = base_network(input_b)

    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([processed_a, processed_b])

    model = Model([input_a, input_b], distance)

    rms = RMSprop()
    model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
    model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
              batch_size=128,
              epochs=epochs,
              verbose=1,
              validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))

    y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
    tr_acc = compute_accuracy(tr_y, y_pred)
    y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
    te_acc = compute_accuracy(te_y, y_pred)
    print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
    print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))

    return {'loss': -te_acc, 'status': STATUS_OK, 'model': model}
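This example leans on several helpers defined elsewhere (create_base_network, the distance functions, contrastive_loss, compute_accuracy). Sketches of the distance and loss pieces, following the canonical Keras mnist_siamese example; the originals may differ:

import keras.backend as K

def euclidean_distance(vects):
    # L2 distance between the two branch embeddings, clipped for numerical stability
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))

def eucl_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)

def contrastive_loss(y_true, y_pred):
    # contrastive loss with unit margin: pull matching pairs together,
    # push mismatched pairs at least `margin` apart
    margin = 1
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))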
Example #51
0
def keras_model():
    from keras.models import Sequential, Graph
    from keras.layers.embeddings import Embedding
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    import pickle
    embeddings = pickle.load(open("/data/dpappas/personality/emb.p", "rb"))
    train = pickle.load(open("/data/dpappas/personality/train.p", "rb"))
    no_of_folds = 10
    folds = get_the_folds(train, no_of_folds)
    train_data = join_folds(folds, list(folds.keys())[:-1])
    validation_data = folds[list(folds.keys())[-1]]
    max_input_length = validation_data['features'].shape[1]
    CNN_filters = {{choice([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115, 120, 125, 130, 135, 140, 145, 150, 155, 160, 165, 170, 175, 180, 185, 190, 195, 200])}}
    CNN_rows = {{choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size3 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'adam'])}}
    is_trainable = {{choice([True, False])}}
    D = embeddings.shape[-1]
    cols = D
    out_dim = train_data['labels'].shape[-1]
    graph = Graph()
    graph.add_input(name='txt_data', input_shape=[train_data['features'].shape[-1]], dtype='int')
    graph.add_node(Embedding(input_dim=embeddings.shape[0], output_dim=D, weights=[embeddings],
                             trainable=is_trainable, input_length=max_input_length),
                   name='Emb', input='txt_data')
    graph.add_node(Reshape((1, max_input_length, D)), name='Reshape', input='Emb')
    graph.add_node(Convolution2D(CNN_filters, CNN_rows, cols, activation='sigmoid'),
                   name='Conv', input='Reshape')
    sh = graph.nodes['Conv'].output_shape
    graph.add_node(MaxPooling2D(pool_size=(sh[-2], sh[-1])), name='MaxPool', input='Conv')
    graph.add_node(Flatten(), name='Flat', input='MaxPool')
    graph.add_node(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}),
                         activity_regularizer=activity_l2({{uniform(0, 1)}})),
                   name='Dtxt', input='Flat')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout1', input='Dtxt')
    graph.add_input(name='av_data', input_shape=[train_data['AV'].shape[-1]])
    graph.add_node(Dense(Dense_size2, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}),
                         activity_regularizer=activity_l2({{uniform(0, 1)}})),
                   name='Dav', input='av_data')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout2', input='Dav')
    graph.add_node(Dense(Dense_size3, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}),
                         activity_regularizer=activity_l2({{uniform(0, 1)}})),
                   name='Dense1', inputs=['Dropout2', 'Dropout1'], merge_mode='concat')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout3', input='Dense1')
    graph.add_node(Dense(out_dim, activation='linear'), name='Dense2', input='Dropout3')
    graph.add_output(name='output', input='Dense2')
    graph.compile(optimizer=opt, loss={'output': 'rmse'})
    graph.fit(
        {
            'txt_data':train_data['features'],
            'av_data':train_data['AV'],
            'output':train_data['labels']
        },
        nb_epoch=500,
        batch_size=64
    )
    scores = graph.evaluate({'txt_data': validation_data['features'],
                             'av_data': validation_data['AV'],
                             'output': validation_data['labels']})
    print(scores)
    return {'loss': scores, 'status': STATUS_OK}
Example #52
0
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.embeddings import Embedding
    from keras.layers.recurrent import LSTM
    from keras.layers.convolutional import Convolution1D, MaxPooling1D

    # Embedding
    embedding_size = 300

    # Convolution
    filter_length = 6
    nb_filter = 64
    pool_length = 4

    # LSTM
    lstm_output_size = 100

    # Training
    batch_size = 60
    nb_epoch = 2

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  class_mode='binary')

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test), show_accuracy=True)
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size,
                                show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK}
Example #53
0
def model(q_train, q_dev, q_val, a_train, a_dev, a_val, qdict, adict):
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.core import Lambda, Dense, Activation
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    import keras.backend as K

    vocab_size = len(qdict)
    nb_ans = len(adict)

    nb_epoch = 1000

    quest_model = Sequential()
    quest_model.add(Embedding(input_dim=vocab_size, output_dim={{choice([100, 200, 300, 500])}},
                              init = {{choice(['uniform', 'normal', 'glorot_uniform', 'glorot_normal', 'he_normal', 'he_uniform'])}},
                              mask_zero=False, dropout={{uniform(0,1)}}
                              )
                    )
    quest_model.add(Lambda(function=lambda x: K.sum(x, axis=1), output_shape=lambda shape: (shape[0], ) + shape[2:]))
    quest_model.add(Dense(nb_ans))
    quest_model.add(Activation('softmax'))

    quest_model.compile(loss='categorical_crossentropy',
                        optimizer={{choice(['sgd', 'adam', 'rmsprop', 'adagrad', 'adadelta', 'adamax'])}},
                        metrics=['accuracy'])

    print('##################################')
    print('Train...')
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5', verbose=1, save_best_only=True)
    quest_model.fit(q_train, a_train, batch_size={{choice([32, 64, 100])}}, nb_epoch=nb_epoch,
                    validation_data=(q_dev, a_dev),
                    callbacks=[early_stopping, checkpointer])

    score, acc = quest_model.evaluate(q_val, a_val, verbose=1)

    print('##################################')
    print('Test accuracy:%.4f' % acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': quest_model}
Example #54
0
def keras_model():
    from keras.models import Sequential
    from keras.layers.core import Dense
    from keras.regularizers import l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, MaxMinFit
    import pickle
    print('loading test.p')
    test = pickle.load(open("/data/dpappas/Common_Crawl_840B_tokkens_pickles/test.p", "rb"))
    print('loading train.p')
    train = pickle.load(open("/data/dpappas/Common_Crawl_840B_tokkens_pickles/train.p", "rb"))
    no_of_folds = 10
    folds = get_the_folds(train, no_of_folds)
    train_data = join_folds(folds, list(folds.keys())[:-1])
    validation_data = folds[list(folds.keys())[-1]]
    mins, maxs = MaxMin(train_data['labels'])
    T_l = MaxMinFit(train_data['labels'], mins, maxs)
    t_l = MaxMinFit(validation_data['labels'], mins, maxs)


    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    out_dim = 5
    activity_l2_0 = {{uniform(0, 1)}}
    activity_l2_1 = {{uniform(0, 1)}}
    activity_l2_2 = {{uniform(0, 1)}}
    l2_0 = {{uniform(0, 1)}}
    l2_1 = {{uniform(0, 1)}}
    l2_2 = {{uniform(0, 1)}}

    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid', W_regularizer=l2(l2_0),
                    activity_regularizer=activity_l2(activity_l2_0),
                    input_dim=train_data['skipthoughts'].shape[-1]))
    model.add(Dense(Dense_size2, activation='sigmoid', W_regularizer=l2(l2_1),
                    activity_regularizer=activity_l2(activity_l2_1)))
    model.add(Dense(out_dim, activation='linear', W_regularizer=l2(l2_2),
                    activity_regularizer=activity_l2(activity_l2_2)))
    model.compile(loss='rmse', optimizer=opt)

    #model.fit(train_data['skipthoughts'], train_data['labels'], nb_epoch=500, show_accuracy=False, verbose=2)
    #score = model.evaluate( train_data['skipthoughts'], train_data['labels'])

    model.fit(train_data['skipthoughts'], T_l, nb_epoch=500, show_accuracy=False, verbose=2)
    score = model.evaluate(train_data['skipthoughts'], T_l)

    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
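MaxMin and MaxMinFit come from the private aiding_funcs module, so their exact behavior is not shown; a plausible per-column min-max scaling sketch, assuming that is all they do:

import numpy as np

def MaxMin(labels):
    # per-column minima and maxima of the label matrix (assumption)
    return labels.min(axis=0), labels.max(axis=0)

def MaxMinFit(labels, mins, maxs):
    # scale each column into [0, 1] with the supplied statistics (assumption)
    return (labels - mins) / (maxs - mins)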
Example #55
0
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.embeddings import Embedding
    from keras.layers.recurrent import LSTM
    from keras.callbacks import EarlyStopping, ModelCheckpoint

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  class_mode="binary")

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    hist = model.fit(X_train, y_train,
                     batch_size={{choice([32, 64, 128])}},
                     # batch_size=128,
                     nb_epoch=1,
                     validation_split=0.08,
                     show_accuracy=True,
                     callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, show_accuracy=True, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
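Note that the evaluation above scores the final-epoch weights even though ModelCheckpoint saved the best ones; reloading the checkpoint first (as Example #57 does below) would look like:

model.load_weights('keras_weights.hdf5')  # restore the best weights saved by the checkpointer
score, acc = model.evaluate(X_test, y_test, show_accuracy=True, verbose=0)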
Example #56
0
def model(X_train, Y_train, X_test, Y_test):
    img_rows, img_cols = 32, 32
    img_channels = 3
    nb_dim = 50
    nb_epoch = 15  # commented-out alternatives: 35, 30

    dense_layer_size = {{choice([256, 512, 1024])}}
    objective = 'mse'
    #optimizer = {{choice(['rmsprop', 'adam', 'sgd'])}}
    optimizer = {{choice(['rmsprop', 'sgd'])}}
    batch_size = {{choice([32, 64, 128])}}
    num_conv1 = int({{quniform(24, 64, 1)}})
    num_conv2 = int({{quniform(32, 96, 1)}})
    size_conv1 = int({{quniform(2, 5, 1)}})
    size_conv2 = int({{quniform(2, 5, 1)}})
    early_dropout = {{uniform(0,.75)}}
    late_dropout = {{uniform(0,.75)}}
    data_augmentation = {{choice([True, False])}}  # booleans, not strings: 'False' would be truthy
    final_activation = {{choice(['none', 'linear'])}}
    params = {'dense_layer_size':dense_layer_size,
              'optimizer':optimizer,
              'batch_size':batch_size,
              'num_conv1':num_conv1,
              'num_conv2':num_conv2,
              'size_conv1':size_conv1,
              'size_conv2':size_conv2,
              'final_activation':final_activation,
              'early_dropout':early_dropout,
              'late_dropout':late_dropout
             }
    if optimizer == 'sgd':
        learning_rate = {{loguniform(np.log(0.001),np.log(0.999))}}
        params['learning_rate'] = learning_rate

    if data_augmentation:
        more_augmentation = {{choice([True, False])}}
        params['more_augmentation'] = more_augmentation

    model = Sequential()


    model.add(Convolution2D(num_conv1, size_conv1, size_conv1, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(num_conv1, size_conv1, size_conv1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(early_dropout))

    model.add(Convolution2D(num_conv2, size_conv2, size_conv2, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(num_conv2, size_conv2, size_conv2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(early_dropout))

    model.add(Flatten())
    model.add(Dense(dense_layer_size))
    model.add(Activation('relu'))
    model.add(Dropout(late_dropout))
    model.add(Dense(nb_dim))

    if final_activation != 'none':
        model.add(Activation(final_activation))

    if optimizer == 'sgd':
        # let's train the model using SGD + momentum (how original).
        sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=objective, optimizer=sgd)
    elif optimizer == 'rmsprop':
        model.compile(loss=objective, optimizer='rmsprop')
    else:
        model.compile(loss=objective, optimizer=optimizer)

    print(params)

    if not data_augmentation:
        print('Not using data augmentation.')
        history = model.fit(X_train, Y_train, batch_size=batch_size,
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, Y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')
        if more_augmentation:
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=True,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=True,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False)  # randomly flip images
        else:
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, Y_test),
                            nb_worker=1)

    #score, acc = model.evaluate(X_test, Y_test, verbose=0)
    loss = model.evaluate(X_test, Y_test, verbose=0)
    print('Test loss:', loss)

    return {'loss': loss, 'status': STATUS_OK, 'params':params}
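Because this example returns the extra 'params' key, the winning configuration can be read back from hyperopt after the search; a sketch assuming a trials = Trials() instance was passed to optim.minimize:

best = trials.best_trial  # the trial with the lowest reported loss
print(best['result']['params'])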
Example #57
0
def model(q_train, q_dev, q_val, a_train, a_dev, a_val, qdict, adict):
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.core import Dense, Activation
    from keras.layers.recurrent import LSTM
    from keras.callbacks import EarlyStopping, ModelCheckpoint

    vocab_size = len(qdict)
    nb_ans = len(adict)

    nb_epoch = 1000

    quest_model = Sequential()
    quest_model.add(Embedding(input_dim=vocab_size, output_dim={{choice([100, 200, 300, 500])}},
                              init = {{choice(['uniform', 'lecun_uniform', 'normal',
                                               'identity', 'glorot_uniform', 'glorot_normal',
                                               'he_normal', 'he_uniform'])}},
                              mask_zero=True, dropout={{uniform(0, 1)}}
                              )
                    )
    nb_lstm_layers = {{choice([1, 2])}}

    if nb_lstm_layers == 1:
        quest_model.add(LSTM(output_dim={{choice([100, 200, 300, 500])}},
                             init={{choice(['uniform', 'lecun_uniform', 'normal',
                                            'identity', 'glorot_uniform', 'glorot_normal',
                                            'orthogonal', 'he_normal', 'he_uniform'])}},
                             inner_init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                  'identity', 'glorot_uniform', 'glorot_normal',
                                                  'orthogonal', 'he_normal', 'he_uniform'])}},
                             activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             inner_activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             W_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             U_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             b_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             dropout_W={{uniform(0, 1)}},
                             dropout_U={{uniform(0, 1)}},
                             return_sequences=False))
    else:
        for i in range(nb_lstm_layers - 1):
            quest_model.add(LSTM(output_dim={{choice([100, 200, 300, 500])}},
                                 init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                'identity', 'glorot_uniform', 'glorot_normal',
                                                'orthogonal', 'he_normal', 'he_uniform'])}},
                                 inner_init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                      'identity', 'glorot_uniform', 'glorot_normal',
                                                      'orthogonal', 'he_normal', 'he_uniform'])}},
                                 activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                                 inner_activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                                 W_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                                 U_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                                 b_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                                 dropout_W={{uniform(0, 1)}},
                                 dropout_U={{uniform(0, 1)}},
                                 return_sequences=True))

        quest_model.add(LSTM(output_dim={{choice([100, 200, 300, 500])}},
                             init={{choice(['uniform', 'lecun_uniform', 'normal',
                                            'identity', 'glorot_uniform', 'glorot_normal',
                                            'orthogonal', 'he_normal', 'he_uniform'])}},
                             inner_init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                  'identity', 'glorot_uniform', 'glorot_normal',
                                                  'orthogonal', 'he_normal', 'he_uniform'])}},
                             activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             inner_activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             W_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             U_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             b_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             dropout_W={{uniform(0, 1)}},
                             dropout_U={{uniform(0, 1)}},
                             return_sequences=False))

    quest_model.add(Dense(nb_ans))
    quest_model.add(Activation('softmax'))

    quest_model.compile(loss='categorical_crossentropy',
                        optimizer={{choice(['adam', 'rmsprop', 'adagrad', 'adadelta', 'adamax'])}},
                        metrics=['accuracy'])

    print('##################################')
    print('Train...')
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath='lstm_keras_weights.hdf5', verbose=1, save_best_only=True)
    quest_model.fit(q_train, a_train, batch_size={{choice([32, 64, 100])}}, nb_epoch=nb_epoch,
                    validation_data=(q_dev, a_dev),
                    callbacks=[early_stopping, checkpointer])

    quest_model.load_weights('lstm_keras_weights.hdf5')
    score, acc = quest_model.evaluate(q_val, a_val, verbose=1)

    print('##################################')
    print('Test accuracy:%.4f' % acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': quest_model}
Example #58
0
    Pool_Valid_Acc = np.zeros(shape=(nb_epoch, 1))
    Pool_Train_Acc = np.zeros(shape=(nb_epoch, 1))
    x_pool_All = np.zeros(shape=(1))

    Y_train = np_utils.to_categorical(y_train, nb_classes)

    print('Training Model Without Acquisitions in Experiment', e)

    model = Sequential()
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Convolution2D(nb_filters * 2, nb_conv, nb_conv, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters * 2, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout({{uniform(0, 1)}}))

    c = 0.25
    Weight_Decay = c / float(X_train.shape[0])
    model.add(Flatten())
    model.add(Dense(128, W_regularizer=l2(Weight_Decay)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(nb_classes))