def keras_model():
    from keras.regularizers import l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from keras.layers.recurrent import LSTM
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.embeddings import Embedding
    from keras.regularizers import l1, activity_l1
    from hyperopt import STATUS_OK
    import pickle
    embeddings = pickle.load( open( "/data/dpappas/personality/emb.p", "rb" ) )
    train = pickle.load( open( "/data/dpappas/personality/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    max_input_length = validation_data['features'].shape[1]
    LSTM_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    is_trainable = {{choice([True, False])}}
    D = embeddings.shape[-1]
    out_dim = 5
    model = Sequential()
    model.add(Embedding(input_dim=embeddings.shape[0], output_dim=D, weights=[embeddings], trainable=is_trainable, input_length=max_input_length))
    model.add(LSTM(LSTM_size, activation='sigmoid'))
    model.add(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(out_dim, activation='linear', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.compile(loss='mse', optimizer=opt)  # fchollet suggests rmsprop works better, while someone else suggests adam
    model.fit(train_data['features'], train_data['labels'], nb_epoch=50, show_accuracy=False, verbose=2)
    score = model.evaluate( validation_data['features'], validation_data['labels'])
    #score = model.evaluate( train_data['features'], train_data['labels'])
    return {'loss': score, 'status': STATUS_OK}
Example #2
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used here
    # to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)

    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
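For reference, here is the same convolution written with named parameters, as the comment above recommends. This is a sketch; the keyword names (nb_filter, filter_length) follow the Keras 1.x Convolution1D signature these examples target:

    model.add(Convolution1D(nb_filter={{choice([64, 128])}},
                            filter_length={{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))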
def keras_model():
    from keras.models import Sequential
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, myRMSE, MaxMinFit
    from hyperopt import STATUS_OK
    import pickle
    train = pickle.load( open( "/data/dpappas/personality/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    mins, maxs = MaxMin(train_data['AV'])
    T_AV =  MaxMinFit(train_data['AV'], mins, maxs)
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size3 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    out_dim = 5
    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}}), input_dim=train_data['AV'].shape[-1]))
    model.add(Dense(Dense_size2, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(Dense_size3, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(out_dim, activation='linear', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.compile(loss='rmse', optimizer=opt)
    model.fit(T_AV, train_data['labels'], nb_epoch=500, show_accuracy=False, verbose=2)
    #score = model.evaluate( validation_data['features'], validation_data['labels'])
    score = model.evaluate( T_AV, train_data['labels'])
    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
Example #4
File: simple.py  Project: ShuaiW/hyperas
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
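As the docstring above explains, each function in this gallery is a hyperas model-providing function: the {{choice(...)}} and {{uniform(...)}} templates are compiled into a hyperopt search space, and the returned dictionary is what hyperopt minimizes. A minimal driver sketch for a function with this signature follows; the MNIST body of data() is an assumption for illustration, modeled on the hyperas README:

from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim

def data():
    # Companion data-providing function: its return values are passed
    # positionally to model(X_train, Y_train, X_test, Y_test).
    from keras.datasets import mnist
    from keras.utils import np_utils
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784).astype('float32') / 255
    X_test = X_test.reshape(10000, 784).astype('float32') / 255
    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)
    return X_train, Y_train, X_test, Y_test

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=model, data=data,
                                          algo=tpe.suggest, max_evals=10,
                                          trials=Trials())
    print('Best run:', best_run)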
def keras_model(X_train, X_test, y_train, y_test):
    NUM_EPOCHS = 125
    BATCH_SIZE = 128
    
    inputs = Input(shape=(304, ))
    x = Dropout({{uniform(0.1, 0.5)}})(inputs)
    
    x = Dense({{choice([64, 128, 256])}})(x)
    x = Activation("relu")(x)
    x = Dropout({{uniform(0.1, 0.5)}})(x)
    
    x = Dense({{choice([64, 128, 256])}})(x)
    x = Activation("relu")(x)
    x = Dropout({{uniform(0.1, 0.5)}})(x)
    
    x = Dense({{choice([64, 128, 256])}})(x)
    x = Activation("relu")(x)
    x = Dropout({{uniform(0.1, 0.5)}})(x)
        
    predictions = Dense(1)(x)

    model = Model(inputs=[inputs], outputs=[predictions])

    model.compile(loss="mse", optimizer={{choice(["adam", "RMSprop"])}})

    model.fit(X_train, y_train,
              batch_size=BATCH_SIZE, nb_epoch=NUM_EPOCHS,
              verbose=2,
              validation_data=(X_test, y_test))

    score = model.evaluate(X_test, y_test, verbose=0)
    return {'loss': score, 'status': STATUS_OK, 'model': model}  # score is the test MSE, so minimize it directly
Example #6
def model(X_train, Y_train, X_test, Y_test):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test,
                           show_accuracy=True, verbose=0)
    print('Test accuracy:', score[1])
    return {'loss': -score[1], 'status': STATUS_OK}
def model(X_train, Y_train, X_test, Y_test):

    nb_dim = 20
    img_rows, img_cols = 32, 32
    img_channels = 3

    dense_layer_size = {{choice([256, 512, 1024])}}
    optimizer = {{choice(['rmsprop', 'adam', 'sgd'])}}
    batch_size = {{choice([32, 64, 128])}}
    num_conv1 = int({{quniform(24, 64, 1)}})
    num_conv2 = int({{quniform(32, 96, 1)}})
    params = {'dense_layer_size':dense_layer_size,
              'optimizer':optimizer,
              'batch_size':batch_size,
              'num_conv1':num_conv1,
              'num_conv2':num_conv2,
             }


    model = Sequential()

    model.add(Convolution2D(num_conv1, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(num_conv1, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(num_conv2, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(num_conv2, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(dense_layer_size))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_dim))
    model.add(Activation('softmax'))
    
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=optimizer)

    model.fit(X_train, Y_train,
             batch_size=batch_size,
             nb_epoch=30,
             show_accuracy=True,
             verbose=2,
             validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    #return {'loss': -acc, 'status': STATUS_OK, 'model':model}
    return {'loss': -acc, 'status': STATUS_OK, 'params':params}
def model(X_train, Y_train, X_val, Y_val):    
	model = Sequential()
	model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, input_dim=67, kernel_initializer='uniform', activation='relu'))
	model.add(Dropout(0.2))
	layers = {{choice(['two', 'three', 'four'])}}
	if layers == 'two':
		model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, kernel_initializer='uniform', activation='relu'))
		model.add(Dropout(0.2))
	elif layers == 'three':
		model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, kernel_initializer='uniform', activation='relu'))
		model.add(Dropout(0.2))
		model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, kernel_initializer='uniform', activation='relu'))
		model.add(Dropout(0.2))
	else:
		model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, kernel_initializer='uniform', activation='relu'))
		model.add(Dropout(0.2))
		model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, kernel_initializer='uniform', activation='relu'))
		model.add(Dropout(0.2))
		model.add(Dense({{choice([32, 64, 67, 128, 134, 201, 256, 512])}}, kernel_initializer='uniform', activation='relu'))
		model.add(Dropout(0.2))

	model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

	adam = keras.optimizers.Adam(lr=0.001)
	model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

	model.fit(X_train, Y_train, validation_data=(X_val,Y_val), batch_size={{choice([2500, 5000, 10000, 15000, 20000])}}, nb_epoch=50, verbose=2)

	score, acc = model.evaluate(X_val, Y_val, verbose=0)
	print('Validation accuracy:', acc)
	return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def model(depnet_feat_dev1, depnet_feat_dev2, depnet_feat_val, img_feat_dev1, img_feat_dev2, img_feat_val, q_dev1, q_dev2, q_val, a_dev1, a_dev2, a_val, qdict_dev1, adict_dev1):
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.core import Lambda, Dense, Activation, Merge, Dropout, Reshape
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    import keras.backend as K
    from hyperopt import STATUS_OK
    import os

    path2outdir = os.environ.get('OUTDIR', 'no')

    vocab_size = len(qdict_dev1)
    nb_ans = len(adict_dev1) - 1

    nb_epoch = 1000

    quest_model = Sequential()
    quest_model.add(Embedding(input_dim=vocab_size, output_dim={{choice([100, 200, 300, 500])}},
                              init={{choice(['uniform', 'normal', 'glorot_uniform', 'glorot_normal', 'he_normal', 'he_uniform'])}},
                              mask_zero=False, dropout={{uniform(0,1)}}
                              )
                    )
    quest_model.add(Lambda(function=lambda x: K.sum(x, axis=1), output_shape=lambda shape: (shape[0], ) + shape[2:]))

    nb_img_feat = img_feat_dev1.shape[1]
    img_model = Sequential()
    img_model.add(Reshape((nb_img_feat, ), input_shape=(nb_img_feat, )))

    nb_depnet_feat = depnet_feat_dev1.shape[1]
    depnet_model = Sequential()
    depnet_model.add(Reshape((nb_depnet_feat, ), input_shape=(nb_depnet_feat, )))

    multimodal = Sequential()
    multimodal.add(Merge([img_model, depnet_model, quest_model], mode='concat', concat_axis=1))
    multimodal.add(Dropout({{uniform(0, 1)}}))
    multimodal.add(Dense(nb_ans))
    multimodal.add(Activation('softmax'))

    multimodal.compile(loss='categorical_crossentropy',
                       optimizer={{choice(['sgd', 'adam', 'rmsprop', 'adagrad', 'adadelta', 'adamax'])}},
                       metrics=['accuracy'])

    print('##################################')
    print('Train...')
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath=os.path.join(path2outdir, 'cnn_bow_weights.hdf5'), verbose=1, save_best_only=True)
    multimodal.fit([img_feat_dev1, depnet_feat_dev1, q_dev1], a_dev1, batch_size={{choice([32, 64, 100])}}, nb_epoch=nb_epoch,
                   validation_data=([img_feat_dev2, depnet_feat_dev2, q_dev2], a_dev2),
                   callbacks=[early_stopping, checkpointer])
    multimodal.load_weights(os.path.join(path2outdir, 'cnn_bow_weights.hdf5'))
    score, acc = multimodal.evaluate([img_feat_val, depnet_feat_val, q_val], a_val, verbose=1)

    print('##################################')
    print('Test accuracy:%.4f' % acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': multimodal}
def create_model(train_X, test_X, train_y, test_y):
    model = Sequential()
    model.add(Dense(500, input_shape=(238,), kernel_initializer={{choice(['glorot_uniform', 'random_uniform'])}}))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None))
    model.add(Activation({{choice(['relu','sigmoid','tanh'])}}))
    model.add(Dropout({{uniform(0, 0.3)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 0.4)}}))

    model.add(Dense({{choice([128,256])}}))
    model.add(Activation({{choice(['relu','tanh'])}}))
    model.add(Dropout(0.3))

    model.add(Dense(41))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer={{choice(['rmsprop', 'adam'])}})
    model.summary()
    early_stops = EarlyStopping(patience=3, monitor='val_acc')
    ckpt_callback = ModelCheckpoint('keras_model', 
                                 monitor='val_loss', 
                                 verbose=1, 
                                 save_best_only=True, 
                                 mode='auto')

    model.fit(train_X, train_y, batch_size={{choice([128,264])}}, nb_epoch={{choice([10,20])}}, validation_data=(test_X, test_y), callbacks=[early_stops,ckpt_callback])
    score, acc = model.evaluate(test_X, test_y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #11
File: lstm.py  Project: ShuaiW/hyperas
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #12
def model(X_train, y_train, X_test, y_test):
    # create model
    model = Sequential()
    model.add(Dense({{choice([54, 27, 13])}}, input_dim=54, init='normal', activation='linear'))
    model.add(Dense({{choice([104, 54, 27, 13])}}, init='normal', activation='linear'))
    
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense({{choice([27, 13, 7])}}, activation='linear'))

    model.add(Dense(1, init='normal', activation='linear'))
    # Compile model
    model.compile(loss='mse', optimizer='rmsprop')
    model.fit(X_train, y_train, nb_epoch=50, batch_size={{choice([64, 128, 256])}}, verbose=2)
    score = model.evaluate(X_test, y_test)
    print('\nTest MSE:', score)
    return {'loss': score, 'status': STATUS_OK, 'model': model}
Example #13
def model(X_train, X_test, Y_train, Y_test):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([400, 512, 600])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    nb_epoch = 10
    batch_size = 128

    model.fit(X_train, Y_train,
              batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=2,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #14
def keras_model():

    import pandas as pd
    import numpy as np

    from keras.preprocessing import sequence
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation, Flatten
    from keras.layers.convolutional import Convolution1D, MaxPooling1D
    from keras.callbacks import EarlyStopping
    from keras.utils import np_utils

    from data_util import load_csvs, load_other
    import ml_metrics as metrics
    from hyperopt import STATUS_OK

    nb_words = 6500
    maxlen = 175
    filter_length = 10
    other_col_dim = 4

    X_train, Y_train, X_test, Y_test, nb_classes = load_csvs('data/tpov4/train_1.csv',
                                                             'data/tpov4/test_1.csv',
                                                              nb_words, maxlen, 'self', w2v=None)

    # read _other.csv
    other_train = load_other('data/tpov4/train_1_other.csv', maxlen, other_col_dim)
    other_test = load_other('data/tpov4/test_1_other.csv', maxlen, other_col_dim)

    print('other tensor:', other_train.shape)

    pool_length = maxlen - filter_length + 1

    model = Sequential()
    model.add(Convolution1D(nb_filter=50,
                            filter_length=filter_length,
                            border_mode="valid", activation="relu",
                            input_shape=(maxlen, other_col_dim)))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(Flatten())
    model.add(Dropout(0.05))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer={{choice(['rmsprop', 'adam', 'adadelta', 'adagrad'])}})

    earlystop = EarlyStopping(monitor='val_loss', patience=1, verbose=1)

    model.fit(other_train, Y_train, batch_size=32, nb_epoch=25,
              validation_split=0.1, show_accuracy=True, callbacks=[earlystop])

    classes = earlystop.model.predict_classes(other_test, batch_size=32)
    org_classes = np_utils.categorical_probas_to_classes(Y_test)

    acc = np_utils.accuracy(classes, org_classes)  # accuracy only supports classes
    print('Test accuracy:', acc)
    kappa = metrics.quadratic_weighted_kappa(classes, org_classes)
    print('Test Kappa:', kappa)
    return {'loss': -acc, 'status': STATUS_OK}
def keras_model():
    from keras.models import Sequential
    from keras.layers.core import Dense
    from keras.regularizers import l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from aiding_funcs.label_handling import MaxMin, MaxMinFit
    from hyperopt import STATUS_OK
    import pickle
    print('loading test.p')
    test = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/test.p", "rb" ) )
    print('loading train.p')
    train = pickle.load( open( "/data/dpappas/Common_Crawl_840B_tokkens_pickles/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    mins, maxs = MaxMin(train_data['labels'])
    T_l = MaxMinFit(train_data['labels'], mins, maxs)
    t_l = MaxMinFit(validation_data['labels'], mins, maxs)


    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    out_dim = 5
    activity_l2_0 = {{uniform(0, 1)}}
    activity_l2_1 = {{uniform(0, 1)}}
    activity_l2_2 = {{uniform(0, 1)}}
    l2_0 = {{uniform(0, 1)}}
    l2_1 = {{uniform(0, 1)}}
    l2_2 = {{uniform(0, 1)}}

    model = Sequential()
    model.add(Dense(Dense_size, activation='sigmoid', W_regularizer=l2(l2_0), activity_regularizer=activity_l2(activity_l2_0), input_dim=train_data['skipthoughts'].shape[-1]))
    model.add(Dense(Dense_size2, activation='sigmoid', W_regularizer=l2(l2_1), activity_regularizer=activity_l2(activity_l2_1)))
    model.add(Dense(out_dim, activation='linear', W_regularizer=l2(l2_2), activity_regularizer=activity_l2(activity_l2_2)))
    model.compile(loss='rmse', optimizer=opt)

    #model.fit(train_data['skipthoughts'], train_data['labels'], nb_epoch=500, show_accuracy=False, verbose=2)
    #score = model.evaluate( train_data['skipthoughts'], train_data['labels'])

    model.fit(train_data['skipthoughts'], T_l, nb_epoch=500, show_accuracy=False, verbose=2)
    score = model.evaluate( train_data['skipthoughts'], T_l)

    print("score : " +str(score))
    return {'loss': score, 'status': STATUS_OK}
Example #16
def create_model(x_train, y_train, x_test, y_test):
    model = Sequential()
    model.add(Dense(44, input_shape=(784,)))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(44))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(10))

    model.compile(loss='mae', metrics=['mse'], optimizer="adam")

    es = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=10)
    rlr = ReduceLROnPlateau(factor=0.1, patience=10)
    _ = model.fit(x_train, y_train, epochs=1, verbose=0, callbacks=[es, rlr],
                  batch_size=24, validation_data=(x_test, y_test))

    mae, mse = model.evaluate(x_test, y_test, verbose=0)
    print('MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model}
Example #17
def keras_model():
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop
    from keras.utils import np_utils
    from hyperopt import STATUS_OK

    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    nb_classes = 10

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test,
                           show_accuracy=True, verbose=0)
    print('Test accuracy:', score[1])
    return {'loss': -score[1], 'status': STATUS_OK}
def keras_model():
    from keras.models import Sequential, Graph
    from keras.layers.embeddings import Embedding
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from hyperopt import STATUS_OK
    import pickle
    embeddings = pickle.load( open( "/data/dpappas/personality/emb.p", "rb" ) )
    train = pickle.load( open( "/data/dpappas/personality/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    max_input_length = validation_data['features'].shape[1]
    CNN_filters = {{choice([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100,105,110,115,120,125,130,135,140,145,150,155,160,165,170,175,180,185,190,195,200])}}
    CNN_rows = {{choice([1,2,3,4,5,6,7,8,9,10])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size2 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    Dense_size3 = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'adam'])}}
    is_trainable = {{choice([True, False])}}
    D = embeddings.shape[-1]
    cols = D
    out_dim = train_data['labels'].shape[-1]
    graph = Graph()
    graph.add_input(name='txt_data', input_shape=[train_data['features'].shape[-1]], dtype='int')
    graph.add_node(Embedding(input_dim=embeddings.shape[0], output_dim=D, weights=[embeddings], trainable=is_trainable, input_length=max_input_length), name='Emb', input='txt_data')
    graph.add_node(Reshape((1, max_input_length, D)), name='Reshape', input='Emb')
    graph.add_node(Convolution2D(CNN_filters, CNN_rows, cols, activation='sigmoid'), name='Conv', input='Reshape')
    sh = graph.nodes['Conv'].output_shape
    graph.add_node(MaxPooling2D(pool_size=(sh[-2], sh[-1])), name='MaxPool', input='Conv')
    graph.add_node(Flatten(), name='Flat', input='MaxPool')
    graph.add_node(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})), name='Dtxt', input='Flat')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout1', input='Dtxt')
    graph.add_input(name='av_data', input_shape=[train_data['AV'].shape[-1]])
    graph.add_node(Dense(Dense_size2, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})), name='Dav', input='av_data')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout2', input='Dav')
    graph.add_node(Dense(Dense_size3, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})), name='Dense1', inputs=['Dropout2', 'Dropout1'], merge_mode='concat')
    graph.add_node(Dropout({{uniform(0, 1)}}), name='Dropout3', input='Dense1')
    graph.add_node(Dense(out_dim, activation='linear'), name='Dense2', input='Dropout3')
    graph.add_output(name='output', input='Dense2')
    graph.compile(optimizer=opt, loss={'output':'rmse'})
    graph.fit(
        {
            'txt_data':train_data['features'],
            'av_data':train_data['AV'],
            'output':train_data['labels']
        },
        nb_epoch=500,
        batch_size=64
    )
    scores = graph.evaluate({'txt_data':validation_data['features'], 'av_data':validation_data['AV'], 'output':validation_data['labels']})
    print(scores)
    return {'loss': scores, 'status': STATUS_OK}
Example #19
def model(q_train, q_dev, q_val, a_train, a_dev, a_val, qdict, adict):
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.core import Lambda, Dense, Activation
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    import keras.backend as K
    from hyperopt import STATUS_OK

    vocab_size = len(qdict)
    nb_ans = len(adict)

    nb_epoch = 1000

    quest_model = Sequential()
    quest_model.add(Embedding(input_dim=vocab_size, output_dim={{choice([100, 200, 300, 500])}},
                              init = {{choice(['uniform', 'normal', 'glorot_uniform', 'glorot_normal', 'he_normal', 'he_uniform'])}},
                              mask_zero=False, dropout={{uniform(0,1)}}
                              )
                    )
    quest_model.add(Lambda(function=lambda x: K.sum(x, axis=1), output_shape=lambda shape: (shape[0], ) + shape[2:]))
    quest_model.add(Dense(nb_ans))
    quest_model.add(Activation('softmax'))

    quest_model.compile(loss='categorical_crossentropy',
                        optimizer={{choice(['sgd', 'adam', 'rmsprop', 'adagrad', 'adadelta', 'adamax'])}},
                        metrics=['accuracy'])

    print('##################################')
    print('Train...')
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5', verbose=1, save_best_only=True)
    quest_model.fit(q_train, a_train, batch_size={{choice([32, 64, 100])}}, nb_epoch=nb_epoch,
                    validation_data=(q_dev, a_dev),
                    callbacks=[early_stopping, checkpointer])

    score, acc = quest_model.evaluate(q_val, a_val, verbose=1)

    print('##################################')
    print('Test accuracy:%.4f' % acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': quest_model}
Example #20
def model(X_train, Y_train, X_test, Y_test):
    model = Sequential()
    model.add(Dense(50, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([20, 30, 40])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def keras_model():
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers.core import Dense, Reshape, Activation, Flatten, Dropout
    from keras.regularizers import l1, activity_l1, l2, activity_l2
    from aiding_funcs.embeddings_handling import get_the_folds, join_folds
    from hyperopt import STATUS_OK
    import pickle
    embeddings = pickle.load( open( "/data/dpappas/personality/emb.p", "rb" ) )
    train = pickle.load( open( "/data/dpappas/personality/train.p", "rb" ) )
    no_of_folds = 10
    folds = get_the_folds(train,no_of_folds)
    train_data = join_folds(folds,folds.keys()[:-1])
    validation_data = folds[folds.keys()[-1]]
    max_input_length = validation_data['features'].shape[1]
    CNN_filters = {{choice([5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95])}}
    CNN_rows = {{choice([1,2,3,4,5,6])}}
    Dense_size = {{choice([50, 100, 150, 200, 250, 300, 350, 400, 450, 500])}}
    opt = {{choice(['adadelta', 'sgd', 'rmsprop', 'adagrad', 'adam'])}}
    is_trainable = {{choice([True, False])}}
    D = embeddings.shape[-1]
    cols = D
    out_dim = 5
    model = Sequential()
    model.add(Embedding(input_dim=embeddings.shape[0], output_dim=D, weights=[embeddings], trainable=is_trainable, input_length=max_input_length))
    model.add(Reshape((1, max_input_length, D)))
    model.add(Convolution2D(CNN_filters, CNN_rows, cols, dim_ordering='th', activation='sigmoid'))
    sh = model.layers[-1].output_shape
    model.add(MaxPooling2D(pool_size=(sh[-2], sh[-1]), dim_ordering='th'))
    model.add(Flatten())
    model.add(Dense(Dense_size, activation='sigmoid', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.add(Dense(out_dim, activation='linear', W_regularizer=l2({{uniform(0, 1)}}), activity_regularizer=activity_l2({{uniform(0, 1)}})))
    model.compile(loss='mse', optimizer=opt)
    model.fit(train_data['features'], train_data['labels'], nb_epoch=50, show_accuracy=False, verbose=2)
    #score = model.evaluate( validation_data['features'], validation_data['labels'])
    score = model.evaluate( train_data['features'], train_data['labels'])
    return {'loss': score, 'status': STATUS_OK}
def create_model(tr_pairs, tr_y, te_pairs, te_y, input_shape):
    epochs = 20
    dropout1 = {{uniform(0, 1)}}
    dropout2 = {{uniform(0, 1)}}
    dense_filter1 = {{choice([64, 128, 256])}}
    dense_filter2 = {{choice([64, 128, 256])}}
    dense_filter3 = {{choice([64, 128, 256])}}
    # network definition
    base_network = create_base_network(input_shape, dense_filter1, dense_filter2, dense_filter3, dropout1, dropout2)

    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)

    processed_a = base_network(input_a)
    processed_b = base_network(input_b)

    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([processed_a, processed_b])

    model = Model([input_a, input_b], distance)

    rms = RMSprop()
    model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
    model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
              batch_size=128,
              epochs=epochs,
              verbose=1,
              validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))

    y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
    tr_acc = compute_accuracy(tr_y, y_pred)
    y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
    te_acc = compute_accuracy(te_y, y_pred)
    print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
    print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))

    return {'loss': -te_acc, 'status': STATUS_OK, 'model': model}
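This siamese example depends on helpers defined outside the snippet (create_base_network, euclidean_distance, eucl_dist_output_shape, contrastive_loss, accuracy, compute_accuracy), in the style of Keras's mnist_siamese example. A minimal sketch of create_base_network under the parameter names used above, not the original helper:

from keras.layers import Input, Flatten, Dense, Dropout
from keras.models import Model

def create_base_network(input_shape, dense_filter1, dense_filter2,
                        dense_filter3, dropout1, dropout2):
    # Shared embedding tower applied to both siamese branches (a sketch).
    inp = Input(shape=input_shape)
    x = Flatten()(inp)
    x = Dense(dense_filter1, activation='relu')(x)
    x = Dropout(dropout1)(x)
    x = Dense(dense_filter2, activation='relu')(x)
    x = Dropout(dropout2)(x)
    x = Dense(dense_filter3, activation='relu')(x)
    return Model(inp, x)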
def model(train_X, train_Y, test_X, test_Y):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    
    model = Sequential()
    model.add(Dense(500,input_shape=(train_X.shape[1],)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense({{choice([512, 1024])}}))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense({{choice([512, 1024])}}))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))
    

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(500))
        # We can also choose between complete sets of layers
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(train_Y.shape[1]))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(train_X, train_Y,
              batch_size={{choice([128, 256])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(test_X, test_Y))
    score, acc = model.evaluate(test_X, test_Y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #24
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.layers.embeddings import Embedding
    from keras.layers.recurrent import LSTM
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    from hyperopt import STATUS_OK

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  class_mode="binary")

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    hist = model.fit(X_train, y_train,
                     batch_size={{choice([32, 64, 128])}},
                     # batch_size=128,
                     nb_epoch=1,
                     validation_split=0.08,
                     show_accuracy=True,
                     callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, show_accuracy=True, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #25
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from hyperopt import STATUS_OK

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def create_model(X_train, y_train, X_test, y_test):
    model = Sequential()
    model.add(
        CuDNNLSTM({{choice([4, 8, 16, 32])}},
                  input_shape=(look_back, num_features),  # look_back and num_features come from the enclosing script
                  return_sequences=True,
                  kernel_initializer='TruncatedNormal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout({{uniform(0, 1)}}))

    for _ in range({{choice([0, 1, 2, 3, 4, 8])}}):
        model.add(
            CuDNNLSTM({{choice([4, 8, 16, 32])}},
                      kernel_initializer='TruncatedNormal',
                      return_sequences=True))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(
        CuDNNLSTM({{choice([4, 8, 16, 32])}},
                  kernel_initializer='TruncatedNormal',
                  return_sequences=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout({{uniform(0, 1)}}))

    for _ in range({{choice([0, 1, 2, 3, 4, 8, 16, 32])}}):
        model.add(
            Dense({{choice([4, 8, 16, 32, 64, 128, 256, 512])}},
                  kernel_initializer='TruncatedNormal'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout({{uniform(0, 1)}}))

    for _ in range({{choice([0, 1, 2, 3, 4, 8, 16, 32])}}):
        model.add(
            Dense({{choice([4, 8, 16, 32, 64, 128, 256, 512])}},
                  kernel_initializer='TruncatedNormal'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout({{uniform(0, 1)}}))

    for _ in range({{choice([0, 1, 2, 3, 4, 8, 16, 32])}}):
        model.add(
            Dense({{choice([4, 8, 16, 32, 64, 128, 256, 512])}},
                  kernel_initializer='TruncatedNormal'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(
        Dense({{choice([8, 16, 32, 64, 128, 256, 512, 1024])}},
              kernel_initializer='TruncatedNormal'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(1))

    #adam = Adam(lr={{uniform(1e-5, 1e-1)}})
    #nadam = Nadam(lr={{uniform(1e-5, 1e-1)}})

    model.compile(loss='mse', metrics=['mape'], optimizer='nadam')
    #optimizer={{choice(['adadelta', 'adagrad', 'adam', 'nadam'])}})

    early_stopping_monitor = EarlyStopping(
        patience=25
    )  # Not using earlystopping monitor for now, that's why patience is high

    bs = {{choice([32, 64, 128, 256])}}

    if bs == 32:
        epoch_size = 109
    elif bs == 64:
        epoch_size = 56
    elif bs == 128:
        epoch_size = 28
    elif bs == 256:
        epoch_size = 14

    #bs = 256
    #epoch_size = 14
    schedule = SGDRScheduler(
        min_lr={{uniform(1e-8, 1e-5)}},  #1e-5
        max_lr={{uniform(1e-3, 1e-1)}},  # 1e-2
        steps_per_epoch=np.ceil(epoch_size / bs),
        lr_decay=0.9,
        cycle_length=5,  # 5
        mult_factor=1.5)

    result = model.fit(X_train,
                       y_train,
                       batch_size=bs,
                       epochs=100,
                       verbose=2,
                       validation_split=0.2,
                       callbacks=[early_stopping_monitor, schedule])

    # get the lowest validation loss across the training epochs
    val_loss = np.amin(result.history['val_loss'])
    print('Best validation loss of epoch:', val_loss)

    K.clear_session()  # Clear the tensorflow session (Free up RAM)

    return {
        'loss': val_loss,
        'status': STATUS_OK
    }  # Not returning model to save RAM
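SGDRScheduler above is not a Keras built-in but a user-defined callback implementing SGDR, i.e. cosine-annealed learning rates with warm restarts (Loshchilov & Hutter, 2017). A minimal sketch matching the constructor arguments used above:

import numpy as np
import keras.backend as K
from keras.callbacks import Callback

class SGDRScheduler(Callback):
    # Sketch of an SGDR callback: the lr follows a cosine from max_lr down
    # to min_lr over one cycle; at each restart the peak lr is decayed by
    # lr_decay and the cycle is stretched by mult_factor.
    def __init__(self, min_lr, max_lr, steps_per_epoch,
                 lr_decay=1.0, cycle_length=10, mult_factor=2.0):
        self.min_lr, self.max_lr = min_lr, max_lr
        self.steps_per_epoch = steps_per_epoch
        self.lr_decay = lr_decay
        self.cycle_length = cycle_length
        self.mult_factor = mult_factor
        self.batch_since_restart = 0

    def clr(self):
        frac = self.batch_since_restart / (self.steps_per_epoch * self.cycle_length)
        return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(frac * np.pi))

    def on_train_begin(self, logs=None):
        K.set_value(self.model.optimizer.lr, self.max_lr)

    def on_batch_end(self, batch, logs=None):
        self.batch_since_restart += 1
        if self.batch_since_restart >= self.steps_per_epoch * self.cycle_length:
            # Warm restart: reset the cosine schedule.
            self.batch_since_restart = 0
            self.cycle_length *= self.mult_factor
            self.max_lr *= self.lr_decay
        K.set_value(self.model.optimizer.lr, self.clr())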
Example #27
def Conv2DClassifierIn1(x_train, y_train, x_test, y_test):
    summary = True
    verbose = 1

    # setHyperParams------------------------------------------------------------------------------------------------
    batch_size = {{choice([32, 64, 128, 256, 512])}}
    epoch = {{choice([25, 50, 75, 100, 125, 150, 175, 200])}}

    conv_block = {{choice(['two', 'three', 'four'])}}

    conv1_num = {{choice([8, 16, 32, 64])}}
    conv2_num = {{choice([16, 32, 64, 128])}}
    conv3_num = {{choice([32, 64, 128])}}
    conv4_num = {{choice([32, 64, 128, 256])}}

    dense1_num = {{choice([128, 256, 512])}}
    dense2_num = {{choice([64, 128, 256])}}

    l1_regular_rate = {{uniform(0.00001, 1)}}
    l2_regular_rate = {{uniform(0.000001, 1)}}
    drop1_num = {{uniform(0.1, 1)}}
    drop2_num = {{uniform(0.0001, 1)}}

    activator = {{choice(['elu', 'relu', 'tanh'])}}
    optimizer = {{choice(['adam', 'rmsprop', 'SGD'])}}

    #---------------------------------------------------------------------------------------------------------------
    kernel_size = (3, 3)
    pool_size = (2, 2)
    initializer = 'random_uniform'
    padding_style = 'same'
    loss_type = 'binary_crossentropy'
    metrics = ['accuracy']
    my_callback = None
    # early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    # checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
    #                                verbose=1,
    #                                save_best_only=True)
    # my_callback = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
    #                                           patience=5, min_lr=0.0001)

    # build --------------------------------------------------------------------------------------------------------
    input_layer = Input(shape=x_train.shape[1:])
    conv = layers.Conv2D(conv1_num,
                         kernel_size,
                         padding=padding_style,
                         kernel_initializer=initializer,
                         activation=activator)(input_layer)
    conv = layers.Conv2D(conv1_num,
                         kernel_size,
                         padding=padding_style,
                         kernel_initializer=initializer,
                         activation=activator)(conv)
    pool = layers.MaxPooling2D(pool_size, padding=padding_style)(conv)
    if conv_block == 'two':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)
    elif conv_block == 'three':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)
    elif conv_block == 'four':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv4_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv4_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

    flat = layers.Flatten()(pool)
    drop = layers.Dropout(drop1_num)(flat)

    dense = layers.Dense(dense1_num,
                         activation=activator,
                         kernel_regularizer=regularizers.l1_l2(
                             l1=l1_regular_rate, l2=l2_regular_rate))(drop)
    BatchNorm = layers.BatchNormalization(axis=-1)(dense)
    drop = layers.Dropout(drop2_num)(BatchNorm)

    dense = layers.Dense(dense2_num,
                         activation=activator,
                         kernel_regularizer=regularizers.l1_l2(
                             l1=l1_regular_rate, l2=l2_regular_rate))(drop)

    output_layer = layers.Dense(len(np.unique(y_train)),
                                activation='softmax')(dense)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

    # train ---------------------------------------------------------------------------------------------------
    class_weights = class_weight.compute_class_weight('balanced',
                                                      np.unique(y_train),
                                                      y_train.reshape(-1))
    class_weights_dict = dict(enumerate(class_weights))
    model.compile(
        optimizer=optimizer,
        loss=loss_type,
        metrics=metrics  # accuracy
    )

    result = model.fit(x=x_train,
                       y=y_train,
                       batch_size=batch_size,
                       epochs=epoch,
                       verbose=verbose,
                       callbacks=my_callback,
                       validation_data=(x_test, y_test),
                       shuffle=True,
                       class_weight=class_weights_dict)

    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
Example #28
def model(q_train, q_dev, q_val, a_train, a_dev, a_val, qdict, adict):
    from keras.models import Sequential
    from keras.layers.embeddings import Embedding
    from keras.layers.core import Dense, Activation
    from keras.layers.recurrent import LSTM
    from keras.callbacks import EarlyStopping, ModelCheckpoint
    from hyperopt import STATUS_OK

    vocab_size = len(qdict)
    nb_ans = len(adict)

    nb_epoch = 1000

    quest_model = Sequential()
    quest_model.add(Embedding(input_dim=vocab_size, output_dim={{choice([100, 200, 300, 500])}},
                              init = {{choice(['uniform', 'lecun_uniform', 'normal',
                                               'identity', 'glorot_uniform', 'glorot_normal',
                                               'he_normal', 'he_uniform'])}},
                              mask_zero=True, dropout={{uniform(0, 1)}}
                              )
                    )
    nb_lstmlayer = {{choice([1, 2])}}

    if nb_lstmlayer == 1:
        quest_model.add(LSTM(output_dim={{choice([100, 200, 300, 500])}},
                             init={{choice(['uniform', 'lecun_uniform', 'normal',
                                            'identity', 'glorot_uniform', 'glorot_normal',
                                            'orthogonal', 'he_normal', 'he_uniform'])}},
                             inner_init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                  'identity', 'glorot_uniform', 'glorot_normal',
                                                  'orthogonal', 'he_normal', 'he_uniform'])}},
                             activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             inner_activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             W_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             U_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             b_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             dropout_W={{uniform(0, 1)}},
                             dropout_U={{uniform(0, 1)}},
                             return_sequences=False))
    else:
        for i in range(nb_lstmlayer - 1):
            quest_model.add(LSTM(output_dim={{choice([100, 200, 300, 500])}},
                                 init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                'identity', 'glorot_uniform', 'glorot_normal',
                                                'orthogonal', 'he_normal', 'he_uniform'])}},
                                 inner_init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                      'identity', 'glorot_uniform', 'glorot_normal',
                                                      'orthogonal', 'he_normal', 'he_uniform'])}},
                                 activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                                 inner_activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                                 W_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                                 U_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                                 b_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                                 dropout_W={{uniform(0, 1)}},
                                 dropout_U={{uniform(0, 1)}},
                                 return_sequences=True))

        quest_model.add(LSTM(output_dim={{choice([100, 200, 300, 500])}},
                             init={{choice(['uniform', 'lecun_uniform', 'normal',
                                            'identity', 'glorot_uniform', 'glorot_normal',
                                            'orthogonal', 'he_normal', 'he_uniform'])}},
                             inner_init={{choice(['uniform', 'lecun_uniform', 'normal',
                                                  'identity', 'glorot_uniform', 'glorot_normal',
                                                  'orthogonal', 'he_normal', 'he_uniform'])}},
                             activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             inner_activation={{choice(['relu', 'tanh', 'sigmoid', 'hard_sigmoid', 'linear'])}},
                             W_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             U_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             b_regularizer={{choice([None, 'l1', 'l2', 'l1l2'])}},
                             dropout_W={{uniform(0, 1)}},
                             dropout_U={{uniform(0, 1)}},
                             return_sequences=False))

    quest_model.add(Dense(nb_ans))
    quest_model.add(Activation('softmax'))

    quest_model.compile(loss='categorical_crossentropy',
                        optimizer={{choice(['adam', 'rmsprop', 'adagrad', 'adadelta', 'adamax'])}},
                        metrics=['accuracy'])

    print('##################################')
    print('Train...')
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath='lstm_keras_weights.hdf5', verbose=1, save_best_only=True)
    quest_model.fit(q_train, a_train, batch_size={{choice([32, 64, 100])}}, nb_epoch=nb_epoch,
                    validation_data=(q_dev, a_dev),
                    callbacks=[early_stopping, checkpointer])

    quest_model.load_weights('lstm_keras_weights.hdf5')
    score, acc = quest_model.evaluate(q_val, a_val, verbose=1)

    print('##################################')
    print('Test accuracy:%.4f' % acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': quest_model}
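# Note: the double-brace markers used throughout these snippets
# ({{choice(...)}}, {{uniform(...)}}) are hyperas template placeholders, not
# valid Python; hyperas rewrites them into a hyperopt search space before the
# function runs. A minimal sketch of the equivalent raw hyperopt space (the
# parameter names here are illustrative assumptions):
from hyperopt import hp

search_space = {
    'lstm_output_dim': hp.choice('lstm_output_dim', [100, 200, 300, 500]),
    'embedding_dropout': hp.uniform('embedding_dropout', 0, 1),
    'n_lstm_layers': hp.choice('n_lstm_layers', [1, 2]),
}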
Example #29
def create_model(x_train, y_train, x_test, y_test):
    init = TruncatedNormal(mean=0.0, stddev=0.05, seed=None)  # defined here but not wired into any layer below

    model = Sequential()

    model.add(Dense(52, input_dim=52))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))

    model.add(Dense({{choice([32, 64, 128, 256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation({{choice(['relu', 'sigmoid', 'softmax'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([32, 64, 128, 256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation({{choice(['relu', 'sigmoid', 'softmax'])}}))
    model.add(Dense({{choice([32, 64, 128, 256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation({{choice(['relu', 'sigmoid', 'softmax'])}}))
    model.add(Dense({{choice([32, 64, 128, 256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation({{choice(['relu', 'sigmoid', 'softmax'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([32, 64, 128, 256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation({{choice(['relu', 'sigmoid', 'softmax'])}}))
    model.add(Dense({{choice([32, 64, 128, 256, 512, 1024])}}))
    model.add(BatchNormalization())
    model.add(Activation({{choice(['relu', 'sigmoid', 'softmax'])}}))

    model.add(Dense(2, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer=SGD(lr=0.1, decay=1e-6),
                  metrics=['mse'])

    model.fit(
        x_train,
        y_train,
        epochs=10,
        batch_size=1000,
        validation_split=0.1,
        class_weight={
            0: 0.78,
            1: 0.22
        },
    )
    score = model.evaluate(x_test, y_test, verbose=0)
    mean_squared_error = score[1]  # metrics=['mse'], so score[1] is the validation MSE
    # hyperopt minimizes 'loss'; MSE is already a minimization target, so it is not negated
    return {'loss': mean_squared_error, 'status': STATUS_OK, 'model': model}
Example #30
def create_model(x_train, y_train, x_test, y_test):
    args = parse_args()
    set_logger(args.log_path, args.log_level)
    logging.debug('Args:')
    logging.debug(args)
    lang = construct_languages(args.train)
    assert len(lang) == 1
    lang = lang[0]
    game = initialize_game(train_file=lang.train,
                           test_file=lang.test,
                           dev_file=lang.dev,
                           emb_file=lang.emb,
                           budget=args.budget,
                           max_seq_len=args.max_seq_len,
                           max_vocab_size=args.max_vocab_size,
                           emb_size=args.embedding_size,
                           model_name=args.model_name)
    max_len = args.max_seq_len
    input_dim = args.max_vocab_size
    output_dim = args.embedding_size
    embedding_matrix = game.w2v
    logging.debug('building Keras model...')
    input = Input(shape=(max_len, ))
    model = Embedding(input_dim=input_dim,
                      output_dim=output_dim,
                      input_length=max_len,
                      weights=[embedding_matrix],
                      trainable=False)(input)
    model = Dropout(0.1)(model)
    n_units = 128
    model = Bidirectional(
        LSTM(units=n_units, return_sequences=True,
             recurrent_dropout=0.1))(model)
    n_tags = 5
    out = TimeDistributed(Dense(n_tags, activation='softmax'))(model)
    model = Model(input, out)
    logging.debug('Model type: ')
    logging.debug(type(model))
    logging.debug('Model summary: ')
    logging.debug(model.summary())
    rmsprop = keras.optimizers.RMSprop(lr={{choice([0.0001])}})
    model.compile(optimizer=rmsprop,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    logging.debug('done building model...')
    logging.debug('starting training...')
    num_train_examples = len(x_train)
    for i in range(1, num_train_examples):  # start at 1: an empty slice cannot be fitted
        print('i: ', i)
        model.fit(x_train[:i],
                  y_train[:i],
                  batch_size=200,
                  epochs=20,
                  verbose=0)
    logging.debug('done training...')
    logging.debug('starting testing...')
    num_samples = x_test.shape[0]
    logging.debug('Number of samples: {}'.format(num_samples))
    max_batch_size = 4096
    batch_size = min(num_samples, max_batch_size)
    predictions_probability = model.predict(x_test, batch_size=batch_size)
    predictions = numpy.argmax(predictions_probability, axis=-1)
    fscore = compute_fscore(Y_pred=predictions, Y_true=y_test)
    logging.debug('done testing...')
    # follow the hyperas convention used elsewhere in this file
    return {'loss': -fscore, 'status': STATUS_OK}
Example #31
def model(X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings,
          label2Idx, char2Idx, sentences_maxlen, words_maxlen):

    lstm_state_size = 275

    print("sentences maxlen: %s" % sentences_maxlen)
    print("words maxlen: %s" % words_maxlen)
    print("wordEmbeddings: (%s, %s)" % wordEmbeddings.shape)
    print("caseEmbeddings: (%s, %s)" % caseEmbeddings.shape)
    print("char2Idx len: %s" % len(char2Idx))
    print("label2Idx len: %s" % len(label2Idx))
    """Model layers"""

    # character input
    character_input = Input(shape=(
        None,
        words_maxlen,
    ),
                            name="Character_input")

    # embedding -> Size of input dimension based on dictionary, output dimension
    embed_char_out = TimeDistributed(
        Embedding(len(char2Idx),
                  30,
                  embeddings_initializer=RandomUniform(minval=-0.5,
                                                       maxval=0.5)),
        name="Character_embedding")(character_input)

    dropout = Dropout({{uniform(0, 1)}})(embed_char_out)

    # CNN
    conv1d_out = TimeDistributed(Conv1D(
        kernel_size={{choice([3, 4, 5, 6, 7])}},
        filters=30,
        padding='same',
        activation={{choice(['tanh', 'relu', 'sigmoid'])}},
        strides=1),
                                 name="Convolution")(dropout)
    maxpool_out = TimeDistributed(MaxPooling1D(words_maxlen),
                                  name="maxpool")(conv1d_out)
    char = TimeDistributed(Flatten(), name="Flatten")(maxpool_out)
    char = Dropout({{uniform(0, 1)}})(char)

    # word-level input
    words_input = Input(shape=(None, ), dtype='int32', name='words_input')
    words = Embedding(input_dim=wordEmbeddings.shape[0],
                      output_dim=wordEmbeddings.shape[1],
                      weights=[wordEmbeddings],
                      trainable=False)(words_input)

    # case-info input
    casing_input = Input(shape=(None, ), dtype='int32', name='casing_input')
    casing = Embedding(input_dim=caseEmbeddings.shape[0],
                       output_dim=caseEmbeddings.shape[1],
                       weights=[caseEmbeddings],
                       trainable=False)(casing_input)

    # concat & BLSTM
    output = concatenate([words, casing, char])
    output = Bidirectional(
        LSTM(
            lstm_state_size,
            return_sequences=True,
            dropout={{uniform(0, 1)}},  # on input to each LSTM block
            recurrent_dropout={{uniform(0, 1)}}  # on recurrent input signal
        ),
        name="BLSTM")(output)

    output = TimeDistributed(Dense(len(label2Idx),
                                   activation={{choice(['relu', 'sigmoid'])}}),
                             name="activation_layer")(output)

    # set up model
    model = Model(inputs=[words_input, casing_input, character_input],
                  outputs=[output])

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer={{choice(['nadam', 'rmsprop', 'adam', 'sgd'])}},
                  metrics=['accuracy'])

    model.summary()

    print(len(X_train[0][1]))
    print(X_train[0][2].shape)

    model.fit(X_train,
              Y_train,
              batch_size={{choice([32, 64, 128, 256])}},
              epochs={{choice([10, 20, 30, 40])}},
              verbose=2,
              validation_data=(X_val, Y_val))

    score, acc = model.evaluate(X_val, Y_val, verbose=0)
    print('Validation accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
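# Note: the model above takes three parallel inputs (word ids, casing ids,
# per-word character ids). A shape smoke test one could run inside model()
# right after compile(); the batch and sentence sizes are illustrative
# assumptions:
import numpy as np
batch, sent_len = 2, 10
dummy_words = np.random.randint(0, wordEmbeddings.shape[0], (batch, sent_len))
dummy_casing = np.random.randint(0, caseEmbeddings.shape[0], (batch, sent_len))
dummy_chars = np.random.randint(0, len(char2Idx), (batch, sent_len, words_maxlen))
preds = model.predict([dummy_words, dummy_casing, dummy_chars])
print(preds.shape)  # expected: (batch, sent_len, len(label2Idx))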
Example #32
File: models.py  Project: dhandl/MLkit
def create_model(X_train, y_train, X_test, y_test, class_weight):
    # fixed parameters
    initializer = 'normal'
    activation = 'relu'
    dropout = 0.5
    epochs = 50
    regularizer = 0.01  # l1 strength; not defined in this snippet, the value is an assumption
    classes = int(y_train.max()) + 1  # number of target classes; not defined in this snippet

    model = Sequential()
    model.add(
        Dense({{choice([8, 16, 32, 64, 128])}},
              input_shape=(X_train.shape[1],),  # input_shape must be a tuple
              activation=activation,
              kernel_initializer=initializer))
    model.add(BatchNormalization())

    model.add(
        Dense({{choice([8, 16, 32, 64, 128])}},
              activation=activation,
              kernel_initializer=initializer,
              kernel_regularizer=l1(regularizer)))
    model.add(Dropout(dropout))
    model.add(BatchNormalization())
    model.add(
        Dense({{choice([8, 16, 32, 64, 128])}},
              activation=activation,
              kernel_initializer=initializer,
              kernel_regularizer=l1(regularizer)))
    model.add(Dropout(dropout))
    model.add(BatchNormalization())
    model.add(
        Dense({{choice([8, 16, 32, 64, 128])}},
              activation=activation,
              kernel_initializer=initializer,
              kernel_regularizer=l1(regularizer)))
    model.add(Dropout(dropout))
    model.add(BatchNormalization())
    model.add(
        Dense({{choice([8, 16, 32, 64, 128])}},
              activation=activation,
              kernel_initializer=initializer,
              kernel_regularizer=l1(regularizer)))
    model.add(Dropout(dropout))
    model.add(BatchNormalization())

    model.add(Dense(classes, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        y_train,
                        batch_size={{choice([32, 64, 128])}},
                        epochs=epochs,
                        verbose=2,
                        shuffle=True,
                        class_weight=class_weight,
                        sample_weight=None,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            EarlyStopping(verbose=True,
                                          patience=10,
                                          monitor='val_acc')
                        ])
    score, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {
        'loss': -acc,
        'status': STATUS_OK,
        'model': model,
        'history': history
    }
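# Note: create_model above expects a class_weight mapping. A minimal sketch of
# deriving balanced weights from integer labels with scikit-learn (variable
# names here are illustrative):
import numpy as np
from sklearn.utils.class_weight import compute_class_weight

y_example = np.array([0, 0, 0, 1, 2, 2])
classes_present = np.unique(y_example)
weights = compute_class_weight(class_weight='balanced', classes=classes_present, y=y_example)
class_weight = dict(zip(classes_present, weights))
print(class_weight)  # {0: 0.666..., 1: 2.0, 2: 1.0}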
Example #33
def create_model(train_gen,X_test,y_test,train_len,balanced,imbalanced):

    num_channels = 3
    img_size = 224
    num_classes=10
    WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5'
    WEIGHTS_PATH_NO_TOP = 'C:/GIST/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
    def VGG19_Wz(include_top=True, weights='imagenet',
                 input_tensor=None, input_shape=None,
                 pooling=None,
                 classes=1000):
        """Instantiates the VGG19 architecture.

        Optionally loads weights pre-trained
        on ImageNet. Note that when using TensorFlow,
        for best performance you should set
        `image_data_format="channels_last"` in your Keras config
        at ~/.keras/keras.json.

        The model and the weights are compatible with both
        TensorFlow and Theano. The data format
        convention used by the model is the one
        specified in your Keras config file.

        # Arguments
            include_top: whether to include the 3 fully-connected
                layers at the top of the network.
            weights: one of `None` (random initialization)
                or "imagenet" (pre-training on ImageNet).
            input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
                to use as image input for the model.
            input_shape: optional shape tuple, only to be specified
                if `include_top` is False (otherwise the input shape
                has to be `(224, 224, 3)` (with `channels_last` data format)
                or `(3, 224, 224)` (with `channels_first` data format).
                It should have exactly 3 input channels,
                and width and height should be no smaller than 48.
                E.g. `(200, 200, 3)` would be one valid value.
            pooling: Optional pooling mode for feature extraction
                when `include_top` is `False`.
                - `None` means that the output of the model will be
                    the 4D tensor output of the
                    last convolutional layer.
                - `avg` means that global average pooling
                    will be applied to the output of the
                    last convolutional layer, and thus
                    the output of the model will be a 2D tensor.
                - `max` means that global max pooling will
                    be applied.
            classes: optional number of classes to classify images
                into, only to be specified if `include_top` is True, and
                if no `weights` argument is specified.

        # Returns
            A Keras model instance.

        # Raises
            ValueError: in case of invalid argument for `weights`,
                or invalid input shape.
        """
        if weights not in {'imagenet', None}:
            raise ValueError('The `weights` argument should be either '
                             '`None` (random initialization) or `imagenet` '
                             '(pre-training on ImageNet).')

        if weights == 'imagenet' and include_top and classes != 1000:
            raise ValueError('If using `weights` as imagenet with `include_top`'
                             ' as true, `classes` should be 1000')
        # Determine proper input shape
        input_shape = _obtain_input_shape(input_shape,
                                          default_size=224,
                                          min_size=48,
                                          data_format=K.image_data_format(),
                                          include_top=include_top)

        if input_tensor is None:
            img_input = Input(shape=input_shape)
        else:
            if not K.is_keras_tensor(input_tensor):
                img_input = Input(tensor=input_tensor, shape=input_shape)
            else:
                img_input = input_tensor
        # Block 1
        x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
        x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

        # Block 2
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
        x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

        # Block 3
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
        x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

        # Block 4
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

        # Block 5
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
        x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

        if include_top:
            # Classification block
            x = Flatten(name='flatten')(x)
            x = Dense(4096, activation='relu', name='fc1')(x)
            x = Dense(4096, activation='relu', name='fc2')(x)
            x = Dense(classes, activation='softmax', name='predictions')(x)
        else:
            if pooling == 'avg':
                x = GlobalAveragePooling2D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling2D()(x)

        # Ensure that the model takes into account
        # any potential predecessors of `input_tensor`.
        if input_tensor is not None:
            inputs = get_source_inputs(input_tensor)
        else:
            inputs = img_input
        # Create model.
        model = Model(inputs, x, name='vgg19')

        # load weights
        if weights == 'imagenet':
            if include_top:
                weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',
                                        WEIGHTS_PATH,
                                        cache_subdir='models')
            else:
                weights_path = WEIGHTS_PATH_NO_TOP  # local no-top weights file (see WEIGHTS_PATH_NO_TOP above)
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                layer_utils.convert_all_kernels_in_model(model)

            if K.image_data_format() == 'channels_first':
                if include_top:
                    maxpool = model.get_layer(name='block5_pool')
                    shape = maxpool.output_shape[1:]
                    dense = model.get_layer(name='fc1')
                    layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

                if K.backend() == 'tensorflow':
                    warnings.warn('You are using the TensorFlow backend, yet you '
                                  'are using the Theano '
                                  'image data format convention '
                                  '(`image_data_format="channels_first"`). '
                                  'For best performance, set '
                                  '`image_data_format="channels_last"` in '
                                  'your Keras config '
                                  'at ~/.keras/keras.json.')
        return model


    conv_base = VGG19_Wz(weights='imagenet',
                         include_top=False,
                         input_shape=(img_size, img_size, num_channels))
    model = Sequential()
    model.add(conv_base)
    model.add(Flatten(name='flatten_1'))
    model.add(Dense(4096, name='fc_1'))
    model.add(Activation('relu', name='fc_actv_1'))
    model.add(Dense(4096, name='fc_2'))
    model.add(Activation('relu', name='fc_actv_2'))
    model.add(Dropout({{uniform(0, 0.5)}}, name='fc_dropout_2'))
    model.add(Dense(1000, name='fc_6'))
    model.add(Activation('relu', name='fc_actv_6'))
    model.add(BatchNormalization())
    model.add(Dense(num_classes, name='fc_7'))
    model.add(Activation('softmax', name='fc_actv_7'))
    conv_base.trainable = False
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.4, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    n_epoch = 3
    model.fit_generator(generator=train_gen,
                        steps_per_epoch=50,
                        epochs=n_epoch,
                        verbose=1,
                        validation_data=(X_test, y_test),
                        class_weight={{choice([imbalanced, balanced])}})
    score, acc = model.evaluate(X_test,y_test)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
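# Note: the snippet above keeps the whole VGG19 base frozen. A common
# follow-up (a sketch, not part of the original, to be placed inside
# create_model before a second round of fitting) is to unfreeze only the last
# conv block and recompile with a small learning rate for fine-tuning:
conv_base.trainable = True
for layer in conv_base.layers:
    layer.trainable = layer.name.startswith('block5')  # block5_* layers only
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=1e-4, momentum=0.9),
              metrics=['accuracy'])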
Example #34
def create_model(X_train, X_test, y_train, y_test):
    input = Input(shape=(60, 8, 1))
    X = Conv2D(filters={{choice([32, 64, 128])}},
               kernel_size={{choice([(3, 3), (4, 4), (5, 5)])}},
               activation={{choice(['relu', 'sigmoid', 'tanh'])}},
               padding='same')(input)
    X = Conv2D(filters={{choice([256, 512, 1024, 1216])}},
               kernel_size={{choice([(3, 3), (4, 4), (5, 5)])}},
               activation={{choice(['relu', 'sigmoid', 'tanh'])}},
               padding='same')(X)
    X = MaxPooling2D()(X)
    X = Dropout({{uniform(0, 1)}})(X)

    X = Conv2D(filters={{choice([256, 512, 1024])}},
               kernel_size={{choice([(3, 3), (4, 4), (5, 5)])}},
               activation={{choice(['relu', 'sigmoid', 'tanh'])}},
               padding='same')(X)
    X = Dropout({{uniform(0, 1)}})(X)

    X = Conv2D(filters={{choice([512, 1024, 1600])}},
               kernel_size={{choice([(3, 3), (4, 4), (5, 5)])}},
               activation={{choice(['relu', 'sigmoid', 'tanh'])}},
               padding='same')(X)
    X = GlobalMaxPooling2D()(X)
    X = Dropout({{uniform(0, 1)}})(X)
    X = Dense(19, activation='softmax')(X)
    model = Model([input], X)
    model.compile(loss='categorical_crossentropy',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['acc'])
    plateau = ReduceLROnPlateau(monitor="val_acc",
                                verbose=0,
                                mode='max',
                                factor=0.1,
                                patience=6)
    early_stopping = EarlyStopping(monitor='val_acc',
                                   verbose=0,
                                   mode='max',
                                   patience=10)
    result = model.fit(X_train,
                       y_train,
                       epochs=100,
                       batch_size={{choice([64, 128])}},
                       verbose=2,
                       shuffle=True,
                       validation_split=0.1,
                       callbacks=[plateau, early_stopping])

    # get the highest validation accuracy of the training epochs
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
def model(train, train_labels, validation, validation_labels, GPU, NB_EPOCHS, VGG_WEIGHTS):
    import os
    import h5py
    from hyperas.distributions import choice
    from mcc_multiclass import multimcc
    #import keras.backend.tensorflow_backend as K
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
    from keras.layers import Dropout, Flatten, Dense
    from hyperopt import STATUS_OK
    from keras.optimizers import SGD, RMSprop, Adam, Adagrad, Adadelta, Adamax  # Adagrad/Adadelta/Adamax are needed by the branches below

    # path to the model weights files.

    weights_path = VGG_WEIGHTS

    img_width, img_height = 224, 224
    nb_epochs = NB_EPOCHS
    print ("Entering GPU Model")
    #with K.tf.device('/gpu:' + str(GPU)):
    with open('FAKELOG', "w"):  # placeholder context manager standing in for the disabled GPU session above
        #K.set_session(K.tf.Session(config=K.tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)))
        #session = K.get_session()
        # build the VGG16 network
        model = Sequential()
        model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))

        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        # load the weights of the VGG16 networks
        # (trained on ImageNet, won the ILSVRC competition in 2014)
        # note: when there is a complete match between your model definition
        # and your weight savefile, you can simply call model.load_weights(filename)

        assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
        f = h5py.File(weights_path, 'r')  # explicit read-only mode (required by recent h5py)
        for k in range(f.attrs['nb_layers']):
            if k >= len(model.layers):
                # we don't look at the last (fully-connected) layers in the savefile
                break
            g = f['layer_{}'.format(k)]
            weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
            model.layers[k].set_weights(weights)
        f.close()
        print('Model loaded.')

        # build a classifier model to put on top of the convolutional model
        activation_function = 'relu'
        print("\n\t#Chosen Activation:", activation_function)
        dense_size = 512
        print("\t#Chosen Dense Size:", dense_size)
        dropout_rate = {{choice([0.0, 0.25, 0.5, 0.75])}}
        print("\t#Chosen Dropout Rate:", dropout_rate)
        model.add(Flatten())
        model.add(Dense(dense_size, activation=activation_function))
        model.add(Dropout(dropout_rate))
        # the single/double FC choice is hard-coded to the double variant here
        print("\t#Chosen FC Size: Double")
        model.add(Dense(dense_size, activation=activation_function))
        model.add(Dropout(dropout_rate))
        final_classifier = 'softmax'
        print("\t#Chosen Final Classifier:", final_classifier)
        model.add(Dense(3, activation=final_classifier))

        # note that it is necessary to start with a fully-trained
        # classifier, including the top classifier,
        # in order to successfully do fine-tuning
        # top_model.load_weights(top_model_weights_path)


        # set the first 25 layers (up to the last conv block)
        # to non-trainable (weights will not be updated)
        for layer in model.layers[:25]:
            layer.trainable = False

        trial_model_optimizer_dict = {}
        #trial_model_optimizer_list = {{choice(['rmsprop', 'adam', 'sgd','adagrad','adadelta','adamax'])}}
        trial_model_optimizer_list = {{choice([ 'adam', 'sgd'])}}
        print "\t#Chosen Optimizer: ", trial_model_optimizer_list
        epsilon = 1e-08
        lr = {{choice([1e-1, 1e-2,1e-3,1e-4,1e-5,1e-6,1e-7])}}
        momentum={{choice([0.7,0.8,0.9,1.0])}}
        nesterov = {{choice([True,False])}}
        if trial_model_optimizer_list == 'adam':
            print("\t\t#Chosen Epsilon:", epsilon)
            print("\t\t#Chosen Learning Rate:", lr)
            trial_model_optimizer = Adam(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adam'] = {'lr': lr, 'epsilon': epsilon}
        elif trial_model_optimizer_list == 'rmsprop':
            print("\t\t#Chosen Epsilon:", epsilon)
            print("\t\t#Chosen Learning Rate:", lr)
            trial_model_optimizer = RMSprop(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['rmsprop'] = {'lr': lr, 'epsilon': epsilon}
        elif trial_model_optimizer_list == 'sgd':
            print("\t\t#Chosen Nesterov:", nesterov)
            print("\t\t#Chosen Learning Rate:", lr)
            print("\t\t#Chosen Momentum:", momentum)
            trial_model_optimizer = SGD(lr=lr, momentum=momentum, nesterov=nesterov)
            trial_model_optimizer_dict['sgd'] = {'lr': lr,
                                                 'momentum': momentum,
                                                 'nesterov': nesterov}
        elif trial_model_optimizer_list == 'adagrad':
            print("\t\t#Chosen Epsilon:", epsilon)
            print("\t\t#Chosen Learning Rate:", lr)
            trial_model_optimizer = Adagrad(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adagrad'] = {'lr': lr, 'epsilon': epsilon}
        elif trial_model_optimizer_list == 'adamax':
            print("\t\t#Chosen Epsilon:", epsilon)
            print("\t\t#Chosen Learning Rate:", lr)
            trial_model_optimizer = Adamax(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adamax'] = {'lr': lr, 'epsilon': epsilon}
        elif trial_model_optimizer_list == 'adadelta':
            print("\t\t#Chosen Epsilon:", epsilon)
            print("\t\t#Chosen Learning Rate:", lr)
            trial_model_optimizer = Adadelta(lr=lr, epsilon=epsilon)
            trial_model_optimizer_dict['adadelta'] = {'lr': lr, 'epsilon': epsilon}

        saved_clean_model = model.to_json()

        # compile the model with a SGD/momentum optimizer
        # and a very slow learning rate.
        model.compile(loss='categorical_crossentropy',
                      optimizer=trial_model_optimizer,
                      metrics=['accuracy'])

        # fit the model
        batch_size = 128
        print "\t#Chosen batch size:", batch_size,"\n"
        model.fit(train, train_labels, nb_epoch=nb_epochs, batch_size=batch_size)
        predicted_labels = model.predict(validation)
        predicted_labels_linear = np.argmax(predicted_labels, axis=1)

        # one-hot validation labels -> integer class ids
        validation_labels_linear = np.argmax(np.asarray(validation_labels), axis=1)

        MCC = multimcc(validation_labels_linear, predicted_labels_linear)
        print(MCC)
        output_model = {
            'model': saved_clean_model,
            'optimizer': trial_model_optimizer_dict,
            'batch_size': batch_size
        }
    #session.close()
    return {'loss': -MCC, 'status': STATUS_OK, 'model': output_model}
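# Note: multimcc above is project-specific. If it is unavailable, sklearn's
# matthews_corrcoef handles the multiclass case and should be a reasonable
# stand-in (an assumption, not verified against the original implementation):
import numpy as np
from sklearn.metrics import matthews_corrcoef

y_true = np.array([0, 1, 2, 2, 1, 0])
y_pred = np.array([0, 1, 1, 2, 1, 0])
print(matthews_corrcoef(y_true, y_pred))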
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:
 
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        model.add(Dropout(0.5))
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}
    if choiceval == 'adam':
        adam = Adam(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}},
            clipnorm=1.)
        optim = adam
    elif choiceval == 'rmsprop':
        rmsprop = RMSprop(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}},
            clipnorm=1.)
        optim = rmsprop
    else:
        sgd = SGD(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}},
            clipnorm=1.)

        optim = sgd

    model.compile(loss='categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])

    globalvars.globalVar += 1

    filepath = "../output/weights_fcn_hyperas" + str(
        globalvars.globalVar) + ".hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    csv_logger = CSVLogger('../output/hyperas_test_log.csv',
                           append=True,
                           separator=';')

    hist = model.fit(X_train,
                     Y_train,
                     batch_size={{choice([64, 128])}},
                     nb_epoch=1,
                     verbose=2,
                     validation_data=(X_test, Y_test),
                     callbacks=[checkpoint, csv_logger])

    h1 = hist.history
    acc_ = numpy.asarray(h1['acc'])
    loss_ = numpy.asarray(h1['loss'])
    val_loss_ = numpy.asarray(h1['val_loss'])
    val_acc_ = numpy.asarray(h1['val_acc'])

    acc_and_loss = numpy.column_stack((acc_, loss_, val_acc_, val_loss_))
    save_file_mlp = '../output/mlp_run_' + str(globalvars.globalVar) + '.txt'
    numpy.savetxt(save_file_mlp, acc_and_loss, delimiter=" ")

    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
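# Note: the docstring above spells out the contract hyperas expects from a
# model-providing function. A minimal sketch of driving it, assuming a
# companion data() function that returns X_train, Y_train, X_test, Y_test:
from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print(best_run)  # the winning hyperparameter assignment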
Example #37
def create_model(x_train, y_train, x_test, y_test):
    BATCH_SIZE = 64
    EPOCHS = 25
    RESIZE_DIM = 256
    RANDOM_CROP_DIM = 224
    NO_FILTERS = [16, 32, 64]

    history = None

    model = Sequential()

    f1 = {{choice(NO_FILTERS)}}
    f2 = {{choice(NO_FILTERS)}}

    model.add(
        Conv2D(f1, (3, 3),
               padding='same',
               input_shape=(RANDOM_CROP_DIM, RANDOM_CROP_DIM, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(f1, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(f2, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(f2, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(500))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, kernel_initializer='normal'))
    model.add(Activation('linear'))

    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    model.compile(loss='mean_squared_error', optimizer=opt)

    class customValidationCallback(keras.callbacks.Callback):
        def __init__(self, tr_params):
            self.tr_params = tr_params

        def on_train_begin(self, logs={}, min_delta=0, patience=3):
            self.losses = []
            self.val_error_means = []
            self.val_error_stds = []
            self.min_delta = min_delta
            self.patience = patience
            self.patience_left = patience

        def on_epoch_end(self, epoch, logs={}):
            self.losses.append(logs.get('loss'))
            prediction = self.model.predict(self.validation_data[0])
            val_error = np.abs(self.validation_data[1] - prediction)
            val_error = np.sqrt(np.dot(val_error**2, np.array([1, 1])))
            current_error = np.mean(val_error)

            if len(self.val_error_means) > 0:
                # compare with the last validation mean recorded before the
                # current run of non-improving epochs; the index is -1 while
                # patience is intact and walks back one step per bad epoch
                delta = current_error - self.val_error_means[
                    self.patience_left - self.patience - 1]
                if delta > self.min_delta:
                    self.patience_left -= 1
                    if self.patience_left == 0:
                        self.model.stop_training = True

                else:
                    # Reset patience_left if there is a decrease
                    self.patience_left = self.patience

            self.val_error_means.append(current_error)
            self.val_error_stds.append(np.std(val_error))
            with open('lenet_params_search.txt', 'a') as f:
                f.write(
                    str(self.tr_params) + ',' + str(self.val_error_means) +
                    ',' + str(self.val_error_stds) + '\n')

    # Train the CNN
    history = customValidationCallback(tr_params=[f1, f2])

    model.fit(x_train,
              y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              validation_data=(x_test, y_test),
              callbacks=[history])

    return {
        'loss': np.min(history.val_error_means),
        'status': STATUS_OK,
        'history': {
            'loss': history.losses,
            'val_e_mean': history.val_error_means,
            'val_e_std': history.val_error_stds,
            # 'best_model': history.best_model
        },
        'model': None
    }
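# Note: customValidationCallback above hand-rolls patience-based stopping over
# a custom validation error. When plain val_loss is enough, the built-in
# callback implements the same patience logic (a simpler alternative, not a
# behavioural equivalent of the custom metric):
from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3)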
def create_model(x_train, y_train, x_val, y_val, layer_sizes):

  def coeff_determination(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred)) 
    SS_tot = K.sum(K.square(y_true - K.mean(y_true))) 
    return (1 - SS_res / (SS_tot + K.epsilon()))

  model = models.Sequential()

  model.add(layers.Dense({{choice([1, 2, 4, 8, 16, 32, 64, 128])}},
            activation={{choice(['relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear', None])}},
            input_shape=(x_train.shape[1],)))  # input width taken from the training data

  model.add(layers.Dense({{choice([1, 2, 4, 8, 16, 32, 64, 128])}},
            activation={{choice(['relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear', None])}}))

  model.add(layers.Dense({{choice([1, 2, 4, 8, 16, 32, 64, 128])}},
            activation={{choice(['relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear', None])}}))

  model.add(layers.Dense(1, activation={{choice(['relu', 'selu', 'tanh', 'softmax', 'softplus', 'linear', None])}}))

  opt = optimizers.SGD(lr={{choice([0.0001, 0.001, 0.01, 0.1])}}, nesterov={{choice([False, True])}})

  model.compile(optimizer=opt,
                loss={{choice(['mean_absolute_error', 'mean_squared_error', 'mean_absolute_percentage_error',
                               'mean_squared_logarithmic_error', 'hinge', 'squared_hinge', 'logcosh'])}},
                metrics=[coeff_determination])

  model.fit(x_train, y_train, epochs={{choice([25, 50, 75, 100, 500])}}, 
            batch_size={{choice([10, 16, 20, 32, 64])}}, validation_data=(x_val, y_val))

  score, r2 = model.evaluate(x_val, y_val, verbose=0)  # the metric is coeff_determination (R^2), not accuracy
  print('Validation R^2:', r2)

  return {'loss': -r2, 'status': STATUS_OK, 'model': model}
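# Note: coeff_determination above is R^2 written in Keras backend ops. A
# standalone sanity check against scikit-learn on concrete numbers (the
# function body is repeated here so the check runs on its own):
import numpy as np
from keras import backend as K
from sklearn.metrics import r2_score

def coeff_determination(y_true, y_pred):
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - SS_res / (SS_tot + K.epsilon())

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
print(r2_score(y_true, y_pred))  # ~0.9486
print(K.eval(coeff_determination(K.variable(y_true), K.variable(y_pred))))  # should agree closely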
def create_model(X_train, y_train, X_val, y_val, X_test, y_test):
    epochs = 30
    es_patience = 5
    lr_patience = 3
    dropout = None
    depth = 25
    nb_dense_block = 3
    nb_filter = 16
    growth_rate = 18
    bn = True
    reduction_ = 0.5
    bs = 32
    lr = 5E-4  # for reference; the optimizer choices below hard-code this rate
    opt = {{choice([Adam(lr=5E-4), RMSprop(lr=5E-4), Adamax(lr=5E-4)])}}
    weight_file = 'hyperas_dn_lr_optimizer_wt_3Oct_1600.h5'
    nb_classes = 1
    img_dim = (2, 96, 96)
    n_channels = 2

    model = DenseNet(depth=depth,
                     nb_dense_block=nb_dense_block,
                     growth_rate=growth_rate,
                     nb_filter=nb_filter,
                     dropout_rate=dropout,
                     activation='sigmoid',
                     input_shape=img_dim,
                     include_top=True,
                     bottleneck=bn,
                     reduction=reduction_,
                     classes=nb_classes,
                     pooling='avg',
                     weights=None)

    model.summary()
    model.compile(loss=binary_crossentropy,
                  optimizer=opt,
                  metrics=['accuracy'])

    es = EarlyStopping(monitor='val_loss', patience=es_patience, verbose=1)
    checkpointer = ModelCheckpoint(filepath=weight_file,
                                   verbose=1,
                                   save_best_only=True)

    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=lr_patience,
                                   min_lr=0.5e-6,
                                   verbose=1)

    model.fit(X_train,
              y_train,
              batch_size=bs,
              epochs=epochs,
              callbacks=[lr_reducer, checkpointer, es],
              validation_data=(X_val, y_val),
              verbose=2)

    score, acc = model.evaluate(X_val, y_val)
    print("current val accuracy:%0.3f" % acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("current auc_score ------------------> %0.3f" % auc_score)

    model = load_model(weight_file)  #This is the best model
    score, acc = model.evaluate(X_val, y_val)
    print("Best saved model val accuracy:%0.3f" % acc)
    pred = model.predict(X_val)
    auc_score = roc_auc_score(y_val, pred)
    print("best saved model auc_score ------------------> %0.3f" % auc_score)

    return {'loss': -auc_score, 'status': STATUS_OK, 'model': model}
Example #40
File: Models.py  Project: gsndr/CLAIRE
def Autoencoder(x_train, y_train, x_test, y_test):
    input_shape = (x_train.shape[1],)
    input2 = Input(input_shape)


    # encoder_layer
    # Dropoout?
    #  input1 = Dropout(.2)(input)
    encoded = Dense(80, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod0')(input2)
    encoded = Dense(30, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod1')(encoded)
    encoded = Dense(10, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='encod2')(encoded)

    encoded= Dropout({{uniform(0, 1)}})(encoded)
    decoded = Dense(30, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='decoder1')(encoded)
    decoded = Dense(80, activation='relu',
                    kernel_initializer='glorot_uniform',
                    name='decoder2')(decoded)
    decoded = Dense(x_train.shape[1], activation='linear',
                    kernel_initializer='glorot_uniform',
                    name='decoder3')(decoded)


    model = Model(inputs=input2, outputs=decoded)
    model.summary()

    adam=Adam(lr={{uniform(0.0001, 0.01)}})
    model.compile(loss='mse', metrics=['acc'],
                  optimizer=adam)
    callbacks_list = [
        callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=10,
                                restore_best_weights=True),
    ]
    XTraining, XValidation, YTraining, YValidation = train_test_split(x_train, y_train, stratify=y_train,
                                                                      test_size=0.2)  # before model building

    tic = time.time()
    history = model.fit(XTraining, XTraining,  # autoencoder: the reconstruction target is the input itself
                        batch_size={{choice([32, 64, 128, 256, 512])}},
                        epochs=150,
                        verbose=1,
                        callbacks=callbacks_list,
                        validation_data=(XValidation, XValidation))

    toc = time.time()

    # lowest validation loss across the training epochs
    score = np.amin(history.history['val_loss'])
    print('Best validation loss of epoch:', score)

    print('Best score', global_config.best_score)

    if global_config.best_score > score:
        global_config.best_score = score
        global_config.best_model = model
        global_config.best_numparameters = model.count_params()
        global_config.best_time = toc - tic

    return {'loss': score, 'status': STATUS_OK, 'n_epochs': len(history.history['loss']),
            'n_params': model.count_params(), 'model': global_config.best_model, 'time': toc - tic}
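# Note: once trained, the bottleneck can be reused as a feature extractor by
# cutting the graph at the named 'encod2' layer (a sketch to run inside
# Autoencoder above; model refers to the trained autoencoder):
from keras.models import Model

encoder = Model(inputs=model.input, outputs=model.get_layer('encod2').output)
codes = encoder.predict(x_train)  # shape: (n_samples, 10)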
Example #41
def CNN(x_train, y_train, x_test, y_test):
    input_shape = (x_train.shape[1], x_train.shape[2])
    print(input_shape)
    input2 = Input(input_shape)

    l1 = Conv1D(64,
                kernel_size=1,
                activation='relu',
                name='conv0',
                kernel_initializer='glorot_uniform')(input2)
    l1 = Dropout({{uniform(0, 1)}})(l1)

    l1 = Flatten()(l1)

    l1 = Dense(320, activation='relu', kernel_initializer='glorot_uniform')(l1)
    # l1= BatchNormalization()(l1)

    l1 = Dropout({{uniform(0, 1)}})(l1)

    l1 = Dense(160, activation='relu', kernel_initializer='glorot_uniform')(l1)
    l1 = Dropout({{uniform(0, 1)}})(l1)

    softmax = Dense(2,
                    activation='softmax',
                    kernel_initializer='glorot_uniform')(l1)

    adam = Adam(lr={{uniform(0.0001, 0.01)}})
    model = Model(inputs=input2, outputs=softmax)
    #model.summary()
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=adam)

    callbacks_list = [
        callbacks.EarlyStopping(monitor='val_loss',
                                min_delta=0.0001,
                                patience=10,
                                restore_best_weights=True),
    ]

    XTraining, XValidation, YTraining, YValidation = train_test_split(
        x_train, y_train, stratify=y_train,
        test_size=0.2)  # before model building
    tic = time.time()
    h = model.fit(XTraining,  # train on the split only; x_train would leak the validation data
                  YTraining,
                  batch_size={{choice([32, 64, 128, 256, 512])}},
                  epochs=150,
                  verbose=2,
                  callbacks=callbacks_list,
                  validation_data=(XValidation, YValidation))

    toc = time.time()

    score = min(h.history['val_loss'])
    print('Score', score)
    print(x_test.shape)
    predictions = model.predict(x_test, verbose=1)
    y_pred = np.argmax(predictions, axis=1)
    cmTest = confusion_matrix(y_test, y_pred)
    global_config.savedScore.append(cmTest)
    predictions = model.predict(XValidation, verbose=1)
    y_pred = np.argmax(predictions, axis=1)
    val = np.argmax(YValidation, axis=1)
    print(val)
    cmTrain = confusion_matrix(val, y_pred)
    global_config.savedTrain.append(cmTrain)

    print('Best score', global_config.best_score)

    if global_config.best_score > score:
        global_config.best_score = score
        global_config.best_model = model
        global_config.best_numparameters = model.count_params()
        global_config.best_time = toc - tic

    return {
        'loss': score,
        'status': STATUS_OK,
        'n_epochs': len(h.history['loss']),
        'n_params': model.count_params(),
        'model': global_config.best_model,
        'time': toc - tic
    }
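# Note: the confusion matrices stored above can be reduced to scalar metrics
# without re-running the model; a small sketch on a made-up 2x2 matrix of the
# kind sklearn's confusion_matrix produces:
import numpy as np

cm = np.array([[50, 10],
               [5, 35]])
accuracy = cm.trace() / cm.sum()                   # (50 + 35) / 100 = 0.85
recall_per_class = cm.diagonal() / cm.sum(axis=1)  # rows are the true labels
print(accuracy, recall_per_class)                  # 0.85 [0.833... 0.875]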
def model(train_1, val_1):
    import tensorflow as tf
    from tensorflow import keras
    import time
    import os

    def get_run_logdir(root_logdir, description):
        run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
        return os.path.join(root_logdir, description + run_id)

    root_logdir = os.path.join(os.curdir, "logs")

    model = keras.Sequential()
    model.add(
        keras.layers.Conv2D(input_shape=(32, 32, 3),
                            data_format='channels_last',
                            filters=64,
                            strides=1,
                            kernel_size=7,
                            padding='same',
                            activation='relu'))
    model.add(keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

    model.add(
        keras.layers.Conv2D(filters=128,
                            strides=1,
                            kernel_size=3,
                            padding='same',
                            activation='relu'))
    model.add(
        keras.layers.Conv2D(filters=128,
                            strides=1,
                            kernel_size=3,
                            padding='same',
                            activation='relu'))
    model.add(keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

    model.add(
        keras.layers.Conv2D(filters=256,
                            strides=1,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(
                                {{choice([0.0, 0.001, 0.005, 0.01])}}),
                            bias_regularizer=keras.regularizers.l2(
                                {{choice([0.0, 0.001, 0.005, 0.01])}})))
    model.add(
        keras.layers.Conv2D(filters=256,
                            strides=1,
                            kernel_size=3,
                            padding='same',
                            activation='relu',
                            kernel_regularizer=keras.regularizers.l2(
                                {{choice([0.0, 0.001, 0.005, 0.01])}}),
                            bias_regularizer=keras.regularizers.l2(
                                {{choice([0.0, 0.001, 0.005, 0.01])}})))
    model.add(keras.layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))

    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dropout({{choice([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])}}))
    model.add(keras.layers.Dense(units=128, activation='relu'))
    model.add(keras.layers.Dropout({{choice([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])}}))
    model.add(keras.layers.Dense(units=64, activation='relu'))
    model.add(keras.layers.Dropout({{choice([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])}}))
    model.add(keras.layers.Dense(units=10, activation='softmax'))

    tensorboard_cb = keras.callbacks.TensorBoard(
        get_run_logdir(root_logdir, "cifar10_model_hyperas"))
    stopping_cb = keras.callbacks.EarlyStopping(patience=5,
                                                restore_best_weights=True)

    model.compile(loss=keras.losses.sparse_categorical_crossentropy,
                  optimizer=tf.optimizers.SGD(learning_rate={{uniform(0, 0.01)}}),
                  metrics=['accuracy'])
    History = model.fit(x=train_1,
                        steps_per_epoch=1407,
                        validation_data=val_1,
                        validation_steps=157,
                        epochs=500,
                        callbacks=[tensorboard_cb, stopping_cb],
                        verbose=2)
    _, acc = model.evaluate(val_1, steps=157, verbose=2)

    print('Validation accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
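
For context, a function like the one above is never called directly: hyperas parses its source, replaces every {{...}} template with a hyperopt search-space expression, and drives the optimization. A minimal driver sketch, assuming the enclosing function above is named model and that a data() function returning train_1 and val_1 exists (make_cifar10_datasets is a hypothetical helper, not part of the original):

from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # must return whatever the model function's signature expects;
    # here, the train_1 and val_1 datasets used by fit() above
    train_1, val_1 = make_cifar10_datasets()  # assumed helper, not defined in this document
    return train_1, val_1

best_run, best_model = optim.minimize(model=model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print("Best hyperparameters found:", best_run)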
Example #43
def fHyperasTrain(X_train, Y_train, X_test, Y_test, patchSize):
    # explicitly stated here instead of cnn = createModel() to allow optimization
    cnn = Sequential()
    cnn.add(
        Convolution2D(
            32,
            14,
            14,
            init='normal',
            # activation='sigmoid',
            weights=None,
            border_mode='valid',
            subsample=(1, 1),
            W_regularizer=l2(1e-6),
            input_shape=(1, patchSize[0, 0], patchSize[0, 1])))
    cnn.add(Activation('relu'))

    cnn.add(
        Convolution2D(
            64,
            7,
            7,
            init='normal',
            # activation='sigmoid',
            weights=None,
            border_mode='valid',
            subsample=(1, 1),
            W_regularizer=l2(1e-6)))
    cnn.add(Activation('relu'))
    cnn.add(
        Convolution2D(
            64,  #learning rate: 0.1 -> 76%
            3,
            3,
            init='normal',
            # activation='sigmoid',
            weights=None,
            border_mode='valid',
            subsample=(1, 1),
            W_regularizer=l2(1e-6)))
    cnn.add(Activation('relu'))

    cnn.add(
        Convolution2D(
            128,  #learning rate: 0.1 -> 76%
            3,
            3,
            init='normal',
            # activation='sigmoid',
            weights=None,
            border_mode='valid',
            subsample=(1, 1),
            W_regularizer=l2(1e-6)))
    cnn.add(Activation('relu'))

    #cnn.add(pool2(pool_size=(2, 2), strides=None, border_mode='valid', dim_ordering='th'))

    cnn.add(Flatten())
    #cnn.add(Dense(input_dim= 100,
    #              output_dim= 100,
    #              init = 'normal',
    #              #activation = 'sigmoid',
    #              W_regularizer='l2'))
    #cnn.add(Activation('sigmoid'))
    cnn.add(
        Dense(
            input_dim=100,
            output_dim=2,
            init='normal',
            #activation = 'sigmoid',
            W_regularizer='l2'))
    cnn.add(Activation('softmax'))

    opti = SGD(lr={{uniform(0.001, 0.1)}},
               momentum=1e-8,
               decay=0.1,
               nesterov=True)
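    # Note: classic Keras SGD uses time-based decay, lr_t = lr / (1 + decay * iterations),
    # so decay=0.1 shrinks the step size sharply over the first few hundred updates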
    cnn.compile(loss='categorical_crossentropy', optimizer=opti)

    epochs = 300

    result = cnn.fit(X_train,
                     Y_train,
                     batch_size={{choice([64, 128])}},
                     nb_epoch=epochs,
                     show_accuracy=True,
                     verbose=2,
                     validation_data=(X_test, Y_test))
    score_test, acc_test = cnn.evaluate(X_test, Y_test, verbose=0)

    return {
        'loss': -acc_test,
        'status': STATUS_OK,
        'model': cnn,
        'trainresult': result,
        'score_test': score_test
    }
def Conv2DMultiTaskIn1(x_train, y_train, ddg_train, x_test, y_test, ddg_test,
                       class_weights_dict, obj):
    K.clear_session()
    summary = False
    verbose = 0
    # setHyperParams------------------------------------------------------------------------------------------------
    batch_size = 64
    epochs = {{choice([50, 100, 150, 200, 250])}}

    lr = {{loguniform(np.log(1e-4), np.log(1e-2))}}

    optimizer = {{choice(['adam', 'sgd', 'rmsprop'])}}

    activator = {{choice(['elu', 'relu', 'tanh'])}}

    basic_conv2D_layers = {{choice([1, 2])}}
    basic_conv2D_filter_num = {{choice([16, 32])}}

    loop_dilation2D_layers = {{choice([2, 4, 6])}}
    loop_dilation2D_filter_num = {{choice([16, 32, 64])}}  #used in the loop
    loop_dilation2D_dropout_rate = {{uniform(0.001, 0.35)}}
    dilation_lower = 2
    dilation_upper = 16

    ddg_reduce_layers = {{choice([3, 4, 5])}}  # conv 5 times: 120 => 60 => 30 => 15 => 8 => 4
    y_reduce_layers = {{choice([3, 4, 5])}}  # conv 5 times: 120 => 60 => 30 => 15 => 8 => 4
    ddg_reduce_conv2D_filter_num = {{choice([16, 32, 64])}}  # used to reduce dimension
    y_reduce_conv2D_filter_num = {{choice([8, 16, 32])}}  # used to reduce dimension
    reduce_conv2D_dropout_rate = {{uniform(0.001, 0.25)}}
    ddg_residual_stride = 2
    y_residual_stride = 2

    ddg_dense1_num = {{choice([64, 128, 256])}}
    ddg_dense2_num = {{choice([32, 64])}}
    y_dense1_num = {{choice([32, 64, 128])}}
    y_dense2_num = {{choice([16, 32])}}

    drop_num = {{uniform(0.0001, 0.3)}}

    kernel_size = (3, 3)
    pool_size = (2, 2)
    initializer = 'random_uniform'
    padding_style = 'same'
    loss_type = ['mse', 'binary_crossentropy']
    loss_weights = [0.5, 10]
    metrics = (['mae'], ['accuracy'])

    my_callbacks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.8,
            patience=10,
        )
    ]

    if lr > 0:
        if optimizer == 'adam':
            chosen_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosen_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosen_optimizer = optimizers.RMSprop(lr=lr)

    # build --------------------------------------------------------------------------------------------------------
    ## basic Conv2D
    input_layer = Input(shape=x_train.shape[1:])
    y = layers.Conv2D(basic_conv2D_filter_num,
                      kernel_size,
                      padding=padding_style,
                      kernel_initializer=initializer,
                      activation=activator)(input_layer)
    y = layers.BatchNormalization(axis=-1)(y)
    if basic_conv2D_layers == 2:
        y = layers.Conv2D(basic_conv2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)
    ## loop with Conv2D with dilation (padding='same')
    for _ in range(loop_dilation2D_layers):
        y = layers.Conv2D(loop_dilation2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          dilation_rate=dilation_lower,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(loop_dilation2D_dropout_rate)(y)
        dilation_lower *= 2
        if dilation_lower > dilation_upper:
            dilation_lower = 2
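        # the dilation rate therefore cycles 2, 4, 8, 16, 2, 4, ... across the stacked layers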

    ## Conv2D blocks with pooling (padding='same') plus residual shortcuts to reduce dimensions.
    ## for regressor branch
    y_ddg = layers.Conv2D(ddg_reduce_conv2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          kernel_initializer=initializer,
                          activation=activator)(y)
    y_ddg = layers.BatchNormalization(axis=-1)(y_ddg)
    y_ddg = layers.Dropout(reduce_conv2D_dropout_rate)(y_ddg)
    y_ddg = layers.MaxPooling2D(pool_size, padding=padding_style)(y_ddg)
    residual_ddg = layers.Conv2D(ddg_reduce_conv2D_filter_num,
                                 1,
                                 strides=ddg_residual_stride,
                                 padding='same')(input_layer)
    y_ddg = layers.add([y_ddg, residual_ddg])
    ddg_residual_stride *= 2
    for _ in range(ddg_reduce_layers - 1):
        y_ddg = layers.Conv2D(ddg_reduce_conv2D_filter_num,
                              kernel_size,
                              padding=padding_style,
                              kernel_initializer=initializer,
                              activation=activator)(y_ddg)
        y_ddg = layers.BatchNormalization(axis=-1)(y_ddg)
        y_ddg = layers.Dropout(reduce_conv2D_dropout_rate)(y_ddg)
        y_ddg = layers.MaxPooling2D(pool_size, padding=padding_style)(y_ddg)
        residual_ddg = layers.Conv2D(ddg_reduce_conv2D_filter_num,
                                     1,
                                     strides=ddg_residual_stride,
                                     padding='same')(input_layer)
        y_ddg = layers.add([y_ddg, residual_ddg])
        ddg_residual_stride *= 2
    ## flat & dense
    y_ddg = layers.Flatten()(y_ddg)
    y_ddg = layers.Dense(ddg_dense1_num, activation=activator)(y_ddg)
    y_ddg = layers.BatchNormalization(axis=-1)(y_ddg)
    y_ddg = layers.Dropout(drop_num)(y_ddg)
    y_ddg = layers.Dense(ddg_dense2_num, activation=activator)(y_ddg)
    y_ddg = layers.BatchNormalization(axis=-1)(y_ddg)
    y_ddg = layers.Dropout(drop_num)(y_ddg)
    ddg_prediction = layers.Dense(1, name='ddg')(y_ddg)
    # class_prediction = layers.Dense(len(np.unique(y_train)), activation='softmax', name='class')(y_ddg)

    ## for classifier branch
    y_y = layers.Conv2D(y_reduce_conv2D_filter_num,
                        kernel_size,
                        padding=padding_style,
                        kernel_initializer=initializer,
                        activation=activator)(y)
    y_y = layers.BatchNormalization(axis=-1)(y_y)
    y_y = layers.Dropout(reduce_conv2D_dropout_rate)(y_y)
    y_y = layers.MaxPooling2D(pool_size, padding=padding_style)(y_y)
    residual_y = layers.Conv2D(y_reduce_conv2D_filter_num,
                               1,
                               strides=y_residual_stride,
                               padding='same')(input_layer)
    y_y = layers.add([y_y, residual_y])
    y_residual_stride *= 2
    for _ in range(y_reduce_layers - 1):
        y_y = layers.Conv2D(y_reduce_conv2D_filter_num,
                            kernel_size,
                            padding=padding_style,
                            kernel_initializer=initializer,
                            activation=activator)(y_y)
        y_y = layers.BatchNormalization(axis=-1)(y_y)
        y_y = layers.Dropout(reduce_conv2D_dropout_rate)(y_y)
        y_y = layers.MaxPooling2D(pool_size, padding=padding_style)(y_y)
        residual_y = layers.Conv2D(y_reduce_conv2D_filter_num,
                                   1,
                                   strides=y_residual_stride,
                                   padding='same')(input_layer)
        y_y = layers.add([y_y, residual_y])
        y_residual_stride *= 2
    ## flat & dense
    y_y = layers.Flatten()(y_y)
    y_y = layers.Dense(y_dense1_num, activation=activator)(y_y)
    y_y = layers.BatchNormalization(axis=-1)(y_y)
    y_y = layers.Dropout(drop_num)(y_y)
    y_y = layers.Dense(y_dense2_num, activation=activator)(y_y)
    y_y = layers.BatchNormalization(axis=-1)(y_y)
    y_y = layers.Dropout(drop_num)(y_y)
    class_prediction = layers.Dense(len(np.unique(y_train)),
                                    activation='softmax',
                                    name='class')(y_y)

    model = models.Model(inputs=input_layer,
                         outputs=[ddg_prediction, class_prediction])

    if summary:
        model.summary()

    model.compile(optimizer=chosen_optimizer,
                  loss={
                      'ddg': loss_type[0],
                      'class': loss_type[1]
                  },
                  loss_weights={
                      'ddg': loss_weights[0],
                      'class': loss_weights[1]
                  },
                  metrics={
                      'ddg': metrics[0],
                      'class': metrics[1]
                  })

    # K.set_session(tf.Session(graph=model.output.graph))
    # init = K.tf.global_variables_initializer()
    # K.get_session().run(init)

    result = model.fit(
        x=x_train,
        y={
            'ddg': ddg_train,
            'class': y_train
        },
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=my_callbacks,
        validation_data=(x_test, {
            'ddg': ddg_test,
            'class': y_test
        }),
        shuffle=True,
        class_weight={
            'ddg': None,
            'class': class_weights_dict
        },
    )

    # print('\n----------History:\n%s'%result.history)

    if obj == 'test_report':
        pearson_coeff, std, acc, mcc, recall_p, recall_n, precision_p, precision_n = test_report(
            model, x_test, y_test, ddg_test)
        print(
            '\n----------Predict:'
            '\npearson_coeff: %s, std: %s'
            '\nacc: %s, mcc: %s, recall_p: %s, recall_n: %s, precision_p: %s, precision_n: %s'
            % (pearson_coeff, std, acc, mcc, recall_p, recall_n, precision_p,
               precision_n))

        objective = pearson_coeff * 2 + std + acc + 5 * mcc + recall_p + recall_n + precision_p + precision_n
        return {'loss': -objective, 'status': STATUS_OK}

    elif obj == 'val':
        validation_mae = np.amin(result.history['val_ddg_mean_absolute_error'])  # best epoch = lowest MAE
        validation_acc = np.amax(result.history['val_class_acc'])
        print('Best validation mae: %s, acc: %s' %
              (validation_mae, validation_acc))
        return {
            'loss': validation_mae - 2 * validation_acc,
            'status': STATUS_OK
        }
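
A detail worth noting in the two reduce branches above: every block halves the spatial grid with MaxPooling2D ('same' padding), so the 1x1 residual convolution taken directly from the raw input must double its stride (2, 4, 8, ...) to land on the same grid, which is exactly what ddg_residual_stride and y_residual_stride do. A small, purely illustrative sanity check of that arithmetic (the 120 input size comes from the comment in the code above):

import math

def residual_grid_sizes(input_size=120, reduce_layers=5):
    # compare the pooled main branch against the strided 1x1 shortcut ('same' padding)
    main, stride = input_size, 2
    for k in range(1, reduce_layers + 1):
        main = math.ceil(main / 2)                 # one MaxPooling2D step on the main branch
        shortcut = math.ceil(input_size / stride)  # 1x1 conv with stride 2**k from the input
        print("block %d: main=%d shortcut=%d" % (k, main, shortcut))
        stride *= 2

residual_grid_sizes()  # both paths agree: 60, 30, 15, 8, 4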
Example #45
def create_model(x_train, y_train, x_validate, y_validate, x_test, y_test):
    import time
    import tabulate
    start = int(time.time())
    np.random.seed(1234)
    try:
        '''
        b) with and without a hidden fully-connected layer,
        c) number of units in the hidden fc layer < 200,
        d) different learning rates for adam (explore on a log scale - 0.001, 0.0001, etc),
        e) maxpooling widths in the 10-60 range,
        f) conv widths in the 10-40 range.
        '''
        model = Sequential()
        kernel_size1 = {{choice([13, 15, 19, 25, 39])}}
        kernel_size2 = kernel_size1 - 2
        model.add(
            Conv1D(filters={{choice([50, 100, 250, 500, 1000])}},
                   kernel_size=(kernel_size1),
                   input_shape=(1000, 4)))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))

        ## a) number of layers between 1 and 4,

        #decide on how many conv layers in model
        n_conv = {{choice([0, 1, 2, 3])}}

        filter_dim = [kernel_size1, kernel_size2, kernel_size2]

        for i in range(n_conv):
            model.add(
                Conv1D(filters={{choice([50, 100, 250, 500, 1000])}},
                       kernel_size=(filter_dim[i])))
            model.add(BatchNormalization(axis=-1))
            model.add(Activation('relu'))

        model.add(MaxPooling1D(pool_size=({{choice([10, 30, 60])}})))

        model.add(Flatten())
        n_dense = {{choice([0, 1, 2, 3])}}
        for i in range(n_dense):
            model.add(Dense({{choice([50, 100, 200])}}))
            model.add(BatchNormalization(axis=-1))
            model.add(Activation('relu'))
            model.add(Dropout({{choice([0.2, 0.4, 0.6])}}))

        model.add(Dense(4))
        model.add(Activation("sigmoid"))

        adam = keras.optimizers.Adam(lr={{choice([0.01, 0.001, 0.0001])}})

        model.compile(loss=keras_genomics.losses.ambig_binary_crossentropy,
                      optimizer=adam,
                      metrics=['accuracy'])
        print("compiled!")
        sys.stdout.flush()

        # added to collect optimization results
        if 'results' not in globals():
            global results
            results = []

        result = model.fit(x_train,
                           y_train,
                           batch_size=200,
                           epochs=20,
                           verbose=2,
                           validation_data=(x_validate, y_validate))
        print("trained!")
        sys.stdout.flush()

        loss, acc = model.evaluate(x_validate, y_validate, verbose=2)
        print("Validation loss:", loss, "Validation acc:", acc)
        sys.stdout.flush()

        # added to collect results
        valLoss = result.history['val_loss'][-1]
        parameters = space
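        # 'space' is injected by hyperas: the function source is rewritten at run time so the
        # sampled value of every {{...}} template is available here as a plain dict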
        parameters["loss"] = valLoss
        parameters["time"] = int(time.time() - start)
        results.append(parameters)
        print(parameters)
        if len(results) % 10 == 0:
            tab = tabulate.tabulate(results,
                                    headers="keys",
                                    tablefmt="fancy_grid",
                                    floatfmt=".8f")
            print(tab.encode('utf-8'))
        else:
            tab = tabulate.tabulate(results[-1:],
                                    headers="keys",
                                    tablefmt="fancy_grid",
                                    floatfmt=".8f")
            print(tab.encode('utf-8'))
        print("model %d done ----------------" % len(results))
        sys.stdout.flush()

    except Exception as e:
        loss = 1000
        acc = 0
        print("failed to run model:", e)
        sys.stdout.flush()

        model = None

    return {'loss': loss, 'status': STATUS_OK, 'model': model}
Example #46
def model(checkpoint_path, embed_dim, n_classes, my_training_batch_generator, my_validation_batch_generator, train_steps, valid_steps):
    
    
    ################### SENTENCE - ASPECT INPUT ###################################
    sentence_embed = Input(shape=(30, embed_dim,), name="sentence_input")
    
    lstm_units = {{choice([50, 100, 150, 200])}}
    
    sentence_forward_layer = LSTM(lstm_units, activation='relu', return_sequences=False)
    sentence_backward_layer = LSTM(lstm_units, activation='relu', return_sequences=False, go_backwards=True)
    sentence_ = Bidirectional(sentence_forward_layer, backward_layer=sentence_backward_layer, 
                              name="BLSTM_sent")(sentence_embed)
    
    ################### CONCAT AND FULLY CONNECTED ################################
    
    out_ = Dense({{choice([100, 200, 400, 600, 800])}}, activation='relu', name='dense')(sentence_)
    out_ = Dropout(0.5, name="dropout")(out_)
    
    # Choose how many fully connected blocks to stack: one, two, or three
    fc_number = {{choice(['one', 'two', 'three'])}}
    dense_2 = {{choice([50, 100, 200, 400, 600])}}
    dense_3 = {{choice([25, 50, 100, 200, 400])}}
    if fc_number == 'two':
        out_ = Dense(dense_2, activation='relu', name='dense1')(out_)
        out_ = Dropout(0.5, name="dropout1")(out_)
    
    elif fc_number == 'three':
        out_ = Dense(dense_2, activation='relu', name='dense1')(out_)
        out_ = Dropout(0.5, name="dropout1")(out_)
        
        out_ = Dense(dense_3, activation='relu', name='dense2')(out_)
        out_ = Dropout(0.5, name="dropout2")(out_)

    out = Dense(n_classes, activation='softmax', name='out')(out_)

    ################### DEFINE AND COMPILE MODEL ##################################
    model = Model(inputs=[sentence_embed], outputs=out)

    model.compile(loss=focal_loss(gamma={{choice([1.0, 2.0, 3.0, 4.0, 5.0])}}, alpha=1.0),
                  metrics=['acc', AUC(curve='PR', multi_label=False, name='auc')],
                  optimizer=Adam(0.001))

    model.summary()
    
    """## Fit model"""
    checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=True, save_weights_only=True)
    earlystopper = EarlyStopping(monitor="val_auc", mode="max", patience=5, verbose=1)

    history = model.fit(my_training_batch_generator,
                        steps_per_epoch = train_steps,
                        epochs = 100,
                        verbose = 2,
                        validation_data = my_validation_batch_generator,
                        validation_steps = valid_steps,
                        callbacks=[checkpointer, earlystopper])
    
    model.load_weights(checkpoint_path)
    
    score = model.evaluate(my_validation_batch_generator, verbose=0)

    loss, acc, auc = score

    return {'loss':-auc, 'status': STATUS_OK, 'model': model}
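
The focal_loss factory used in the compile call above is defined elsewhere in the project. A minimal sketch consistent with the call site (gamma and alpha keyword arguments, returning a Keras-compatible loss) could look like the following; this is an assumption, not the original implementation:

import tensorflow.keras.backend as K

def focal_loss(gamma=2.0, alpha=1.0):
    # focal loss (Lin et al., 2017): down-weights well-classified examples by (1 - p)**gamma
    def loss(y_true, y_pred):
        eps = K.epsilon()
        y_pred = K.clip(y_pred, eps, 1.0 - eps)
        cross_entropy = -y_true * K.log(y_pred)
        return K.sum(alpha * K.pow(1.0 - y_pred, gamma) * cross_entropy, axis=-1)
    return loss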
Example #47
def model(datagen,X_train,Y_train,X_val,Y_val):
    num_layers1 = {{choice([48, 64, 96])}}
    num_layers2 = {{choice([96, 128, 192])}}
    num_layers3 = {{choice([192, 256, 512])}}
    lrate = {{choice([0.0001, 0.0004,0.0008])}}
    epochs = 60
    batch_size = 64

    inputs = Input((28, 28, 1))
    noise = GaussianNoise(0.2)(inputs)
    conv1 = Conv2D(num_layers1, (3, 3), activation=None, padding='same')(noise)
    conv1 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv1)
    conv1 = Activation('relu')(conv1)

    conv2 = Conv2D(num_layers1, (3, 3), activation=None, padding='same')(conv1)
    conv2 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv2)
    conv2 = Activation('relu')(conv2)

    conv3 = Conv2D(num_layers1, (3, 3), activation=None, padding='same')(conv2)
    conv3 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv3)
    conv3 = Activation('relu')(conv3)
    conv3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv3 = Dropout({{uniform(0, 0.5)}})(conv3)

    conv4 = Conv2D(num_layers2, (3, 3), activation=None, padding='same')(conv3)
    conv4 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv4)
    conv4 = Activation('relu')(conv4)

    conv5 = Conv2D(num_layers2, (3, 3), activation=None, padding='same')(conv4)
    conv5 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv5)
    conv5 = Activation('relu')(conv5)

    conv6 = Conv2D(num_layers2, (3, 3), activation=None, padding='same')(conv5)
    conv6 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv6)
    conv6 = Activation('relu')(conv6)
    conv6 = MaxPooling2D(pool_size=(2, 2))(conv6)
    conv6 = Dropout({{uniform(0, 0.5)}})(conv6)

    conv7 = Conv2D(num_layers3, (3, 3), activation=None, padding='same')(conv6)
    conv7 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv7)
    conv7 = Activation('relu')(conv7)

    conv8 = Conv2D(num_layers3, (3, 3), activation=None, padding='same')(conv7)
    conv8 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv8)
    conv8 = Activation('relu')(conv8)

    conv9 = Conv2D(num_layers3, (3, 3), activation=None, padding='same')(conv8)
    conv9 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv9)
    conv9 = Activation('relu')(conv9)
    conv9 = MaxPooling2D(pool_size=(2, 2))(conv9)
    conv9 = Dropout({{uniform(0, 0.5)}})(conv9)
    conv9 = Flatten()(conv9)

    dout1 = Dense(256, activation='relu')(conv9)
    dout1 = normalization.BatchNormalization(epsilon=2e-05, axis=-1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(dout1)
    dout1 = Dropout({{uniform(0, 0.5)}})(dout1)
    dout2 = Dense(10, activation='softmax')(dout1)
    model = Model(inputs=inputs, outputs=dout2)

    optimizer=Adam(lr=lrate, beta_1=0.9, beta_2=0.95, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    save_path=os.getcwd()
    checkpointer = []
    #checkpointer.append(ModelCheckpoint(filepath=os.path.join(save_path,'best_model.hdf5'), verbose=1, save_best_only=True))
    checkpointer.append(ReduceLROnPlateau(monitor='val_acc', patience=8, verbose=1, factor=0.5, min_lr=0.00001))
    checkpointer.append(EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None, restore_best_weights=True))

    history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                                  epochs=epochs, validation_data=(X_val, Y_val),
                                  verbose=2,  # verbose must be 0, 1 or 2; the original value 32 is invalid
                                  steps_per_epoch=X_train.shape[0] // batch_size,
                                  callbacks=checkpointer)
    score, acc = model.evaluate(X_val, Y_val, verbose=0)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def create_model(u_train, x_train, y_train, u_test, x_test, y_test):
    # Logistic regression for learning the attention parameters with a standalone feature as input
    input_attention = Input(shape=(globalvars.nb_attention_param, ))
    u = Dense(globalvars.nb_attention_param,
              activation='softmax')(input_attention)

    # Bi-directional Long Short-Term Memory for learning the temporal aggregation
    # Input shape: (time_steps, features,)
    input_feature = Input(shape=(globalvars.max_len, globalvars.nb_features))
    x = Masking(mask_value=-100.0)(input_feature)

    x = Dense(globalvars.nb_hidden_units, activation='relu')(x)
    x = Dropout(globalvars.dropout_rate)(x)

    x = Dense(globalvars.nb_hidden_units, activation='relu')(x)
    x = Dropout(globalvars.dropout_rate)(x)

    y = Bidirectional(
        LSTM(globalvars.nb_lstm_cells,
             return_sequences=True,
             dropout=globalvars.dropout_rate))(x)

    # To compute the final weights for the frames which sum to unity
    alpha = dot([u, y], axes=-1)
    alpha = Activation('softmax')(alpha)

    # Weighted pooling to get the utterance-level representation
    z = dot([alpha, y], axes=1)
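    # Shape check (the dot over the last axis requires nb_attention_param == 2 * nb_lstm_cells):
    # u is (batch, 2*nb_lstm_cells), y is (batch, time, 2*nb_lstm_cells),
    # alpha is (batch, time), and z collapses the time axis back to (batch, 2*nb_lstm_cells)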

    # Get posterior probability for each emotional class
    output = Dense(globalvars.nb_classes, activation='softmax')(z)

    model = Model(inputs=[input_attention, input_feature], outputs=output)

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.8, nesterov=True)
    choice_val = {{choice(['adam', 'rmsprop', sgd])}}
    if choice_val == 'adam':
        optimizer = optimizers.Adam()
    elif choice_val == 'rmsprop':
        optimizer = optimizers.RMSprop()
    else:
        optimizer = sgd

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=optimizer)

    globalvars.globalVar += 1

    file_path = 'weights_blstm_hyperas_' + str(globalvars.globalVar) + '.h5'
    callback_list = [
        EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='auto'),
        ModelCheckpoint(filepath=file_path,
                        monitor='val_acc',
                        save_best_only=True,
                        verbose=1,
                        mode='max')
    ]

    hist = model.fit([u_train, x_train],
                     y_train,
                     batch_size=128,
                     epochs={{choice([200, 300])}},
                     verbose=2,
                     callbacks=callback_list,
                     validation_data=([u_test, x_test], y_test))
    h = hist.history
    acc = np.asarray(h['acc'])
    loss = np.asarray(h['loss'])
    val_loss = np.asarray(h['val_loss'])
    val_acc = np.asarray(h['val_acc'])

    acc_and_loss = np.column_stack((acc, loss, val_acc, val_loss))
    save_file_blstm = 'blstm_run_' + str(globalvars.globalVar) + '.txt'
    np.savetxt(save_file_blstm, acc_and_loss)

    score, accuracy = model.evaluate([u_test, x_test],
                                     y_test,
                                     batch_size=128,
                                     verbose=1)
    print("Final validation accuracy: %s" % accuracy)

    return {'loss': -accuracy, 'status': STATUS_OK, 'model': model}
Example #49
def model_op(gap_generator, X_test, Y_test, class_weights, images):
    class StopTraining(callbacks.Callback):
        def __init__(self, monitor='val_loss', patience=10, goal=0.5):
            super(StopTraining, self).__init__()
            self.monitor = monitor
            self.patience = patience
            self.goal = goal

        def on_epoch_end(self, epoch, logs=None):
            current_val_acc = (logs or {}).get(self.monitor)

            # abort the run if the monitored metric is still below the goal at the patience epoch
            if current_val_acc is not None and current_val_acc < self.goal and epoch == self.patience:
                self.model.stop_training = True

    args = arg_parameters()
    data_set = args.data
    model_name = secrets.token_hex(6)
    total_train_images = images.count - len(X_test)
    n_classes = len(images.classes)
    log = {'model_name': model_name}

    try:
        model = Sequential()

        filter_0 = {{choice([32, 64, 128, 256, 512])}}
        log['filter_0'] = filter_0
        kernel_0 = {{choice([3, 4])}}
        log['kernel_0'] = kernel_0
        activation_0 = {{choice(['relu', 'tanh'])}}
        log['activation_0'] = activation_0
        model.add(
            layers.Conv2D(filters=filter_0,
                          kernel_size=kernel_0,
                          activation=activation_0,
                          input_shape=(128, 128, 3)))

        # conditional_0 = {{choice([True, False])}}
        # log['conditional_0'] = conditional_0
        # if conditional_0:
        #     layers.BatchNormalization()
        # activity_regularizer=regularizers.l1(0.001)
        # kernel_regularizer=regularizers.l2(0.001)

        conditional_0 = {{choice([True, False])}}
        log['conditional_0'] = conditional_0
        if conditional_0:
            model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        conditional_1 = {{choice([True, False])}}
        log['conditional_1'] = conditional_1
        if conditional_1:
            dropout_0 = {{uniform(0, 1)}}
            log['dropout_0'] = dropout_0
            model.add(layers.Dropout(dropout_0))

        range_0 = {{choice([2, 3])}}
        log['range_0'] = range_0
        for i in range(1, range_0 + 1):  # 1-based index for the log keys
            filters = {{choice([32, 64, 128, 256, 512])}}
            log["filters_{}".format(i)] = filters
            kernel_sizes = {{choice([3, 4])}}
            log["kernel_sizes_{}".format(i)] = kernel_sizes
            activations = {{choice(['relu', 'tanh'])}}
            log["activations_{}".format(i)] = activations
            model.add(
                layers.Conv2D(filters=filters,
                              kernel_size=kernel_sizes,
                              activation=activations))

            conditionals_0 = {{choice([True, False])}}
            log["conditionals_0_{}".format(i)] = conditionals_0
            if conditionals_0:
                model.add(layers.MaxPooling2D(pool_size=(2, 2)))

            conditionals_1 = {{choice([True, False])}}
            log["conditionals_1_{}".format(i)] = conditionals_1
            if conditionals_1:
                dropouts = {{uniform(0, 1)}}
                log["dropouts_{}".format(i)] = dropouts
                model.add(layers.Dropout(dropouts))

        model.add(layers.Flatten())

        conditional_2 = {{choice([True, False])}}
        log['conditional_2'] = conditional_2
        if conditional_2:
            filter_1 = {{choice([32, 64, 128, 256, 512])}}
            log['filter_1'] = filter_1
            activation_1 = {{choice(['relu', 'tanh'])}}
            log['activation_1'] = activation_1
            model.add(layers.Dense(filter_1, activation=activation_1))

        dropout_1 = {{uniform(0, 1)}}
        log['dropout_1'] = dropout_1
        model.add(layers.Dropout(dropout_1))

        activation_2 = {{choice(['softmax', 'sigmoid'])}}
        log['activation_2'] = activation_2
        model.add(layers.Dense(n_classes, activation=activation_2))

        optimizer = {{choice(['adam', 'sgd'])}}
        log['optimizer'] = optimizer
        model.compile(loss='categorical_crossentropy',
                      metrics=['accuracy'],
                      optimizer=optimizer)

        # callbacks

        earlystopping = callbacks.EarlyStopping(monitor='val_loss', patience=5)
        stoptraining = StopTraining(monitor='val_accuracy',
                                    patience=30,
                                    goal=0.6)
        model_file = '{}/model/{}.h5'.format(data_set, model_name)
        model_checkpoint = callbacks.ModelCheckpoint(model_file,
                                                     monitor='val_accuracy',
                                                     save_best_only=True,
                                                     save_weights_only=False,
                                                     mode='max')
        log_dir = "{}/logs/fit/{}".format(data_set, model_name)
        tensorboard = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

        lr_reducer_factor = {{uniform(0, 1)}}
        log['lr_reducer_factor'] = lr_reducer_factor
        lr_reducer = callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                 factor=lr_reducer_factor,
                                                 cooldown=0,
                                                 patience=5,
                                                 min_lr=5e-7)

        minibatch_size = {{choice([16, 32, 64, 128])}}
        log['minibatch_size'] = minibatch_size

        model.fit_generator(
            generator=gap_generator(minibatch_size, images),
            validation_data=(X_test, Y_test),
            epochs=100,
            steps_per_epoch=int(total_train_images / minibatch_size),
            initial_epoch=0,
            verbose=0,
            class_weight=class_weights,
            # max_queue_size=20,
            # workers=24,
            # use_multiprocessing=True,
            callbacks=[
                model_checkpoint,
                earlystopping,
                tensorboard,
                # lr_reducer,
                stoptraining
            ])

        model = load_model(model_file)

        score, acc = model.evaluate(X_test, Y_test, verbose=0)
        print('Test accuracy:', acc)
        if acc > 0.85:
            print('log:', log)
        else:
            os.remove(model_file)
            shutil.rmtree(log_dir)

    except Exception as e:
        acc = 0.0
        model = Sequential()
        print('failed', e)

    del log
    K.clear_session()
    for _ in range(12):
        gc.collect()

    return {'loss': -acc, 'status': STATUS_OK, 'model': model_name}
Example #50
def model(train_generator, validation_generator):
    '''
    Model providing function:
 
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''

    optModel = TrainOptimizer(
        labelkey=('Sand', 'Branching', 'Mounding', 'Rock'),
        train_image_path='../Images/Training_Patches/',
        train_label_path='../Images/TrainingRef_Patches/',
        train_out_file='NeMO_train.txt',
        valid_image_path='../Images/Valid_Patches/',
        valid_label_path='../Images/ValidRef_Patches/',
        valid_out_file='NeMO_valid.txt',
        pixel_mean=[127.5, 127.5, 127.5],
        pixel_std=[127.5, 127.5, 127.5],
        num_classes=4,
        model=FCN,
        model_name="NeMO_FCN")

    model = optModel.model2opt()

    choiceval = {{choice(['adam', 'rmsprop', 'sgd'])}}
    if choiceval == 'adam':
        adam = Adam(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}})
        optim = adam
    elif choiceval == 'rmsprop':
        rmsprop = RMSprop(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}})
        optim = rmsprop
    else:
        sgd = SGD(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}})

        optim = sgd

    model.compile(loss='categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])

    globalvars.globalVar += 1

    filepath = './output/weights_' + optModel.model_name + 'hyperas' + str(
        globalvars.globalVar) + ".hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    csv_logger = CSVLogger('./output/hyperas_' + optModel.model_name +
                           'test_log.csv',
                           append=True,
                           separator=';')
    tensor_board_logfile = './logs/' + optModel.model_name + str(
        globalvars.globalVar)
    tensor_board = TensorBoard(log_dir=tensor_board_logfile,
                               histogram_freq=0,
                               write_graph=True)

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=80,
        epochs=3,
        validation_data=validation_generator,
        validation_steps=20,
        verbose=0,
        callbacks=[checkpoint, csv_logger, tensor_board])

    h1 = history.history
    acc_ = numpy.asarray(h1['acc'])
    loss_ = numpy.asarray(h1['loss'])
    val_loss_ = numpy.asarray(h1['val_loss'])
    val_acc_ = numpy.asarray(h1['val_acc'])
    parameters = space
    opt = numpy.asarray(parameters["choiceval"])
    if choiceval == 'adam':
        lr = numpy.asarray(parameters["lr"])
        decay = numpy.asarray(parameters["decay"])
    elif choiceval == 'rmsprop':
        lr = numpy.asarray(parameters["lr_1"])
        decay = numpy.asarray(parameters["decay_1"])
    elif choiceval == 'sgd':
        lr = numpy.asarray(parameters["lr_2"])
        decay = numpy.asarray(parameters["decay_2"])

    acc_plot = './plots/accuracy_run_' + str(globalvars.globalVar) + ".png"
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('run: ' + str(globalvars.globalVar) + " opt: " + str(opt) +
              " lr: " + str(lr) + " decay: " + str(decay))
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(acc_plot)
    plt.close()

    los_plot = './plots/losses_run_' + str(globalvars.globalVar) + ".png"
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('run: ' + str(globalvars.globalVar) + " opt: " + str(opt) +
              " lr: " + str(lr) + " decay: " + str(decay))
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(los_plot)
    plt.close()

    print("parameters for run " + str(globalvars.globalVar) + ":")
    print("-------------------------------")
    print(parameters)
    print("opt: ", opt)
    print("lr: ", lr)
    print("decay: ", decay)
    print("val_accuracy: ", val_acc_)

    acc_and_loss = numpy.column_stack((acc_, loss_, val_acc_, val_loss_))
    save_file_model = './output/' + optModel.model_name + '_run_' + '_' + str(
        globalvars.globalVar) + '.txt'
    numpy.savetxt(save_file_model, acc_and_loss, delimiter=",")

    score, acc = model.evaluate_generator(generator=validation_generator,
                                          steps=20,
                                          verbose=0)
    print('Test accuracy:', acc)

    save_file_params = './output/params_run_' + '_' + str(
        globalvars.globalVar) + '.txt'
    rownames = numpy.array([
        'Run', 'optimizer', 'learning_rate', 'decay', 'train_accuracy',
        'train_loss', 'val_accuracy', 'val_loss', 'test_accuracy'
    ])
    rowvals = (str(globalvars.globalVar), opt, lr, decay, acc_[-1], loss_[-1],
               val_acc_[-1], val_loss_[-1], acc)

    DAT = numpy.column_stack((rownames, rowvals))
    numpy.savetxt(save_file_params, DAT, delimiter=",", fmt="%s")

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #51
def model():
    # **** COLLECT PARAMETERS AND CREATE FILES ****
    modelo = "beto"
    idioma = "esp"

    # Create the file that will hold the trained network weights
    filepath = "./hyperas/" + modelo + "_" + idioma + "_hyperas.h5"
    f = open(filepath, "w+")
    f.close()


    # ***** LOAD THE DATASET ****

    data = pd.read_csv("/scratch/codigofsoler/baseDeDatos/csvs/5ZonascustomPADCHEST_onehot_multicolumn_SOLO_KATTY.csv")
    data = data[["Report", "Pulmon", "Calcificacion", "Cuerpos extranos", "Mediastino e hilios pulmonares",
                "Pleura y diafragma y pared abdominal", "Patologica", "Unchanged"]]    # keep only the columns of interest


    informes = list(json.load(open("./informes_padchest_esp.json")))
    data["Report"] = informes



    # Split into train, validation and test
    new_train, new_test = train_test_split(data, test_size=0.2, random_state=1, shuffle=True, stratify=data[['Patologica']])

    new_train, new_val = train_test_split(new_train, test_size=0.1, random_state=1, shuffle=True, stratify=new_train[['Patologica']])

    # Keep the raw input reports
    train_comment = new_train["Report"].values
    test_comment = new_test["Report"].values
    val_comment = new_val["Report"].values
            
            
    # ******************** LOAD TOKENIZER AND FORMAT INPUT REPORTS ********************
    model_name = 'dccuchile/bert-base-spanish-wwm-cased'
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_or_path = model_name) 


    padded_ids_train = []
    mask_ids_train = []

    # 4 options for the maximum report length
    max_length = {{choice([60, 118, 134, 154])}}
    # three truncation strategies
    truncation = {{choice(["longest_first", "only_first", "only_second"])}}
    for i in tqdm(range(len(train_comment))):
        encoding = tokenizer.encode_plus(train_comment[i]  , max_length = max_length , pad_to_max_length = True, truncation = truncation)
        input_ids , attention_id = encoding["input_ids"] , encoding["attention_mask"] 
        padded_ids_train.append(input_ids)
        mask_ids_train.append(attention_id)
        
    padded_ids_test = []
    mask_ids_test = []
    for i in tqdm(range(len(test_comment))):
        encoding = tokenizer.encode_plus(test_comment[i]  , max_length = max_length , pad_to_max_length = True , truncation = "longest_first" )
        input_ids , attention_id = encoding["input_ids"] , encoding["attention_mask"]
        padded_ids_test.append(input_ids)
        mask_ids_test.append(attention_id)

    padded_ids_val = []
    mask_ids_val = []
    for i in tqdm(range(len(val_comment))):
        encoding = tokenizer.encode_plus(val_comment[i]  , max_length = max_length , pad_to_max_length = True , truncation = "longest_first" )
        input_ids , attention_id = encoding["input_ids"] , encoding["attention_mask"]
        padded_ids_val.append(input_ids)
        mask_ids_val.append(attention_id)

    y_train = new_train.drop(["Report"], axis=1)    # y_train keeps the 0/1 labels of each column, used later in evaluate
    train_id = np.array(padded_ids_train)           # train_id and train_mask are the model inputs for training (likewise for test and val)
    train_mask = np.array(mask_ids_train)

    y_test = new_test.drop(["Report"], axis=1)      # Analogous to y_train
    test_id = np.array(padded_ids_test)
    test_mask = np.array(mask_ids_test)

    y_val = new_val.drop(["Report"], axis=1)        # Analogous to y_train
    val_id = np.array(padded_ids_val)
    val_mask = np.array(mask_ids_val)


    validation_data = ([val_id, val_mask], y_val)   # Used to compute val_auc for each training epoch

    # *************** MODEL ARCHITECTURE ****************

    input_1 = tf.keras.Input(shape=(max_length,), dtype=np.int32)    # Receives train_id
    input_2 = tf.keras.Input(shape=(max_length,), dtype=np.int32)    # Receives train_mask

    # Load the pretrained model
    model = TFBertForSequenceClassification.from_pretrained("/home/murat/datasets/pytorch", from_pt=True)

    output = model([input_1, input_2], training=True)
    answer = tf.keras.layers.Dense(7, activation=tf.nn.sigmoid)(output[0])  # Dense layer with 7 outputs (pulmon, ..., unchanged)
    model = tf.keras.Model(inputs=[input_1, input_2], outputs=[answer])     # Build the final architecture on top of the pretrained model
    model.summary()

    #model.load_weights("./checkpoints_padchest/best_xlm100_en.h5")


    # ********* CALLBACKS, CHECKPOINTS, CLASSWEIGHTS *****************

    # Load the frequency dictionary used to compute the class weights.
    # Each class weight is the inverse of that class's number of occurrences
    d_frecuencias = json.load(open("/scratch/codigofsoler/baseDeDatos/diccionarios/d_frecuencias_5zonas_sin_diagnosticos.json"))
    class_weights = {}
    nsamples = len(data)
    nclasses = 7
    class_weights[0] = (nsamples*1.0)/(d_frecuencias["Pulmon"]*nclasses)
    class_weights[1] = (nsamples*1.0)/(d_frecuencias["Calcificacion"]*nclasses)
    class_weights[2] = (nsamples*1.0)/(d_frecuencias["Cuerpos extranos"]*nclasses)
    class_weights[3] = (nsamples*1.0)/(d_frecuencias["Mediastino e hilios pulmonares"]*nclasses)
    class_weights[4] = (nsamples*1.0)/(d_frecuencias["Pleura y diafragma y pared abdominal"]*nclasses)
    class_weights[5] = (nsamples*1.0)/(d_frecuencias["Patologica"]*nclasses)
    class_weights[6] = (nsamples*1.0)/(d_frecuencias["Unchanged"]*nclasses)
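
    # Worked example with hypothetical counts: if nsamples were 70000 and "Pulmon"
    # appeared 20000 times, its weight would be 70000 / (20000 * 7) = 0.5;
    # rarer classes therefore receive proportionally larger weights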


    # Learning rate scheduler: ReduceLROnPlateau scales the lr by `factor` once `patience` epochs pass without improvement in the monitored metric
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                  patience=2, min_lr=3e-8, verbose=1)


    # The checkpoint saves to filepath the network weights of the best training epoch (best monitored value)
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_auc',
                                 verbose=1,
                                 save_best_only=True, mode="max", save_weights_only=True)

    # This callback reports the per-class val_auc at the end of each epoch
    nombres_clases = ['Pulmon', 'Calcificacion', 'Cuerpos extranos', 'Mediastino e hilios pulmonares', 'Pleura y diafragma y pared abdominal', 'Patologica', 'Unchanged']
    auroc = MultipleClassAUROC(class_names = nombres_clases, sequence = validation_data, weights_path = filepath)




    # ************* COMPILE AND TRAIN ************************
    auc_score = AUC(multi_label=True)   # Metric

    # 3 optimizer options, each with 3 initial learning rates
    adam = keras.optimizers.Adam(lr={{choice([3e-4, 3e-5, 3e-6])}})
    sgd = keras.optimizers.SGD(lr={{choice([3e-4, 3e-5, 3e-6])}})
    rmsprop = keras.optimizers.RMSprop(lr={{choice([3e-4, 3e-5, 3e-6])}})  # added so the 'rmsprop' choice below cannot raise a KeyError
    optim = {"adam": adam, "sgd": sgd, "rmsprop": rmsprop}[{{choice(['adam', 'sgd', 'rmsprop'])}}]

    model.compile(optimizer = optim,
                    loss = tf.keras.losses.binary_crossentropy,
                    metrics = [auc_score]
    )

    # 4 batch_size options
    model.fit(x=[train_id, train_mask], y=y_train,
              validation_data=validation_data,
              batch_size={{choice([32, 64, 128, 256])}},
              epochs=12, callbacks=[checkpoint, reduce_lr, auroc], class_weight=class_weights)

    # ****************** MODEL PREDICTION *****************
    print("\n\n\n")

    loss, auc = model.evaluate([test_id, test_mask], y_test)  # evaluate on the held-out test set; Keras returns [loss, auc] in that order
    print('Test loss:', loss)
    return {'loss': loss, 'status': STATUS_OK, 'model': model}
def model(X_train, Y_train, X_test, Y_test):
    img_rows, img_cols = 32, 32
    img_channels = 3
    nb_dim = 50
    nb_epoch=30

    #dense_layer_size = {{choice([256, 512, 1024])}}
    objective = 'mse'
    optimizer = {{choice(['rmsprop', 'adam', 'sgd'])}}
    batch_size = {{choice([32, 64, 128])}}
    #num_conv1 = int({{quniform(24, 64, 1)}})
    #num_conv2 = int({{quniform(32, 96, 1)}})
    #model_style = {{choice(['original', 'wider', 'deeper', 'wider_activation', 'nodrop_original', 'nodrop_wider'])}}
    model_style = {{choice(['original', 'wider', 'deeper', 'wider_activation'])}}
    data_augmentation = {{choice([True, False])}}  # real booleans; the original string 'False' was always truthy
    params = {#'dense_layer_size':dense_layer_size,
              'optimizer':optimizer,
              'batch_size':batch_size,
              #'num_conv1':num_conv1,
              #'num_conv2':num_conv2,
              'model_style':model_style
             }
    if optimizer == 'sgd':
        learning_rate = {{loguniform(np.log(0.001),np.log(0.999))}}
        params['learning_rate'] = learning_rate

    if data_augmentation:
        more_augmentation = {{choice([True, False])}}  # real booleans, as above
        params['more_augmentation'] = more_augmentation

    model = Sequential()

    if model_style == 'original':

        model.add(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Activation('relu'))
        model.add(Convolution2D(32, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Convolution2D(64, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(64, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_dim))
        #model.add(Activation('softmax'))
        #TODO: might want a linear activation function here
    elif model_style == 'wider':

        model.add(Convolution2D(48, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Activation('relu'))
        model.add(Convolution2D(48, 5, 5))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Convolution2D(96, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(96, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_dim))
        #model.add(Activation('softmax'))
        #TODO: might want a linear activation function here
    elif model_style == 'deeper':

        model.add(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Activation('relu'))
        model.add(Convolution2D(32, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Convolution2D(64, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(64, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Convolution2D(96, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(96, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_dim))
        #model.add(Activation('softmax'))
        #TODO: might want a linear activation function here
    elif model_style == 'wider_activation':

        model.add(Convolution2D(48, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Activation('relu'))
        model.add(Convolution2D(48, 5, 5))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Convolution2D(96, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(96, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_dim))
        model.add(Activation('linear'))
        #TODO: might want a linear activation function here
    elif model_style == 'nodrop_original':

        model.add(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Activation('relu'))
        model.add(Convolution2D(32, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(64, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(64, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dense(nb_dim))
        #model.add(Activation('softmax'))
        #TODO: might want a linear activation function here
    elif model_style == 'nodrop_wider':

        model.add(Convolution2D(48, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)))
        model.add(Activation('relu'))
        model.add(Convolution2D(48, 5, 5))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(96, 3, 3, border_mode='same'))
        model.add(Activation('relu'))
        model.add(Convolution2D(96, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(1024))
        model.add(Activation('relu'))
        model.add(Dense(nb_dim))
        #model.add(Activation('softmax'))
        #TODO: might want a linear activation function here

    if optimizer == 'sgd':
        # let's train the model using SGD + momentum (how original).
        sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=objective, optimizer=sgd)
    elif optimizer == 'rmsprop':
        model.compile(loss=objective, optimizer='rmsprop')
    else:
        model.compile(loss=objective, optimizer=optimizer)

    if not data_augmentation:
        print('Not using data augmentation.')
        history = model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, show_accuracy=True,
                  validation_data=(X_test, Y_test), shuffle=True)
    else:
        print('Using real-time data augmentation.')
        if more_augmentation:
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=True,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=True,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False)  # randomly flip images
        else:
            # this will do preprocessing and realtime data augmentation
            datagen = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                            samples_per_epoch=X_train.shape[0],
                            nb_epoch=nb_epoch, show_accuracy=True,
                            validation_data=(X_test, Y_test),
                            nb_worker=1)

    #score, acc = model.evaluate(X_test, Y_test, verbose=0)
    loss = model.evaluate(X_test, Y_test, verbose=0)
    print('Test loss:', loss)

    return {'loss': loss, 'status': STATUS_OK, 'params':params}
Example #53
def create_model(x_train, y_train, x_val, y_val, x_test, y_test):

    if sys.argv[1] == 'german':
        input_n = 24
    elif sys.argv[1] == 'australian':
        input_n = 15
    else:
        raise ValueError("expected dataset name 'german' or 'australian'")

    batch_size = 32
    epochs = 500
    inits = ['Zeros', 'Ones', 'RandomNormal', 'RandomUniform', 'TruncatedNormal', 'Orthogonal', 'lecun_uniform', 'lecun_normal', 'he_uniform', 'he_normal', 'glorot_uniform', 'glorot_normal']
    acts = ['tanh', 'softsign', 'sigmoid', 'hard_sigmoid', 'relu', 'softplus', 'LeakyReLU', 'PReLU', 'elu', 'selu']
    init = inits[11]  # 'glorot_normal'
    act = acts[int({{quniform(0, 9, 1)}})]

    neurons = int({{quniform(9, 180, 9)}})
    layers = {{choice([1, 2, 4, 8])}}
    norm = {{choice(['no', 'l1', 'l2'])}}
    dropout = {{choice([0, 1])}}
    earlystop = {{choice([0, 1])}}
    k1 = None
    k2 = None
    p = None

    if norm == 'no':
        reg = None
    elif norm == 'l1':
        k1 = {{loguniform(-9.2, -2.3)}}
        reg = regularizers.l1(k1)
    elif norm == 'l2':
        k2 = {{loguniform(-9.2, -2.3)}}
        reg = regularizers.l2(k2)

    X_input = Input((input_n, ))
    X = X_input

    for _ in range(layers):
        X = Dense(
            neurons,
            kernel_initializer=init,
            kernel_regularizer=reg,
        )(X)

        if act == 'LeakyReLU':
            X = LeakyReLU()(X)
        elif act == 'PReLU':
            X = PReLU()(X)
        else:
            X = Activation(act)(X)

        if dropout == 1:
            p = {{uniform(0, 1)}}
            X = Dropout(p)(X)

    X = Dense(1, kernel_initializer=init, kernel_regularizer=reg)(X)
    X_outputs = Activation('sigmoid')(X)

    model = Model(inputs = X_input, outputs = X_outputs)
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=['accuracy'],
    )

    patience = int({{quniform(1, 500, 1)}})
    es = EarlyStopping(
        monitor='val_loss',
        patience=patience,
        verbose=0,
        mode='auto',
    )
    if earlystop == 1:
        model.fit(
            x_train,
            y_train,
            batch_size=batch_size,
            verbose=0,
            epochs=epochs,
            validation_data=(x_val, y_val),
            callbacks=[es],
        )
    else:
        model.fit(
            x_train,
            y_train,
            batch_size=batch_size,
            verbose=0,
            epochs=epochs,
            validation_data=(x_val, y_val),
        )

    loss_t, score_t = model.evaluate(x_train, y_train, verbose=0)
    loss_v, score_v = model.evaluate(x_val, y_val, verbose=0)
    loss_te, score_te = model.evaluate(x_test, y_test, verbose=0)

    print(init + '\t' + act + '\t' + str(neurons) + '\t' + str(layers) + '\t'
          + str(norm) + '\t' + str(dropout) + '\t' + str(earlystop)
          + '%-24s%-24s%-24s%s' % (str(k1), str(k2), str(p), str(patience))
          + '  ' + str(score_v) + '  ' + str(loss_v)
          + '  ' + str(score_te) + '  ' + str(loss_te))
    return {'loss': loss_v, 'status': STATUS_OK, 'model': model}
Example #54
def Conv2DClassifierIn1(x_train, y_train, x_test, y_test, class_weights_dict,
                        obj):
    K.clear_session()
    summary = True
    verbose = 0
    # setHyperParams------------------------------------------------------------------------------------------------
    batch_size = 64
    epochs = {{choice([50, 100, 150, 200, 250])}}

    lr = {{loguniform(np.log(1e-4), np.log(1e-2))}}

    optimizer = {{choice(['adam', 'sgd', 'rmsprop'])}}

    activator = {{choice(['elu', 'relu', 'tanh'])}}

    basic_conv2D_layers = {{choice([1, 2])}}
    basic_conv2D_filter_num = {{choice([16, 32])}}

    loop_dilation2D_layers = {{choice([2, 4, 6])}}
    loop_dilation2D_filter_num = {{choice([16, 32, 64])}}  # used in the dilation loop below
    loop_dilation2D_dropout_rate = {{uniform(0.001, 0.35)}}
    dilation_lower = 2
    dilation_upper = 16

    reduce_layers = 5  # each conv/pool pass halves the spatial dims, e.g. 120 => 60 => 30 => 15 => 8 => 4
    reduce_conv2D_filter_num = {{choice([8, 16,
                                         32])}}  # used to reduce dimensionality
    reduce_conv2D_dropout_rate = {{uniform(0.001, 0.25)}}
    residual_stride = 2

    dense1_num = {{choice([64, 128, 256])}}
    dense2_num = {{choice([32, 64])}}

    drop_num = {{uniform(0.0001, 0.3)}}

    kernel_size = (3, 3)
    pool_size = (2, 2)
    initializer = 'random_uniform'
    padding_style = 'same'
    loss_type = 'binary_crossentropy'
    metrics = ('accuracy', )

    my_callbacks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.8,
            patience=10,
        )
    ]

    if lr > 0:
        if optimizer == 'adam':
            chosen_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosen_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosen_optimizer = optimizers.RMSprop(lr=lr)

    # build --------------------------------------------------------------------------------------------------------
    ## basic Conv2D
    input_layer = Input(shape=x_train.shape[1:])
    y = layers.Conv2D(basic_conv2D_filter_num,
                      kernel_size,
                      padding=padding_style,
                      kernel_initializer=initializer,
                      activation=activator)(input_layer)
    y = layers.BatchNormalization(axis=-1)(y)
    if basic_conv2D_layers == 2:
        y = layers.Conv2D(basic_conv2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)

    ## loop with Conv2D with dilation (padding='same')
    for _ in range(loop_dilation2D_layers):
        y = layers.Conv2D(loop_dilation2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          dilation_rate=dilation_lower,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(loop_dilation2D_dropout_rate)(y)
        dilation_lower *= 2
        if dilation_lower > dilation_upper:
            dilation_lower = 2

    ## Conv2D (padding='same') with pooling and a strided 1x1 residual branch to reduce dimensionality
    for _ in range(reduce_layers):
        y = layers.Conv2D(reduce_conv2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(reduce_conv2D_dropout_rate)(y)
        y = layers.MaxPooling2D(pool_size, padding=padding_style)(y)
        residual = layers.Conv2D(reduce_conv2D_filter_num,
                                 1,
                                 strides=residual_stride,
                                 padding='same')(input_layer)
        y = layers.add([y, residual])
        residual_stride *= 2

    ## flat & dense
    y = layers.Flatten()(y)
    y = layers.Dense(dense1_num, activation=activator)(y)
    y = layers.BatchNormalization(axis=-1)(y)
    y = layers.Dropout(drop_num)(y)
    y = layers.Dense(dense2_num, activation=activator)(y)
    y = layers.BatchNormalization(axis=-1)(y)
    y = layers.Dropout(drop_num)(y)

    output_layer = layers.Dense(len(np.unique(y_train)),
                                activation='softmax')(y)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

    model.compile(
        optimizer=chosen_optimizer,
        loss=loss_type,
        metrics=list(metrics)  # accuracy
    )

    # TF1-era housekeeping: bind a fresh session to the model's graph and initialize its variables
    K.set_session(tf.Session(graph=model.output.graph))
    init = K.tf.global_variables_initializer()
    K.get_session().run(init)

    result = model.fit(x=x_train,
                       y=y_train,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=verbose,
                       callbacks=my_callbacks,
                       validation_data=(x_test, y_test),
                       shuffle=True,
                       class_weight=class_weights_dict)
    # print('\n----------History:\n%s'%result.history)

    if obj == 'test_report_cla':
        acc_test, mcc_test, recall_p_test, recall_n_test, precision_p_test, precision_n_test = test_report_cla(
            model, x_test, y_test)
        print(
            '\n----------Predict:\nacc_test: %s, mcc_test: %s, recall_p_test: %s, recall_n_test: %s, precision_p_test: %s, precision_n_test: %s'
            % (acc_test, mcc_test, recall_p_test, recall_n_test,
               precision_p_test, precision_n_test))
        objective = acc_test + 5 * mcc_test + recall_p_test + recall_n_test + precision_p_test + precision_n_test
        return {'loss': -objective, 'status': STATUS_OK}

    elif obj == 'val_acc':
        validation_acc = np.amax(result.history['val_acc'])
        print('Best validation acc of epoch:', validation_acc)
        return {'loss': -validation_acc, 'status': STATUS_OK}
Example #55
def fluoro_model(image_train_cum, cali_train_cum, label_train_cum):
    def root_mean_squared_error(y_true, y_pred):
        return keras.backend.sqrt(
            keras.backend.mean(keras.backend.square(y_pred - y_true)))

    channel_order = 'channels_last'
    img_input_shape = (128, 128, 1)

    regularizer = keras.regularizers.l1_l2(l1={{uniform(0, 1)}},
                                           l2={{uniform(0, 1)}})
    activation_fn = {{choice(['elu', 'relu'])}}

    kern_init = {{choice(['glorot_uniform', 'glorot_normal'])}}

    conv_1_filters = {{choice([10, 20, 40, 50])}}
    conv_1_kernel = {{choice([(10, 10), (5, 5), (3, 3)])}}
    conv_1_strides = {{choice([(2, 2), (1, 1)])}}
    conv_1_padding = 'valid'

    spatial_drop_rate_1 = {{uniform(0, 1)}}

    pool_1_size = {{choice([(2, 2), (3, 3)])}}
    pool_1_padding = 'same'

    conv_2_filters = {{choice([20, 40, 80])}}
    conv_2_kernel = {{choice([(3, 3), (5, 5)])}}
    conv_2_strides = {{choice([(2, 2), (1, 1)])}}
    conv_2_padding = 'same'

    pool_2_size = {{choice([(2, 2), (3, 3)])}}
    pool_2_padding = 'same'

    conv_3_filters = {{choice([20, 80, 100])}}
    conv_3_kernel = {{choice([(2, 2), (3, 3)])}}
    conv_3_strides = {{choice([(2, 2), (1, 1)])}}
    conv_3_padding = 'valid'

    pool_3_size = (2, 2)
    pool_3_padding = 'valid'

    dense_1_f_units = {{choice([40, 80, 120])}}
    dense_1_f_bias = True

    dense_2_f_units = {{choice([40, 80, 120])}}
    dense_2_f_bias = True

    dense_3_f_units = {{choice([40, 80, 120])}}
    dense_3_f_bias = True

    dense_1_ca_units = {{choice([6, 20, 60])}}
    dense_1_ca_bias = True

    dense_2_co_units = {{choice([20, 40, 80])}}
    dense_2_co_bias = True

    drop_1_comb_rate = {{uniform(0, 1)}}

    dense_3_co_units = {{choice([20, 40, 80])}}
    dense_3_co_bias = True

    main_output_units = 6
    main_output_act = 'linear'
    main_output_bias = True

    model_opt = {{choice(['adam', 'nadam', 'adagrad', 'rmsprop'])}}
    model_loss = 'mse'
    model_metric = root_mean_squared_error

    model_epochs = {{choice([30, 40, 50])}}
    model_batchsize = {{choice([5, 10, 30])}}

    input_fluoro_1 = keras.Input(shape=img_input_shape,
                                 dtype='float32',
                                 name='fluoro1_inpt')
    input_fluoro_2 = keras.Input(shape=img_input_shape,
                                 dtype='float32',
                                 name='fluoro2_inpt')
    input_cali = keras.Input(shape=(6, ), dtype='float32', name='cali_inpt')

    bn_1_1 = keras.layers.BatchNormalization()(input_fluoro_1)
    conv_1_1 = keras.layers.Conv2D(filters=conv_1_filters,
                                   kernel_size=conv_1_kernel,
                                   strides=conv_1_strides,
                                   padding=conv_1_padding,
                                   activation=activation_fn,
                                   input_shape=img_input_shape,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(bn_1_1)
    spat_1_1 = keras.layers.SpatialDropout2D(
        rate=spatial_drop_rate_1)(conv_1_1)
    pool_1_1 = keras.layers.MaxPooling2D(pool_size=pool_1_size,
                                         padding=pool_1_padding,
                                         data_format=channel_order)(spat_1_1)
    conv_2_1 = keras.layers.Conv2D(filters=conv_2_filters,
                                   kernel_size=conv_2_kernel,
                                   strides=conv_2_strides,
                                   padding=conv_2_padding,
                                   activation=activation_fn,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_1_1)
    pool_2_1 = keras.layers.MaxPooling2D(pool_size=pool_2_size,
                                         padding=pool_2_padding,
                                         data_format=channel_order)(conv_2_1)
    conv_3_1 = keras.layers.Conv2D(filters=conv_3_filters,
                                   kernel_size=conv_3_kernel,
                                   strides=conv_3_strides,
                                   padding=conv_3_padding,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_2_1)
    pool_3_1 = keras.layers.MaxPooling2D(pool_size=pool_3_size,
                                         padding=pool_3_padding,
                                         data_format=channel_order)(conv_3_1)
    flatten_1_1 = keras.layers.Flatten()(pool_3_1)
    dense_1_f_1 = keras.layers.Dense(units=dense_1_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_1_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_1_f_1')(flatten_1_1)
    dense_2_f_1 = keras.layers.Dense(units=dense_2_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_2_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_2_f_1')(dense_1_f_1)
    dense_3_f_1 = keras.layers.Dense(units=dense_3_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_3_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_3_f_1')(dense_2_f_1)

    bn_1_2 = keras.layers.BatchNormalization()(input_fluoro_2)
    conv_1_2 = keras.layers.Conv2D(filters=conv_1_filters,
                                   kernel_size=conv_1_kernel,
                                   strides=conv_1_strides,
                                   padding=conv_1_padding,
                                   activation=activation_fn,
                                   input_shape=img_input_shape,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(bn_1_2)
    spat_1_2 = keras.layers.SpatialDropout2D(
        rate=spatial_drop_rate_1)(conv_1_2)
    pool_1_2 = keras.layers.MaxPooling2D(pool_size=pool_1_size,
                                         padding=pool_1_padding,
                                         data_format=channel_order)(spat_1_2)
    conv_2_2 = keras.layers.Conv2D(filters=conv_2_filters,
                                   kernel_size=conv_2_kernel,
                                   strides=conv_2_strides,
                                   padding=conv_2_padding,
                                   activation=activation_fn,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_1_2)
    pool_2_2 = keras.layers.MaxPooling2D(pool_size=pool_2_size,
                                         padding=pool_2_padding,
                                         data_format=channel_order)(conv_2_2)
    conv_3_2 = keras.layers.Conv2D(filters=conv_3_filters,
                                   kernel_size=conv_3_kernel,
                                   strides=conv_3_strides,
                                   padding=conv_3_padding,
                                   data_format=channel_order,
                                   activity_regularizer=regularizer,
                                   kernel_initializer=kern_init)(pool_2_2)
    pool_3_2 = keras.layers.MaxPooling2D(pool_size=pool_3_size,
                                         padding=pool_3_padding,
                                         data_format=channel_order)(conv_3_2)
    flatten_1_2 = keras.layers.Flatten()(pool_3_2)
    dense_1_f_2 = keras.layers.Dense(units=dense_1_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_1_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_1_f_2')(flatten_1_2)
    dense_2_f_2 = keras.layers.Dense(units=dense_2_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_2_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_2_f_2')(dense_1_f_2)
    dense_3_f_2 = keras.layers.Dense(units=dense_3_f_units,
                                     activation=activation_fn,
                                     use_bias=dense_3_f_bias,
                                     kernel_initializer=kern_init,
                                     activity_regularizer=regularizer,
                                     name='dense_3_f_2')(dense_2_f_2)

    dense_1_cali = keras.layers.Dense(units=dense_1_ca_units,
                                      activation=activation_fn,
                                      use_bias=dense_1_ca_bias,
                                      kernel_initializer=kern_init,
                                      name='dense_1_cali')(input_cali)

    dense_1_comb = keras.layers.concatenate(
        [dense_3_f_1, dense_3_f_2, dense_1_cali], name='dense_1_comb')

    dense_2_comb = keras.layers.Dense(units=dense_2_co_units,
                                      activation=activation_fn,
                                      use_bias=dense_2_co_bias,
                                      kernel_initializer=kern_init,
                                      name='dense_2_comb')(dense_1_comb)
    drop_1_comb = keras.layers.Dropout(rate=drop_1_comb_rate)(dense_2_comb)
    dense_3_comb = keras.layers.Dense(units=dense_3_co_units,
                                      activation=activation_fn,
                                      use_bias=dense_3_co_bias,
                                      kernel_initializer=kern_init,
                                      name='dense_3_comb')(drop_1_comb)
    main_output = keras.layers.Dense(units=main_output_units,
                                     activation=main_output_act,
                                     name='main_output')(dense_3_comb)

    model = keras.Model(inputs=[input_fluoro_1, input_fluoro_2, input_cali],
                        outputs=main_output)

    keras.utils.plot_model(model, 'show.png', show_shapes=True)

    model.compile(optimizer=model_opt, loss=model_loss, metrics=[model_metric])

    result = model.fit(x=[
        np.expand_dims(image_train_cum[:, 0, :, :], axis=3),
        np.expand_dims(image_train_cum[:, 1, :, :], axis=3), cali_train_cum
    ],
                       y=label_train_cum,
                       epochs=model_epochs,
                       batch_size=model_batchsize,
                       validation_split=0.2,
                       shuffle=True,
                       verbose=True)
    return {
        'loss': np.amin(result.history['loss']),
        'status': STATUS_OK,
        'model': model
    }
Example #56
def model(X_train, Y_train, X_test, Y_test):
    # initialising the CNN
    classifier = Sequential()

    # first convolution: 32 filters with 3x3 kernels
    classifier.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      input_shape=(68, 68, 3),
                      activation={{choice(["relu", "elu"])}}))

    #max pooling
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    # second convolutional layer and max pooling
    classifier.add(
        Convolution2D(filters=32,
                      kernel_size=(3, 3),
                      activation={{choice(["relu", "elu"])}}))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))

    #flattening the feature maps
    classifier.add(Flatten())

    # fully connected layers
    classifier.add(
        Dense(units={{choice([96, 112, 128])}},
              activation={{choice(["relu", "elu"])}}))

    classifier.add(Dropout({{choice([0.0, 0.1, 0.21, 0.3])}}))

    classifier.add(
        Dense(units={{choice([130, 164, 212])}},
              activation={{choice(["relu", "elu"])}}))

    classifier.add(Dropout({{choice([0.0, 0.1, 0.21, 0.3])}}))

    # non-binary outcome: softmax activation instead of sigmoid
    classifier.add(Dense(units=30, activation="softmax"))

    # compiling the CNN; the optimizer itself is tuned among rmsprop, adam, and sgd

    classifier.compile(optimizer={{choice(["rmsprop", "adam", "sgd"])}},
                       loss="categorical_crossentropy",
                       metrics=["accuracy"])

    classifier.fit(X_train,
                   Y_train,
                   batch_size=16,
                   epochs={{choice([15, 21, 24, 31, 35])}},
                   validation_data=(X_test, Y_test))

    score, acc = classifier.evaluate(X_test, Y_test, verbose=0)

    print("Test Accuracy:", acc)

    return {
        "accuracy": acc,
        "status": STATUS_OK,
        "model": classifier,
        "loss": score
    }
Example #57
def create_model(train_data, train_ws, test_data, test_ws):
   
    model = Sequential()
    model.add(Dense({{choice([16,32, 64])}}, activation='relu', input_shape=(train_data.shape[1],)))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([16,32,64,256])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    
    if {{choice(['two', 'three'])}} == 'three':
        model.add(Dense({{choice([16,32,64,256, 512])}}))
    
    
    if {{choice(['two', 'three', 'four'])}} == 'four':
        model.add(Dense({{choice([16,32,64,256])}}))
        model.add(Dropout({{uniform(0, 1)}}))
        model.add(Dense({{choice([16,32,64,256])}}))
    
    if {{choice(['two', 'three', 'four', 'five'])}} == 'five':
        model.add(Dense({{choice([16,32,64,256])}}))
        model.add(Dropout({{uniform(0, 1)}}))
        model.add(Dense({{choice([16,32,64,256])}}))
        model.add(Dropout({{uniform(0, 1)}}))
        model.add(Dense({{choice([16,32,64,256])}}))
    
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    

    model.compile(loss='mae',optimizer=optimizers.SGD(lr={{choice([0.001,0.0001])}}, momentum={{choice([0.6,0.7])}}))
    
    result = model.fit(train_data, train_ws,
                       validation_data=(test_data, test_ws),
                       batch_size=1,
                       epochs=15,
                       verbose=2)

    validation_loss = np.amin(result.history['val_loss'])

    print('Best validation loss of epoch:', validation_loss)
                  
    return {'loss': validation_loss, 'status': STATUS_OK, 'model': model}
Example #58
def model(data, data_label):
    """
    Defines the comparisons model; all hyperparameters in double brackets will be optimized by Hyperas.
    :return: a dictionary with following keys :
                - loss : the metrics function to be minimized by Hyperopt.
                - status : a boolean that tells if everything went fine.
                - model : the model on which hyperparameters optimization occurs.
    """
    img_size = 224
    vgg_feature_extractor = VGG19(weights='imagenet',
                                  include_top=False,
                                  input_shape=(img_size, img_size, 3))
    for layer in vgg_feature_extractor.layers[:-4]:
        layer.trainable = False

    img_a = Input(shape=(img_size, img_size, 3), name="left_image")
    img_b = Input(shape=(img_size, img_size, 3), name="right_image")

    out_a = vgg_feature_extractor(img_a)
    out_b = vgg_feature_extractor(img_b)

    concat = concatenate([out_a, out_b])

    x = Conv2D({{choice([64, 128, 256, 512])}}, (3, 3),
               activation='relu',
               padding='same',
               name="Conv_1")(concat)
    x = Dropout({{uniform(0, 0.5)}}, name="Drop_1")(x)
    x = Conv2D({{choice([64, 128, 256, 512])}}, (3, 3),
               activation='relu',
               padding='same',
               name="Conv_2")(x)
    x = Dropout({{uniform(0, 0.5)}}, name="Drop_2")(x)
    x = Conv2D({{choice([64, 128, 256, 512])}}, (3, 3),
               activation='relu',
               padding='same',
               name="Conv_3")(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(2, activation='softmax', name="Dense_Final")(x)

    comparisons_model = Model([img_a, img_b], x)

    sgd = SGD(lr={{choice([1e-4, 1e-5, 1e-6])}},
              decay={{choice([1e-4, 1e-5, 1e-6])}},
              momentum={{uniform(0, 0.9)}},
              nesterov=True)
    comparisons_model.compile(loss='categorical_crossentropy',
                              optimizer=sgd,
                              metrics=['accuracy'])

    result = comparisons_model.fit([data[0], data[1]],
                                   data_label,
                                   batch_size=16,
                                   epochs=30,
                                   validation_split=0.2)

    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)

    return {
        'loss': -validation_acc,
        'status': STATUS_OK,
        'model': comparisons_model
    }
def model(X_train, Y_train_fine, Y_train_coarse, X_test, Y_test_fine, Y_test_coarse):

    nb_dim = 20
    img_rows, img_cols = 32, 32
    img_channels = 3

    #dense_layer_size = {{choice([256, 512, 1024])}}
    objective = 'mse'
    optimizer = {{choice(['rmsprop', 'adam', 'sgd'])}}
    batch_size = {{choice([32, 64, 128])}}
    #num_conv1 = int({{quniform(24, 64, 1)}})
    #num_conv2 = int({{quniform(32, 96, 1)}})
    model_style = {{choice(['original', 'wider', 'nodroporiginal', 'moredense', 'custom1', 'split', 'nodrop_split'])}}
    params = {#'dense_layer_size':dense_layer_size,
              'optimizer':optimizer,
              'batch_size':batch_size,
              #'num_conv1':num_conv1,
              #'num_conv2':num_conv2,
              'model_style':model_style
             }

    model = Graph()

    model.add_input(name='input', input_shape=(img_channels, img_rows, img_cols))
    if model_style == 'original':

        model.add_node(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(32, 3, 3),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')
        model.add_node(Dropout(0.25),
                       name='drop1', input='pool1')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3', input='drop1')
        model.add_node(Activation('relu'),
                       name='relu3', input='conv3')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4', input='relu3')
        model.add_node(Activation('relu'),
                       name='relu4', input='conv4')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2', input='relu4')
        model.add_node(Dropout(0.25),
                       name='drop2', input='pool2')

        model.add_node(Flatten(),
                       name='flat1', input='drop2')
        model.add_node(Dense(512),
                       name='dense1', input='flat1')
        model.add_node(Activation('relu'),
                       name='relu5', input='dense1')
        model.add_node(Dropout(0.5),
                       name='drop3', input='relu5')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='drop3')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='drop3')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')
    elif model_style == 'nodroporiginal':

        model.add_node(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(32, 3, 3),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3', input='pool1')
        model.add_node(Activation('relu'),
                       name='relu3', input='conv3')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4', input='relu3')
        model.add_node(Activation('relu'),
                       name='relu4', input='conv4')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2', input='relu4')

        model.add_node(Flatten(),
                       name='flat1', input='pool2')
        model.add_node(Dense(512),
                       name='dense1', input='flat1')
        model.add_node(Activation('relu'),
                       name='relu5', input='dense1')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='relu5')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='relu5')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')
    elif model_style == 'moredense':

        model.add_node(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(32, 3, 3),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')
        model.add_node(Dropout(0.25),
                       name='drop1', input='pool1')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3', input='drop1')
        model.add_node(Activation('relu'),
                       name='relu3', input='conv3')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4', input='relu3')
        model.add_node(Activation('relu'),
                       name='relu4', input='conv4')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2', input='relu4')
        model.add_node(Dropout(0.25),
                       name='drop2', input='pool2')

        model.add_node(Flatten(),
                       name='flat1', input='drop2')
        model.add_node(Dense(512),
                       name='dense1', input='flat1')
        model.add_node(Activation('relu'),
                       name='relu5', input='dense1')
        model.add_node(Dropout(0.25),
                       name='drop3', input='relu5')
        model.add_node(Dense(512),
                       name='dense2', input='drop3')
        model.add_node(Activation('relu'),
                       name='relu6', input='dense2')
        model.add_node(Dropout(0.25),
                       name='drop4', input='relu6')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='drop4')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='drop4')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')
    elif model_style == 'wider':

        model.add_node(Convolution2D(48, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(48, 3, 3),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')
        model.add_node(Dropout(0.25),
                       name='drop1', input='pool1')

        model.add_node(Convolution2D(96, 3, 3, border_mode='same'),
                       name='conv3', input='drop1')
        model.add_node(Activation('relu'),
                       name='relu3', input='conv3')
        model.add_node(Convolution2D(96, 3, 3),
                       name='conv4', input='relu3')
        model.add_node(Activation('relu'),
                       name='relu4', input='conv4')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2', input='relu4')
        model.add_node(Dropout(0.25),
                       name='drop2', input='pool2')

        model.add_node(Flatten(),
                       name='flat1', input='drop2')
        model.add_node(Dense(1024),
                       name='dense1', input='flat1')
        model.add_node(Activation('relu'),
                       name='relu5', input='dense1')
        model.add_node(Dropout(0.5),
                       name='drop3', input='relu5')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='drop3')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='drop3')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')
    elif model_style == 'custom1':

        model.add_node(Convolution2D(48, 5, 5, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(48, 5, 5),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')
        model.add_node(Dropout(0.10),
                       name='drop1', input='pool1')

        model.add_node(Convolution2D(96, 3, 3, border_mode='same'),
                       name='conv3', input='drop1')
        model.add_node(Activation('relu'),
                       name='relu3', input='conv3')
        model.add_node(Convolution2D(96, 3, 3),
                       name='conv4', input='relu3')
        model.add_node(Activation('relu'),
                       name='relu4', input='conv4')
        model.add_node(MaxPooling2D(pool_size=(3, 3)),
                       name='pool2', input='relu4')
        model.add_node(Dropout(0.10),
                       name='drop2', input='pool2')

        model.add_node(Flatten(),
                       name='flat1', input='drop2')
        model.add_node(Dense(1024),
                       name='dense1', input='flat1')
        model.add_node(Activation('relu'),
                       name='relu5', input='dense1')
        model.add_node(Dropout(0.10),
                       name='drop3', input='relu5')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='drop3')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='drop3')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')
    elif model_style == 'split':  # separate convolution stacks for the coarse and fine heads

        model.add_node(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(32, 3, 3),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')
        model.add_node(Dropout(0.25),
                       name='drop1', input='pool1')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3_c', input='drop1')
        model.add_node(Activation('relu'),
                       name='relu3_c', input='conv3_c')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4_c', input='relu3_c')
        model.add_node(Activation('relu'),
                       name='relu4_c', input='conv4_c')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2_c', input='relu4_c')
        model.add_node(Dropout(0.25),
                       name='drop2_c', input='pool2_c')

        model.add_node(Flatten(),
                       name='flat1_c', input='drop2_c')
        model.add_node(Dense(512),
                       name='dense1_c', input='flat1_c')
        model.add_node(Activation('relu'),
                       name='relu5_c', input='dense1_c')
        model.add_node(Dropout(0.5),
                       name='drop3_c', input='relu5_c')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='drop3_c')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3_f', input='drop1')
        model.add_node(Activation('relu'),
                       name='relu3_f', input='conv3_f')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4_f', input='relu3_f')
        model.add_node(Activation('relu'),
                       name='relu4_f', input='conv4_f')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2_f', input='relu4_f')
        model.add_node(Dropout(0.25),
                       name='drop2_f', input='pool2_f')

        model.add_node(Flatten(),
                       name='flat1_f', input='drop2_f')
        model.add_node(Dense(512),
                       name='dense1_f', input='flat1_f')
        model.add_node(Activation('relu'),
                       name='relu5_f', input='dense1_f')
        model.add_node(Dropout(0.5),
                       name='drop3_f', input='relu5_f')
        
        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='drop3_f')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')
    elif model_style == 'nodrop_split':  # the split variant without dropout

        model.add_node(Convolution2D(32, 3, 3, border_mode='same',
                                input_shape=(img_channels, img_rows, img_cols)),
                       name='conv1', input='input')
        model.add_node(Activation('relu'),
                       name='relu1', input='conv1')
        model.add_node(Convolution2D(32, 3, 3),
                       name='conv2', input='relu1')
        model.add_node(Activation('relu'),
                       name='relu2', input='conv2')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool1', input='relu2')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3_c', input='pool1')
        model.add_node(Activation('relu'),
                       name='relu3_c', input='conv3_c')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4_c', input='relu3_c')
        model.add_node(Activation('relu'),
                       name='relu4_c', input='conv4_c')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2_c', input='relu4_c')

        model.add_node(Flatten(),
                       name='flat1_c', input='pool2_c')
        model.add_node(Dense(512),
                       name='dense1_c', input='flat1_c')
        model.add_node(Activation('relu'),
                       name='relu5_c', input='dense1_c')

        #model.add_node(Dense(nb_classes_coarse + nb_classes_fine),
        #               name='dense2', input='drop3')
        model.add_node(Dense(nb_classes_coarse),
                       name='dense_c', input='relu5_c')
        model.add_node(Activation('softmax'),
                       name='soft_c', input='dense_c')

        model.add_node(Convolution2D(64, 3, 3, border_mode='same'),
                       name='conv3_f', input='pool1')
        model.add_node(Activation('relu'),
                       name='relu3_f', input='conv3_f')
        model.add_node(Convolution2D(64, 3, 3),
                       name='conv4_f', input='relu3_f')
        model.add_node(Activation('relu'),
                       name='relu4_f', input='conv4_f')
        model.add_node(MaxPooling2D(pool_size=(2, 2)),
                       name='pool2_f', input='relu4_f')

        model.add_node(Flatten(),
                       name='flat1_f', input='pool2_f')
        model.add_node(Dense(512),
                       name='dense1_f', input='flat1_f')
        model.add_node(Activation('relu'),
                       name='relu5_f', input='dense1_f')
        
        model.add_node(Dense(nb_classes_fine),
                       name='dense_f', input='relu5_f')
        model.add_node(Activation('softmax'),
                       name='soft_f', input='dense_f')

    model.add_output(name='output_fine', input='soft_f')
    model.add_output(name='output_coarse', input='soft_c')


    
    model.compile(loss={'output_fine': objective, 'output_coarse': objective}, optimizer=optimizer, metrics=['accuracy'])

    history = model.fit({'input': X_train, 'output_fine': Y_train_fine, 'output_coarse': Y_train_coarse},
              batch_size=batch_size, nb_epoch=30, verbose=2,  # show_accuracy=True
              validation_data={'input': X_test, 'output_fine': Y_test_fine, 'output_coarse': Y_test_coarse},
              shuffle=True)

    # evaluate on the held-out test set, matching the printed labels below
    loss, fine_loss, coarse_loss, fine_acc, coarse_acc = model.evaluate({'input': X_test, 'output_fine': Y_test_fine, 'output_coarse': Y_test_coarse}, verbose=0)
    print('Test fine accuracy:', fine_acc)
    print('Test coarse accuracy:', coarse_acc)
    print('Combined loss:', fine_loss + coarse_loss)
    return {'loss': fine_loss + coarse_loss, 'status': STATUS_OK, 'params': params, 'fine_acc': fine_acc, 'coarse_acc': coarse_acc}