Example #1
def base(x_train, y_train, x_test, y_test):
    n_out = {n_out}
    input_shape = {input_shape}
    batch_size = {batch_size}
    epochs = {epochs}
    steps_per_epoch = len(x_train) // batch_size
    lossfun = '{lossfun}'
    optimizer = '{optimizer}'
    metrics = ['accuracy']

    p = Augmentor.Pipeline()

    p.flip_left_right(probability=0.5)
    if conditional({{choice([True, False])}}):
        p.crop_random(probability=1, percentage_area=0.8)
        p.resize(probability=1, width=96, height=96)
    if conditional({{choice([True, False])}}):
        p.random_erasing(probability=0.5, rectangle_area=0.2)
    if conditional({{choice([True, False])}}):
        p.shear(probability=0.3, max_shear_left=2, max_shear_right=2)
    print('Augmentor pipeline:')
    p.status()
    g = p.keras_generator_from_array(x_train, y_train, batch_size=batch_size)
    g = ((x / 255., y) for (x, y) in g)

    inputs = Input(shape=input_shape)
    x = inputs
    x = Conv2D(32, (3, 3))(x)
    x = Conv2D(32, (3, 3))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(64, (3, 3))(x)
    x = Conv2D(64, (3, 3))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(512)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(n_out)(x)
    x = Activation('softmax')(x)
    model = Model(inputs=inputs, outputs=x)

    model.compile(loss=lossfun,
                  optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
                  metrics=['accuracy'])

    model.fit_generator(
        g,
        steps_per_epoch=steps_per_epoch,
        validation_data=(x_test, y_test),
        epochs=epochs,
        verbose=2,
    )
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)

    return dict(zip(['loss', 'status', 'model'], [-acc, STATUS_OK, model]))
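For reference, here is a minimal Hyperas driver sketch showing how a model-providing function like base above is consumed. The data() loader below is a hypothetical stand-in and its shapes are placeholders; in practice it must return the same four arrays the model function expects.

import numpy as np
from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # Hypothetical loader: replace with the real dataset.
    x_train = np.random.rand(128, 96, 96, 3)
    y_train = np.random.rand(128, 10)
    x_test = np.random.rand(32, 96, 96, 3)
    y_test = np.random.rand(32, 10)
    return x_train, y_train, x_test, y_test

best_run, best_model = optim.minimize(model=base,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print('Best run:', best_run)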
Example #2
def model(x_train, y_train, x_test, y_test):

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([128, 256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))

        # We can also choose between complete sets of layers

        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train, y_train,
              batch_size={{choice([128, 256])}},
              epochs=5,
              verbose=2,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
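Note the sign convention used throughout these examples: hyperopt minimizes the returned 'loss', so accuracy-like metrics are negated (hence -acc), while error metrics such as MAE or MSE are returned as-is.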
Example #3
def create_model(x_train, y_train, x_test, y_test):
    model = Sequential()
    model.add(
        LSTM(units={{choice([16, 32, 64, 128, 256])}},
             input_shape=(x_train.shape[1], x_train.shape[2])))

    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(units={{choice([2, 4, 8, 16, 32, 64, 128, 256])}}))
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(1))

    model.compile(
        loss='mae',  # because mse sometimes gives NaNs
        optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train,
              y_train,
              batch_size={{choice([8, 16, 32, 64, 128])}},
              epochs=50,
              verbose=0,
              shuffle=False,
              validation_data=(x_test, y_test))
    mae = model.evaluate(x_test, y_test, verbose=0)
    print('MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model}
Example #4
def model(x_train, y_train, x_test, y_test):
    # 25 > 10 (relu) dropout > 10 (relu) dropout > 1 (relu)
    model = Sequential()
    model.add(
        Dense(output_dim={{
            choice([5, 10, 15, 20, 25, 30, 50, 75, 100, 200, 500])
        }},
              input_dim=x_train.shape[1]))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh', 'linear'])}}))
    model.add(Dropout({{uniform(0, 1)}}))
    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['extra-layer', 'no'])}}) == 'extra-layer':
        model.add(
            Dense(output_dim={{
                choice([5, 10, 15, 20, 25, 30, 50, 75, 100, 200, 500])
            }}))
        model.add(Activation({{choice(['relu', 'sigmoid', 'tanh',
                                       'linear'])}}))
        model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(output_dim=1))
    model.add(Activation({{choice(['relu', 'sigmoid', 'tanh', 'linear'])}}))

    model.compile(loss='mean_squared_error',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=10,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #5
def create_model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    model = Sequential()
    model.add(Dense({{choice([2, 5, 8, 16, 32, 64, 96, 128, 256])}}, input_dim=x_train.shape[1], activation="relu", kernel_initializer="uniform"))
    # If we choose 'two', add a second hidden layer
    if conditional({{choice(['one', 'two'])}}) == 'two':
        model.add(Dense({{choice([2, 5, 8, 16, 32, 64, 96, 128, 256])}}, activation="relu", kernel_initializer="uniform"))
        
    model.add(Dense(1, activation="linear", kernel_initializer="uniform"))
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mae'])

    model.fit(x_train, y_train,
              batch_size=8,
              epochs=15,
              verbose=2,
              validation_data=(x_test, y_test))
    score, mae = model.evaluate(x_test, y_test, verbose=0)
    print('Test MAE:', mae)
    return {'loss': mae, 'status': STATUS_OK, 'model': model}
Example #6
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:
 
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        model.add(Dropout(0.5))
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))
    opt = Adam(lr={{choice([0.01, 0.001, 0.0001, 0.00001])}},
               decay={{choice([1e-2, 1e-3, 1e-4])}})
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    globalvars.globalVar += 1

    filepath = "../output/weights_fcn_hyperas" + str(
        globalvars.globalVar) + ".hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    csv_logger = CSVLogger('../output/hyperas_test_log.csv',
                           append=True,
                           separator=';')

    model.fit(X_train,
              Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(X_test, Y_test),
              callbacks=[checkpoint, csv_logger])
    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #7
def create_lstm_model(x_train, y_train, x_test, y_test):
    num_layer = conditional({{choice(['one', 'two'])}})
    lstm_units = {{choice([16, 32, 64, 128, 256])}}
    print('lstm', lstm_units)
    if num_layer == 'two':
        lstm2_units = {{choice([16, 32, 64, 128])}}
        if lstm2_units > lstm_units:
            lstm2_units = lstm_units
        print('lstm2', lstm2_units)

    dropout_rate = {{uniform(0, 1)}}
    recurrent_dropout_rate = {{uniform(0, 1)}}
    print('dropout', dropout_rate)
    print('recurrent_dropout', recurrent_dropout_rate)
    epochs = int({{uniform(1, 25)}})
    batch_size = {{choice([256, 512, 1024])}}
    optimizer = {{choice(['sgd', 'adam', 'rmsprop'])}}
    print('batch size', batch_size, optimizer)

    model = Sequential()
    model.add(
        LSTM(units=lstm_units,
             input_shape=(50, 2),
             dropout=dropout_rate,
             recurrent_dropout=recurrent_dropout_rate,
             return_sequences=(num_layer == 'two')))

    if num_layer == 'two':
        model.add(
            LSTM(units=lstm2_units,
                 input_shape=(50, lstm_units),
                 dropout=dropout_rate,
                 recurrent_dropout=recurrent_dropout_rate,
                 return_sequences=False))

    model.add(Dense(units=2))

    model.compile(loss='mse', optimizer=optimizer, metrics=['mse'])
    early_stopping_monitor = EarlyStopping(patience=5, verbose=0)
    model.fit(x_train,
              y_train,
              epochs=epochs,
              batch_size=batch_size,
              validation_data=(x_test, y_test),
              callbacks=[early_stopping_monitor],
              verbose=2)

    score, mse = model.evaluate(x_test, y_test, verbose=2)
    print(score)
    if np.isnan(score):
        print('loss is nan')
        score = 100.0

    return {'loss': score, 'status': STATUS_OK, 'model': model}
Example #8
def create_time_model(Xtrain, Ttrain, Xtest, Ttest):
    def batch_generator(x, t):
        i = 0
        while True:
            if i == len(x):
                i = 0
            else:
                xtrain, ytrain = x[i], t[i]
                i += 1
            yield xtrain, ytrain

    steps_per_epoch = len(Xtrain)
    val_steps = len(Xtest)
    model = Sequential()
    csv_logger = CSVLogger('time_log.csv', append=True, separator='\t')
    layer = conditional({{choice(['one', 'two'])}})
    if layer == 'two':
        returnseq = True
    else:
        returnseq = False
    model.add(
        LSTM(units={{choice([32, 64, 128, 256])}},
             input_shape=(None, Xtrain[0].shape[2]),
             kernel_regularizer=L2({{uniform(0, 1)}}),
             dropout={{uniform(0, 1)}},
             return_sequences=returnseq))
    if layer == 'two':
        model.add(
            LSTM(units={{choice([256, 512])}},
                 input_shape=(None, Xtrain[0].shape[2]),
                 kernel_regularizer=L2({{uniform(0, 1)}}),
                 dropout={{uniform(0, 1)}}))
    model.add(Dense({{choice([1024, 512])}}))
    model.add(Activation('relu'))
    model.add({{choice([Dropout(0.5), Activation('linear')])}})
    model.add(Dense(Ttrain[0].shape[1]))
    model.compile(loss='mean_squared_error',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['cosine'])
    model.summary()
    history = model.fit_generator(batch_generator(Xtrain, Ttrain),
                                  steps_per_epoch=len(Xtrain),
                                  epochs=5,
                                  callbacks=[csv_logger],
                                  verbose=2,
                                  validation_data=batch_generator(
                                      Xtest, Ttest),
                                  validation_steps=len(Xtest))
    score, acc = model.evaluate_generator(batch_generator(Xtest, Ttest),
                                          steps=len(Xtest))
    return {'loss': acc, 'model': model, 'status': STATUS_OK}
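A note on the 'cosine' metric used here: in the Keras versions these examples target, it resolves to cosine proximity, which is defined with a negative sign (better alignment gives a more negative value), so returning it directly as the hyperopt 'loss' still minimizes in the right direction.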
Example #9
def model(train_X, train_Y, test_X, test_Y):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    
    model = Sequential()
    model.add(Dense(500,input_shape=(train_X.shape[1],)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense({{choice([512, 1024])}}))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense({{choice([512, 1024])}}))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))
    

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(500))
        # We can also choose between complete sets of layers
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(train_Y.shape[1]))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(train_X, train_Y,
              batch_size={{choice([128, 256])}},
              nb_epoch=1,
              verbose=2,
              validation_data=(test_X, test_Y))
    score, acc = model.evaluate(test_X, test_Y, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #10
def model(X_train, Y_train, X_test, Y_test, MAX_VOCAB_SIZE, MAX_SEQ_LEN):
    ## DEFINE MODEL
    embed_dim = 64
    lstm_out = 32

    model = Sequential()
    model.add(Embedding(MAX_VOCAB_SIZE, embed_dim, input_length=MAX_SEQ_LEN))
    if conditional({{choice(['gru', 'lstm'])}}) == 'gru':
        model.add(CuDNNGRU({{choice([8, 32, 64])}}))
    else:
        model.add(CuDNNLSTM({{choice([8, 32, 64])}}))

    if conditional({{choice(['one', 'two', 'three'])}}) == 'two':
        model.add(Dense({{choice([5, 20, 50, 100])}}, activation="relu"))
    elif conditional({{choice(['one', 'two', 'three'])}}) == 'three':
        model.add(Dense({{choice([20, 50, 100])}}, activation="relu"))
        model.add(Dropout({{uniform(0, 1)}}))
        model.add(Dense({{choice([5, 20])}}, activation="relu"))

    model.add(Dense(1, activation='sigmoid'))

    ## COMPILE
    model.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    result = model.fit(X_train,
                       Y_train,
                       batch_size={{choice([32, 64, 128])}},
                       epochs=5,
                       verbose=2,
                       validation_data=(X_test, Y_test))

    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
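Note that the if and elif above each contain their own {{choice(['one', 'two', 'three'])}}, and hyperas turns every double-brace expression into an independently sampled space entry; the two branches therefore test two separate draws rather than re-testing a single one.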
Example #11
def model(X_train, Y_train, X_test, Y_test):

    input_i = keras.layers.Input(shape=[1])
    i = keras.layers.Embedding(1000 + 1, 64)(input_i)
    i = keras.layers.Flatten()(i)
    i = keras.layers.Dropout({{uniform(0, 1)}})(i)

    input_u = keras.layers.Input(shape=[1])
    u = keras.layers.Embedding(10000 + 1, 64)(input_u)
    u = keras.layers.Flatten()(u)
    u = keras.layers.Dropout({{uniform(0, 1)}})(u)

    nn = keras.layers.merge([i, u], mode='concat')
    nn = keras.layers.Dense({{choice([128, 256, 512, 1024])}})(nn)
    nn = keras.layers.Activation({{choice(['relu', 'sigmoid'])}})(nn)
    nn = keras.layers.Dropout({{uniform(0, 1)}})(nn)
    nn = keras.layers.normalization.BatchNormalization()(nn)
    nn = keras.layers.Dense({{choice([128, 256, 512, 1024])}})(nn)
    nn = keras.layers.Activation({{choice(['relu', 'sigmoid'])}})(nn)

    if conditional({{choice(['2', '3'])}}) == '3':
        nn = keras.layers.Dropout({{uniform(0, 1)}})(nn)
        nn = keras.layers.normalization.BatchNormalization()(nn)
        nn = keras.layers.Dense({{choice([128, 256, 512, 1024])}})(nn)
        nn = keras.layers.Activation({{choice(['relu', 'sigmoid'])}})(nn)

    output = keras.layers.Dense(5, activation='softmax')(nn)

    model = keras.models.Model([input_i, input_u], output)
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(X_train,
              Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              validation_data=(X_test, Y_test))

    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    mse = metrics.mean_squared_error(
        numpy.argmax(Y_test, 1) + 1,
        numpy.argmax(model.predict(X_test), 1) + 1)
    rmse = numpy.sqrt(mse)

    print('Test RMSE:', rmse)
    return {'loss': rmse, 'status': STATUS_OK, 'model': model}
Example #12
def model(X_train, y_train, X_test, y_test):
    # create model
    model = Sequential()
    model.add(Dense({{choice([54, 27, 13])}}, input_dim=54, init='normal', activation='linear'))
    model.add(Dense({{choice([104, 54, 27, 13])}}, init='normal', activation='linear'))
    
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense({{choice([27, 13, 7])}}, activation='linear'))

    model.add(Dense(1, init='normal', activation='linear'))
    # Compile model
    model.compile(loss='mse', optimizer='rmsprop')
    model.fit(X_train, y_train, nb_epoch=50, batch_size={{choice([64, 128, 256])}}, verbose=2)
    mse = model.evaluate(X_test, y_test)
    print('\nTest MSE:', mse)
    return {'loss': mse, 'status': STATUS_OK, 'model': model}
Example #13
def model(X_train, y_train, X_test, y_test, max_features, maxlen):
    model = Sequential()

    model.add(LSTM({{choice([200, 400, 800, 1000])}}, 
              input_shape = (1, max_features), 
              return_sequences = True))

    if conditional({{choice(['two', 'three'])}}) == 'three':
        model.add(LSTM({{choice([200, 400, 800, 1000])}},
                  return_sequences = True))
        model.add(LSTM({{choice([200, 400, 800, 1000])}},
                  return_sequences = False))
    else:
        model.add(LSTM({{choice([200, 400, 800, 1000])}},
                  return_sequences = False))
    
    # Mitigate overfitting with dropout
    model.add(Dropout({{uniform(0, 1)}}))

    # Dense output layer with softmax activation
    model.add(Dense(y_train.shape[1], activation = 'softmax'))

    ## Compile model
    model.compile(
        loss='categorical_crossentropy'
      , optimizer='rmsprop'
      , metrics = ['accuracy']    # Collect accuracy metric 
    )

    ## Early stop
    early_stopping = EarlyStopping(monitor='val_loss', patience=8)

    ## Fit model
    model.fit(X_train, y_train, 
              batch_size={{choice([64, 128, 256])}}, 
              nb_epoch=500,
              validation_data=(X_test, y_test),
              callbacks=[early_stopping])

    ## Extract score
    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print("Accuracy: ", acc)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #14
def create_model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """

    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))

        # We can also choose between complete sets of layers

        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train,
              y_train,
              batch_size={{choice([64, 128])}},
              epochs=1,
              verbose=2,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #15
def keras_fmin_fnct(space):
    """
    Model providing function:
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout(space['Dropout']))
    model.add(Dense(space['Dense']))
    model.add(Activation(space['Activation']))
    model.add(Dropout(space['Dropout_1']))

    # If we choose 'four', add an additional fourth layer
    if conditional(space['conditional']) == 'four':
        model.add(Dense(100))

        # We can also choose between complete sets of layers

        model.add(space['add'])
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=space['optimizer'])

    model.fit(x_train,
              y_train,
              batch_size=space['batch_size'],
              epochs=2,
              verbose=2,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
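Example #15 above is the compiled form that hyperas generates from a template like Example #14. Alongside it, hyperas emits a search-space function roughly like the sketch below; the labels match the space[...] keys above, while the exact value lists are assumptions based on Example #14.

from hyperopt import hp
from keras.layers.core import Dropout, Activation

def get_space():
    # Sketch of the hyperas-generated search space for keras_fmin_fnct.
    return {
        'Dropout': hp.uniform('Dropout', 0, 1),
        'Dense': hp.choice('Dense', [256, 512, 1024]),
        'Activation': hp.choice('Activation', ['relu', 'sigmoid']),
        'Dropout_1': hp.uniform('Dropout_1', 0, 1),
        'conditional': hp.choice('conditional', ['three', 'four']),
        'add': hp.choice('add', [Dropout(0.5), Activation('linear')]),
        'optimizer': hp.choice('optimizer', ['rmsprop', 'adam', 'sgd']),
        'batch_size': hp.choice('batch_size', [64, 128]),
    }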
Example #16
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy', optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
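This example targets the Keras 1 API: nb_epoch and show_accuracy were replaced in Keras 2 by epochs and by requesting metrics=['accuracy'] at compile time.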
Example #17
def create_model(x_train, y_train, x_test, y_test):
    model = Sequential()
    model.add(
        Dense({{choice([30, 45, 60])}},
              input_dim=x_train.shape[1],
              activation={{choice(['relu', 'elu'])}},
              kernel_initializer='uniform'))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(
        Dense({{choice([30, 45, 60])}},
              activation={{choice(['relu', 'elu'])}},
              kernel_initializer='uniform'))
    model.add(Dropout({{uniform(0, 1)}}))

    # If we choose 'four', add an additional fourth layer
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(
            Dense({{choice([30, 45, 60])}},
                  input_dim=x_train.shape[1],
                  activation={{choice(['relu', 'elu'])}},
                  kernel_initializer='uniform'))
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(25, activation='softmax', kernel_initializer='uniform'))

    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})

    model.fit(x_train,
              y_train,
              batch_size={{choice([64, 128])}},
              epochs=100,
              verbose=0,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #18
def load_model(X_train, X_aux_train, y_train, X_test, X_aux_test, y_test):

    lstm_input = Input(shape=(5, 11), name='lstm_input')
    lstm_out = LSTM({{choice([3, 4, 7, 10])}}, activation='linear')(lstm_input)

    auxiliary_input = Input(shape=(46, ), name='aux_input')
    x = merge([lstm_out, auxiliary_input], mode='concat')

    # we stack a deep fully-connected network on top
    x = Dense({{choice([92, 46, 23, 12])}},
              activation='linear',
              W_regularizer=l2({{choice([0.01, 0.03])}}))(x)

    if conditional({{choice(['three', 'four'])}}) == 'four':
        x = Dense({{choice([46, 23, 12, 6])}},
                  activation='linear',
                  W_regularizer=l2({{choice([0.01, 0.03])}}))(x)

    main_output = Dense(1, activation='linear', name='main_output')(x)

    final_model = Model(input=[lstm_input, auxiliary_input],
                        output=[main_output])

    final_model.compile(loss='mean_squared_error', optimizer='adam')

    final_model.fit([X_train, X_aux_train],
                    y_train,
                    nb_epoch=30,
                    batch_size={{choice([64, 128])}},
                    verbose=2)
    mse = final_model.evaluate([X_test, X_aux_test], y_test, verbose=1)
    # y_test_est = final_model.predict([X_test, X_aux_test])
    # print("LSTM MSE: %f" % mean_squared_error(y_test, y_test_est))
    # print("LSTM score: %f" % r2_score(y_test, y_test_est))
    print('\nTest MSE:', mse)
    return {'loss': mse, 'status': STATUS_OK, 'model': final_model}
Example #19
def create_fix_model(Xtrain, Ttrain, Xtest, Ttest):
    csv_logger = CSVLogger('fix_log.csv', append=True, separator='\t')
    model = Sequential()
    layer = conditional({{choice(['one', 'two'])}})
    if layer == 'two':
        returnseq = True
    else:
        returnseq = False
    model.add(
        LSTM(units={{choice([32, 64, 128, 256])}},
             input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
             kernel_regularizer=L2({{uniform(0, 1)}}),
             dropout={{uniform(0, 1)}},
             return_sequences=returnseq))
    if layer == 'two':
        model.add(
            LSTM(units={{choice([256, 512])}},
                 input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
                 kernel_regularizer=L2({{uniform(0, 1)}}),
                 dropout={{uniform(0, 1)}}))
    model.add(Dense({{choice([1024, 512])}}))
    model.add(Activation('relu'))
    model.add({{choice([Dropout(0.5), Activation('linear')])}})
    model.add(Dense(Ttrain.shape[1]))
    model.compile(loss='mean_squared_error',
                  optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
                  metrics=['cosine'])
    model.summary()
    model.fit(Xtrain,
              Ttrain,
              batch_size=50,
              epochs=5,
              callbacks=[csv_logger],
              verbose=2,
              validation_data=(Xtest, Ttest))
    score, acc = model.evaluate(Xtest, Ttest, verbose=0)
    return {'loss': acc, 'model': model, 'status': STATUS_OK}
Example #20
def convnet(train_data, train_labels, test_data, test_labels):
    '''
    Convolutional Neural Network defined.
    '''
    SEED = 123
    np.random.seed(SEED)
    NB_CLASSES = 2
    DOC_ROWS = 1
    DOC_COLS = 300
    INPUT_SHAPE = (1, DOC_ROWS, DOC_COLS)

    model = Sequential()

    model.add(Dropout({{uniform(0, 1)}}, input_shape=INPUT_SHAPE))

    model.add(
        Conv2D(
            {{choice([5, 10, 15, 20, 50, 100])}},
            kernel_size={{choice([5, 10, 20])}},
            padding='same',
            kernel_initializer={{choice(['TruncatedNormal',
                                         'random_normal'])}}))
    model.add(Activation({{choice(["tanh", "relu"])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['yes', 'no'])}}) == 'yes':
        model.add(MaxPooling2D(pool_size=(1, 1), strides=None))
    model.add(
        Conv2D(
            {{choice([5, 10, 15, 20, 50, 100])}},
            kernel_size={{choice([5, 10, 20])}},
            padding='same',
            kernel_initializer={{choice(['TruncatedNormal',
                                         'random_normal'])}}))

    model.add(Activation({{choice(["tanh", "relu"])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['yes', 'no'])}}) == 'yes':
        model.add(MaxPooling2D(pool_size=(1, 1), strides=None))

    if conditional({{choice(['yes', 'no'])}}) == 'yes':
        model.add(
            Conv2D({{choice([5, 10, 15, 20, 50, 100])}},
                   kernel_size={{choice([5, 10, 20])}},
                   padding='same',
                   kernel_initializer={{
                       choice(['TruncatedNormal', 'random_normal'])
                   }}))
        model.add(Activation({{choice(["tanh", "relu"])}}))
        model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['yes', 'no'])}}) == 'yes':
        model.add(
            Conv2D({{choice([5, 10, 15, 20, 50, 100])}},
                   kernel_size={{choice([5, 10, 20])}},
                   padding='same',
                   kernel_initializer={{
                       choice(['TruncatedNormal', 'random_normal'])
                   }}))
        model.add(Activation({{choice(["tanh", "relu"])}}))
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(Flatten())

    model.add(Dense({{choice([250, 500])}}))
    model.add(Activation({{choice(["tanh", "relu"])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['yes', 'no'])}}) == 'yes':
        model.add(Dense({{choice([250, 500, 750])}}))
        model.add(Activation({{choice(["tanh", "relu"])}}))
        model.add(Dropout({{uniform(0, 1)}}))

    model.add(Dense(NB_CLASSES))
    model.add(Activation({{choice(["softmax", "sigmoid", "hard_sigmoid"])}}))

    model.compile(
        loss={{choice(['categorical_crossentropy', 'binary_crossentropy'])}},
        metrics=['accuracy'],
        optimizer={{choice(['rmsprop', 'adam', 'sgd', 'nadam', 'adamax'])}})

    history = model.fit(train_data,
                        train_labels,
                        batch_size={{choice([32, 64, 128, 256, 512])}},
                        epochs={{choice([10, 20, 50, 100])}},
                        verbose=1,
                        validation_split={{uniform(0, 1)}},
                        callbacks=[
                            TensorBoard(
                                log_dir=os.path.join('LOGS/dcnn_d2v300_BM'),
                                histogram_freq=0,
                                write_graph=True,
                                write_grads=False,
                                write_images=False,
                                embeddings_freq=0,
                                embeddings_layer_names=None,
                                embeddings_metadata=None)
                        ])

    score, acc = model.evaluate(test_data, test_labels, verbose=1)

    print('History')
    print(history.history.keys())

    print('Test Score: {}'.format(score))
    print('Test Accuracy: {}'.format(acc))

    out = {'loss': -acc, 'status': STATUS_OK, 'model': model}

    return out
Example #21
def justdeep(x_train, y_train, x_test, y_test):
    include_top = True
    input_tensor = None
    input_shape = (128, 128, 3)
    pooling = None
    classes = y_test.shape[-1]
    relu = 'elu'

    input_shape = _obtain_input_shape(
        input_shape,
        default_size=227,
        min_size=48,
        data_format=K.image_data_format(),
        require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(
        {{choice([16, 32, 64])}},
        {{choice([(3, 3), (5, 5)])}},
        strides=(2, 2),
        padding='valid',
        activation=relu,
    )(img_input)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    nth = 1
    #n_layer = conditional({{choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])}})
    n_layer = conditional({{choice([1, 2, 3, 4, 5, 6])}})
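    # NOTE: fire_module is assumed to be defined at module scope; Example #22 below defines the same helper inline.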
    for _ in range(n_layer):
        squeeze = nth * {{choice([8, 16])}}
        expand = squeeze * {{choice([3, 4])}}
        x = fire_module(x, squeeze=squeeze, expand=expand)
        x = fire_module(x, squeeze=squeeze, expand=expand)
        #x = Dropout({{uniform(0, 1)}})(x)

        nth += 1

    # It's not obvious where to cut the network...
    # Could do the 8th or 9th layer... some work recommends cutting earlier layers.

    x = Dropout({{uniform(0, 1)}})(x)

    x = Convolution2D(
        classes,
        (1, 1),
        padding='valid',
        activation=relu,
    )(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='squeezenet')

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
        metrics=['accuracy'])

    model.fit(
        x_train, y_train, batch_size=64, epochs=100, verbose=2, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #22
def mydeep(x_train, y_train, x_test, y_test):
    from keras.applications.imagenet_utils import _obtain_input_shape
    from keras import backend as K
    from keras.layers import Input, Convolution2D, MaxPooling2D, Activation, concatenate, Dropout, warnings
    from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D
    from keras.models import Model
    from keras.engine.topology import get_source_inputs
    from keras.utils import get_file
    from keras.utils import layer_utils

    def fire_module(x, squeeze=16, expand=64):
        if K.image_data_format() == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = 3

        with tf.variable_scope('fire_module'):
            x = Convolution2D(squeeze, (1, 1), padding='valid')(x)
            x = Activation('elu')(x)

            left = Convolution2D(expand, (1, 1), padding='valid')(x)
            left = Activation('elu')(left)

            right = Convolution2D(expand, (3, 3), padding='same')(x)
            right = Activation('elu')(right)

            x = concatenate([left, right], axis=channel_axis)
        return x

    include_top = True
    input_tensor = None
    input_shape = (128, 128, 3)
    pooling = None
    classes = y_test.shape[-1]

    input_shape = _obtain_input_shape(
        input_shape,
        default_size=227,
        min_size=48,
        data_format=K.image_data_format(),
        require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(
        {{choice([16, 32, 64])}},
        {{choice([(3, 3), (5, 5), (7, 7), (9, 9), (11, 11)])}},
        strides=(2, 2),
        padding='valid',
        activation='elu',
    )(img_input)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

    nth = 1
    n_layer = conditional({{choice([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])}})
    for _ in range(n_layer):
        squeeze = nth * {{choice([8, 16])}}
        expand = squeeze * {{choice([3, 4])}}
        x = fire_module(x, squeeze=squeeze, expand=expand)
        x = fire_module(x, squeeze=squeeze, expand=expand)

        nth += 1

    # It's not obvious where to cut the network...
    # Could do the 8th or 9th layer... some work recommends cutting earlier layers.

    x = Dropout({{uniform(0, 1)}})(x)

    x = Convolution2D(
        classes,
        (1, 1),
        padding='valid',
        activation='elu',
    )(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='squeezenet')

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
        metrics=['accuracy'])

    model.fit(
        x_train,
        y_train,
        batch_size={{choice([32, 64, 128])}},
        epochs=100,
        verbose=2,
        validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #23
    def createModel(features_train, embeddings_matrix, idxs):
        char_emb_size = 25
        char_lstm_size = 25
        lstm_units = {{choice([200, 50, 100])}}

        if embeddings_matrix is None:
            emb_layer = Embedding(input_dim=len(word_idx) + 1,
                                  output_dim=300,
                                  mask_zero=True,
                                  trainable=False,
                                  name="word_emb")
        else:
            emb_layer = Embedding(input_dim=embeddings_matrix.shape[0],
                                  output_dim=embeddings_matrix.shape[1],
                                  mask_zero=True,
                                  weights=[embeddings_matrix],
                                  name="word_emb",
                                  trainable=False)

        word_ids = Input(batch_shape=(None, None), dtype='int32')
        word_embeddings = emb_layer(word_ids)

        # Char embeddings
        char_ids = Input(batch_shape=(None, None, None), dtype='int32')
        #     char_ids = attention_3d_block(char_ids)

        char_embeddings = Embedding(input_dim=len(idxs["char_idx"]) + 1,
                                    output_dim=char_emb_size,
                                    mask_zero=True,
                                    input_length=20,
                                    name="char_emb")(char_ids)
        s = K.shape(char_embeddings)

        if conditional({{choice(['true', 'false'])}}) == 'true':
            # Attention layer on characters
            char_embeddings = Lambda(
                lambda x: K.reshape(x, shape=(-1, 20, char_emb_size)),
                name="lambda")(char_embeddings)
            fwd_state = LSTM(
                char_lstm_size,
                return_sequences=True,
                name="char_for",
            )(char_embeddings)
            bwd_state = LSTM(char_lstm_size,
                             return_sequences=True,
                             go_backwards=True,
                             name="char_back")(char_embeddings)
            char_embeddings = Concatenate(
                axis=-1, name="char_concat")([fwd_state, bwd_state])
            char_embeddings = attention_3d_block(char_embeddings, 20,
                                                 "char_att")
            char_embeddings = Lambda(
                lambda x: K.reshape(x, shape=[-1, s[1], char_lstm_size * 2]),
                name="lambda2")([char_embeddings])
        else:
            # No attention layer
            char_embeddings = Lambda(
                lambda x: K.reshape(x, shape=(-1, s[-2], char_emb_size)),
                name="lambda")(char_embeddings)
            fwd_state = LSTM(
                char_lstm_size,
                return_state=True,
                name="char_for",
            )(char_embeddings)[-2]
            bwd_state = LSTM(char_lstm_size,
                             return_state=True,
                             go_backwards=True,
                             name="char_back")(char_embeddings)[-2]
            # Concatenate forward and backward character embeddings
            char_embeddings = Concatenate(
                axis=-1, name="char_concat")([fwd_state, bwd_state])
            char_embeddings = Lambda(
                lambda x: K.reshape(x, shape=[-1, s[1], 2 * char_lstm_size]),
                name="lambda2")(char_embeddings)

        embeddings = [word_embeddings, char_embeddings]
        additional_features = []
        additional_features_length = 0
        for j in features_train:
            if j != "words" and j != "output" and j != "char_input" and j != "wordembeddings_input":
                additional_features_length = additional_features_length + len(
                    idxs[j])
                additional_features.append(
                    Input(batch_shape=(None, None, len(idxs[j])),
                          dtype='float32'))

        if conditional({{choice(['true', 'false'])}}) == 'true':
            additional_features_concat = Concatenate(
                axis=-1, name="concat_additional_feats")(additional_features)
            print(additional_features_concat.shape)
            attention_probs = Dense(
                additional_features_length,
                activation='softmax',
                name='attention_vec_')(additional_features_concat)
            attention_mul = Multiply()(
                [additional_features_concat, attention_probs])
            print(word_embeddings.shape, char_embeddings.shape)
            embeddings.append(attention_mul)
            merge_input = Concatenate(axis=-1, name="concat_all")(embeddings)
        else:
            merge_input = Concatenate(
                axis=-1, name="concat_all")(embeddings + additional_features)

        merge_input = Dropout({{uniform(0, 1)}})(merge_input)

        merge_lstm1 = Bidirectional(LSTM(lstm_units,
                                         return_sequences=True,
                                         name="lstm1"),
                                    name="bilstm")(merge_input)

        merge_lstm1 = Dropout({{uniform(0, 1)}})(merge_lstm1)
        #         merge_lstm1 = Dense(100, activation="tanh")(merge_lstm1)
        merge_lstm1 = Dense(len(idxs["output_idx"]))(merge_lstm1)

        crf = CRF(len(idxs["output_idx"]))
        pred = crf(merge_lstm1)
        lossFct = crf.loss_function
        model = Model(inputs=[word_ids, char_ids] + additional_features,
                      outputs=[pred])
        model.compile(loss=lossFct,
                      optimizer=Adam(lr=0.001),
                      metrics=[crf.accuracy])

        return model
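Unlike the other examples, createModel returns only the compiled model rather than the usual {'loss', 'status', 'model'} dictionary; the surrounding training code (not shown, and the reason for the extra indentation) is expected to fit the model and assemble the hyperopt result.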
Example #24
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    ENV_NAME = 'CartPole-v0'

    # Get the environment and extract the number of actions.
    env = gym.make(ENV_NAME)
    np.random.seed(123)
    env.seed(123)
    nb_actions = env.action_space.n

    model = Sequential()
    #model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
    model.add(
        LSTM({{choice([16, 32, 48, 64])}},
             input_shape=(1, ) + env.observation_space.shape))
    model.add(Dropout({{uniform(0, 1)}}))  # comment this out if results degrade
    model.add(Activation('tanh'))
    model.add(Dense({{choice([16, 32, 48, 64])}}))
    model.add(Dropout({{uniform(0, 1)}}))  # comment this out if results degrade
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    # Optional extra layers:
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense({{choice([16, 32, 48, 64])}}))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    if conditional({{choice(['four', 'five'])}}) == 'five':
        model.add(Dense({{choice([16, 32, 48, 64])}}))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation({{choice(['relu', 'sigmoid'])}}))

    model.add(Dense(nb_actions))
    model.add(Activation({{choice(['relu', 'sigmoid', 'linear'])}}))
    #print(model.summary())

    # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
    # even the metrics!
    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model,
                   nb_actions=nb_actions,
                   memory=memory,
                   nb_steps_warmup=10,
                   target_model_update=1e-2,
                   policy=policy)
    dqn.compile(Adam(lr={{uniform(1e-4, 1e-2)}}), metrics=['mae'])

    # Okay, now it's time to learn something! We visualize the training here for show, but this
    # slows down training quite a lot. You can always safely abort the training prematurely using
    # Ctrl + C.
    number_tasks = 1000  # number of training steps; increase for real runs
    dqn.fit(env, nb_steps=number_tasks, visualize=False, verbose=2)

    hist = dqn.test(env, nb_episodes=5, visualize=True)
    rewards = hist.history['episode_reward']

    loss = 1.0 / len(rewards) * np.sum([(90.0 - reward)**2
                                        for reward in rewards])

    print('Test Loss:', loss)
    return {'loss': loss, 'status': STATUS_OK, 'model': dqn}  # lower squared reward shortfall is better
Example #25
def make_model(x_train, y_train, x_val, y_val):
    weights_dir = 'weights'
    history_dir = 'history'
    
    numgrulayers = {{choice(['three'])}}
    numgrulayers = conditional(numgrulayers)


    model = Sequential()
    model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', input_shape=(x_train.shape[1],x_train.shape[2]), return_sequences=True ))
    model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=True ))
    if numgrulayers == 'five':
        model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=True ))
        model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=True ))
    elif numgrulayers == 'six':
        model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=True ))
        model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=True ))
        model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=True ))
    model.add(GRU(units={{choice([32,64,128,256,512])}}, activation='tanh', return_sequences=False ))
    model.add(Dense(y_train.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

    if numgrulayers == 'three':
        filename = weights_dir+'/' + \
                numgrulayers + '_' + \
                str(space['units']) + '_' + \
                str(space['units_1']) + '_' + \
                str(space['units_7']) + '_' + \
                str(space['batch_size']) + \
                '_{epoch:03d}_{loss:.4f}_{acc:.4f}_{val_loss:.4f}_{val_acc:.4f}.hdf5'
    elif numgrulayers == 'five':
        filename = weights_dir+'/' + \
                numgrulayers + '_' + \
                str(space['units']) + '_' + \
                str(space['units_1']) + '_' + \
                str(space['units_2']) + '_' + \
                str(space['units_3']) + '_' + \
                str(space['units_7']) + '_' + \
                str(space['batch_size']) + \
                '_{epoch:03d}_{loss:.4f}_{acc:.4f}_{val_loss:.4f}_{val_acc:.4f}.hdf5'
    elif numgrulayers == 'six':
        filename = weights_dir+'/' + \
                numgrulayers + '_' + \
                str(space['units']) + '_' + \
                str(space['units_1']) + '_' + \
                str(space['units_4']) + '_' + \
                str(space['units_5']) + '_' + \
                str(space['units_6']) + '_' + \
                str(space['units_7']) + '_' + \
                str(space['batch_size']) + \
                '_{epoch:03d}_{loss:.4f}_{acc:.4f}_{val_loss:.4f}_{val_acc:.4f}.hdf5'

    checkpoint = ModelCheckpoint(filename, monitor='loss', verbose=1, save_best_only=True, mode='min')
    earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
    callbacks = [checkpoint,earlystopping]
    
    result = model.fit(x_train, y_train, batch_size={{choice([32,64,128])}}, epochs=20,verbose=6, validation_data=(x_val, y_val), callbacks=callbacks)

    parameters = space
    parameters['history'] = result.history
    if numgrulayers == 'three':
        dm.save(parameters, history_dir + '/' + \
            numgrulayers + '_' + \
            str(space['units']) + '_' + \
            str(space['units_1']) + '_' + \
            str(space['units_7']) + '_' + \
            str(space['batch_size']))
    elif numgrulayers == 'five':
        dm.save(parameters, history_dir + '/' + \
            numgrulayers + '_' + \
            str(space['units']) + '_' + \
            str(space['units_1']) + '_' + \
            str(space['units_2']) + '_' + \
            str(space['units_3']) + '_' + \
            str(space['units_7']) + '_' + \
            str(space['batch_size']))
    elif numgrulayers == 'six':
        dm.save(parameters, history_dir + '/' + \
            numgrulayers + '_' + \
            str(space['units']) + '_' + \
            str(space['units_1']) + '_' + \
            str(space['units_4']) + '_' + \
            str(space['units_5']) + '_' + \
            str(space['units_6']) + '_' + \
            str(space['units_7']) + '_' + \
            str(space['batch_size']))

    loss,acc = model.evaluate(x_val,y_val, verbose=0)
    print("Test loss: "+str(loss)+"\tTest acc: " +str(acc))
    return {'status': STATUS_OK, 'model':model, 'loss':loss, 'acc':acc }
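Example #25 reads the sampled values back through the space dictionary that hyperas passes into the compiled function (compare keras_fmin_fnct(space) in Example #15); the 'units', 'units_1', ..., 'batch_size' labels come from hyperas's automatic naming of the double-brace expressions.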
Example #26
def model(x_train, x_test, Y_train, Y_test):
    crop_length = 160

    x_train_ = x_train[:, :crop_length]
    x_test_ = x_test[:, :crop_length]

    x_concat = np.concatenate([x_train_, x_test_])
    x_concat = (x_concat - x_concat.min()) / (x_concat.max() -
                                              x_concat.min()) * 2 - 1.
    x_train_ = x_concat[:x_train_.shape[0]]
    x_test_ = x_concat[x_train_.shape[0]:]

    x_train_ = x_train_.reshape(x_train_.shape + (1, ))
    x_test_ = x_test_.reshape(x_test_.shape + (1, ))
    x_concat = np.concatenate([x_train_, x_test_], axis=0)

    epochs = 3
    batch_size = 128

    depth = {{choice([16])}}
    n_layers = {{choice(["1 layer", "2 layers"])}}

    if (conditional(n_layers) == "2 layers"):
        squeeze = {{choice([1, 2])}}

    layer_after_conv = {{choice(["batch_norm", "max_pool"])}}

    if (conditional(layer_after_conv) == "batch_norm"):
        activation = "relu"  # "Batchnorm + relu","maxPooling"
        pooling = 1
    elif (conditional(layer_after_conv) == "max_pool"):
        activation = {{choice(["relu", "elu",
                               None])}}  # "Batchnorm + relu","maxPooling"
        pooling = {{choice([2, 4])}}

    kernel_size = 8
    intermediate_dim = {{choice([32])}}

    undersample_rate = pooling**(2 if
                                 (conditional(n_layers) == "2 layers") else 1)
    output_shape = (batch_size, crop_length // undersample_rate, 1)  # //4 //4

    # use_bias = True / False
    # elu / selu / relu
    # dilation_rate -> but stride = 1

    x = Input(shape=x_train_.shape[1:])
    h = x
    h = Conv1D(depth, kernel_size,
               padding='same')(h)  # name="test" # NO ! We duplicate
    if (conditional(layer_after_conv) == "max_pool"):
        h = MaxPooling1D(pooling, padding='same')(h)
    else:
        h = BatchNormalization()(h)
    h = Activation(activation)(h)

    if (conditional(n_layers) == "2 layers"):
        h = Conv1D(depth * squeeze,
                   kernel_size // squeeze,
                   padding='same',
                   activation=activation)(h)
        if (conditional(layer_after_conv) == "max_pool"):
            h = MaxPooling1D(pooling, padding='same')(h)
        else:
            h = BatchNormalization()(h)
        h = Activation(activation)(h)

    flat = Flatten()(h)
    hidden = Dense(intermediate_dim, activation=activation)(flat)

    x_recons = hidden
    x_recons = Dense(output_shape[1])(x_recons)
    x_recons = Reshape(output_shape[1:])(x_recons)

    if (conditional(n_layers) == "2 layers"):
        x_recons = Conv1D(depth * squeeze,
                          kernel_size // squeeze,
                          padding='same',
                          activation=activation)(x_recons)
        if (conditional(layer_after_conv) == "max_pool"):
            x_recons = UpSampling1D(pooling)(x_recons)

    x_recons = Conv1D(depth,
                      kernel_size,
                      padding='same',
                      activation=activation)(x_recons)
    if (conditional(layer_after_conv) == "max_pool"):
        x_recons = UpSampling1D(pooling)(x_recons)
    x_recons = Conv1D(1, kernel_size, padding='same')(x_recons)

    y = Dropout(0.2)(hidden)
    y = Dense(Y_train.shape[1], activation='softmax')(y)

    mlt = Model(x, [x_recons, y])
    mlt.compile('adam', ['mse', 'categorical_crossentropy'], metrics=['acc'])

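    # Semi-supervised trick: the reconstruction target covers all samples,
    # but test labels enter the classification target with zero sample
    # weight below, so they never contribute to the gradient.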
    hist = mlt.fit(
        x_concat, [x_concat, np.concatenate([Y_train, Y_test])],
        sample_weight=[
            np.ones(len(x_concat)),
            np.concatenate([np.ones(len(Y_train)),
                            np.zeros(len(Y_test))])
        ],
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        verbose=0)
    acc = (np.argmax(mlt.predict(x_test_)[1],
                     axis=1) == np.argmax(Y_test, axis=-1)).mean()
    print("ACC :", acc)
    return {'loss': -acc, 'status': STATUS_OK}  # we could add the history...
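A function like the one above is never called directly; Hyperas compiles the double-brace template into a hyperopt search space and drives it through optim.minimize. A minimal sketch of that driver, assuming a companion data() function returning (x_train, x_test, Y_train, Y_test) in the same order as the signature above, which this example does not show:

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model,      # the template function above
                                      data=data,        # hypothetical data provider
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())
print(best_run)  # the winning sample from the search space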
Example #27
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:
 
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    
    model = Sequential()

    model.add(Dropout({{uniform(0, 1)}}, input_shape=X_train.shape[1:], name="drop_input"))
    model.add(Dense({{choice([256, 512, 1024])}}, activation={{choice(['relu', 'tanh'])}}, name="fc1"))
    model.add(Dropout({{uniform(0, 1)}}, name="drop_fc1"))
    model.add(Dense({{choice([256, 512, 1024])}}, activation={{choice(['relu', 'tanh'])}}, name="fc2"))
    model.add(Dropout({{uniform(0, 1)}}, name="drop_fc2"))


    if conditional({{choice(['two', 'three'])}}) == 'three':
        model.add(Dense({{choice([256, 512, 1024])}}, activation={{choice(['relu', 'tanh'])}}, name="fc3"))
        model.add(Dropout({{uniform(0, 1)}}, name="drop_fc3"))

    model.add(Dense(Y_test.shape[1], activation='linear', name='out_linear'))

    model.summary()

    choiceval = {{choice(['adam', 'rmsprop', 'sgd'])}}
    if choiceval == 'adam':
        optim = Adam(lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
                     decay={{choice([1e-2, 1e-3, 1e-4])}})
    elif choiceval == 'rmsprop':
        optim = RMSprop(lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
                        decay={{choice([1e-2, 1e-3, 1e-4])}})
    else:
        optim = SGD(lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
                    decay={{choice([1e-2, 1e-3, 1e-4])}})


    model.compile(loss='categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])

    globalvars.globalVar += 1

    filepath = "D:/ORACLES_NN/py_outputs/weights_rsp_nn_hyperas" + var_in + str(globalvars.globalVar) + ".hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')

    csv_logger = CSVLogger('D:/ORACLES_NN/py_outputs/hyperas_rsp_nn_log_' + var_in + '.csv', 
                                   append=True, separator=';')
    tensor_board_logfile = 'D:/ORACLES_NN/py_logs/' + var_in + str(globalvars.globalVar)
    tensor_board = TensorBoard(log_dir=tensor_board_logfile, histogram_freq=0, write_graph=True)


    if 'results' not in globals():
        global results
        results = []

    history = model.fit(X_train, Y_train,
              batch_size={{choice([64, 128, 256])}},
              epochs=3,
              verbose=2,
              validation_data=(X_test, Y_test),
              callbacks=[checkpoint, csv_logger, tensor_board])

    print(history.history.keys())
    

    h1   = history.history
    acc_ = numpy.asarray(h1['acc'])
    loss_ = numpy.asarray(h1['loss'])
    val_loss_ = numpy.asarray(h1['val_loss'])
    val_acc_  = numpy.asarray(h1['val_acc'])
    parameters = space
    opt        = numpy.asarray(parameters["choiceval"])
    if choiceval == 'adam':
      lr         = numpy.asarray(parameters["lr"])
      decay      = numpy.asarray(parameters["decay"])
    elif choiceval == 'rmsprop':
      lr         = numpy.asarray(parameters["lr_1"])
      decay      = numpy.asarray(parameters["decay_1"])
    elif choiceval == 'sgd':
      lr         = numpy.asarray(parameters["lr_2"])
      decay      = numpy.asarray(parameters["decay_2"])
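    # When one label appears in several template branches, as 'lr' and 'decay'
    # do above, hyperas disambiguates the space keys with numeric suffixes
    # (lr, lr_1, lr_2, ...), which is why each optimizer branch reads a
    # different key.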

    results.append(parameters)


    acc_plot = 'D:/ORACLES_NN/py_plots/accuracy_run_' + var_in + str(globalvars.globalVar) + ".png"
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('run: ' + var_in + str(globalvars.globalVar) + " opt: " + str(opt) + " lr: " + str(lr) + " decay: " + str(decay))
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(acc_plot)   
    plt.close()  
    
    los_plot = 'D:/ORACLES_NN/py_plots/losses_run_' + var_in + str(globalvars.globalVar) + ".png"
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('run: ' + var_in + str(globalvars.globalVar) + " opt: " + str(opt) + " lr: " + str(lr) + " decay: " + str(decay))
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(los_plot)   
    plt.close()  
    

    
    acc_and_loss = numpy.column_stack((acc_, loss_, val_acc_, val_loss_))
    save_file_mlp = 'D:/ORACLES_NN/py_outputs/rsp_nn_run_' + var_in + '_' + str(globalvars.globalVar) + '.txt'
    with open(save_file_mlp, 'w') as f:
        numpy.savetxt(f, acc_and_loss, delimiter=",")


    score, acc = model.evaluate(X_test, Y_test, verbose=0)

    print("parameters for run " + var_in + str(globalvars.globalVar) + ":")
    print("-------------------------------")
    print(parameters)
    print("opt: ", opt)
    print("lr: ", lr)
    print("decay: ", decay)
    print('Test accuracy:', acc)

    save_file_params = 'D:/ORACLES_NN/py_outputs/params_run_' + var_in + '_' + str(globalvars.globalVar) + '.txt'
    rownames  = numpy.array(['Input', 'Run', 'optimizer', 'learning_rate', 'decay', 'train_accuracy','train_loss','val_accuracy', 'val_loss', 'test_accuracy'])
    rowvals   = (var_in, str(globalvars.globalVar), opt, lr, decay, acc_[-1], loss_[-1], val_acc_[-1], val_loss_[-1],acc)

    DAT =  numpy.column_stack((rownames, rowvals))
    numpy.savetxt(save_file_params, DAT, delimiter=",",fmt="%s")

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #28
def test_conditional():
    data = 'foo'
    assert data == conditional(data)
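The test above documents the essential property of conditional(): at run time it is the identity function. Its only job is to mark an expression so that the Hyperas template compiler substitutes the sampled {{choice(...)}} value inside it; once the template has been expanded, the call simply passes its argument through. A minimal runtime sketch:

from hyperas.distributions import conditional

assert conditional(42) == 42            # pass-through for any value
assert conditional('four') == 'four'    # so `conditional(...) == 'four'` tests the sampled value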
Example #29
def create_model(trainx_windows, testx_windows, trainy, testy):
    """
    Used by Hyperas to create and test a unique model by choosing a value at
    each location with double-bracketed ranges of values.

    Parameters:
    -------------
    trainx_windows: triaxial windowed training data, returned by get_data()
    testx_windows:  triaxial windowed testing data, returned by get_data()
    trainy:         one-hot labels for training the model, returned by get_data()
    testy:          one-hot labels for evaluating the model's predictions,
                    returned by get_data()

    Returns:
    -------------
    loss:   (negative) accuracy of the model's predictions
    status: parameter used by Hyperas
    model:  baseline LSTM model with unique hyperparameter selections

    """

    lstm_input = LSTM(input_shape=(128,3), units={{choice(np.arange(2,512))}},\
                    activation={{choice(['softmax', 'tanh', 'sigmoid', 'relu', 'linear'])}},\
                    recurrent_activation={{choice(['softmax', 'tanh', 'sigmoid', 'relu', 'linear'])}},\
                    use_bias={{choice([True, False])}},\
                    kernel_initializer={{choice(['zeros', 'ones', RandomNormal(), RandomUniform(minval=-1, maxval=1, seed=None), Constant(value=0.1), 'orthogonal', 'lecun_normal', 'glorot_uniform'])}},\
                    recurrent_initializer={{choice(['zeros', 'ones', RandomNormal(), RandomUniform(minval=-1, maxval=1, seed=None), Constant(value=0.1), 'orthogonal', 'lecun_normal', 'glorot_uniform'])}},\
                    unit_forget_bias=True,\
                    kernel_regularizer={{choice([None,'l2', 'l1'])}},\
                    recurrent_regularizer={{choice([None,'l2', 'l1'])}},\
                    bias_regularizer={{choice([None,'l2', 'l1'])}},\
                    activity_regularizer={{choice([None,'l2', 'l1'])}},\
                    kernel_constraint=None, recurrent_constraint=None,\
                    bias_constraint=None, dropout={{uniform(0, 1)}},\
                    recurrent_dropout={{uniform(0, 1)}},\
                    return_sequences=True, return_state=False,\
                    go_backwards=False, stateful=False, unroll=False)

    lstm_last = LSTM(units={{choice(np.arange(2,512))}},\
                    activation={{choice(['softmax', 'tanh', 'sigmoid', 'relu', 'linear'])}},\
                    recurrent_activation={{choice(['softmax', 'tanh', 'sigmoid', 'relu', 'linear'])}},\
                    use_bias={{choice([True, False])}},\
                    kernel_initializer={{choice(['zeros', 'ones', RandomNormal(), RandomUniform(minval=-1, maxval=1, seed=None), Constant(value=0.1), 'orthogonal', 'lecun_normal', 'glorot_uniform'])}},\
                    recurrent_initializer={{choice(['zeros', 'ones', RandomNormal(), RandomUniform(minval=-1, maxval=1, seed=None), Constant(value=0.1), 'orthogonal', 'lecun_normal', 'glorot_uniform'])}},\
                    unit_forget_bias=True,\
                    kernel_regularizer={{choice([None,'l2', 'l1'])}},\
                    recurrent_regularizer={{choice([None,'l2', 'l1'])}},\
                    bias_regularizer={{choice([None,'l2', 'l1'])}},\
                    activity_regularizer={{choice([None,'l2', 'l1'])}},\
                    kernel_constraint=None, recurrent_constraint=None,\
                    bias_constraint=None, dropout={{uniform(0, 1)}},\
                    recurrent_dropout={{uniform(0, 1)}},\
                    return_sequences=False, return_state=False,\
                    go_backwards=False, stateful=False, unroll=False)

    model = Sequential()
    model.add(lstm_input)
    if conditional({{choice([0,1])}}) == 1: model.add(BatchNormalization())
    model.add(lstm_last)
    if conditional({{choice([0,1])}}) == 1: model.add(BatchNormalization())
    model.add(Dense(6))
    model.add(Activation('softmax'))

    adam_lr = keras.optimizers.Adam(lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}}, clipnorm=1.)
    rmsprop_lr = keras.optimizers.RMSprop(lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}}, clipnorm=1.)
    sgd_lr = keras.optimizers.SGD(lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}}, clipnorm=1.)

    optims = {{choice(['adam', 'sgd', 'rmsprop', 'adagrad', 'nadam', 'adadelta'])}}
    if optims == 'adam': optim = adam_lr
    elif optims == 'rmsprop': optim = rmsprop_lr
    elif optims == 'sgd': optim = sgd_lr
    else: optim = optims

    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer=optim)

    # model_saver = ModelCheckpoint('model.{epoch:02d}-{val_loss:.2f}.hdf5')
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.01, patience=20, verbose=0, mode='auto')

    # added to collect optimization results
    if 'results' not in globals():
        global results
        results = []

    print(trainx_windows.shape)
    result = model.fit(trainx_windows, trainy, epochs=500, batch_size={{choice(np.arange(32, 450))}}, validation_split=0.2, callbacks=[early_stop]) #, model_saver])
    score, acc = model.evaluate(testx_windows, testy, verbose=1)
    # valLoss = result.history['val_mean_absolute_error'][-1]
    parameters = space
    print(parameters)
    results.append(parameters)

    tab_results = tabulate(results, headers="keys", tablefmt="fancy_grid", floatfmt=".8f")
    weights = model.get_weights()
    # print(weights)
    with open('../../output/hp_opt/weights.txt', 'a+') as model_summ:
        model_summ.write("model: {}\n\tweights:\n{}\n\tmodel_details:\n{}\n\tscore:\t{}".format(model, list(weights), tab_results, acc))

    # print(tab_results)

    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
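One caveat with this example: {{choice(np.arange(2, 512))}} presents TPE with 510 unordered categorical options, so the optimizer cannot exploit the fact that nearby unit counts behave similarly. A sketch of an alternative using a quantized uniform draw, assuming quniform is importable from hyperas.distributions alongside choice and uniform (the int() cast is needed because quniform yields floats):

units = int({{quniform(2, 512, 2)}})  # ordered search over even unit counts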
Example #30
def model(x_train, y_train, x_test, y_test):
    """
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    # Get the environment and extract the number of actions.
    env = gym.make(args.env_name)
    np.random.seed(123)
    env.seed(123)
    nb_actions = env.action_space.n

    # Next, we build our model. We use the same model that was described by Mnih et al. (2015).
    input_shape = (WINDOW_LENGTH, ) + INPUT_SHAPE
    model = Sequential()
    if K.image_dim_ordering() == 'tf':
        # (width, height, channels)
        model.add(Permute((2, 3, 1), input_shape=input_shape))
    elif K.image_dim_ordering() == 'th':
        # (channels, width, height)
        model.add(Permute((1, 2, 3), input_shape=input_shape))
    else:
        raise RuntimeError('Unknown image_dim_ordering.')
    model.add(Convolution2D(32, 8, 8, subsample=(4, 4)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2, 2)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1)))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense({{choice([16, 32, 48, 64])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense({{choice([16, 32, 48, 64])}}))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    if conditional({{choice(['four', 'five'])}}) == 'five':
        model.add(Dense({{choice([16, 32, 48, 64])}}))
        model.add({{choice([Dropout(0.5), Activation('linear')])}})
        model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    print(model.summary())

    # Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
    # even the metrics!
    memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)
    processor = AtariProcessor()

    # Select a policy. We use eps-greedy action selection, which means that a random action is selected
    # with probability eps. We anneal eps from 1.0 to 0.1 over the course of 1M steps. This is done so that
    # the agent initially explores the environment (high eps) and then gradually sticks to what it knows
    # (low eps). We also set a dedicated eps value that is used during testing. Note that we set it to 0.05
    # so that the agent still performs some random actions. This ensures that the agent cannot get stuck.
    policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                                  attr='eps',
                                  value_max=1.,
                                  value_min=.1,
                                  value_test=.05,
                                  nb_steps=1000000)

    # The trade-off between exploration and exploitation is difficult and an on-going research topic.
    # If you want, you can experiment with the parameters or use a different policy. Another popular one
    # is Boltzmann-style exploration:
    # policy = BoltzmannQPolicy(tau=1.)
    # Feel free to give it a try!

    dqn = DQNAgent(model=model,
                   nb_actions=nb_actions,
                   policy=policy,
                   memory=memory,
                   processor=processor,
                   nb_steps_warmup=50000,
                   gamma=.99,
                   target_model_update=10000,
                   train_interval=4,
                   delta_clip=1.)
    dqn.compile(Adam(lr=.00025), metrics=['mae'])
    dqn.fit(env, nb_steps=1750000, log_interval=10000, visualize=False)

    hist = dqn.test(env, nb_episodes=10, visualize=True)
    rewards = hist.history['episode_reward']

    loss = 1.0 / len(rewards) * np.sum([(90.0 - reward)**2
                                        for reward in rewards])

    print('Test Loss:', loss)
    # the squared shortfall from the target reward is already a quantity to
    # minimize, so it is returned as-is rather than negated
    return {'loss': loss, 'status': STATUS_OK, 'model': dqn}
Example #31
def fit_nc_tcn(train_set, val_set):
    global n_eval

    n_eval += 1
    print(str(datetime.datetime.now()) + ": iteration number: " + str(n_eval))

    with open("hyper_opt_result.txt", 'a') as f:
        f.write(
            str(datetime.datetime.now()) + ": iteration number: " +
            str(n_eval) + "\n")

    cfg = {
        "optimizer": {
            "name": "adam",
            "lr": 0.002,
            "clipnorm": 1,
            "beta_1": 0.9,
            "beta_2": 0.999,
            "epsilon": None,
            "decay": 0.0
        },
        "workers": 8,
        "use_multiprocessing": True,
        "n_epochs": 7
    }
    nb_filters = []
    kernel_size = {{
        choice(
            [11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41])
    }}
    dilations = {{
        choice([[2**i for i in range(4)], [2**i for i in range(5)],
                [2**i for i in range(6)], [2**i for i in range(7)],
                [2**i for i in range(8)], [2**i for i in range(9)],
                [2**i for i in range(10)]])
    }}
    nb_stacks = {{choice([3, 4, 5, 6, 7, 8, 9, 10])}}
    use_skip_connections = False
    n_layers = {{choice([1, 2, 3, 4, 5])}}

    nb_filters.append({{choice([8, 16, 32, 64])}})
    if conditional(n_layers) >= 2:
        nb_filters.append({{choice([8, 16, 32, 64])}})
        if conditional(n_layers) >= 3:
            nb_filters.append({{choice([8, 16, 32, 64])}})
            if conditional(n_layers) >= 4:
                nb_filters.append({{choice([8, 16, 32, 64])}})
                if conditional(n_layers) >= 5:
                    nb_filters.append({{choice([8, 16, 32, 64])}})
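    # The nesting above is deliberately unrolled: each {{choice(...)}} must
    # appear literally in the source for the Hyperas template compiler to
    # turn it into a search-space entry, so the per-layer filter counts
    # cannot be appended in a loop. The conditional() guards merely decide
    # how many of the sampled values actually end up in nb_filters.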

    model = tcn.create_tcn(list_n_filters=nb_filters,
                           kernel_size=kernel_size,
                           dilations=dilations,
                           nb_stacks=nb_stacks,
                           n_layers=n_layers,
                           use_skip_connections=use_skip_connections,
                           dropout_rate={{uniform(0.01, 0.25)}},
                           bidirectional=True)

    n_params = model.count_params()
    print(n_params)
    print(space)

    with open("hyper_opt_result.txt", 'a') as f:
        f.write(str(n_params))
        for key in space.keys():
            f.write(str(key) + " " + str(space[key]) + " ")
            f.write('\n')

    optimizer = optimizers.adam(lr=cfg["optimizer"]["lr"],
                                clipnorm=cfg["optimizer"]["clipnorm"])

    model.compile(loss=config.LOSS,
                  metrics=config.METRICS,
                  optimizer=optimizer)

    result = model.fit_generator(
        train_set,
        epochs=cfg["n_epochs"],
        validation_data=val_set,
        workers=cfg["workers"],
        use_multiprocessing=cfg["use_multiprocessing"],
        shuffle=True,
        verbose=0)

    validation_loss = np.amin(result.history['val_loss'])

    csv_line = [n_eval, n_params, validation_loss]
    for key in space.keys():
        csv_line.append(space[key])
    with open(r'fit_b_tcn_log.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow(csv_line)

    print('Best validation loss of epoch:', validation_loss)
    del model
    K.clear_session()
    return {'loss': validation_loss, 'status': STATUS_OK}
Example #32
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:
 
    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    model = Sequential()
    model.add(Dense(512, input_shape=(784, )))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation({{choice(['relu', 'sigmoid'])}}))
    model.add(Dropout({{uniform(0, 1)}}))

    if conditional({{choice(['three', 'four'])}}) == 'four':
        model.add(Dense(100))
        model.add(Dropout(0.5))
        model.add(Activation('relu'))

    model.add(Dense(10))
    model.add(Activation('softmax'))

    choiceval = {{choice(['adam', 'sgd', 'rmsprop'])}}
    if choiceval == 'adam':
        adam = Adam(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}},
            clipnorm=1.)
        optim = adam
    elif choiceval == 'rmsprop':
        rmsprop = RMSprop(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}},
            clipnorm=1.)
        optim = rmsprop
    else:
        sgd = SGD(
            lr={{choice([10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1])}},
            decay={{choice([1e-2, 1e-3, 1e-4])}},
            clipnorm=1.)

        optim = sgd

    model.compile(loss='categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])

    globalvars.globalVar += 1

    filepath = "../output/weights_fcn_hyperas" + str(
        globalvars.globalVar) + ".hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    csv_logger = CSVLogger('../output/hyperas_test_log.csv',
                           append=True,
                           separator=';')

    hist = model.fit(X_train,
                     Y_train,
                     batch_size={{choice([64, 128])}},
                     epochs=1,
                     verbose=2,
                     validation_data=(X_test, Y_test),
                     callbacks=[checkpoint, csv_logger])

    h1 = hist.history
    acc_ = numpy.asarray(h1['acc'])
    loss_ = numpy.asarray(h1['loss'])
    val_loss_ = numpy.asarray(h1['val_loss'])
    val_acc_ = numpy.asarray(h1['val_acc'])

    acc_and_loss = numpy.column_stack((acc_, loss_, val_acc_, val_loss_))
    save_file_mlp = '../output/mlp_run_' + str(
        globalvars.globalVar) + '.txt'
    with open(save_file_mlp, 'w') as f:
        numpy.savetxt(f, acc_and_loss, delimiter=" ")

    score, acc = model.evaluate(X_test, Y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #33
def fit_lstm(train_set, val_set):
    global n_eval

    n_eval += 1
    print(str(datetime.datetime.now()) + ": iteration number: " + str(n_eval))

    with open("hyper_opt_result.txt", 'a') as f:
        f.write(
            str(datetime.datetime.now()) + ": iteration number: " +
            str(n_eval) + "\n")

    cfg = {
        "optimizer": {
            "name": "SGD",
            "lr": 0.001,
            "momentum": 0.9,
            "decay": 0
        },
        "workers": 8,
        "use_multiprocessing": True,
        "n_epochs": 5
    }
    n_layer = {{choice([1, 2, 3, 4])}}
    layers = []

    layers.append({{choice([25, 50, 75, 100, 125, 150, 175, 200, 225, 250])}})
    if conditional(n_layer) >= 2:
        layers.append(
            {{choice([25, 50, 75, 100, 125, 150, 175, 200, 225, 250])}})
        if conditional(n_layer) >= 3:
            layers.append(
                {{choice([25, 50, 75, 100, 125, 150, 175, 200, 225, 250])}})
            if conditional(n_layer) >= 4:
                layers.append(
                    {{choice([25, 50, 75, 100, 125, 150, 175, 200, 225,
                              250])}})

    model = lstm.create_lstm(hidden_units=layers,
                             dropout={{uniform(0.05, 0.5)}},
                             bidirectional=True)

    n_params = model.count_params()
    print(n_params)
    print(space)

    with open("hyper_opt_result.txt", 'a') as f:
        f.write(str(n_params))
        for key in space.keys():
            f.write(str(key) + " " + str(space[key]) + " ")
            f.write('\n')

    optimizer = optimizers.SGD(lr=cfg["optimizer"]["lr"],
                               momentum=cfg["optimizer"]["momentum"],
                               decay=cfg["optimizer"]["decay"])

    model.compile(loss=config.LOSS,
                  metrics=config.METRICS,
                  optimizer=optimizer)

    result = model.fit_generator(
        train_set,
        epochs=cfg["n_epochs"],
        validation_data=val_set,
        workers=cfg["workers"],
        use_multiprocessing=cfg["use_multiprocessing"],
        shuffle=True,
        verbose=0)

    validation_loss = np.amin(result.history['val_loss'])

    csv_line = [n_eval, n_params, validation_loss]
    for key in space.keys():
        csv_line.append(space[key])
    with open(r'fit_b_lstm_log.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow(csv_line)

    print('Best validation loss of epoch:', validation_loss)
    del model
    K.clear_session()
    return {'loss': validation_loss, 'status': STATUS_OK}
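As with the array-based examples, fit_lstm is meant to be driven by Hyperas rather than called directly; the only difference is that its data function must return the two generator arguments. A minimal sketch, assuming a hypothetical make_sets() helper that builds the train_set and val_set objects used above; since fit_lstm returns no 'model' key, only the best_run dictionary is meaningful here:

from hyperopt import Trials, tpe
from hyperas import optim

def data():
    # hypothetical loader; must return exactly fit_lstm's arguments, in order
    train_set, val_set = make_sets()
    return train_set, val_set

best_run, _ = optim.minimize(model=fit_lstm,
                             data=data,
                             algo=tpe.suggest,
                             max_evals=50,
                             trials=Trials())
print(best_run)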