Example 1
import numpy as np
from keras.layers import Input, Dense, Dropout, BatchNormalization
from kgp.layers import GP
from kgp.models import Model

def assemble_mlp(input_shape, output_shape, batch_size, nb_train_samples):
    """Assemble a simple MLP model with a GP output layer.

    Note: `output_shape` is accepted but not used in this snippet.
    """
    inputs = Input(shape=input_shape)
    hidden = Dense(1024, activation='relu', name='dense1')(inputs)
    hidden = BatchNormalization(name='batchnorm1')(hidden)
    hidden = Dropout(rate=0.02, name='dropout1')(hidden)
    hidden = Dense(1024, activation='relu', name='dense2')(hidden)
    hidden = BatchNormalization(name='batchnorm2')(hidden)
    hidden = Dense(128, activation='relu', name='dense3')(hidden)
    hidden = BatchNormalization(name='batchnorm3')(hidden)
    hidden = Dense(16, activation='relu', name='dense4')(hidden)
    hidden = BatchNormalization(name='batchnorm4')(hidden)    
    hidden = Dense(2, activation='relu', name='dense5')(hidden)
    hidden = BatchNormalization(name='batchnorm5')(hidden)

    gp = GP(hyp={
                'lik': np.log(0.3),
                'mean': [],
                'cov': [[0.5], [1.0]],
            },
            inf='infGrid', dlik='dlikGrid',
            opt={'cg_maxit': 2000, 'cg_tol': 1e-6},
            mean='meanZero', cov='covSEiso',
            update_grid=1,
            grid_kwargs={'eq': 1, 'k': 70.},
            batch_size=batch_size,
            nb_train_samples=nb_train_samples)
    outputs = [gp(hidden)]
    return Model(inputs=inputs, outputs=outputs)
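A minimal usage sketch for this assembler, following the compile pattern from Example 8 below; the input width (64) is hypothetical:

from keras.optimizers import Adam
from kgp.losses import gen_gp_loss

batch_size, nb_train_samples = 32, 512
model = assemble_mlp((64,), (1,), batch_size, nb_train_samples)

# GP output layers are trained with the GP marginal-likelihood loss
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer=Adam(1e-2), loss=loss)
model.summary()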
Example 2
import numpy as np
from keras.layers import Input, Dense, Dropout
from kgp.layers import GP
from kgp.models import Model

def assemble_mlp(input_shape, output_shape, batch_size, nb_train_samples):
    """Assemble a simple MLP model with dropout and a GP output layer.

    Note: `output_shape` is accepted but not used in this snippet.
    """

    inputs = Input(shape=input_shape)
    hidden = Dense(512, activation='relu', name='dense1')(inputs)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(512, activation='relu', name='dense2')(hidden)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(1, activation='linear')(hidden)
    gp = GP(hyp={
        'lik': np.log(0.3),
        'mean': [],
        'cov': [[0.5], [1.0]],
    },
            inf='infGrid',
            dlik='dlikGrid',
            opt={
                'cg_maxit': 20000,
                'cg_tol': 1e-4
            },
            mean='meanZero',
            cov='covSEiso',
            update_grid=1,
            grid_kwargs={
                'eq': 1,
                'k': 70.
            },
            batch_size=batch_size,
            nb_train_samples=nb_train_samples)
    outputs = [gp(hidden)]
    return Model(inputs=inputs, outputs=outputs)
Example 3
import numpy as np
from keras.layers import Input, Dense, Dropout
from kgp.layers import GP
from kgp.models import Model

def assemble_mlp(input_shape, batch_size, nb_train_samples):
    """Assemble a simple MLP model with a spectral-mixture GP output.

    Note: `initCovSM` is not defined in this snippet; see the sketch
    after this example for a hypothetical stand-in.
    """
    inputs = Input(shape=input_shape)
    hidden = Dense(1024, activation='tanh', name='dense1')(inputs)
    hidden = Dropout(0.5)(hidden)
    hidden = Dense(512, activation='tanh', name='dense2')(hidden)
    hidden = Dropout(0.25)(hidden)
    hidden = Dense(64, activation='tanh', name='dense3')(hidden)
    hidden = Dropout(0.1)(hidden)
    hidden = Dense(2, activation='tanh', name='dense4')(hidden)

    gp = GP(hyp={
        'lik': np.log(0.3),
        'mean': np.zeros((2, 1)).tolist() + [[0]],
        'cov': initCovSM(4, 1),
    },
        inf='infGrid', dlik='dlikGrid',
        opt={'cg_maxit': 2000, 'cg_tol': 1e-6},
        mean='meanSum', cov='covSM',
        update_grid=1,
        grid_kwargs={'eq': 1, 'k': 10.},
        cov_args=[4],
        mean_args=['{@meanLinear, @meanConst}'],
        batch_size=batch_size,
        nb_train_samples=nb_train_samples)
    outputs = [gp(hidden)]
    return Model(inputs=inputs, outputs=outputs)
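`initCovSM` is not defined in this snippet. In GPML, the spectral-mixture kernel `covSM` with Q components over D-dimensional inputs takes Q*(1+2D) hyperparameters (log weights, log spectral means, log spectral variances), so a hypothetical stand-in consistent with the call `initCovSM(4, 1)` might look like:

import numpy as np

def initCovSM(Q, D):
    # Hypothetical initializer: Q log-weights, Q*D log spectral means,
    # and Q*D log spectral variances, shaped as a column of singleton
    # lists like the other hyperparameters in these examples.
    w = np.log(np.ones(Q) / Q)          # equal mixture weights
    m = np.log(np.random.rand(Q * D))   # random spectral means
    v = np.log(np.ones(Q * D))          # unit spectral variances
    return np.concatenate([w, m, v]).reshape(-1, 1).tolist()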
Example 4
from keras.layers import Input, LSTM, Dense
from kgp.layers import GP
from kgp.models import Model

def build_model(nb_outputs=2):
    # `input_shape`, `lstm_dim`, `dense_dim`, and `gp_test_config` are
    # module-level names defined elsewhere (see the sketch below).
    inputs = Input(shape=input_shape)

    # Neural transformations
    lstm = LSTM(lstm_dim)(inputs)
    dense = Dense(dense_dim)(lstm)

    # GP outputs (one independent GP per output)
    outputs = [GP(**gp_test_config)(dense) for _ in range(nb_outputs)]

    # Build the model
    model = Model(inputs=inputs, outputs=outputs)

    return model
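`build_model` relies on module-level names defined elsewhere in its source module; hypothetical values consistent with the other examples might be:

input_shape = (10, 2)  # 10 time steps, 2 dimensions, as in Example 8
lstm_dim = 32
dense_dim = 16
gp_test_config = {
    'hyp': {'lik': -2.0, 'mean': [], 'cov': [[-0.7], [0.0]]},
    'batch_size': 32,
    'nb_train_samples': 512,
}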
Example 5
from kgp.layers import GP
from kgp.models import Model

def assemble_gprnn(nn_params, gp_params):
    """Construct a GP-RNN/LSTM/GRU model of the form: X-[H1-H2-...-HN]-GP-Y.
    """
    # Assemble the RNN core (`assemble_rnn` is defined elsewhere
    # in the source module)
    RNN = assemble_rnn(nn_params, final_reshape=False)

    # Inputs and RNN layer
    inputs = RNN.inputs
    rnn = RNN(inputs)

    # Output GP layers (one GP per output; see the hypothetical
    # `gp_params` sketch after this example)
    outputs = [
        GP(**gp_params['config'])(rnn) for _ in range(gp_params['nb_outputs'])
    ]

    return Model(inputs=inputs, outputs=outputs)
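`assemble_gprnn` only reads `gp_params['config']` (forwarded as GP keyword arguments) and `gp_params['nb_outputs']`; a hypothetical dict reusing the settings from Example 1:

import numpy as np

gp_params = {
    'nb_outputs': 2,
    'config': {
        'hyp': {'lik': np.log(0.3), 'mean': [], 'cov': [[0.5], [1.0]]},
        'inf': 'infGrid', 'dlik': 'dlikGrid',
        'opt': {'cg_maxit': 2000, 'cg_tol': 1e-6},
        'mean': 'meanZero', 'cov': 'covSEiso',
        'update_grid': 1,
        'grid_kwargs': {'eq': 1, 'k': 70.},
        'batch_size': 32,
        'nb_train_samples': 512,
    },
}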
Example 6
from keras.layers import Input, Conv1D, MaxPooling1D, GRU, Dense, Flatten
from kgp.layers import GP
from kgp.models import Model

def create_model(horizon=1, nb_train_samples=512, batch_size=32, feature_count=11):

    x = Input(shape=(6, feature_count), name="input_layer")
    conv = Conv1D(kernel_size=3, filters=5, activation='relu', dilation_rate=1)(x)
    conv2 = Conv1D(5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=2)(conv)
    conv3 = Conv1D(5, kernel_size=3, padding='causal', strides=1, activation='relu', dilation_rate=4)(conv2)
    mp = MaxPooling1D(pool_size=2)(conv3)
    # conv2 = Conv1D(filters=5, kernel_size=3, activation='relu')(mp)
    # mp = MaxPooling1D(pool_size=2)(conv2)

    lstm1 = GRU(16, return_sequences=True)(mp)
    lstm2 = GRU(32, return_sequences=True)(lstm1)

    shared_dense = Dense(64, name="shared_layer")(lstm2)
    shared_dense = Flatten()(shared_dense)
    sub1 = Dense(16, name="task1")(shared_dense)
    # sub2 = Dense(16, name="task2")(shared_dense)
    # sub3 = Dense(16, name="task3")(shared_dense)
    # sub1 = GRU(units=16, name="task1")(shared_dense)
    # sub2 = GRU(units=16, name="task2")(shared_dense)
    # sub3 = GRU(units=16, name="task3")(shared_dense)

    # out1_gp = Dense(1, name="out1_gp")(sub1)
    out1 = Dense(1, name="out1")(sub1)
    # out2 = Dense(1, name="out2")(sub2)
    # out3 = Dense(1, name="out3")(sub3)
    # Gaussian process settings
    gp_hypers = {'lik': -2.0, 'cov': [[-0.7], [0.0]]}
    # NOTE: `gp_params` is assembled here but never used; only `gp_hypers`
    # is passed to the GP layers below.
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {'cg_maxit': 500, 'cg_tol': 1e-4},
        'grid_kwargs': {'eq': 1, 'k': 1e2},
        'update_grid': True,
    }
    gp1 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)
    # gp2 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)
    # gp3 = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)

    outputs = [gp1(out1)]

    model = Model(inputs=x, outputs=outputs)

    # NOTE: with kgp, GP output layers are typically trained with
    # gen_gp_loss (see Example 8 and the sketch below) rather than 'mse'.
    model.compile(optimizer='adam', loss='mse', metrics=['mae', 'mape', 'mse'])
    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    model.summary()

    return model
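A minimal sketch of the gen_gp_loss compile mentioned in the comment above, following Example 8 (recompiling replaces the 'mse' loss set inside create_model):

from kgp.losses import gen_gp_loss

model = create_model()
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer='adam', loss=loss)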
Example 7
from kgp.layers import GP
from kgp.models import Model

def assemble_gpnarx(nn_params, gp_params):
    """Construct a GP-NARX model of the form: X-[H1-H2-...-HN]-GP-Y.
    """
    # Assemble the NARX core (`assemble_narx` is defined elsewhere
    # in the source module)
    NARX = assemble_narx(nn_params, final_reshape=False)

    # Inputs and NARX layer
    inputs = NARX.inputs
    narx = NARX(inputs)

    # Output GP layers (one GP per output)
    outputs = [
        GP(**gp_params['config'])(narx)
        for _ in range(gp_params['nb_outputs'])
    ]

    return Model(inputs=inputs, outputs=outputs)
Example 8
from keras.layers import Input, SimpleRNN
from keras.optimizers import Adam

from kgp.layers import GP
from kgp.models import Model
from kgp.losses import gen_gp_loss

input_shape = (10, 2)  # 10 time steps, 2 dimensions
batch_size = 32
nb_train_samples = 512
gp_hypers = {'lik': -2.0, 'cov': [[-0.7], [0.0]]}

# Build the model
inputs = Input(shape=input_shape)
rnn = SimpleRNN(32)(inputs)
gp = GP(gp_hypers, batch_size=batch_size, nb_train_samples=nb_train_samples)
outputs = [gp(rnn)]
model = Model(inputs=inputs, outputs=outputs)

# Compile the model
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer=Adam(1e-2), loss=loss)

model.summary()
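To exercise the compiled model, a sketch with random data shaped to match nb_train_samples and input_shape (kgp's Model is assumed to follow the standard Keras fit/predict signature):

import numpy as np

X_train = np.random.rand(nb_train_samples, *input_shape)  # (512, 10, 2)
y_train = np.random.rand(nb_train_samples, 1)

model.fit(X_train, y_train, batch_size=batch_size, epochs=5, verbose=1)
y_pred = model.predict(X_train, batch_size=batch_size)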