# Example #1
def test_update_gp(seed=42):
    """Exercise the UpdateGP callback through one epoch/batch cycle.

    For 1 and 2 GP outputs, builds a model, attaches the callback with
    train/validation data, fires the epoch/batch hooks, and checks that
    the callback recorded its timing and validation metrics in the logs.

    Parameters
    ----------
    seed : int
        Seed for the RNG that generates the dummy data.
    """
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data
        X_tr = rng.normal(size=(N, input_shape[0], input_shape[1]))
        # One (N, 1) target array per GP output head.
        # NOTE: `xrange` replaced with `range` for Python 3 compatibility.
        Y_tr = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]
        X_val = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_val = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model with one GP loss per GP output layer
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.gp_output_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Setup the callback and bind it to the model
        update_gp_callback = UpdateGP((X_tr, Y_tr),
                                      val_ins=(X_val, Y_val),
                                      batch_size=batch_size)
        update_gp_callback._set_model(model)

        # Simulate one epoch containing one batch; the callback is expected
        # to populate epoch_logs with its metrics on epoch end.
        epoch_logs, batch_logs = {}, {}
        batch_logs['size'] = batch_size
        batch_logs['ids'] = np.arange(batch_size)
        update_gp_callback.on_epoch_begin(1, epoch_logs)
        update_gp_callback.on_batch_begin(1, batch_logs)
        update_gp_callback.on_epoch_end(1, epoch_logs)

        assert 'gp_update_elapsed' in epoch_logs
        assert 'val_nlml' in epoch_logs
        assert 'val_mse' in epoch_logs
# Example #2
def test_compile():
    """Smoke test: a freshly built model compiles with generated GP losses."""
    model = build_model()

    # One loss term is generated for each GP output layer of the model.
    gp_losses = [gen_gp_loss(layer) for layer in model.gp_output_layers]

    # Compilation must succeed without raising.
    model.compile(optimizer=optimizer, loss=gp_losses)
# Example #3
def test_evaluate(seed=42):
    """Smoke-test `model.evaluate` on dummy data for 1 and 2 GP outputs.

    Parameters
    ----------
    seed : int
        Seed for the RNG that generates the dummy data.
    """
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data — one (N, 1) target array per output head.
        # NOTE: `xrange` replaced with `range` for Python 3 compatibility.
        X_ts = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_ts = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.gp_output_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Evaluate the model; the test only checks that evaluation runs.
        # `nlml` is kept for debuggability (presumably the negative log
        # marginal likelihood — TODO confirm against the model class).
        nlml = model.evaluate(X_ts, Y_ts, batch_size=batch_size, verbose=0)
# Example #4
def test_finetune(gp_n_iter=10, seed=42):
    """Smoke-test `model.finetune` on dummy data for 1 and 2 GP outputs.

    Parameters
    ----------
    gp_n_iter : int
        Number of GP hyperparameter optimization iterations to run.
    seed : int
        Seed for the RNG that generates the dummy data.
    """
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data — one (N, 1) target array per output head.
        # NOTE: `xrange` replaced with `range` for Python 3 compatibility.
        X_tr = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_tr = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.gp_output_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Finetune the model; the test only checks that finetuning runs.
        model.finetune(X_tr, Y_tr,
                       batch_size=batch_size,
                       gp_n_iter=gp_n_iter,
                       verbose=0)
# Example #5
def test_predict(seed=42):
    """Check `model.predict` output structure for 1 and 2 GP outputs.

    Asserts the prediction is a list with one array per output, each
    matching the shape of the corresponding target array.

    Parameters
    ----------
    seed : int
        Seed for the RNG that generates the dummy data.
    """
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data — one (N, 1) target array per output head.
        # NOTE: `xrange` replaced with `range` for Python 3 compatibility.
        X_tr = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_tr = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]
        X_ts = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_ts = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.gp_output_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Predict (GP prediction conditions on the training data)
        Y_pr = model.predict(X_ts, X_tr, Y_tr,
                             batch_size=batch_size, verbose=0)
        # `isinstance` is the idiomatic type check (was `type(...) is list`).
        assert isinstance(Y_pr, list)
        assert len(Y_pr) == len(Y_ts)
        assert np.all([(yp.shape == yt.shape) for yp, yt in zip(Y_pr, Y_ts)])
# Example #6
def test_assemble_gpgru():
    """Assemble and compile a GP-GRU model for both GP back-end types."""
    for gp_type in ['GP', 'MSGP']:
        # Build the model from the 1-hidden-layer GRU config plus the
        # selected GP config, then compile with one loss per output layer.
        model = assemble('GP-GRU', [gru_configs['1H'], gp_configs[gp_type]])
        gp_losses = [gen_gp_loss(layer) for layer in model.output_layers]
        model.compile(optimizer=optimizer, loss=gp_losses)

        # The assembled model must report itself as built.
        assert model.built
# Example #7
def main():
    """End-to-end GP-LSTM demo on the actuator data: load, train, finetune, test."""
    # Load data, split by position: first 45% train, next 10% valid, rest test
    X_train, y_train = load_data('actuator', stop=45.)
    X_valid, y_valid = load_data('actuator', start=45., stop=55.)
    X_test, y_test = load_data('actuator', start=55.)
    data = {
        'train': (X_train, y_train),
        'valid': (X_valid, y_valid),
        'test': (X_test, y_test),
    }

    # Standardize and window the sequences into lagged inputs / shifted targets
    data = preprocess_data(data,
                           standardize=True,
                           multiple_outputs=True,
                           t_lag=10,
                           t_future_shift=1,
                           t_future_steps=1,
                           t_sw_step=1)

    # Model & training parameters, derived from the preprocessed training set
    train_X, train_Y = data['train']
    nb_train_samples = train_X.shape[0]
    input_shape = train_X.shape[1:]
    nb_outputs = len(train_Y)
    gp_input_shape = (1, )
    batch_size = 128
    nb_epoch = 5

    # Retrieve model configs: the NN front-end and the GP back-end
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 H_dim=16,
                                 H_activation='tanh',
                                 dropout=0.1)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 cov='SEiso',
                                 hyp_lik=-2.0,
                                 hyp_cov=[[-0.7], [0.0]])

    # Construct & compile the model, one GP loss per output layer
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    gp_losses = [gen_gp_loss(layer) for layer in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=gp_losses)

    # Stop early when validation MSE plateaus
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model,
                    data,
                    callbacks=callbacks,
                    gp_n_iter=5,
                    checkpoint='lstm',
                    checkpoint_monitor='val_mse',
                    nb_epoch=nb_epoch,
                    batch_size=batch_size,
                    verbose=2)

    # Finetune: extra GP hyperparameter optimization on the training data
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model and report prediction RMSE
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)