Code Example #1
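The listings below are the main() functions from kgp (Keras Gaussian Processes) example scripts; their import headers are omitted. A minimal header along the following lines is assumed to accompany them; the exact kgp module paths are an assumption and may differ between versions of the project.

import numpy as np

from keras.optimizers import Adam
from keras.callbacks import EarlyStopping

# The kgp paths below are assumptions based on the project's example scripts.
from kgp.datasets.sysid import load_data            # 'actuator' system-identification data
from kgp.datasets.data_utils import data_to_seq     # sliding-window sequence construction
from kgp.utils.experiment import train              # training loop with checkpointing
from kgp.utils.assemble import load_NN_configs, load_GP_configs, assemble
from kgp.losses import gen_gp_loss                  # per-output GP losses for GP-LSTM models
from kgp.metrics import root_mean_squared_error as RMSE
# preprocess_data (used in Examples #1 and #3) is assumed to come from the same data utilities.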
def main():
    # Load data
    X_train, y_train = load_data('actuator', stop=45.)
    X_test, y_test = load_data('actuator', start=45., stop=90.)
    X_valid, y_valid = load_data('actuator', start=90.)
    data = {
        'train': (X_train, y_train),
        'valid': (X_valid, y_valid),
        'test': (X_test, y_test),
    }

    data = preprocess_data(data,
                           standardize=True,
                           t_lag=10,
                           t_future_shift=1,
                           t_future_steps=1,
                           t_sw_step=1,
                           seq_restart=False)

    # Model & training parameters
    input_shape = data['train'][0].shape[1:]
    output_shape = data['train'][1].shape[1:]
    batch_size = 16
    nb_epoch = 3

    # Retrieve model config
    configs = load_NN_configs(filename='lstm.yaml',
                              input_shape=input_shape,
                              output_shape=output_shape,
                              H_dim=128, H_activation='tanh',
                              dropout=0.1)

    # Construct & compile the model
    model = assemble('LSTM', configs['1H'])
    model.compile(optimizer=Adam(1e-1), loss='mse')

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_loss', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks,
                    checkpoint='lstm', checkpoint_monitor='val_loss',
                    nb_epoch=nb_epoch, batch_size=batch_size, verbose=2)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
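For reference, a rough sketch of the sliding-window transform that preprocess_data (and data_to_seq in the later examples) is assumed to apply: with t_lag=10, t_future_shift=1, t_future_steps=1 and t_sw_step=1, each input is a window of the last 10 time steps and each target is the value one step ahead. The helper name to_sequences is hypothetical and this is not kgp's own code.

import numpy as np

def to_sequences(X, y, t_lag, t_future_shift, t_future_steps, t_sw_step):
    # Assumed semantics: inputs are trailing windows of length t_lag,
    # targets start t_future_shift steps ahead and span t_future_steps steps.
    X_seq, y_seq = [], []
    for t in range(t_lag - 1, len(X) - t_future_shift - t_future_steps + 1, t_sw_step):
        X_seq.append(X[t - t_lag + 1:t + 1])
        y_seq.append(y[t + t_future_shift:t + t_future_shift + t_future_steps])
    return np.stack(X_seq), np.stack(y_seq)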
Code Example #2
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y,
        t_lag=32, t_future_shift=1, t_future_steps=1, t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = list(data['train'][0].shape[1:])
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    epochs = 100

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {},
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='gp_lstm_actuator', checkpoint_monitor='val_mse',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
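The "Re-format targets" loop above splits an (N, D) target array into D separate arrays of shape (N, 1), one per GP output head. A toy check of that reshaping with hypothetical shapes:

import numpy as np

y = np.arange(12.0).reshape(6, 2)               # 6 samples, 2 output dimensions
y = y.reshape((-1, 1, np.prod(y.shape[1:])))    # -> shape (6, 1, 2)
targets = [y[:, :, i] for i in range(y.shape[2])]
assert len(targets) == 2 and targets[0].shape == (6, 1)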
Code Example #3
File: gp_lstm_actuator.py  Project: vyraun/kgp
def main():
    # Load data
    X_train, y_train = load_data('actuator', stop=45.)
    X_valid, y_valid = load_data('actuator', start=45., stop=55.)
    X_test, y_test = load_data('actuator', start=55.)
    data = {
        'train': (X_train, y_train),
        'valid': (X_valid, y_valid),
        'test': (X_test, y_test),
    }

    data = preprocess_data(data,
                           standardize=True,
                           multiple_outputs=True,
                           t_lag=10,
                           t_future_shift=1,
                           t_future_steps=1,
                           t_sw_step=1)

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1, )
    batch_size = 128
    nb_epoch = 5

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 H_dim=16,
                                 H_activation='tanh',
                                 dropout=0.1)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 cov='SEiso',
                                 hyp_lik=-2.0,
                                 hyp_cov=[[-0.7], [0.0]])

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model,
                    data,
                    callbacks=callbacks,
                    gp_n_iter=5,
                    checkpoint='lstm',
                    checkpoint_monitor='val_mse',
                    nb_epoch=nb_epoch,
                    batch_size=batch_size,
                    verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
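The GP hyperparameters used here (and in the other GP-LSTM examples) appear to follow the log-scale convention of the GPML backend that kgp wraps; under that assumption they translate to the following values.

import numpy as np

noise_std   = np.exp(-2.0)   # hyp_lik: log noise std             -> ~0.135
lengthscale = np.exp(-0.7)   # hyp_cov[0]: log SEiso lengthscale  -> ~0.497
signal_std  = np.exp(0.0)    # hyp_cov[1]: log SEiso signal std   -> 1.0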
Code Example #4
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X,
                               y,
                               t_lag=32,
                               t_future_shift=1,
                               t_future_steps=1,
                               t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Model & training parameters
    input_shape = list(data['train'][0].shape[1:])
    output_shape = list(data['train'][1].shape[1:])
    batch_size = 16
    epochs = 100

    nn_params = {
        'H_dim': 32,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }

    # Retrieve model config
    configs = load_NN_configs(filename='lstm.yaml',
                              input_shape=input_shape,
                              output_shape=output_shape,
                              params=nn_params)

    # Construct & compile the model
    model = assemble('LSTM', configs['1H'])
    model.compile(optimizer=Adam(1e-1), loss='mse')

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_loss', patience=10)]

    # Train the model
    history = train(model,
                    data,
                    callbacks=callbacks,
                    checkpoint='lstm_actuator',
                    checkpoint_monitor='val_loss',
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=2)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
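Each example reports its test error through the RMSE helper. A minimal NumPy equivalent is sketched below; it is not necessarily kgp's own implementation, which also accepts lists of per-output arrays.

import numpy as np

def rmse(y_true, y_pred):
    # Root-mean-squared error between two equally shaped arrays.
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    return np.sqrt(np.mean((y_true - y_pred) ** 2))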
Code Example #5
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X,
                               y,
                               t_lag=32,
                               t_future_shift=1,
                               t_future_steps=1,
                               t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1, )
    batch_size = 128
    epochs = 5

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {
            'cg_maxit': 500,
            'cg_tol': 1e-4
        },
        'grid_kwargs': {
            'eq': 1,
            'k': 1e2
        },
        'update_grid': False,  # when using manual grid, turn off grid updates
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Specify manual grid for MSGP (100 equidistant points per input dimension).
    # Note: each np.ndarray in xg must be a column vector.
    gp_configs['MSGP']['config']['grid_kwargs']['xg'] = (
        gp_input_shape[0] * [np.linspace(-1.0, 1.0, 100)[:, None]])

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['MSGP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model,
                    data,
                    callbacks=callbacks,
                    gp_n_iter=5,
                    checkpoint='lstm',
                    checkpoint_monitor='val_mse',
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
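As a quick sanity check of the manual MSGP grid built in this example: with gp_input_shape == (1,), the expression yields a list containing a single (100, 1) column vector, which matches the column-vector requirement noted in the comment.

import numpy as np

xg = 1 * [np.linspace(-1.0, 1.0, 100)[:, None]]   # gp_input_shape[0] == 1
assert len(xg) == 1 and xg[0].shape == (100, 1)   # one column vector per GP input dimension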