Example #1
def build_lstmgpt(input_shape, gp_input_shape, nb_outputs, batch_size,
                  nb_train_samples):
    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'RQiso',
        'hyp_lik': -1.0,
        'hyp_cov': [[1.], [1.], [0.0]],
        'opt': {},
    }

    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['2H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    return model
Example #2
def test_update_gp(seed=42):
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data
        X_tr = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_tr = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]
        X_val = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_val = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Setup the callback
        update_gp_callback = UpdateGP((X_tr, Y_tr),
                                      val_ins=(X_val, Y_val),
                                      batch_size=batch_size)
        update_gp_callback.set_model(model)

        # Test the callback
        epoch_logs, batch_logs = {}, {}
        batch_logs['size'] = batch_size
        batch_logs['ids'] = np.arange(batch_size)
        update_gp_callback.on_epoch_begin(1, epoch_logs)
        update_gp_callback.on_batch_begin(1, batch_logs)
        update_gp_callback.on_epoch_end(1, epoch_logs)

        assert 'gp_update_elapsed' in epoch_logs
        assert 'val_nlml' in epoch_logs
        assert 'val_mse' in epoch_logs
Example #3
def main():
    # Load data
    dataset = np.loadtxt("kin40ktraindata.txt", delimiter=",")
    X_train = dataset[:, 0:8]
    y_train = dataset[:, 8]

    dataset = np.loadtxt("kin40ktestdata.txt", delimiter=",")
    X_test = dataset[:, 0:8]
    y_test = dataset[:, 8]

    X_valid, y_valid = X_test, y_test
    X_train, X_test, X_valid = standardize_data(X_train, X_test, X_valid)

    data = {
        'train': (X_train, y_train),
        'valid': (X_valid, y_valid),
        'test': (X_test, y_test),
    }

    # Model & training parameters
    input_shape = data['train'][0].shape[1:]
    output_shape = data['train'][1].shape[1:]
    batch_size = 128
    epochs = 100

    # Construct & compile the model
    model = assemble_mlp(input_shape,
                         output_shape,
                         batch_size,
                         nb_train_samples=len(X_train))
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-4), loss=loss)

    # Load saved weights (if they exist)
    #if os.path.isfile('checkpoints/msgp_mlp_kin40k.h5'):
    #    model.load_weights('checkpoints/msgp_mlp_kin40k.h5', by_name=True)

    # Train the model
    history = train(model,
                    data,
                    callbacks=[],
                    gp_n_iter=5,
                    checkpoint='msgp_mlp_kin40k',
                    checkpoint_monitor='val_loss',
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=1)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
Example #4
File: test_models.py Project: robi56/kgp
def test_compile():
    model = build_model()

    # Generate losses for GP outputs
    loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]

    # Compile the model
    model.compile(optimizer=optimizer, loss=loss)
Example #5
def GPLSTM(shift, lr, batch_size, data):
    '''
    Parameters
    ----------
    shift : Integer
        Number of steps into the future to predict.
    lr : Float
        Learning rate for model training.
    batch_size : Integer
        Batch size for training.
    data : Dictionary
        Dictionary containing the training, validation, and test data for the model.

    Returns
    -------
    model : Optimized Model
        Returns the optimized deep learning model.
    '''
    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)

    nn_params = {
        'H_dim': 4,
        'H_activation': 'tanh',
        'dropout': 0.0,
    }

    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': np.log(0.1),
        'hyp_cov': [[1.0], [1.0]],
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(lr), loss=loss)

    return model
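
The docstring above describes the arguments but not the layout of `data`. Below is a minimal usage sketch, assuming the dictionary format used by the other GP-LSTM examples in this collection (a 3-D input array plus a list of per-output target arrays); the shapes, the one-step `shift`, and the hyperparameter values are illustrative only, not taken from the original project.

import numpy as np

# Dummy sequence data: windows of 32 time steps with 1 feature each,
# and a single regression target per window (shapes are illustrative).
rng = np.random.RandomState(42)
X = rng.normal(size=(256, 32, 1))
y = [rng.normal(size=(256, 1))]

data = {
    'train': (X[:128], [t[:128] for t in y]),
    'valid': (X[128:192], [t[128:192] for t in y]),
    'test':  (X[192:], [t[192:] for t in y]),
}

# Build and compile a GP-LSTM for one-step-ahead prediction.
model = GPLSTM(shift=1, lr=1e-2, batch_size=32, data=data)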
Example #6
File: test_models.py Project: robi56/kgp
def test_evaluate(seed=42):
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data
        X_ts = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_ts = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Evaluate the model
        nlml = model.evaluate(X_ts, Y_ts, batch_size=batch_size, verbose=0)
Example #7
File: test_models.py Project: robi56/kgp
def test_fit(epochs=10, seed=42):
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data
        X_tr = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_tr = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Train the model
        model.fit(X_tr, Y_tr, epochs=epochs, batch_size=batch_size, verbose=2)
Example #8
def prep_gp_bbox(sess, x, y, X_train, Y_train, X_test, Y_test,
                 nb_epochs, batch_size, learning_rate, rng):
    """
    Define and train a model that simulates the "remote"
    black-box oracle described in the original paper.
    :param sess: the TF session
    :param x: the input placeholder for MNIST
    :param y: the output placeholder for MNIST
    :param X_train: the training data for the oracle
    :param Y_train: the training labels for the oracle
    :param X_test: the testing data for the oracle
    :param Y_test: the testing labels for the oracle
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param rng: numpy.random.RandomState
    :return: the trained model, its predictions tensor, and its test accuracy
    """

    # Define TF model graph (for the black-box model)
    model = gp_model()
    predictions = model(x)
    log_raw.info("Defined TensorFlow model graph.")

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)
    model_train(sess, x, y, predictions, X_train, Y_train, verbose=False,
                args=train_params, rng=rng)

    # Print out the accuracy on legitimate data
    eval_params = {'batch_size': batch_size}
    accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,
                          args=eval_params)
    log_raw.info('Test accuracy of black-box on legitimate test '
                 'examples: ' + str(accuracy))

    return model, predictions, accuracy
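
prep_gp_bbox follows a cleverhans-style black-box setup, so it needs a TF 1.x session, input/label placeholders, the MNIST arrays, and a NumPy RNG. The sketch below only illustrates that wiring with dummy data of MNIST-like shape; a real script would load MNIST and would also need gp_model, model_train, model_eval, and log_raw defined, as assumed by the function above.

import numpy as np
import tensorflow as tf  # TF 1.x graph-mode API assumed

rng = np.random.RandomState(0)

# Placeholders for MNIST-shaped inputs and one-hot labels.
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))

# Dummy stand-ins for the real MNIST split (illustrative shapes only).
X_train = rng.rand(512, 28, 28, 1).astype('float32')
Y_train = np.eye(10)[rng.randint(0, 10, size=512)].astype('float32')
X_test = rng.rand(128, 28, 28, 1).astype('float32')
Y_test = np.eye(10)[rng.randint(0, 10, size=128)].astype('float32')

with tf.Session() as sess:
    model, predictions, accuracy = prep_gp_bbox(
        sess, x, y, X_train, Y_train, X_test, Y_test,
        nb_epochs=6, batch_size=128, learning_rate=1e-3, rng=rng)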
Example #9
File: test_models.py Project: robi56/kgp
def test_predict(seed=42):
    rng = np.random.RandomState(seed)

    for nb_outputs in [1, 2]:
        # Generate dummy data
        X_tr = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_tr = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]
        X_ts = rng.normal(size=(N, input_shape[0], input_shape[1]))
        Y_ts = [rng.normal(size=(N, 1)) for _ in range(nb_outputs)]

        # Build & compile the model
        model = build_model(nb_outputs)
        loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
        model.compile(optimizer=optimizer, loss=loss)

        # Predict
        Y_pr = model.predict(X_ts,
                             X_tr,
                             Y_tr,
                             batch_size=batch_size,
                             verbose=0)
        assert type(Y_pr) is list
        assert len(Y_pr) == len(Y_ts)
        assert np.all([(yp.shape == yt.shape) for yp, yt in zip(Y_pr, Y_ts)])
Example #10
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y,
        t_lag=32, t_future_shift=1, t_future_steps=1, t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:,:,i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = list(data['train'][0].shape[1:])
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    epochs = 100

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {},
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='gp_lstm_actuator', checkpoint_monitor='val_mse',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
Example #11
    print('Loaded Data...')
    print('  train:', data['train'][0].shape)
    print('  valid:', data['valid'][0].shape)
    print('  test:',  data['test'][0].shape)

    # Model & training parameters
    input_shape = data['train'][0].shape[1:]
    output_shape = data['train'][1].shape[1:]
    batch_size = 2**10
    epochs = 5000

    # Construct & compile the model
    model = assemble_mlp(input_shape, output_shape, batch_size,
                         nb_train_samples=len(X_train))
    opt = Adam(lr=1e-4)
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=opt, loss=loss)

    # Load saved weights (if they exist)
    # if os.path.isfile('checkpoints/msgp_mlp.h5'):
    #     model.load_weights('checkpoints/msgp_mlp.h5', by_name=True)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=50)]

    # Train the model
    history = train(model, data, gp_n_iter=5,
                    epochs=epochs, batch_size=batch_size, 
                    callbacks=callbacks, tensorboard=True,
                    checkpoint='msgp_mlp', checkpoint_monitor='val_mse', 
                    verbose=1)
Example #12
    def opt_regressor(self):
        self.model = assemble('GP-LSTM',
                              [self.nn_configs['2H'], self.gp_configs['GP']])
        loss = [gen_gp_loss(gp) for gp in self.model.output_gp_layers]
        self.model.compile(optimizer=Adam(1e-2), loss=loss)
        return
Example #13
    def build_train_GPLSTM(self):
        '''
        Define the GP-LSTM architecture and train it.

        Returns
        -------
        history : Dictionary
            Training information.
        y_test : Numpy Array
            Ground-truth test targets.
        y_pred : Numpy Array
            Predicted outputs.
        var : Numpy Array
            Predicted variances.
        rmse_predict : Float
            Test RMSE.
        model : Optimized Model
            Optimized deep learning model after training.
        data : Dictionary
            Train, test and validation sets.
        '''
        data = self.data

        # Model & training parameters
        nb_train_samples = data['train'][0].shape[0]
        input_shape = data['train'][0].shape[1:]
        nb_outputs = len(data['train'][1])
        gp_input_shape = (1,)

        nn_params = {
            'H_dim': self.hdim,
            'H_activation': 'tanh',
            'dropout': 0.0,
        }

        gp_params = {
            'cov': 'SEiso',
            'hyp_lik': np.log(0.3),
            'hyp_cov': [[4.0], [0.1]],
            'opt': {
                'cg_maxit': 2000,
                'cg_tol': 1e-4,
                # 'deg': 3,
                'pred_var': -100,
            },
        }
        
        # Retrieve model config
        nn_configs = load_NN_configs(filename='lstm.yaml',
                                     input_shape=input_shape,
                                     output_shape=gp_input_shape,
                                     params=nn_params)
        gp_configs = load_GP_configs(filename='gp.yaml',
                                     nb_outputs=nb_outputs,
                                     batch_size=self.batch_size,
                                     nb_train_samples=nb_train_samples,
                                     params=gp_params)
    
        # Construct & compile the model
        model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
        loss = [gen_gp_loss(gp) for gp in model.output_layers]
        model.compile(optimizer=Adam(1e-5), loss=loss)
    
        # Callbacks
        callbacks = [EarlyStopping(monitor='val_mse', patience=2000)]
    
        # Train the model
        history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                        checkpoint='checkpL3_predmode_' + str(pred_mode) +
                                   '_test_' + str(test),
                        checkpoint_monitor='val_mse',
                        epochs=self.epochs, batch_size=self.batch_size,
                        verbose=1)
        
        # Finetune the model
        model.finetune(*data['train'],
                       batch_size=self.batch_size,
                       gp_n_iter=100,
                       verbose=0)
          
        # Test the model
        X_test, y_test = data['test']
        X_train, y_train = data['train']

        y_pred, var = model.predict(X_test, return_var=True,
                                    X_tr=X_train, Y_tr=y_train,
                                    batch_size=self.batch_size)
        var = np.array(var)
        rmse_predict = RMSE(y_test, y_pred)
        print('Test predict RMSE:', rmse_predict)
        print('mean variance:', var.mean())

        return history, y_test, y_pred, var, rmse_predict, model, data
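
build_train_GPLSTM is written as a method, so it expects its owning object to provide self.data, self.hdim, self.batch_size, and self.epochs (note that the checkpoint name inside it also references module-level pred_mode and test variables not shown in this snippet). The hypothetical wrapper below exists only to make those attribute requirements explicit; the class name and default values are illustrative, not part of the original project.

# Hypothetical wrapper class: build_train_GPLSTM above would be a method of a class like this.
class GPLSTMExperiment:
    def __init__(self, data, hdim=4, batch_size=128, epochs=100):
        self.data = data              # dict with 'train' / 'valid' / 'test' entries
        self.hdim = hdim              # LSTM hidden size fed into nn_params['H_dim']
        self.batch_size = batch_size
        self.epochs = epochs

# Typical call, given a data dict shaped like the other GP-LSTM examples:
# exp = GPLSTMExperiment(data)
# history, y_test, y_pred, var, rmse, model, data = exp.build_train_GPLSTM()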
Example #14
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X,
                               y,
                               t_lag=32,
                               t_future_shift=1,
                               t_future_steps=1,
                               t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1, )
    batch_size = 128
    epochs = 5

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {
            'cg_maxit': 500,
            'cg_tol': 1e-4
        },
        'grid_kwargs': {
            'eq': 1,
            'k': 1e2
        },
        'update_grid': False,  # when using manual grid, turn off grid updates
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Specify manual grid for MSGP (100 equidistant points per input dimension).
    # Note: each np.ndarray in the xg must be a column vector.
    gp_configs['MSGP']['config']['grid_kwargs']['xg'] = (
        gp_input_shape[0] * [np.linspace(-1.0, 1.0, 100)[:, None]])

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['MSGP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model,
                    data,
                    callbacks=callbacks,
                    gp_n_iter=5,
                    checkpoint='lstm',
                    checkpoint_monitor='val_mse',
                    epochs=epochs,
                    batch_size=batch_size,
                    verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
Example #15
def test_assemble_gpgru():
    for gp_type in ['GP', 'MSGP']:
        model = assemble('GP-GRU', [gru_configs['1H'], gp_configs[gp_type]])
        loss = [gen_gp_loss(gp) for gp in model.output_layers]
        model.compile(optimizer=optimizer, loss=loss)
        assert model.built