def train_model(data_path,
                model_path,
                norm_path,
                test_size=0.05,
                shuffle=True,
                lr=0.003,
                minibatch_size=2048,
                epochs=30,
                lambd=0.001,
                testing=False,
                loading=False,
                plot_start=1,
                plot_end=5000):
    """
    Description
       ---
          Trains a normalized (min-max) linear regression model, given the data from data_path.  Model will be saved
          to model_path.  Advanced settings are set above.


    Inputs
       ---
              data_path: Path for the process data.  First column should be labels
             model_path: Path for the model saving.
              norm_path: Path for the normalization object.
              test_size: Size of testing data set
                shuffle: Boolean, shuffle the data for training?  Breaks time correlation of data
                     lr: Learning rate of the model, higher learning rate results in faster, more unstable learning.
         minibatch_size: Size of batches for stochastic / minibatch gradient descent
                 epochs: Number of passes through the whole data
                  lambd: Regularization term
                testing: Training or testing?
                loading: If you want to load an old model for further training
             plot_start: Index for the start of the validation plot
               plot_end: Index for the end of the validation plot


    Returns
       ---
               raw_data: Data used for model building
          heading_names: Headings of the raw data
             linear_reg: Linear regression object
         weights_biases: Weights and biases of the model
  min_max_normalization: Min-max normalization object

    """

    raw_data = pd.read_csv(data_path)

    heading_names = list(raw_data)
    raw_data = raw_data.values

    print('There are {} feature(s) and {} label(s) with {} examples.'.format(
        raw_data.shape[1] - 1, 1, raw_data.shape[0]))

    # Train / Test split
    train_x, test_x, train_y, test_y = train_test_split(raw_data[:, 1:],
                                                        raw_data[:, 0],
                                                        test_size=test_size,
                                                        shuffle=shuffle,
                                                        random_state=42)

    # Reshape for TensorFlow
    train_x = train_x.reshape(-1, raw_data.shape[1] - 1)
    test_x = test_x.reshape(-1, raw_data.shape[1] - 1)

    train_y = train_y.reshape(-1, 1)
    test_y = test_y.reshape(-1, 1)

    # Normalization
    if testing:
        min_max_normalization = load(norm_path)

    else:
        min_max_normalization = MinMaxNormalization(
            np.concatenate([train_y, train_x], axis=1))

    training_data = min_max_normalization(
        np.concatenate([train_y, train_x], axis=1))
    testing_data = min_max_normalization(
        np.concatenate([test_y, test_x], axis=1))

    # Reshape for TensorFlow
    train_x = training_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)
    test_x = testing_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)

    train_y = training_data[:, 0].reshape(-1, 1)
    test_y = testing_data[:, 0].reshape(-1, 1)

    # Test cases for NaN values
    assert (not np.isnan(train_x).any())
    assert (not np.isnan(test_x).any())

    assert (not np.isnan(train_y).any())
    assert (not np.isnan(test_y).any())

    with tf.Session() as sess:

        # Build linear regression object
        linear_reg = LinearRegression(sess,
                                      train_x,
                                      train_y,
                                      test_x,
                                      test_y,
                                      lr=lr,
                                      minibatch_size=minibatch_size,
                                      train_size=(1 - test_size),
                                      epochs=epochs,
                                      lambd=lambd)

        # If testing, just run it
        if testing:
            # Restore model
            linear_reg.saver.restore(sess, save_path=model_path)

            # Predict testing values
            pred = linear_reg.test(test_x)

            # Unnormalize
            pred = min_max_normalization.unnormalize_y(pred)
            test_y = min_max_normalization.unnormalize_y(test_y)

            # Evaluate loss
            rmse, mae = linear_reg.eval_loss(pred, test_y)

            print('Test RMSE: {:.2f} | Test MAE: {:.2f}'.format(rmse, mae))

            weights_biases = linear_reg.weights_and_biases()

            # Non-scrambled data plot
            seq_pred(session=sess,
                     model=linear_reg.z,
                     features=linear_reg.X,
                     normalizer=min_max_normalization,
                     data=raw_data,
                     time_start=plot_start,
                     time_end=plot_end,
                     adv_plot=False)

        else:

            # Load an old model for further training
            if loading:
                linear_reg.saver.restore(sess, model_path)

            else:
                # Global variables initializer
                sess.run(linear_reg.init)

            for epoch in range(linear_reg.epochs):

                for i in range(linear_reg.total_batch_number):

                    # Mini-batch gradient descent
                    batch_index = i * linear_reg.minibatch_size
                    minibatch_x = train_x[batch_index:batch_index +
                                          linear_reg.minibatch_size, :]
                    minibatch_y = train_y[batch_index:batch_index +
                                          linear_reg.minibatch_size, :]

                    # Optimize machine learning model
                    linear_reg.train(features=minibatch_x, labels=minibatch_y)

                    # Record loss
                    if i % 10 == 0:
                        _ = linear_reg.loss_check(features=train_x,
                                                  labels=train_y)

                    # Evaluate train and test losses
                    if i % 150 == 0:
                        current_loss = linear_reg.loss_check(features=train_x,
                                                             labels=train_y)

                        train_pred = linear_reg.test(features=train_x)

                        # Unnormalize data
                        train_pred = min_max_normalization.unnormalize_y(
                            train_pred)
                        actual_y = min_max_normalization.unnormalize_y(train_y)

                        # Evaluate error
                        train_rmse, train_mae = linear_reg.eval_loss(
                            train_pred, actual_y)

                        test_pred = linear_reg.test(features=test_x)

                        # Unnormalize data
                        test_pred = min_max_normalization.unnormalize_y(
                            test_pred)
                        actual_y = min_max_normalization.unnormalize_y(test_y)

                        test_rmse, test_mae = linear_reg.eval_loss(
                            test_pred, actual_y)

                        print(
                            'Epoch: {} | Loss: {:.2f} | Train RMSE: {:.2f} | Test RMSE: {:.2f}'
                            .format(epoch, current_loss, train_rmse,
                                    test_rmse))

            # Save model
            linear_reg.saver.save(sess, model_path)
            print("Model saved at: {}".format(model_path))

            # Save normalizer
            save(min_max_normalization, norm_path)
            print("Normalization saved at: {}".format(norm_path))

            # Final test
            test_pred = linear_reg.test(features=test_x)

            # Unnormalize data
            test_pred = min_max_normalization.unnormalize_y(test_pred)
            actual_y = min_max_normalization.unnormalize_y(test_y)

            test_rmse, test_mae = linear_reg.eval_loss(test_pred, actual_y)
            print('Final Test Results:  Test RMSE: {:.2f} | Test MAE: {:.2f}'.
                  format(test_rmse, test_mae))

            weights_biases = linear_reg.weights_and_biases()

            # Non-scrambled data plot
            seq_pred(session=sess,
                     model=linear_reg.z,
                     features=linear_reg.X,
                     normalizer=min_max_normalization,
                     data=raw_data,
                     time_start=plot_start,
                     time_end=plot_end,
                     adv_plot=False)

    return raw_data, heading_names, linear_reg, weights_biases, min_max_normalization
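
A minimal usage sketch for the function above; the file paths are hypothetical placeholders. With testing=False the call fits the normalizer, trains, and writes both the checkpoint and the normalization object:

# Hypothetical paths, for illustration only.
raw_data, heading_names, linear_reg, weights_biases, normalizer = train_model(
    data_path='data/process_data.csv',        # first column holds the labels
    model_path='checkpoints/linear_reg.ckpt',
    norm_path='checkpoints/min_max_norm.pickle',
    testing=False)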
Example #2
def train_model(data_path, model_path, norm_path, test_size=0.05, shuffle=True, lr=0.003, minibatch_size=2048,
                epochs=30, lambd=0.001, h1_nodes=20, h2_nodes=20, h3_nodes=20,
                testing=True, loading=False):

    """
    Description
       ---
          Trains a normalized (min-max) three-layer feedforward neural network model, given the data from data_path.
          Model will be saved to model_path.  Advanced settings are set above.


    Inputs
       ---
              data_path: Path for the process data.  First column should be labels
             model_path: Path for the model saving.
              norm_path: Path for the normalization object.
              test_size: Size of testing data set
                shuffle: Boolean, shuffle the data for training?  Breaks time correlation of data
                     lr: Learning rate of the model, higher learning rate results in faster, more unstable learning.
         minibatch_size: Size of batches for stochastic / minibatch gradient descent
                 epochs: Number of passes through the whole data
                  lambd: Regularization term
               h1_nodes: Amount of neurons in 1st hidden layer
               h2_nodes: Amount of neurons in 2nd hidden layer
               h3_nodes: Amount of neurons in 3rd hidden layer
                testing: Training or testing?
                loading: True if you want to load an old model for further training


    Returns
       ---
               raw_data: Data used for model building
          heading_names: Headings of the raw data
                     nn: Neural network object

    """

    # Load data
    raw_data = pd.read_csv(data_path)

    # Get heading tags / names, then transform into NumPy array
    heading_names = list(raw_data)
    raw_data = raw_data.values

    print('There are {} feature(s) and {} label(s) with {} examples.'.format(raw_data.shape[1] - 1, 1,
                                                                             raw_data.shape[0]))

    # Train / test split
    train_x, test_x, train_y, test_y = train_test_split(raw_data[:, 1:], raw_data[:, 0], test_size=test_size,
                                                        shuffle=shuffle, random_state=42)

    train_x = train_x.reshape(-1, raw_data.shape[1] - 1)
    test_x = test_x.reshape(-1, raw_data.shape[1] - 1)

    train_y = train_y.reshape(-1, 1)
    test_y = test_y.reshape(-1, 1)

    # Normalization
    if testing:
        min_max_normalization = load(norm_path)

    else:
        min_max_normalization = MinMaxNormalization(np.concatenate([train_y, train_x], axis=1))

    training_data = min_max_normalization(np.concatenate([train_y, train_x], axis=1))
    testing_data = min_max_normalization(np.concatenate([test_y, test_x], axis=1))

    train_x = training_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)
    test_x = testing_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)

    train_y = training_data[:, 0].reshape(-1, 1)
    test_y = testing_data[:, 0].reshape(-1, 1)

    # Test cases for NaN values
    assert(not np.isnan(train_x).any())
    assert(not np.isnan(test_x).any())

    assert(not np.isnan(train_y).any())
    assert(not np.isnan(test_y).any())

    with tf.Session() as sess:

        # Build the neural network object
        nn = NeuralNetwork(sess, train_x, train_y, test_x, test_y, lr=lr, minibatch_size=minibatch_size,
                           train_size=1 - test_size, h1_nodes=h1_nodes, h2_nodes=h2_nodes, h3_nodes=h3_nodes,
                           epochs=epochs, lambd=lambd)

        # If testing, just run the model
        if testing:

            # Restore model
            nn.saver.restore(sess, model_path)

            # Predict based on model
            pred = nn.test(features=test_x, training=not testing)

            # Unnormalize
            pred = min_max_normalization.unnormalize_y(pred)
            test_y = min_max_normalization.unnormalize_y(test_y)

            # Evaluate loss
            rmse, mae = nn.eval_loss(pred, test_y)
            r2 = r_squared(pred, test_y)
            print('Final Test Results:  Test RMSE: {:.2f} | Test MAE: {:.2f} | R2: {:.2f}'.format(rmse, mae, r2))

            plt.plot(pred[:-4900], label='Predictions')
            plt.plot(test_y[:-4900], label='Actual')

            plt.xlabel('Time, t (min)')
            plt.ylabel('Flow Rate, Q (bbl/h)')

            plt.legend(loc='best', frameon=False)

            # plt.savefig('08smallnn_valid.eps', format='eps', dpi=1000)

            plt.show()

            """
            Residual Analysis
            """
            residuals = pred - test_y
            plt.plot(residuals)
            plt.ylim([-50, 50])
            plt.show()

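            # Baseline: first differences of the actual series, i.e. the residuals a
            # naive persistence forecast (y_hat[t] = y[t-1]) would leave; the model's
            # residuals above should be tighter than this to add value.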
            bad_residuals = test_y[0:-1] - test_y[1:]
            plt.plot(bad_residuals)
            plt.ylim([-50, 50])
            plt.show()

        else:

            # If loading old model for continued training
            if loading:
                nn.saver.restore(sess, model_path)

            else:
                # Initialize global variables
                sess.run(nn.init)

            for epoch in range(epochs):

                for i in range(nn.total_batch_number):

                    # Minibatch gradient descent
                    batch_index = i * nn.minibatch_size

                    minibatch_x = train_x[batch_index:batch_index + nn.minibatch_size, :]
                    minibatch_y = train_y[batch_index:batch_index + nn.minibatch_size, :]

                    # Optimize the machine learning model
                    nn.train(features=minibatch_x, labels=minibatch_y, training=not testing)

                    # Record loss
                    if i % 10 == 0:
                        _ = nn.loss_check(features=train_x, labels=train_y, training=not testing)

                    # Evaluate training and testing losses
                    if i % 150 == 0:

                        # Current loss, evaluated on the test set
                        current_loss = nn.loss_check(features=test_x, labels=test_y, training=not testing)

                        # Predict on the training set
                        pred = nn.test(features=train_x, training=not testing)

                        # Unnormalize
                        pred = min_max_normalization.unnormalize_y(pred)
                        label_y = min_max_normalization.unnormalize_y(train_y)

                        # Evaluate training loss
                        train_rmse, _ = nn.eval_loss(pred, label_y)

                        # Predict on the test set
                        pred = nn.test(features=test_x, training=not testing)

                        # Unnormalize
                        pred = min_max_normalization.unnormalize_y(pred)
                        label_y = min_max_normalization.unnormalize_y(test_y)

                        # Evaluate test loss
                        test_rmse, _ = nn.eval_loss(pred, label_y)

                        print('Epoch: {} | Loss: {:.2f} | Train RMSE: {:.2f} | Test RMSE: {:.2f}'.format(epoch,
                                                                                                         current_loss,
                                                                                                         train_rmse,
                                                                                                         test_rmse))

            # Save the model
            nn.saver.save(sess, model_path)
            print("Model saved at: {}".format(model_path))

            # Save normalizer
            save(min_max_normalization, norm_path)
            print("Normalization saved at: {}".format(norm_path))

            # Final test
            test_pred = nn.test(features=test_x, training=not testing)

            # Unnormalize data
            test_pred = min_max_normalization.unnormalize_y(test_pred)
            actual_y = min_max_normalization.unnormalize_y(test_y)

            test_rmse, test_mae = nn.eval_loss(test_pred, actual_y)
            r2 = r_squared(test_pred, actual_y)
            print('Final Test Results:  Test RMSE: {:.2f} | Test MAE: {:.2f} | R2: {:.2f}'.format(test_rmse, test_mae, r2))

    return raw_data, heading_names, nn
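
The r_squared helper called above is not defined in these snippets. A minimal sketch, assuming it computes the conventional coefficient of determination over NumPy arrays:

import numpy as np

def r_squared(pred, actual):
    # R^2 = 1 - SS_res / SS_tot (assumed form; the original helper is not shown).
    ss_res = np.sum((actual - pred) ** 2)
    ss_tot = np.sum((actual - np.mean(actual)) ** 2)
    return 1.0 - ss_res / ss_tot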
Example #3
# The original snippet begins mid-statement; this print is reconstructed from the pattern in the examples above.
print('There are {} columns and {} examples.'.format(
    raw_data.shape[1], raw_data.shape[0]))

train_X, test_X, train_y, test_y = train_test_split(raw_data[:, 1:],
                                                    raw_data[:, 0],
                                                    test_size=0.05,
                                                    random_state=42,
                                                    shuffle=True)

train_X = train_X.reshape(-1, raw_data.shape[1] - 1)
test_X = test_X.reshape(-1, raw_data.shape[1] - 1)

train_y = train_y.reshape(-1, 1)
test_y = test_y.reshape(-1, 1)

# Normalization.  Recombine to normalize at once, then split them into their train/test forms
min_max_normalization = MinMaxNormalization(
    np.concatenate([train_y, train_X], axis=1))
training_data = min_max_normalization(
    np.concatenate([train_y, train_X], axis=1))
testing_data = min_max_normalization(np.concatenate([test_y, test_X], axis=1))

# X1: Constrained, strictly positive weights  X2: Unconstrained weights
train_X1 = training_data[:, 1:9].reshape(-1, 8)
train_X2 = training_data[:, 9:].reshape(-1, 3)

test_X1 = testing_data[:, 1:9].reshape(-1, 8)
test_X2 = testing_data[:, 9:].reshape(-1, 3)

train_y = training_data[:, 0].reshape(-1, 1)
test_y = testing_data[:, 0].reshape(-1, 1)

# Neural network parameters
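
The MinMaxNormalization, save, and load helpers used throughout these examples are not defined in the snippets. A minimal sketch of what the normalizer might look like, assuming column-wise min-max scaling fitted at construction, with the label in column 0 so that unnormalize_y can invert predictions:

import numpy as np

class MinMaxNormalization:
    def __init__(self, data):
        # Fit column-wise minima and ranges on the data seen at construction.
        self.col_min = data.min(axis=0)
        self.denominator = data.max(axis=0) - self.col_min
        self.denominator[self.denominator == 0] = 1  # guard against constant columns

    def __call__(self, data):
        # Scale every column into [0, 1] with the fitted statistics.
        return (data - self.col_min) / self.denominator

    def unnormalize_y(self, y):
        # Invert the scaling for the label column (column 0 by convention here).
        return y * self.denominator[0] + self.col_min[0]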
Example #4
def train_model(data_path, model_path, norm_path, test_size=0.05, shuffle=True, lr=0.003, minibatch_size=2048,
                train_size=0.9, epochs=30, lambd=0.001, testing=False, loading=False, num_of_const=10):
    """
    Description
       ---
          Trains a normalized (min-max) linear regression model whose first num_of_const weights are constrained
          to be strictly positive, given the data from data_path.  Model will be saved to model_path.  Advanced
          settings are set above.


    Inputs
       ---
              data_path: Path for the process data.  First column should be labels
             model_path: Path for the model saving.
              norm_path: Path for the normalization object.
              test_size: Size of testing data set
                shuffle: Boolean, shuffle the data for training?  Breaks time correlation of data
                     lr: Learning rate of the model, higher learning rate results in faster, more unstable learning.
         minibatch_size: Size of batches for stochastic / minibatch gradient descent
                 epochs: Number of passes through the whole data
                  lambd: Regularization term
                testing: Training or testing?
                loading: If you want to load an old model for further training
             train_size: Fraction of the data used for training
           num_of_const: Number of leading features whose weights are constrained to be strictly positive


    Returns
       ---
          heading_names: Headings of the raw data
         weights_biases: Weights and biases of the model

    """

    raw_data = pd.read_csv(data_path)

    heading_names = list(raw_data)
    raw_data = raw_data.values

    print('There are {} feature(s) and {} label(s) with {} examples.'.format(raw_data.shape[1] - 1, 1,
                                                                             raw_data.shape[0]))

    # Train / Test split
    train_x, test_x, train_y, test_y = train_test_split(raw_data[:, 1:], raw_data[:, 0],
                                                        test_size=test_size, shuffle=shuffle, random_state=42)

    # Reshape for TensorFlow
    train_x = train_x.reshape(-1, raw_data.shape[1] - 1)
    test_x = test_x.reshape(-1, raw_data.shape[1] - 1)

    train_y = train_y.reshape(-1, 1)
    test_y = test_y.reshape(-1, 1)

    # Normalization
    if testing:
        min_max_normalization = load(norm_path)

    else:
        min_max_normalization = MinMaxNormalization(np.concatenate([train_y, train_x], axis=1))

    training_data = min_max_normalization(np.concatenate([train_y, train_x], axis=1))
    testing_data = min_max_normalization(np.concatenate([test_y, test_x], axis=1))

    # Reshape for TensorFlow
    train_x = training_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)
    test_x = testing_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)

    train_y = training_data[:, 0].reshape(-1, 1)
    test_y = testing_data[:, 0].reshape(-1, 1)

    # Test cases for NaN values
    assert(not np.isnan(train_x).any())
    assert(not np.isnan(test_x).any())

    assert(not np.isnan(train_y).any())
    assert(not np.isnan(test_y).any())

    with tf.Session() as sess:

        # Build linear regression object
        linear_reg = LinearRegression(sess, train_x, train_y, test_x, test_y, lr=lr, minibatch_size=minibatch_size,
                                      train_size=train_size, epochs=epochs, lambd=lambd, num_of_const=num_of_const)

        # If testing, just run it
        if testing:
            # Restore model
            linear_reg.saver.restore(sess, save_path=model_path)

            # Predict testing values.  The first num_of_const features use constrained weights; the rest are unconstrained
            pred = linear_reg.test(const_features=test_x[:, :num_of_const],
                                   unconst_features=test_x[:, linear_reg.nx_Cons:])

            # Unnormalize
            pred = min_max_normalization.unnormalize_y(pred)
            test_y = min_max_normalization.unnormalize_y(test_y)

            # Evaluate loss
            rmse, mae = linear_reg.eval_loss(pred, test_y)
            se = standard_error(pred, test_y)
            r2 = r_squared(pred, test_y)

            print('Test RMSE: {:.2f} | Test MAE: {:.2f} | SE: {:.2f} | R2: {:.2f}'.format(rmse, mae, se, r2))

            plt.plot(pred[:2600], label='Predictions')
            plt.plot(test_y[:2600], label='Actual')

            plt.xlabel('Time, t (min)')
            plt.ylabel('Flow Rate, Q (bbl/h)')

            plt.legend(frameon=False)
            # plt.savefig('08cluster1_test.eps', format='eps', dpi=1000)
            plt.show()

            """
            Residual Analysis
            """
            residuals = pred - test_y
            plt.plot(residuals)
            plt.show()

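            # Autocorrelation of the residuals; significant correlation at nonzero
            # lags suggests structure the linear model has not captured.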
            plt.acorr(residuals[:, 0], maxlags=100)
            plt.show()

            weights_biases = linear_reg.weights_and_biases()

        else:

            # Load an old model for further training
            if loading:
                linear_reg.saver.restore(sess, model_path)

            else:
                # Global variables initializer
                sess.run(linear_reg.init)

            for epoch in range(linear_reg.epochs):

                for i in range(linear_reg.total_batch_number):

                    # Mini-batch gradient descent
                    batch_index = i * linear_reg.minibatch_size
                    minibatch_x = train_x[batch_index:batch_index + linear_reg.minibatch_size, :]
                    minibatch_y = train_y[batch_index:batch_index + linear_reg.minibatch_size, :]

                    # Optimize machine learning model
                    linear_reg.train(const_features=minibatch_x[:, :num_of_const],
                                     unconst_features=minibatch_x[:, linear_reg.nx_Cons:],
                                     labels=minibatch_y)

                    # Record loss
                    if i % 10 == 0:
                        _ = linear_reg.loss_check(const_features=train_x[:, :num_of_const],
                                                  unconst_features=train_x[:, linear_reg.nx_Cons:],
                                                  labels=train_y)

                    # Evaluate train and test losses
                    if i % 150 == 0:
                        current_loss = linear_reg.loss_check(const_features=train_x[:, :num_of_const],
                                                             unconst_features=train_x[:, linear_reg.nx_Cons:],
                                                             labels=train_y)

                        train_pred = linear_reg.test(const_features=train_x[:, :num_of_const],
                                                     unconst_features=train_x[:, linear_reg.nx_Cons:])

                        # Unnormalize data
                        train_pred = min_max_normalization.unnormalize_y(train_pred)
                        actual_y = min_max_normalization.unnormalize_y(train_y)

                        # Evaluate error
                        train_rmse, train_mae = linear_reg.eval_loss(train_pred, actual_y)

                        test_pred = linear_reg.test(const_features=test_x[:, :num_of_const],
                                                    unconst_features=test_x[:, linear_reg.nx_Cons:])

                        # Unnormalize data
                        test_pred = min_max_normalization.unnormalize_y(test_pred)
                        actual_y = min_max_normalization.unnormalize_y(test_y)

                        test_rmse, test_mae = linear_reg.eval_loss(test_pred, actual_y)

                        print('Epoch: {} | Loss: {:.2f} | Train RMSE: {:.2f} | Test RMSE: {:.2f}'.format(epoch,
                                                                                                         current_loss,
                                                                                                         train_rmse,
                                                                                                         test_rmse))

            # Save model
            linear_reg.saver.save(sess, model_path)
            print("Model saved at: {}".format(model_path))

            # Save normalizer
            save(min_max_normalization, norm_path)
            print("Normalization saved at: {}".format(norm_path))

            # Final test
            test_pred = linear_reg.test(const_features=test_x[:, :num_of_const],
                                        unconst_features=test_x[:, linear_reg.nx_Cons:])

            # Unnormalize data
            test_pred = min_max_normalization.unnormalize_y(test_pred)
            actual_y = min_max_normalization.unnormalize_y(test_y)

            # Evaluation Metrics
            test_rmse, test_mae = linear_reg.eval_loss(test_pred, actual_y)
            se = standard_error(test_pred, actual_y)
            r2 = r_squared(test_pred, actual_y)

            print('Final Test Results:  Test RMSE: {:.2f} | Test MAE: {:.2f} | SE: {:.2f} | R2: {:.2f}'.format(test_rmse,
                                                                                                               test_mae,
                                                                                                               se,
                                                                                                               r2))

            weights_biases = linear_reg.weights_and_biases()

    return heading_names, weights_biases
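
The standard_error helper used above is likewise not defined here. A minimal sketch, assuming it is the standard error of the estimate for the regression predictions:

import numpy as np

def standard_error(pred, actual):
    # Standard error of the estimate, sqrt(SS_res / (n - 2)) (assumed form;
    # the original helper is not shown in the source).
    n = actual.shape[0]
    ss_res = np.sum((actual - pred) ** 2)
    return np.sqrt(ss_res / (n - 2))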
Example #5
# The original snippet begins mid-statement; this print is reconstructed from the pattern in the examples above.
print('There are {} columns and {} examples.'.format(
    raw_data.shape[1], raw_data.shape[0]))

train_X, test_X, train_y, test_y = train_test_split(raw_data[:, 1:],
                                                    raw_data[:, 0],
                                                    test_size=0.05,
                                                    random_state=42,
                                                    shuffle=True)

train_X = train_X.reshape(-1, raw_data.shape[1] - 1)
test_X = test_X.reshape(-1, raw_data.shape[1] - 1)

train_y = train_y.reshape(-1, 1)
test_y = test_y.reshape(-1, 1)

# Normalization.  Recombine to normalize at once, then split them into their train/test forms
min_max_normalization = MinMaxNormalization(
    np.concatenate([train_y, train_X], axis=1))
training_data = min_max_normalization(
    np.concatenate([train_y, train_X], axis=1))
testing_data = min_max_normalization(np.concatenate([test_y, test_X], axis=1))

train_X = training_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)
test_X = testing_data[:, 1:].reshape(-1, raw_data.shape[1] - 1)

train_y = training_data[:, 0].reshape(-1, 1)
test_y = testing_data[:, 0].reshape(-1, 1)

# Neural network parameters
input_size = train_X.shape[1]
output_size = 1
learning_rate = Args['lr']
mini_batch_size = Args['minibatch_size']
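
The snippet breaks off here. Args is presumably a dictionary of hyperparameters defined earlier in the original file; judging from the keyword defaults in the examples above, something like Args = {'lr': 0.003, 'minibatch_size': 2048}.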