Code Example #1
File: run_genn_demo.py, Project: thesadman/smt
import numpy as np
import matplotlib.pyplot as plt

from smt.surrogate_models.genn import GENN, load_smt_data

SEED = 11  # reproducibility seed; defined at module level in the original file (exact value assumed)


def run_demo_1D(is_gradient_enhancement=True):  # pragma: no cover
    """Test and demonstrate GENN using a 1D example"""

    # Test function
    f = lambda x: x * np.sin(x)
    df_dx = lambda x: np.sin(x) + x * np.cos(x)

    # Domain
    lb = -np.pi
    ub = np.pi

    # Training data
    m = 4
    xt = np.linspace(lb, ub, m)
    yt = f(xt)
    dyt_dxt = df_dx(xt)

    # Validation data
    xv = lb + np.random.rand(30, 1) * (ub - lb)
    yv = f(xv)
    dyv_dxv = df_dx(xv)

    # Initialize GENN object
    genn = GENN()
    genn.options["alpha"] = 0.05
    genn.options["beta1"] = 0.9
    genn.options["beta2"] = 0.99
    genn.options["lambd"] = 0.05
    genn.options["gamma"] = int(is_gradient_enhancement)
    genn.options["deep"] = 2
    genn.options["wide"] = 6
    genn.options["mini_batch_size"] = 64
    genn.options["num_epochs"] = 25
    genn.options["num_iterations"] = 100
    genn.options["seed"] = SEED
    genn.options["is_print"] = True

    # Load data
    load_smt_data(genn, xt, yt, dyt_dxt)

    # Train
    genn.train()
    genn.plot_training_history()
    genn.goodness_of_fit(xv, yv, dyv_dxv)

    # Plot comparison
    if genn.options["gamma"] == 1.0:
        title = 'with gradient enhancement'
    else:
        title = 'without gradient enhancement'
    x = np.arange(lb, ub, 0.01)
    y = f(x)
    y_pred = genn.predict_values(x)
    fig, ax = plt.subplots()
    ax.plot(x, y_pred)
    ax.plot(x, y, 'k--')
    ax.plot(xv, yv, 'ro')
    ax.plot(xt, yt, 'k+', mew=3, ms=10)
    ax.set(xlabel='x', ylabel='y', title=title)
    ax.legend(['Predicted', 'True', 'Test', 'Train'])
    plt.show()
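
For a quick side-by-side comparison, the demo can be called once with and once without gradient enhancement. A minimal usage sketch; the __main__ guard below is added for illustration and is not part of the original file:

if __name__ == "__main__":
    run_demo_1D(is_gradient_enhancement=True)   # GENN: training uses dy/dx (gamma = 1)
    run_demo_1D(is_gradient_enhancement=False)  # plain NN baseline: gradients ignored (gamma = 0)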
Code Example #2
    def test_genn(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from smt.surrogate_models.genn import GENN, load_smt_data

        # Training data
        lower_bound = -np.pi
        upper_bound = np.pi
        number_of_training_points = 4
        xt = np.linspace(lower_bound, upper_bound, number_of_training_points)
        yt = xt * np.sin(xt)
        dyt_dxt = np.sin(xt) + xt * np.cos(xt)

        # Validation data
        number_of_validation_points = 30
        xv = np.linspace(lower_bound, upper_bound, number_of_validation_points)
        yv = xv * np.sin(xv)
        dyv_dxv = np.sin(xv) + xv * np.cos(xv)

        # Truth model
        x = np.arange(lower_bound, upper_bound, 0.01)
        y = x * np.sin(x)

        # GENN
        genn = GENN()
        genn.options["alpha"] = 0.1  # learning rate that controls optimizer step size
        genn.options["beta1"] = 0.9  # tuning parameter to control ADAM optimization
        genn.options["beta2"] = 0.99  # tuning parameter to control ADAM optimization
        genn.options["lambd"] = 0.1  # lambd = 0. = no regularization, lambd > 0 = regularization
        genn.options["gamma"] = 1.0  # gamma = 0. = no grad-enhancement, gamma > 0 = grad-enhancement
        genn.options["deep"] = 2  # number of hidden layers
        genn.options["wide"] = 6  # number of nodes per hidden layer
        genn.options["mini_batch_size"] = 64  # used to divide data into training batches (use for large data sets)
        genn.options["num_epochs"] = 20  # number of passes through data
        genn.options["num_iterations"] = 100  # number of optimizer iterations per mini-batch
        genn.options["is_print"] = True  # print output (or not)
        load_smt_data(genn, xt, yt, dyt_dxt)  # convenience function to read in data that is in SMT format
        genn.train()  # API function to train model
        genn.plot_training_history()  # non-API function to plot training history (to check convergence)
        genn.goodness_of_fit(xv, yv, dyv_dxv)  # non-API function to check accuracy of regression
        y_pred = genn.predict_values(x)  # API function to predict values at new (unseen) points

        # Plot
        fig, ax = plt.subplots()
        ax.plot(x, y_pred)
        ax.plot(x, y, "k--")
        ax.plot(xv, yv, "ro")
        ax.plot(xt, yt, "k+", mew=3, ms=10)
        ax.set(xlabel="x", ylabel="y", title="GENN")
        ax.legend(["Predicted", "True", "Test", "Train"])
        plt.show()
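
The goodness_of_fit() call above produces a diagnostic plot. When only numbers are needed, an equivalent check can be done by hand through the core SMT API. A minimal sketch, assuming the trained genn and the xv, yv, dyv_dxv arrays from the test above; predict_derivatives(x, kx) is the standard SurrogateModel call for the partial derivative with respect to input kx:

# Hedged sketch: manual validation metrics using only the core SMT API.
xv_2d = xv.reshape((-1, 1))  # SMT predict_* methods expect 2D inputs of shape (n_points, n_dims)
rmse_y = np.sqrt(np.mean((genn.predict_values(xv_2d).ravel() - yv) ** 2))
rmse_dy = np.sqrt(np.mean((genn.predict_derivatives(xv_2d, 0).ravel() - dyv_dxv) ** 2))
print(f"value RMSE = {rmse_y:.4f}, derivative RMSE = {rmse_dy:.4f}")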
Code Example #3
File: run_genn_demo.py, Project: thesadman/smt
def run_demo_2d(alpha=0.1,
                beta1=0.9,
                beta2=0.99,
                lambd=0.1,
                gamma=1.0,
                deep=3,
                wide=6,
                mini_batch_size=None,
                iterations=30,
                epochs=100):
    """
    Predict Rastrigin function using neural net and compare against truth model. Provided with proper training data,
    the only hyperparameters the user needs to tune are:

    :param alpha = learning rate
    :param beta1 = adam optimizer parameter
    :param beta2 = adam optimizer parameter
    :param lambd = regularization coefficient
    :param gamma = gradient enhancement coefficient
    :param deep = neural net depth
    :param wide = neural net width

    This restricted list is intentional. The goal was to provide a simple interface for common regression tasks
    with the bare necessary tuning parameters. More advanced prediction tasks should consider tensorflow or other
    deep learning frameworks. Hopefully, the simplicity of this interface will address a common use case in aerospace
    engineering, namely: predicting smooth functions using computational design of experiments.
    """
    if gamma > 0.:
        title = 'GENN'
    else:
        title = 'NN'

    # Practice data
    X_train, Y_train, J_train = get_practice_data(random=False)
    X_test, Y_test, J_test = get_practice_data(random=True)

    # Convert training data to SMT format
    xt = X_train.T
    yt = Y_train.T
    dyt_dxt = J_train[0].T  # SMT format doesn't handle more than one output at a time, hence J[0]

    # Convert test data to SMT format
    xv = X_test.T
    yv = Y_test.T
    dyv_dxv = J_test[0].T  # SMT format doesn't handle more than one output at a time, hence J[0]

    # Initialize GENN object
    genn = GENN()
    genn.options["alpha"] = alpha
    genn.options["beta1"] = beta1
    genn.options["beta2"] = beta2
    genn.options["lambd"] = lambd
    genn.options["gamma"] = gamma
    genn.options["deep"] = deep
    genn.options["wide"] = wide
    genn.options["mini_batch_size"] = mini_batch_size
    genn.options["num_epochs"] = epochs
    genn.options["num_iterations"] = iterations
    genn.options["seed"] = SEED
    genn.options["is_print"] = True

    # Load data
    load_smt_data(genn, xt, yt, dyt_dxt)  # convenience function that uses SurrogateModel.set_training_values(), etc.

    # Train
    genn.train()
    genn.plot_training_history()
    genn.goodness_of_fit(xv, yv, dyv_dxv)

    # Contour plot
    contour_plot(genn, title=title)
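
The helpers get_practice_data() and contour_plot() are defined elsewhere in run_genn_demo.py and are not shown in this excerpt. For orientation, here is a minimal sketch of the truth model the practice data is built from, assuming the standard 2D Rastrigin function named in the docstring (the sampling scheme and exact array shapes in the original file may differ):

import numpy as np

def rastrigin_2d(x, a=10.0):
    """Hypothetical sketch: Rastrigin value and analytic gradient for x of shape (2, m)."""
    y = 2.0 * a + np.sum(x ** 2 - a * np.cos(2.0 * np.pi * x), axis=0)  # values, shape (m,)
    dy_dx = 2.0 * x + 2.0 * np.pi * a * np.sin(2.0 * np.pi * x)         # gradient, shape (2, m)
    return y.reshape((1, -1)), dy_dx.reshape((1, 2, -1))                # Y: (1, m), J: (1, 2, m)

Sampling m points into an X array of shape (2, m) and returning (X, Y, J) in this layout matches what the SMT-format conversion block above expects: J[0].T gives the (m, 2) Jacobian of the single output.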