Example no. 1
def my_solution2(X_train, X_test, y_train, y_test, hyperparams=None):
    if hyperparams is None:
        hyperparams = {
            'hidden_layer_sizes': (3, ),
            'learning_rate': 0.1,
            'epoch': 3852,
            'momentum': 0.04,
            'tol': 1e-10,
            'reg_coef': 0
        }

    if 'batch_size' not in hyperparams:
        hyperparams['batch_size'] = X_train.shape[0]

    # train with our own implementation
    nn = NeuralNetwork(**hyperparams)
    nn.fit(X_train, y_train)
    y_pred = nn.predict(X_test)

    y_pred = y_pred.argmax(axis=1)
    y_test = y_test.argmax(axis=1)

    # evaluate
    print('My implementation 2:')
    print(classification_report(y_test, y_pred))

    return nn, y_pred, y_test
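A minimal usage sketch for my_solution2 (an assumption, not part of the original listing): the argmax calls above imply one-hot targets, so the labels are binarized first.

# Hypothetical usage; assumes the custom NeuralNetwork class above is importable
# and that targets are one-hot encoded, as the argmax calls imply.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

X, y = load_iris(return_X_y=True)
Y = LabelBinarizer().fit_transform(y)  # one-hot targets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)
nn, y_pred, y_true = my_solution2(X_train, X_test, Y_train, Y_test)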
Example no. 2
def main(filename='data/iris-virginica.txt'):
    # Load data
    data = read_data('%s/%s' % (filepath, filename))

    X, y = data[:, :-1].astype(float), data[:, -1]


    class_vec = list(set(y))
    K = len(class_vec)


    Y = pd.get_dummies(y).astype(int).to_numpy()  # as_matrix() was removed in newer pandas


    # Define parameters
    n = X.shape[0]
    d = X.shape[1]
    # Define layer sizes
    print(n, d, K)
    layers = [d, 5, K]

    model = NeuralNetwork(layers=layers, num_epochs=1000, learning_rate=0.10, alpha=0.9,
                          activation_func='sigmoid', epsilon=0.001, print_details=True)
    model.fit(X, Y)

    Y_hat = model.predict(X)
    accuracy = compute_acc(Y_hat, Y)
    print('Model training accuracy:\t%.2f' % (accuracy))
Example no. 3
def train(iterations, rate):
    global training_inputs, temp_training_outputs, test_inputs, test_outputs
    NumberNet.train(training_inputs=training_inputs,
                    training_outputs=temp_training_outputs,
                    training_iterations=iterations,
                    learning_rate=rate)
Example no. 4
def task_cv_single(t, modeLearn: ModeLearn, f:ActivFunct, theta:dict, errorFunct:ActivFunct, miniBatchDim = None):
    (trSet, vlSet) = t
    nn = NeuralNetwork(trSet, f, theta)
    nn.learn(modeLearn, errorFunct, miniBatchDim)

    vecErr = np.array([nn.getError(vlSet, i, 1/len(vlSet), errorFunct) for i in range(nn.hyp['OutputUnits'])])
    return norm(vecErr,2)
Example no. 5
def main(filename='data/iris-virginica.txt'):
    # Load data
    data = read_data('%s/%s' % (filepath, filename))

    X, y = data[:, :-1].astype(float), data[:, -1]

    class_vec = list(set(y))
    K = len(class_vec)

    Y = pd.get_dummies(y).astype(int).to_numpy()  # as_matrix() was removed in newer pandas

    # Define parameters
    n = X.shape[0]
    d = X.shape[1]
    # Define layer sizes
    print(n, d, K)
    layers = [d, 5, K]

    model = NeuralNetwork(layers=layers,
                          num_epochs=1000,
                          learning_rate=0.10,
                          alpha=0.9,
                          activation_func='sigmoid',
                          epsilon=0.001,
                          print_details=True)
    model.fit(X, Y)

    Y_hat = model.predict(X)
    accuracy = compute_acc(Y_hat, Y)
    print('Model training accuracy:\t%.2f' % (accuracy))
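compute_acc is not defined in this listing; a minimal sketch consistent with the one-hot targets above (an assumption, not the original helper):

import numpy as np

def compute_acc(Y_hat, Y):
    # Fraction of rows whose predicted class (argmax) matches the target class.
    return np.mean(Y_hat.argmax(axis=1) == Y.argmax(axis=1))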
Example no. 6
    def __init__(self, depth):
        self.depth = depth

        # self.heuristic = heuristic

        n = NeuralNetwork(64, 10, 0.003)
        n.readfile()
        self.heuristic = n.heuristic
Example no. 7
def exp_500():
    net = NeuralNetwork([28*28, 500, 10])
    trainNetwork(net, cross_entropy=True)

    testNetwork(net)

    # Dump the network into a file, load it and test it again
    with open("network_files/mnist_500_network_file", "wb") as f:
        net.dump(f)
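The comment above promises a reload-and-retest step that the snippet does not show; a hedged continuation, mirroring the load() call used in Example no. 24:

# Sketch: reload the dumped network and test it again.
net2 = NeuralNetwork()
with open("network_files/mnist_500_network_file", "rb") as f:
    net2.load(f)
testNetwork(net2)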
Example no. 8
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size

        self.brain = NeuralNetwork(self.state_size, self.action_size)
        self.brain_target = NeuralNetwork(self.state_size, self.action_size)

        self.memory = Memory(BUFFER_SIZE)

        self.epsilon = EPSILON
Example no. 9
def recognize(src):
    nn = NeuralNetwork([784, 250, 10], 'logistic')
    img_list = GetCutZip(src)
    final_result = ''
    for img_array in img_list:
        img_array = img_array.flatten()
        result_list = nn.predict(img_array)
        result = np.argmax(result_list)
        final_result = final_result + str(result)
    return final_result
Example no. 10
class NoisyLabelNeuralNetwork:
    def __init__(self, train_x=None, train_y=None, test_x=None, test_y=None):
        self.NN = NeuralNetwork(train_x=train_x, train_y=train_y, test_x=test_x, test_y=test_y)

    def set_model_path(self, model_path):
        self.NN.model_path = model_path

    def pretrain(self, labels=None, noise_level=None, save_state=None, batch_size=100, epoch_size=10):
        # direct to NN function
        self.NN.train_NN(labels=labels, noise_level=noise_level, save_state=save_state, batch_size=batch_size, epoch_size=epoch_size)

    def run_NLNN(self, labels=None, noise_level=None, it_nr=15, batch_size=100, epoch_size=10):
        # set number of iterations of NLNN, 'before' model must already exist

        # retrieve predictions and initialize the EM module; print accuracy for information
        prob_y = self.NN.restored_prob_y(noise_level, state='before')
        self.NN.get_acc(noise_level, 'before')
        self.EM = EMModule(initializer=prob_y, labels=labels)

        # iterate: get the improved theta, update the NN and check for convergence
        for it in range(it_nr):
            prev_theta = self.EM.theta
            c, new_theta = self.EM.iteration(it_nr=it, new_prob_y=prob_y)

            acc, prob_y = self.NN.train_NN(save_state='after', labels=c, noise_level=noise_level, batch_size=batch_size, epoch_size=epoch_size)

            if ut.dist(prev_theta, new_theta) < 10**-3:
                print('Converged after %s iterations\n' % (it + 1))
                break

        # print accuracy for information
        self.NN.get_acc(noise_level, state='after')
Example no. 11
def main():
    Info.printTitle()

    # get options from arguments.
    dicOptions = Opts.getOptions()
    if dicOptions is None:
        Info.printHelp()
        return False
    workClass = dicOptions[PAR_MODE]
    
    # get variables for NN.
    dicNNVariables = NNet.getNNVariables()

    # set up parameters.
    workClass.setOptions(dicOptions)
    workClass.setNNVariables(dicNNVariables)
    
    # run work-function.
    if not workClass.run():
        print("Fail.")
        return False

    print("Complete.")

    return True
Example no. 12
def test():
    user = {}
    if request.method == "GET":
        user['clean'] = request.args.get("img")
    else:
        if 'file' not in request.files:
            flash('No file part')
            return redirect("/")
        file = request.files['file']
        # if the user does not select a file, the browser may
        # submit an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect("/")
        if file and allowed(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            user['clean'] = filename
        else:
            flash('Unsupported file format (png, jpeg, jpg)')
            return redirect("/")

    user['guess'] = NN.test(user['clean'])
    #return user
    return render_template('test.html', title='Test', info=user)
Example no. 13
def test(num_tests):
    global training_inputs, temp_training_outputs, test_inputs, test_outputs
    correct = 0
    incorrect = 0

    # Test NN amount of times user specifies with num_tests
    for i in range(num_tests):
        # Get random test data
        r = randint(0, 59999)
        # Get test data in the correct format: a (784, 1) column vector
        new_test_input = np.asarray(test_inputs[:, r], dtype=float).reshape(784, 1)

        # Get NN's guess for input
        nnGuess = NumberNet.think(test_input=new_test_input)

        # Log test results
        if test_outputs[r] == nnGuess:
            correct += 1
        else:
            incorrect += 1

    # Print test results
    print("Correct: " + str(correct))
    print("Incorrect: " + str(incorrect))
Example no. 14
def train():
    user = {
        'l1': NN.nodes_in_input_layer,
        'l2': NN.nodes_in_hidden_layer,
        'l3': NN.nodes_in_output_layer,
        'akurasi': NN.accuration()
    }
    user['exist'] = os.path.isfile("./NN/mnist_train.csv")
    return render_template('training.html', title='Training', info=user)
Example no. 15
class NoisyLabelNeuralNetwork:
    def __init__(self, train_x=None, train_y=None, test_x=None, test_y=None):
        self.NN = NeuralNetwork(train_x=train_x,
                                train_y=train_y,
                                test_x=test_x,
                                test_y=test_y)

    def set_model_path(self, model_path):
        self.NN.model_path = model_path

    def pretrain(self,
                 labels=None,
                 noise_level=None,
                 save_state=None,
                 batch_size=100,
                 epoch_size=10):
        # direct to NN function
        self.NN.train_NN(labels=labels,
                         noise_level=noise_level,
                         save_state=save_state,
                         batch_size=batch_size,
                         epoch_size=epoch_size)

    def run_NLNN(self,
                 labels=None,
                 noise_level=None,
                 it_nr=15,
                 batch_size=100,
                 epoch_size=10):
        # set number of iterations of NLNN, 'before' model must already exist

        # retrieve predictions and initialize the EM module; print accuracy for information
        prob_y = self.NN.restored_prob_y(noise_level, state='before')
        self.NN.get_acc(noise_level, 'before')
        self.EM = EMModule(initializer=prob_y, labels=labels)

        # iterate: get the improved theta, update the NN and check for convergence
        for it in range(it_nr):
            prev_theta = self.EM.theta
            c, new_theta = self.EM.iteration(it_nr=it, new_prob_y=prob_y)

            acc, prob_y = self.NN.train_NN(save_state='after',
                                           labels=c,
                                           noise_level=noise_level,
                                           batch_size=batch_size,
                                           epoch_size=epoch_size)

            if ut.dist(prev_theta, new_theta) < 10**-3:
                print('Converged after %s iterations\n' % (it + 1))
                break

        # print accuracy for information
        self.NN.get_acc(noise_level, state='after')
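ut.dist is not shown in this listing; a plausible sketch (an assumption) of the convergence metric used above:

import numpy as np

def dist(a, b):
    # Euclidean distance between two parameter arrays; run_NLNN stops
    # when successive thetas are closer than 1e-3.
    return np.linalg.norm(np.asarray(a) - np.asarray(b))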
Example no. 16
def hello():
    user = {
        'l1': NN.nodes_in_input_layer,
        'l2': NN.nodes_in_hidden_layer,
        'l3': NN.nodes_in_output_layer,
        'akurasi': NN.accuration()
    }
    user['file'] = [
        v for v in os.listdir("static/clean")
        if v.split('.').pop() in allowed_file
    ]
    return render_template('home.html', title='Home', info=user)
Example no. 17
def NN_fit(train_set, val_set, nn=5, epochs=10, width=10, layers=2):
    from NN import NeuralNetwork

    best_error = float("inf")
    best_nn = None
    for _ in range(nn):
        net = NeuralNetwork()
        net.train(
            train_set,
            val_set,
            epochs=epochs,
            width=width,
            layers=layers,
            batch_size=20,
            learning_rate=0.001,
        )
        predicted_y, _ = net.predict(train_set.X)

        logger.info(
            f"NN train MSE {mean_squared_error(train_set.Y, predicted_y)}")
        predicted_y, _ = net.predict(val_set.X)
        error = mean_squared_error(val_set.Y, predicted_y)
        logger.info(f"NN dev MSE {error}")

        # keep the best of the `nn` random restarts, judged by dev-set MSE
        if error < best_error:
            best_error = error
            best_nn = net

    return best_nn
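A hedged usage sketch for NN_fit; the Dataset container below is an assumption (the function only relies on the .X and .Y attributes):

import numpy as np
from collections import namedtuple

Dataset = namedtuple("Dataset", ["X", "Y"])  # hypothetical container

X = np.random.rand(200, 4)
Y = X.sum(axis=1, keepdims=True)  # toy regression target
train_set = Dataset(X[:150], Y[:150])
val_set = Dataset(X[150:], Y[150:])
best_net = NN_fit(train_set, val_set, nn=3, epochs=5)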
Example no. 18
def do_a_graph(params, a_list):
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    x_min = params["Grid"]["x_min"]
    x_step_size = params["Grid"]["x_step_size"]
    x_steps = params["Grid"]["x_steps"]
    x_max = x_min + (x_steps - 1) * x_step_size
    n_time_steps = len(a_list)
    T_mat = params["PayoffChars"]["T"]
    t_min = 0.
    t_max = T_mat
    t_steps = params["PayoffChars"]["NTime"]
    t_step_size = T_mat / n_time_steps
    # t = np.arange(t_min, t_max, t_step_size)
    # x = np.arange(x_min, x_max, x_step_size)
    t = np.linspace(t_min, t_max, num=t_steps)
    x = np.linspace(x_min, x_max, num=x_steps)
    X, T = np.meshgrid(x, t)
    a_np = np.array(a_list)
    zs = a_np
    Z = zs.reshape(T.shape)

    ax.plot_surface(X, T, Z)
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$t$')
    # ax.set_zlabel(r'$\tilde a^{\hat \theta^*}$')

    title = r"$\tilde a^{\hat \theta^*}$ Surface"
    # plt.title(title, pad=20)

    if params["General"]["Run"]["ShowEvalAOnGrid"]:
        plt.show()

    conf_type, diffusion_type = NeuralNetwork.get_conf_and_diffusion_types(
        params["Conf"], params["Diffusion"]["Type"])
    a_type = "a_" + params["a"]
    for angle_step in range(12):
        for azim_step in range(3):
            azim = 30 * (azim_step - 1)
            angle = 30 * angle_step
            file_path = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/a_surface/" + str(
                azim_step - 1
            ) + "/graph_a_surface_" + conf_type + "_" + diffusion_type + "_" + str(
                azim) + "_" + str(angle) + ".png"
            directory = os.path.dirname(file_path)
            if not os.path.exists(directory):
                os.makedirs(directory)
            ax.view_init(azim, angle)
            fig.savefig(file_path)
    plt.close()
Example no. 19

def dbnunfoldtonn(dbn, outputsize):
    # DBNUNFOLDTONN unfolds a DBN into a NN.
    # dbnunfoldtonn(dbn, outputsize) returns the unfolded dbn with a final
    # layer of size outputsize added.
    if outputsize is not None:
        size = np.append(dbn.dbnSizes, outputsize)
    else:
        size = np.array(dbn.sizes)

    nn = NeuralNetwork.NN(size)
    for i in range(0, len(dbn.RBM)):
        nn.W[i][:, 0] = dbn.RBM[i].C.squeeze()
        nn.W[i][:, 1:] = dbn.RBM[i].W

    return nn
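A small hedged check of the weight layout the comments describe (bias vector in column 0, RBM weights in the remaining columns); the RBM field shapes are assumptions:

def check_unfolded_shapes(dbn, nn):
    # Hypothetical sanity check: each unfolded weight matrix should hold the
    # bias vector in column 0 and the RBM weights in the remaining columns.
    for i in range(len(dbn.RBM)):
        rows, cols = dbn.RBM[i].W.shape
        assert nn.W[i].shape == (rows, cols + 1)  # +1 for the bias column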
Example no. 20

def trainOR():
    Network0 = NeuralNetwork(sturcture=[2, 3, 1], learningRate=0.5)

    for i in range(1000):
        Network0.train([0, 0], [0])
        Network0.train([0, 1], [1])
        Network0.train([1, 0], [1])
        Network0.train([1, 1], [1])
        print(i, "", round((1 - Network.costFunction) * 100, 0))
        pass

    print(round((1 - Network0.costFunction) * 100, 2))

    Network0.answer([0, 0])

    print(Network0.neurons[2][0].getOutput())
Example no. 21

def trainXOR():
    print("\ntrain XOR")
    sturcture = [2, 3, 1]
    Network0 = NeuralNetwork(sturcture=sturcture, learningRate=1)

    for i in range(10000):
        Network0.train([0, 0], [0])
        Network0.train([0, 1], [1])
        Network0.train([1, 0], [1])
        Network0.train([1, 1], [0])

        #print(i,"",round((1-Network0.costFunction)*100, 0))
        print(i, "",
              round(abs(Network0.neurons[len(sturcture) - 1][0].a - 0), 3))

    print(Network0.answer([0, 0]))
    print(Network0.answer([1, 0]))
    print(Network0.answer([0, 1]))
    print(Network0.answer([1, 1]))

Example no. 22
def do_single_run(params):
    nn = NeuralNetwork(params)
    final_loss = nn.train(
        number_of_batches=params["Run"]["NumberOfBatchesForTraining"],
        learning_rate=params["Run"]["LearningRate"])
    if params["General"]["Run"]["DoEvalAOnGrid"]:
        a_list = nn.eval_a_on_grid(params["Grid"]["x_min"],
                                   params["Grid"]["x_steps"],
                                   params["Grid"]["x_step_size"])

        do_a_graph(params, a_list)

    monte_carlo_price, monte_carlo_std = nn.monte_carlo_price(
        number_of_batches=params["Run"]["NumberOfBatchesForEval"])
    print("real price: ", nn.bachelier_call_price())
    print("monte_carlo_price: ", monte_carlo_price)
    print("monte_carlo_std: ", monte_carlo_std)
    _, _ = nn.eval(number_of_batches=params["Run"]["NumberOfBatchesForEval"])

    if params["General"]["Run"]["DoRobustGraphs"]:
        params["General"]["Run"]["RatioRobustGraphs"] = params["General"][
            "Run"]["RatioNumberOfBatchesForEval"] * params["General"]["Run"][
                "RatioNumberOfBatchesForTraining"]
        do_robust_graphs(params, nn)

    nn.sess.graph.finalize()
    tf.reset_default_graph()
    nn.sess.close()
    tf.reset_default_graph()

    conf_type, diffusion_type = NeuralNetwork.get_conf_and_diffusion_types(
        params["Conf"], params["Diffusion"]["Type"])
    a_type = "a_" + params["a"]
    file_path_final_loss = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/graph_" + conf_type + "_" + diffusion_type + "_final_loss.txt"
    with open(file_path_final_loss, "w") as f:
        f.write("Var(Z) for single run is: " + str(final_loss))
Example no. 23
    def __init__(self, train_x=None, train_y=None, test_x=None, test_y=None):
        self.NN = NeuralNetwork(train_x=train_x, train_y=train_y, test_x=test_x, test_y=test_y)
Example no. 24
sys.path.append("../../..")

import csv

import numpy as np

from NN import NeuralNetwork


# Needs to be decompressed from the kaggle_test_dataset.tar file
imgs_test = "test.csv"


if __name__ == "__main__":
    # Create network from serialization file
    net = NeuralNetwork()

    with open("../network_files/mnist_500_network_file", "rb") as f:
        net.load(f)

    # Open images files
    imgs = open(imgs_test, "r")
    imgs_reader = csv.reader(imgs, delimiter=',')

    # Make the output
    print("ImageId,Label")

    first = True
    index = 0
    for i in imgs_reader:
        if first:
            # skip the CSV header row
            first = False
            continue
        # Hypothetical continuation: the original snippet is truncated here,
        # and the exact prediction API is not shown in this listing.
        index += 1
        label = np.argmax(net.predict(np.array(i, dtype=float)))
        print("%d,%d" % (index, label))
Example no. 25
import numpy as np
from NN import NeuralNetwork
import pygame
from pygame.locals import *

side = 400
screen = pygame.display.set_mode((side, side))
running = 1

brain = NeuralNetwork(2, 4, 1, 0.3)
#brain.readState('XOR.state.json')

training_data = [{
    'input': [0, 0],
    'target': [0]
}, {
    'input': [1, 0],
    'target': [1]
}, {
    'input': [0, 1],
    'target': [1]
}, {
    'input': [1, 1],
    'target': [0]
}]


def drawPrediction():
    resolution = 10
    cols = rows = int(side / resolution)
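The snippet is truncated inside drawPrediction; a hedged sketch of the usual train-then-query loop for this XOR setup (the train and predict method names are assumptions, not confirmed by this listing):

import random

# Hypothetical training loop over the XOR data above.
for _ in range(50000):
    sample = random.choice(training_data)
    brain.train(sample['input'], sample['target'])

for sample in training_data:
    print(sample['input'], '->', brain.predict(sample['input']))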
Example no. 26
def do_graph(params):
    number_of_steps = params["Graph"]["NumberOfSteps"]
    if params["Payoff"] == "Call" or params["Payoff"] == "Calls&Puts":
        real_prices_array = np.full(number_of_steps, 0.)
    mc_prices_array = np.full(number_of_steps, 0.)
    mc_stds_array = np.full(number_of_steps, 0.)
    ad_prices_array = np.full(number_of_steps, 0.)
    ad_stds_array = np.full(number_of_steps, 0.)
    x_array = np.full(number_of_steps, 0.)
    x_min = params["Graph"]["XMin"]
    step_size = params["Graph"]["StepSize"]
    number_of_batches_for_training = params["Run"][
        "NumberOfBatchesForTraining"]
    number_of_batches_for_eval = params["Run"]["NumberOfBatchesForEval"]

    nn_list = [None for i in range(number_of_steps)]
    x_original = copy.deepcopy(params["Diffusion"]["X"])
    all_final_losses = np.full(number_of_steps, 0.)
    for i in range(number_of_steps):
        x = x_min + i * step_size
        params["Diffusion"]["X"] = x
        nn_list[i] = NeuralNetwork(params)
        mc_prices_array[i], mc_stds_array[i] = nn_list[i].monte_carlo_price(
            number_of_batches=number_of_batches_for_eval)
        if params["Run"]["DoAutomaticLearningRate"]:
            variance = np.square(
                mc_stds_array[i]
            ) * number_of_batches_for_eval * params["NN"]["NBatchSize"]
            log10_variance = np.floor(np.log10(variance))
            learning_rate = params["Run"][
                "BaseForAutomaticLearningRate"] / np.power(10, log10_variance)
        elif params["Run"]["DoListLearningRates"]:
            learning_rate = params["Run"]["ListLearningRates"][i]
        else:
            learning_rate = params["Run"]["LearningRate"]
        if params["NN"]["DoAutomaticLambdaConstraint"]:
            variance = np.square(
                mc_stds_array[i]
            ) * number_of_batches_for_eval * params["NN"]["NBatchSize"]
            log10_variance = np.floor(np.log10(variance))
            lambda_constraint = params["NN"][
                "BaseForAutomaticLambdaConstraint"] * np.power(
                    10, log10_variance)
            nn_list[i].params["NN"]["LambdaConstraint"] = lambda_constraint
            nn_list[i].lambda_constraint = lambda_constraint

        tf.Session().graph.finalize()
        all_final_losses[i] = nn_list[i].train(
            number_of_batches=number_of_batches_for_training,
            learning_rate=learning_rate)
        ad_prices_array[i], ad_stds_array[i] = nn_list[i].eval(
            number_of_batches=number_of_batches_for_eval)
        print("step_number =", i)
        print("x = ", x)
        print("learning_rate = ", learning_rate)
        print("lambda_constraint = ", lambda_constraint)
        print("mc_price = ", mc_prices_array[i])
        print("ad_price = ", ad_prices_array[i])
        print("mc_std = ", mc_stds_array[i])
        print("ad_std = ", ad_stds_array[i])
        if params["Diffusion"]["Type"] != "LV":
            if params["Payoff"] == "Call" or params["Payoff"] == "Calls&Puts":
                real_prices_array[i] = nn_list[i].bachelier_call_price()
        x_array[i] = x
        nn_list[i].sess.graph.finalize()
        tf.reset_default_graph()
        nn_list[i].sess.close()
        tf.reset_default_graph()
    params["Diffusion"]["X"] = x_original

    fig, ax1 = plt.subplots()
    # ax1.set_xlabel(r'$x_0$')
    if params["Payoff"] == "Call" or params["Payoff"] == "Calls&Puts":
        if params["Diffusion"]["Type"] != "LV":
            ax1.plot(x_array,
                     real_prices_array,
                     label=r'$\textnormal{Bachelier Price}$',
                     color='black')
    ax1.plot(x_array,
             ad_prices_array,
             linestyle=':',
             label=r'$\textnormal{Adaptative Price}$',
             color='tab:blue')
    """
    ax1.plot(x_array, ad_prices_array + 300 * ad_stds_array, linestyle=':', label=r'$\textnormal{Adaptative + 300 std}$',
             color='tab:cyan')
    ax1.plot(x_array, ad_prices_array - 300 * ad_stds_array, linestyle=':', label=r'$\textnormal{Adaptative - 300 std}$',
             color='tab:cyan')
    """
    ax1.plot(x_array,
             mc_prices_array,
             linestyle='-',
             label=r'$\textnormal{MC Price}$',
             color='tab:red')
    """
    ax1.plot(x_array, mc_prices_array + 300 * mc_stds_array, linestyle=':', label=r'$\textnormal{MC + 300 std}$',
             color='tab:pink')
    ax1.plot(x_array, mc_prices_array - 300 * mc_stds_array, linestyle=':', label=r'$\textnormal{MC - 300 std}$',
             color='tab:pink')
    """
    ax1.tick_params(axis='y')
    if params["Diffusion"]["Type"] == "Bachelier":
        my_title = r'Bachelier Prices vs Adaptative Prices vs MC Prices'
    elif params["Diffusion"]["Type"] == "LV":
        my_title = r'Adaptative Prices vs MC Prices for Local Volatility Diffusion'
    # plt.title(my_title)
    # plt.tight_layout()
    # plt.subplots_adjust(bottom=0.3)
    # ax1.legend(loc='upper center', bbox_to_anchor=(0.5, - 0.2))

    if params["Graph"]["ShowGraph"]:
        plt.show()

    if params["Graph"]["Save"]:
        conf_type, diffusion_type = NeuralNetwork.get_conf_and_diffusion_types(
            params["Conf"], params["Diffusion"]["Type"])
        a_type = "a_" + params["a"]
        file_path = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/graph_" + conf_type + "_" + diffusion_type + ".png"
        directory = os.path.dirname(file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        fig.savefig(file_path)
        file_path_json = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/json_" + conf_type + "_" + diffusion_type + ".json"
        params_json = json.dumps(params, indent=4)
        with open(file_path_json, "w") as f:
            f.write(params_json)
    plt.close()

    fig, ax1 = plt.subplots()
    # ax1.set_xlabel(r'$x_0$')
    """
    if params["Payoff"] == "Call" or params["Payoff"] == "Calls&Puts":
        error_adaptative = ad_prices_array - real_prices_array
        error_mc = mc_prices_array - real_prices_array
    """

    handle_list = [None for i in range(3)]
    label_list = [None for i in range(3)]
    # ax1.plot(x_array, error_adaptative, label=r'$\textnormal{Adaptative Error}$', color='tab:blue')
    # ax1.plot(x_array, error_mc, label=r'$\textnormal{MC Error}$', color='tab:red')
    handle_list[0], = ax1.plot(
        x_array,
        ad_stds_array,
        label=r'$\textnormal{Adaptative Standard Deviation}$',
        color='tab:blue',
        linestyle=":")
    label_list[0] = r'$\textnormal{Adaptative Standard Deviation}$'

    handle_list[1], = ax1.plot(x_array,
                               mc_stds_array,
                               label=r'$\textnormal{MC Standard Deviation}$',
                               color='tab:red',
                               linestyle="-")
    label_list[1] = r'$\textnormal{MC Standard Deviation}$'

    # ax1.set_ylabel(r"Standard Deviation")
    ax1.tick_params(axis='y', labelcolor='tab:blue')
    my_title = r'Adaptative vs MC Errors and Standard Deviations'
    # plt.title(my_title)
    # plt.tight_layout()
    # ax1.legend(loc=2)

    ax2 = ax1.twinx()
    handle_list[2], = ax2.plot(
        x_array,
        ad_stds_array / mc_stds_array,
        label=r'Ratio of Adaptive and MC Standard Deviations',
        color="tab:orange",
        linestyle="--")
    label_list[2] = r'Ratio of Adaptive and MC Standard Deviations'
    # ax2.legend(loc=1)
    # ax2.set_ylabel(r"Ratio")
    ax2.set_ylim(0.0, 1.5)
    ax2.tick_params(axis="y", labelcolor="tab:orange")

    # plt.subplots_adjust(bottom=0.3)
    # plt.legend(handles=handle_list, labels=label_list, loc='upper center', bbox_to_anchor=(0.5, - 0.2))

    if params["Graph"]["ShowGraph"]:
        plt.show()

    if params["Graph"]["Save"]:
        conf_type, diffusion_type = NeuralNetwork.get_conf_and_diffusion_types(
            params["Conf"], params["Diffusion"]["Type"])
        a_type = "a_" + params["a"]
        file_path = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/graph_" + conf_type + "_" + diffusion_type + "_errors_and_stds.png"
        directory = os.path.dirname(file_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        fig.savefig(file_path)
    plt.close()

    file_path_all_final_losses = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/graph_" + conf_type + "_" + diffusion_type + "_all_final_losses.txt"
    f = open(file_path_all_final_losses, "w")
    file_string = ""
    for i in range(len(all_final_losses)):
        file_string += "var(Z) for step i = " + str(i) + " is: " + str(
            all_final_losses[i]) + "\n"
    f.write(file_string)
    f.close()
Example no. 27
def main():
    parser = argparse.ArgumentParser(
        description='Script to train a language model')
    parser.add_argument("--training_size",
                        default=10000,
                        type=int,
                        help="define training data set size")
    parser.add_argument("--test_size",
                        default=1000,
                        type=int,
                        help="define test data set size")
    parser.add_argument(
        "--data_biased",
        default="../data/target_biased.txt",
        type=str,
        help="text file containing the source biased data (all)")
    parser.add_argument("--data_neutral",
                        default="../data/target_neutral.txt",
                        type=str,
                        help="text file containing the source neutral data")
    parser.add_argument(
        "--model",
        default="NB",
        type=str,
        help=
        "choose model ||| NB --> Naïve Bayes ||| SVM --> Support Vector Machine ||| NN --> Neural Network"
    )
    parser.add_argument(
        "--user_input",
        action="store_true",
        help="Command line input for custom sentence classification")
    parser.add_argument("--load_model",
                        action="store_true",
                        help="Load the best performing SVM model")
    args = parser.parse_args()

    #Load data from files
    biased_data = load_data(args.data_biased, "biased")
    neutral_data = load_data(args.data_neutral, "neutral")

    #Split data into sets
    training_data, dev_data, test_data = prepare_data(args.training_size,
                                                      args.test_size,
                                                      biased_data,
                                                      neutral_data)

    #Get POS tags
    training_data_pos = pos_tag(training_data)
    dev_data_pos = pos_tag(dev_data)
    test_data_pos = pos_tag(test_data)

    print("Length of TRAINING data: ", len(training_data[0]))
    print("Length of DEVELOPMENT data: ", len(dev_data[0]))
    print("Length of TEST data: ", len(test_data[0]))

    if (args.model == "NB"):

        model = NaiveBayes(training_data_pos, test_data_pos)

    elif (args.model == "SVM"):

        if args.load_model:

            model = SVM(training_data_pos, test_data_pos, False,
                        "SVM10000POS.joblib")
        else:

            model = SVM(training_data_pos, test_data_pos, False)

    elif (args.model == "NN"):

        model = NeuralNetwork(training_data_pos, dev_data_pos, test_data_pos,
                              False)

    if args.user_input:
        user_input = input("Enter the sentence that you want to classify: ")
        user_label = input(
            "Do you think it's biased or not? Enter 1 (for biased) | 0 (for neutral): "
        )
        user_data = [[user_input], [user_label]]
        model.predict(pos_tag(user_data))
    else:

        model.predict()
Example no. 28
File: v1.py Project: ruitaiS/vybLab
def generate():
    Digit_NN = NeuralNetwork(no_of_in_nodes=image_pixels,
                             no_of_out_nodes=10,
                             no_of_hidden_nodes=100,
                             learning_rate=0.1)
    '''
    Letter_NN = NeuralNetwork(no_of_in_nodes = image_pixels, 
                        no_of_out_nodes = 26, 
                        no_of_hidden_nodes = 100,
                        learning_rate = 0.1)
    '''

    Meta_NN = NeuralNetwork(Digit_NN.no_of_out_nodes, 2, 100, 0.05)

    #Train Digit NN
    for i in range(len(digits_train_imgs)):
        Digit_NN.train(digits_train_imgs[i], digits_train_labels_one_hot[i])

    #Display Statistics for Digits
    corrects, wrongs = Digit_NN.evaluate(digits_train_imgs,
                                         digits_train_labels)
    print("accuracy train: ", corrects / (corrects + wrongs))
    corrects, wrongs = Digit_NN.evaluate(digits_test_imgs, digits_test_labels)
    print("accuracy: test", corrects / (corrects + wrongs))

    #Train MetaNN, save NN output vectors to be evaluated later
    for i in range(len(mixed_train_imgs)):
        Meta_NN.train(np.sort(Digit_NN.run(mixed_train_imgs[i]).T),
                      mixed_train_labels[i])

    #Display Statistics for Meta
    #TODO: Investigate whether this has redundant code
    corrects, wrongs = Meta_NN.metaEval(Digit_NN, mixed_train_imgs,
                                        mixed_train_labels)
    train_accuracy = corrects / (corrects + wrongs)
    print("Train Accuracy: ", train_accuracy)
    print("Train Confusion Matrix: ")
    print(
        Meta_NN.meta_confusion_matrix(Digit_NN, mixed_train_imgs,
                                      mixed_train_labels, mixed_train_values))

    corrects, wrongs = Meta_NN.metaEval(Digit_NN, mixed_test_imgs,
                                        mixed_test_labels)
    test_accuracy = corrects / (corrects + wrongs)
    print("Test Accuracy: ", test_accuracy)
    print("Test Confusion Matrix: ")
    print(
        Meta_NN.meta_confusion_matrix(Digit_NN, mixed_test_imgs,
                                      mixed_test_labels, mixed_test_values))

    return train_accuracy, test_accuracy, Digit_NN, Meta_NN
Example no. 29
def double_cross_validation(workers: int,
                            testFolder: int,
                            nFolder: int,
                            dataSet,
                            f: ActivFunct,
                            learnRate: list,
                            momRate: list,
                            regRate: list,
                            ValMax: list,
                            HiddenUnits: list,
                            OutputUnits: list,
                            MaxEpochs: list,
                            Tolerance: list,
                            startTime,
                            errorFunct=None,
                            modeLearn: ModeLearn = ModeLearn.BATCH,
                            miniBatchDim=None,
                            errorVlFunct=None,
                            hiddenF: ActivFunct = None):
    if testFolder <= 1:
        raise ValueError("Wrong value of num. folders inserted")

    cp = dataSet.copy()

    # Shuffle the data set.
    rnd.shuffle(cp)

    # Build the sublist of the data that divides exactly by testFolder.
    h = len(cp) - len(cp) % testFolder
    dataSetExact = cp[0:h]

    # Create the list of folds.
    folderDim = int(len(dataSetExact) / testFolder)
    folders = [
        cp[i * folderDim:(i + 1) * folderDim] for i in range(testFolder)
    ]

    # Distribute the leftover elements.
    for i in range(len(dataSet) - h):
        folders[i].append(cp[i + h])

    errList = list()
    for i in range(len(folders)):
        foldersCopy = folders.copy()
        testSet = foldersCopy[i]
        del foldersCopy[i]

        vlSet = list()
        for j in range(len(foldersCopy)):
            vlSet += foldersCopy[j]

        e = cross_validation(workers,
                             nFolder,
                             modeLearn,
                             vlSet,
                             f,
                             errorFunct,
                             learnRate,
                             momRate,
                             regRate,
                             ValMax,
                             HiddenUnits,
                             OutputUnits,
                             MaxEpochs,
                             Tolerance,
                             startTime,
                             miniBatchDim=miniBatchDim,
                             errorVlFunct=errorVlFunct,
                             hiddenF=hiddenF)
        theta = getBestResult(e)[0]
        nn = NeuralNetwork(vlSet, f, new_hyp=theta, Hiddenf=hiddenF)
        (_, testErr, _, _) = nn.learn(modeLearn, errorFunct, miniBatchDim,
                                      testSet)
        errList.append(testErr[-1])

    return 1 / testFolder * sum(errList)
Example no. 30
def k_fold_CV_single(k: int,
                     dataSet,
                     f: ActivFunct,
                     theta,
                     errorFunct=None,
                     modeLearn: ModeLearn = ModeLearn.BATCH,
                     miniBatchDim=None,
                     errorVlFunct=None,
                     hiddenF: ActivFunct = None):
    if k <= 0:
        raise ValueError("Wrong value of num. folders inserted")

    cp = dataSet.copy()

    # Shuffle the data set.
    rnd.shuffle(cp)

    # Build the sublist of the data that divides exactly by k.
    h = len(cp) - len(cp) % k
    dataSetExact = cp[0:h]

    # Create the list of folds.
    folderDim = int(len(dataSetExact) / k)
    folders = [cp[i * folderDim:(i + 1) * folderDim] for i in range(k)]

    # Distribute the leftover elements.
    for i in range(len(cp) - h):
        folders[i].append(cp[i + h])

    errore = list()

    # for plotting the error on the training set and the validation set
    trErrorPlot = list()
    vlErrorPlot = list()

    for i in range(len(folders)):
        lcopy = folders.copy()
        del lcopy[i]

        vlSet = folders[i]
        trSet = list()
        for j in range(len(lcopy)):
            trSet += lcopy[j]
        nn = NeuralNetwork(trSet, f, theta, Hiddenf=hiddenF)
        (trErr, vlErr, trAcc, vlAcc) = nn.learn(modeLearn,
                                                errorFunct,
                                                miniBatchDim,
                                                vlSet,
                                                errorVlFunct=errorVlFunct)
        trErrorPlot.append(trErr)
        vlErrorPlot.append(vlErr)

        errore.append(nn.getError(vlSet, 1 / len(vlSet), errorVlFunct))

    err = sum(errore) / k

    # make sure all the error plots have the same length (pad shorter ones with their last value)
    maxLen = len(trErrorPlot[0])
    for i in range(1, len(trErrorPlot)):
        if len(trErrorPlot[i]) > maxLen:
            maxLen = len(trErrorPlot[i])

    for i in range(len(trErrorPlot)):
        if len(trErrorPlot[i]) < maxLen:
            for j in range(maxLen - len(trErrorPlot[i])):
                trErrorPlot[i].append(trErrorPlot[i][-1])
                vlErrorPlot[i].append(vlErrorPlot[i][-1])

    trErrorArray = np.array(trErrorPlot[0])
    vlErrorArray = np.array(vlErrorPlot[0])

    for i in range(1, len(trErrorPlot)):
        trErrorArray = trErrorArray + np.array(trErrorPlot[i])
    trErrorArray = trErrorArray / k

    for i in range(1, len(vlErrorPlot)):
        vlErrorArray = vlErrorArray + np.array(vlErrorPlot[i])
    vlErrorArray = vlErrorArray / k

    return (err, trErrorArray, vlErrorArray)
Example no. 31

def genetic_algorithm(training_inputs, training_groundtruth, test_inputs, test_groundtruth,
                      num_population, times, invasion, hidden_nodes, lr, lr_decay, mf, batch_size, epoch):
    
    '''
    This function performs a genetic algorithm to find the best combination of hyperparameters for the training.
    [training_inputs, training_groundtruth, test_inputs, test_groundtruth] is a training/testing split used to evaluate training performance.
    num_population: the number of random candidates, each with a random set of features.
    times: total number of "crossings" in which parents exchange features, aiming to produce "progeny" with better performance.
    invasion: the number of new "invading" individuals; besides the progeny obtained from crossing, each round also introduces some new individuals with different features,
    so that the whole population carries more information for the optimization.

    [hidden_nodes, lr, lr_decay, mf, batch_size, epoch] are the six features used as the hyperparameters to optimize.
    Each is passed as a two-element list representing a range; the function uses the rand function to draw a random value within that range.
    '''
    
    
    # generate the parents population
    print('generating ' + str(num_population) + ' individuals')

    # make sure the inputs are correct
    assert hidden_nodes[0] < hidden_nodes[1], 'something went wrong!'
    assert lr[0] < lr[1], 'something went wrong!'
    assert lr_decay[0] < lr_decay[1], 'something went wrong!'
    assert mf[0] < mf[1], 'something went wrong!'
    assert batch_size[0] < batch_size[1], 'something went wrong!'
    assert epoch[0] < epoch[1], 'something went wrong!'
    

    # generating a bunch of individuals based on the range that provided
    individuals_genom = []
    individuals_phyno = []
    for i in range(num_population):
        # randomly generate the features
        _hidden_nodes = int(rand(hidden_nodes[0],hidden_nodes[1]))
        _lr = rand(lr[0], lr[1])
        _lr_decay = rand(lr_decay[0], lr_decay[1])
        _mf = rand(mf[0],mf[1])
        _batch_size = int(rand(batch_size[0],batch_size[1] ))
        _epoch = int(rand(epoch[0], epoch[1]))
        
        # build an individual and put it into the whole set
        individuals_genom.append( [_hidden_nodes, _lr, _lr_decay, _mf, _batch_size, _epoch])
        NN = NeuralNetwork(input_layer=68, hidden_layer= _hidden_nodes, output_layer=1,
                           lr = _lr, lr_decay= _lr_decay, iteration= _epoch,
                           batch_size= _batch_size, mf= _mf)
        NN.make_weights()
        NN.train(training_inputs, training_groundtruth)
        # also store the individual's performance in a list vector
        individuals_phyno.append(AUROC_cruve(NN, test_inputs, test_groundtruth, Fig=False))
        
        
    # keep the best performer for the next generation
    my_phyno = max(individuals_phyno)
    idx = individuals_phyno.index(my_phyno)
    my_genome = individuals_genom[idx]
    
    n = invasion
    # begin the cross, do N times of cross
    for t in range(times):
        print('For time ' + str(t) + ' the best candidate and the best result are')
        print(my_genome)
        print(my_phyno)
      
        if t >=1:   
            if len(individuals_genom) % 2 == 0:  # add the new invasion people,
                                                 # make sure that the number is even 
                add = n
            else:
                add = n+1
            for i in range(add):
        
                _hidden_nodes = int(rand(hidden_nodes[0],hidden_nodes[1]))
                _lr = rand(lr[0] , lr[1])
                _lr_decay = rand(lr_decay[0], lr_decay[1])
                _mf = rand(mf[0],mf[1])
                _batch_size = int(rand(batch_size[0],batch_size[1] ))
                _epoch = int(rand(epoch[0], epoch[1]))

                individuals_genom.append( [_hidden_nodes, _lr, _lr_decay, _mf, _batch_size, _epoch])
                NN = NeuralNetwork(input_layer=68, hidden_layer= _hidden_nodes, output_layer=1,
                                       lr = _lr, lr_decay= _lr_decay, iteration= _epoch,
                                       batch_size= _batch_size, mf= _mf)
                
                
                NN.make_weights()
                NN.train(training_inputs, training_groundtruth)
                individuals_phyno.append(AUROC_cruve(NN, test_inputs, test_groundtruth, Fig=False))
        next_generation_genom = []  
        next_generation_phyno = []
        next_generation_genom.append(my_genome)
        next_generation_phyno.append(my_phyno)
        assert len(individuals_genom) %2 == 0, 'something wrong!'
        for i in range(int(len(individuals_genom)/2)):

            child_genom, child_phyno = cross(individuals_genom[i*2],individuals_genom[ i*2 +1],
                                             training_inputs, training_groundtruth, test_inputs, test_groundtruth)
            # get the child, put the child into the next generation
            next_generation_genom.append(child_genom)
            next_generation_phyno.append(child_phyno)
          
        # next is now the parents
        individuals_genom = next_generation_genom
        individuals_phyno = next_generation_phyno
        # shift it randomly
        index = list(range(len(individuals_genom)))
        random.shuffle(index)
        individuals_phyno = [individuals_phyno[x] for x in index]
        individuals_genom = [individuals_genom[x] for x in index]
        # still get the best performance
        my_phyno = max(individuals_phyno)
        idx = individuals_phyno.index(my_phyno)
        my_genome = individuals_genom[idx]
    
    print('The whole crossing process is done')
    my_phyno = max(individuals_phyno)
    idx = individuals_phyno.index(my_phyno)
    my_genome = individuals_genom[idx]   
    print('the best candidate and the best result are')
    print(my_genome)
    print(my_phyno)
        
Example no. 32

def cross(Father, Mother, training_inputs, training_groundtruth, test_inputs, test_groundtruth):
    '''
    This cross function is used by the genetic-algorithm optimization. Its inputs are two individuals, and it returns a "child" with the better training performance.
    The function first generates two children; each single feature of a child comes either from the 'mother' or the 'father'.
    A quick training and testing pass is then used to evaluate the AUROC score,
    and the 'child' with the better performance is returned.
    '''
    Child_1 = []
    Child_2 = []
    for i in range(6):
        coin = rand(-1,1)
        if coin>= 0:
            Child_1.append(Father[i])
            Child_2.append(Mother[i])
        else: 
            Child_1.append(Mother[i])
            Child_2.append(Father[i])
    Child_1 = mutate(Child_1)
    Child_2 = mutate(Child_2)
    

    
    # build the network, make weights, and train it
    NN_1 = NeuralNetwork(input_layer=68, hidden_layer= Child_1[0], output_layer=1,
                            lr = Child_1[1], lr_decay= Child_1[2], iteration= Child_1[5],
                            batch_size= Child_1[4], mf= Child_1[3])
        
    NN_2 = NeuralNetwork(input_layer=68, hidden_layer= Child_2[0], output_layer=1,
                            lr = Child_2[1], lr_decay= Child_2[2], iteration= Child_2[5],
                            batch_size= Child_2[4], mf= Child_2[3])
        
    NN_1.make_weights()
    NN_2.make_weights()

    NN_1.train(training_inputs, training_groundtruth)
    NN_2.train(training_inputs, training_groundtruth)
        
    Score_1 = AUROC_cruve(NN_1, test_inputs, test_groundtruth, Fig=False)
    Score_2 = AUROC_cruve(NN_2, test_inputs, test_groundtruth, Fig=False)
    if Score_1 > Score_2:
        return Child_1, Score_1
    else: 
        return Child_2, Score_2
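mutate is called in cross but not shown in this listing; a plausible sketch (an assumption), jittering each feature slightly while keeping the integer-valued ones integral:

import random

def mutate(child, rate=0.1):
    # Hypothetical helper: perturb each gene by up to +/- rate; indices
    # 0, 4 and 5 (hidden_nodes, batch_size, epoch) stay integers.
    mutated = []
    for i, gene in enumerate(child):
        value = gene * (1 + random.uniform(-rate, rate))
        mutated.append(int(round(value)) if i in (0, 4, 5) else value)
    return mutated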
Example no. 33
from NN import NeuralNetwork
from Normalizer import Normalizer
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

cancer = load_breast_cancer()
X = cancer['data']
y = cancer['target']
length = len(cancer['feature_names'])

nn = NeuralNetwork([length, 30, 20, 10, 5, 1])

X_train, X_test, y_train, y_test = train_test_split(X, y)

normalize = Normalizer()
normalize.fit(X_train)
X_train = normalize.transform(X_train)
X_test = normalize.transform(X_test)

nn.fit(X_train, y_train, epochs=1000, verbose=False)
predictions = nn.predict(X_test)
print(nn.cost(predictions, y_test))
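The script only reports the cost; a short follow-up sketch (assuming predict returns probabilities in [0, 1]) to report accuracy as well:

# Hypothetical follow-up: threshold predictions at 0.5 and compare
# with the binary targets.
labels = (np.asarray(predictions) > 0.5).astype(int).ravel()
print("Test accuracy:", np.mean(labels == np.asarray(y_test).ravel()))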
Example no. 34
import pandas
import pygame
import numpy as np
from pygame.locals import *
from NN import NeuralNetwork

side = 28 * 8
step = 8

screen = pygame.display.set_mode((side, side))
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 30)
running = 1
numberIndex = 0

brain = NeuralNetwork(784, 100, 10, 0.3)
brain.readState('digit-recognizer.state.json')
numbers = pandas.read_csv('digit-recognizer/test.csv')

numberIndex = 0


def drawNumber(index):
    number = numbers.iloc[index]

    pixelX = 0
    pixelY = 0
    for (columnName, color) in number.items():  # iteritems() was removed in newer pandas
        if pixelX == 28:
            pixelX = 0
            pixelY += 1
Example no. 35
def do_robust_graphs(params, nn):
    if params["Diffusion"]["Type"] == "LV":
        parameters = ["X", "A", "B", "Rho", "M", "Sigma"]
    elif params["Diffusion"]["Type"] == "Bachelier":
        parameters = ["X", "Sigma"]

    for parameter in parameters:
        base_value = params["Diffusion"][parameter]
        number_of_steps = 20
        step_size = base_value * 0.8 / number_of_steps

        sigma_array_adaptive = np.full(number_of_steps, 0.)
        sigma_array_mc = np.full(number_of_steps, 0.)
        base_values_array = np.full(number_of_steps, 0.)

        ratio = params["General"]["Run"]["RatioRobustGraphs"]

        for step in range(number_of_steps):
            cur_parameter = base_value * 0.6 + step * step_size
            base_values_array[step] = cur_parameter
            # The six per-parameter branches were identical except for the
            # dictionary key, so they collapse into one block indexed by
            # `parameter`.
            nn.params["Diffusion"][parameter] = cur_parameter
            av_price, glob_std = nn.eval(
                params["Run"]["NumberOfBatchesForEval"] // ratio,
                doing_robust_graph=True)
            mc_price, mc_std = nn.monte_carlo_price(
                params["Run"]["NumberOfBatchesForEval"] // ratio)
            sigma_array_adaptive[step] = glob_std
            sigma_array_mc[step] = mc_std
            nn.params["Diffusion"][parameter] = base_value
            print("Finished step=", step,
                  "of do_robust_graphs with parameter", parameter)

        fig, ax1 = plt.subplots()
        ax1.plot(base_values_array,
                 sigma_array_mc,
                 color='tab:blue',
                 linestyle=":")
        ax1.tick_params(axis="y", labelcolor="tab:blue")
        ax1.plot(base_values_array,
                 sigma_array_adaptive,
                 color="tab:red",
                 linestyle="-")
        ax1.set_ylim(bottom=0.0)

        ax2 = ax1.twinx()
        ax2.plot(base_values_array,
                 sigma_array_adaptive / sigma_array_mc,
                 color="tab:orange",
                 linestyle="--")
        ax2.set_ylim(0.0, 1.5)
        ax2.tick_params(axis="y", labelcolor="tab:orange")

        if params["Graph"]["ShowGraph"]:
            plt.show()

        if params["Graph"]["Save"]:
            conf_type, diffusion_type = NeuralNetwork.get_conf_and_diffusion_types(
                params["Conf"], params["Diffusion"]["Type"])
            a_type = "a_" + params["a"]
            file_path = "./figures/" + conf_type + "/" + diffusion_type + "/" + a_type + "/graph_" + conf_type + "_" + diffusion_type + "_robust_" + parameter + ".png"
            directory = os.path.dirname(file_path)
            if not os.path.exists(directory):
                os.makedirs(directory)
            fig.savefig(file_path)
        plt.close()

    return