Example #1
from neural_network.activations import *
from neural_network.data_manipulation import load_easy_data
from neural_network.plots import plot_data_2d, plot_measure_results_data
from neural_network.learn_network import learn_network
import matplotlib.pyplot as plt

# load dataset
train, test = load_easy_data()

# x and y - observations and values for them
x = train[:, 0:2]
y = train[:, 2:3]

# plot data classes
plt.figure(figsize=(12.8, 9.6))
plt.subplot(221)
plot_data_2d(x[:, 0], x[:, 1], y[:, 0], title='True classes of points on the plane', show=False)

# learn model and plot result classes
plt.subplot(222)
mse_linear = learn_network(x, y, [20], [sigmoid, linear], iterations=100, regression=False, plot_title='Predicted classes for linear function', plot_show=False)
plt.subplot(223)
mse_softmax = learn_network(x, y, [20], [sigmoid, softmax], iterations=100, regression=False, plot_title='Predicted classes for softmax function', plot_show=False)

plt.subplot(224)
plot_measure_results_data([mse_linear, mse_softmax], labels=['linear', 'softmax'], title_base="Accuracy", ylabel="Accuracy", title_ending=" for last layer activation function", show=False)
plt.show()
Example #2
            regularization_type=reg_type,
            plot_title="Prediction with sigmoid activation function and " +
            str(i) + " hidden layers",
            plot=False,
            return_result=True,
            x_test=x_test,
            y_test=y_test,
            use_test_and_stop_learning=True,
            no_change_epochs_to_stop=no_change_epochs_to_stop)
        mses.append(mse_reg)
        results.append(res_reg)

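# compare the predictions and the error curves of the different regularization settings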
plt.figure(figsize=(12.8, 4.8))
plt.subplot(121)
plot_data_1d_compare(
    x,
    y,
    results,
    labels=["true"] + labels,
    title=
    "Comparison of different regularization methods on multimodal sparse dataset",
    show=False)
# plot from the 50th iteration to better see the differences
measures = add_the_same_value_to_the_same_length(mses)
plt.subplot(122)
plot_measure_results_data(measures,
                          labels=labels,
                          title_ending=" for multimodal sparse dataset",
                          from_error=50)
plt.show()
Example #3
mse_Xavier, Xavier = learn_network(x,
                                   y, [20], [sigmoid, linear],
                                   initialize_weights='Xavier',
                                   iterations=100,
                                   momentum_type='normal',
                                   plot=False,
                                   return_result=True)

labels = ['Zero weights', 'Uniform distribution', 'Xavier']

plot_data_1d_compare(x,
                     y, [zeros, normal, Xavier],
                     labels=["true"] + labels,
                     title="Comparison of initialization weights")
plot_measure_results_data([mse_zeros, mse_normal, mse_Xavier],
                          labels=labels,
                          title_ending=" for initialization weights",
                          from_error=5)

# batch size experiment
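# baseline run: a single large batch of 10000 samples (presumably the full training set) with a very small learning rate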
mse_all, res_all = learn_network(x,
                                 y, [20], [sigmoid, linear],
                                 initialize_weights='Xavier',
                                 eta=0.00001,
                                 batch_size=10000,
                                 iterations=100,
                                 momentum_type='normal',
                                 plot=False,
                                 return_result=True)
mse_batch, batch = learn_network(x,
                                 y, [20], [sigmoid, linear],
                                 initialize_weights='Xavier',
Example #4
            x,
            y,
            neurons[:i], [activation] * i + [softmax],
            beta=0.01,
            eta=0.01,
            epochs=1,
            iterations=iterations,
            regression=False,
            regularization_lambda=reg_lambda,
            regularization_type=reg_type,
            plot_title="Prediction with sigmoid activation function and " +
            str(i) + " hidden layers",
            plot=True,
            return_result=False,
            x_test=x_test,
            y_test=y_test,
            use_test_and_stop_learning=True,
            no_change_epochs_to_stop=no_change_epochs_to_stop)
        mses.append(mse_reg)

# plot from the 200th iteration to better see the differences
measures = add_the_same_value_to_the_same_length(mses)
plt.subplot(122)
plot_measure_results_data(measures,
                          labels=labels,
                          title_base="accuracy",
                          ylabel="accuracy",
                          title_ending=" for different regularization methods",
                          from_error=200)
plt.show()
Example #5
    best_results = []
    gen = Genetic(500, 5, eval_function, mutation_coef=1, mutation_percentage=mutation_percentage)
    for i in range(100):
        gen.learn_population(epochs=1)
        best_results.append(gen.get_best()[0])
    mses1.append(best_results)
    print(f'Ended {mutation_percentage}')

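# sweep the mutation coefficient with the mutation percentage fixed at 0.3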
mses2 = []
mutation_coefs = [0.1, 0.2, 0.5, 1]
for mutation_coef in mutation_coefs:
    best_results = []
    gen = Genetic(500, 5, eval_function, mutation_coef=mutation_coef, mutation_percentage=0.3)
    for i in range(100):
        gen.learn_population(epochs=1)
        best_results.append(gen.get_best()[0])
    mses2.append(best_results)
    print(f'Ended {mutation_coef}')

# plot results
title = "Function value "
y_label = "Logarithm of function value "
labels = ["Population size = " + str(val) for val in population_size]
plot_measure_results_data(mses0, title_base=title, ylabel=y_label, labels=labels, from_error=0, y_log=True)

labels = ["Mutation percentage = " + str(val) for val in mutation_percentages]
plot_measure_results_data(mses1, title_base=title, ylabel=y_label, labels=labels, from_error=0, y_log=True)

labels = ["Mutation coefficient = " + str(val) for val in mutation_coefs]
plot_measure_results_data(mses2, title_base=title, ylabel=y_label, labels=labels, from_error=0, y_log=True)
Example #6
        str(i) + " hidden layers",
        plot=False)
    mse_tanh = learn_network(
        x,
        y,
        neurons[:i], [tanh] * i + [softmax],
        beta=0.01,
        eta=0.01,
        epochs=1,
        iterations=1000,
        regression=False,
        plot_title="Prediction with tanh activation function and " + str(i) +
        " hidden layers",
        plot=False)

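    # one subplot per network depth, comparing all four activation functions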
    plt.subplot(220 + i + 1)
    plot_measure_results_data([mse_linear, mse_ReLU, mse_sigmoid, mse_tanh],
                              labels=['linear', 'ReLU', 'sigmoid', 'tanh'],
                              title_base="accuracy",
                              ylabel="accuracy",
                              title_ending=" for " + str(i) +
                              " layers networks",
                              show=False,
                              from_error=5)
plt.show()
# Results
# The best activation is tanh, followed by ReLU and sigmoid*; linear is the worst one
# * for bigger networks sigmoid is better than ReLU (and as good as tanh), and with 3 layers ReLU produced NaN gradients and the training failed
# Linear activation is the worst for any number of layers
# The more layers the network has, the better the results it achieves
Example #7
                15,
                score_type,
                use_speciation=speciation)

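    # train NEAT for 50 epochs; the per-epoch best scores and accuracies are collected afterwards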
    for i in range(50):
        print(f'Start of epoch: {i+1}')
        neat.epoch(i)

    scores.append(neat.best_scores)
    accuracies.append(neat.best_accuracies)

    best = neat.get_best_population("accuracy")[0]
    res = neat.population[best].evaluate(copy.deepcopy(data))
    results = np.vstack([res[2], res[3], res[4]])
    y_predicted = np.argmax(results, axis=0)
    plt.subplot(2, 2, index)
    plot_data_2d(x[:, 0],
                 x[:, 1],
                 y_predicted,
                 title=labels[index - 2],
                 show=False)

plt.show()

# plot_measure_results_data(scores, title_base="Score ", ylabel="Score ", labels=labels, from_error=0, y_log=True)
plot_measure_results_data(accuracies,
                          title_base="Accuracy ",
                          ylabel="Accuracy ",
                          labels=labels,
                          from_error=0)
Example #8
        indexes = np.argsort(cut.evaluate())
        scores = []
        for i in range(50):
            cut.learn_population(1)
            indexes = np.argsort(cut.evaluate())
            res = cut.__evaluate_one__(indexes[0], return_area=False)
            scores.append(res / cut.best_rectangle_score_per_unit / circle)
        Ascores1.append(scores)
    AAscores1.append(Ascores1)

plt.figure(figsize=(12.8, 14.4))
for i, Ascores1 in enumerate(AAscores1):
    plt.subplot(3, 2, i + 1)
    plot_measure_results_data(Ascores1,
                              title_base=f"Score for R = {r_labels[i]}",
                              labels=labels1,
                              ylabel="Score normalized",
                              show=False)
plt.show()

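# second experiment: vary the mutation percentage for several stock radii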
AAscores2, labels2, r_labels = [], [], []
for r in [800, 850, 1000, 1100, 1200]:
    r_labels.append(str(r))
    Ascores1 = []
    for mutation_percentage in [0.1, 0.2, 0.3, 0.5]:
        labels2.append(f'Mutation percentage = {mutation_percentage}')
        cut = CuttingStock(r, "r" + str(r) + ".csv", n=1500)
        circle = r * r * math.pi
        indexes = np.argsort(cut.evaluate())
        scores = []
        for i in range(50):
Example #9
        " hidden layers",
        plot=False,
        return_result=True)

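    # odd subplot: predictions vs. the true values, even subplot: error curves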
    plt.subplot(420 + 2 * i - 1)
    plot_data_1d_compare(x,
                         y, [res_linear, res_ReLU, res_sigmoid, res_tanh],
                         labels=["true"] + labels,
                         title="Comparison of activation functions for " +
                         str(i) + " hidden layers networks",
                         show=False)
    # plot from 5th iteration to better see differences
    plt.subplot(420 + 2 * i)
    plot_measure_results_data([mse_linear, mse_ReLU, mse_sigmoid, mse_tanh],
                              labels=labels,
                              title_ending=" for " + str(i) +
                              " layers networks",
                              from_error=5,
                              show=False)
    if i == 3:
        plt.subplot(420 + 2 * i + 2)
        plot_measure_results_data([mse_linear, mse_sigmoid, mse_tanh],
                                  labels=labels[:1] + labels[2:4],
                                  colors=['red', 'cyan', 'yellow'],
                                  title_ending=" for " + str(i) +
                                  " layers networks",
                                  from_error=5,
                                  show=False)
plt.show()

# Results
# Mainly the same as for steps-large; the only differences are:
Example #10
                             momentum_type='momentum',
                             lambda_momentum=0.9,
                             eta=0.01,
                             epochs=1,
                             iterations=100,
                             plot=False,
                             return_result=True)

# learn model with RMSProp and show model
mse_rms, rms = learn_network(x,
                             y, [20], [sigmoid, linear],
                             momentum_type='RMSProp',
                             beta=0.01,
                             eta=0.1,
                             epochs=1,
                             iterations=100,
                             plot=False,
                             return_result=True)

labels = ['No momentum', 'Momentum', 'RMSProp']

# plot data and mse
plot_data_1d_compare(x,
                     y, [base, mom, rms],
                     labels=["true"] + labels,
                     title="Comparison of momentum learning")
plot_measure_results_data([mse, mse_mom, mse_rms],
                          labels=labels,
                          title_ending=" for momentum learning",
                          from_error=5)