def task_Seeds():
    hidden_nodes = 2
    epochs = 1500
    learning_rate = 0.01
    momentum = 0.1
    bias = True
    network = NeuralNetwork(input_nodes=7,
                            hidden_nodes=hidden_nodes,
                            output_nodes=3,
                            learning_rate=learning_rate,
                            momentum=momentum,
                            bias=bias,
                            epochs=epochs)

    input_list = initialize_data_with1stcolumn("seeds_dataset.csv")
    output = []
    indices = []
    print(input_list)
    # build [target, features] pairs and the true class index for each sample
    for i in range(int(len(input_list) / 3)):
        output.append([[1, 0, 0], input_list[i]])
        indices.append(0)
    for i in range(int(len(input_list) / 3)):
        output.append([[0, 1, 0], input_list[i + int(len(input_list) / 3)]])
        indices.append(1)
    for i in range(int(len(input_list) / 3)):
        output.append([[0, 0, 1],
                       input_list[i + int(len(input_list) / 3) * 2]])
        indices.append(2)

    print(output)

    shuffled_output = copy.copy(output)
    random.shuffle(shuffled_output)

    for i in range(epochs):
        for e in range(len(shuffled_output)):
            network.train_manual_epochs(shuffled_output[e][1],
                                        shuffled_output[e][0], i, e == 0)

    numpy.set_printoptions(suppress=True)  # avoid e-05 notation

    fin = []
    for i in range(len(input_list)):
        fin.append(network.query(input_list[i]))

    error = 0
    for elem in range(len(fin)):
        if numpy.argmax(fin[elem]) != indices[elem]:
            error += 1

    print("Seeds error rate = " + str(error / len(fin) * 100) + "%")

    parameters = parameters_as_string(hidden_nodes, learning_rate, momentum,
                                      epochs, bias)
    calculate_results_table(3, indices, fin,
                            'Seeds result table \n' + parameters)
    print_plot(network.sampling_iteration, network.errors_for_plot,
               'Seeds error plot\n' + parameters)
Example #2
def task_Seeds():
    numpy.set_printoptions(suppress=True)  # avoid e-05 notation
    hidden_nodes = 4
    epochs = 1000
    learning_rate = 0.2
    momentum = 0.9
    bias = False
    network = NeuralNetwork(input_nodes=7, hidden_nodes=hidden_nodes, output_nodes=3,
                            learning_rate=learning_rate, momentum=momentum, bias=bias, epochs=epochs)

    input_list = initialize_data_with1stcolumn("data/seeds.csv")
    output = []
    indices = []
    test = input_list[39:69] + input_list[109:139] + input_list[179:209]

    for i in range(int(len(input_list) / 3)):
        output.append([[1, 0, 0], input_list[i]])
    for i in range(int(len(input_list) / 3)):
        output.append([[0, 1, 0], input_list[i + int(len(input_list) / 3)]])
    for i in range(int(len(input_list) / 3)):
        output.append([[0, 0, 1], input_list[i + int(len(input_list) / 3) * 2]])

    train = output[:39] + output[69:109] + output[139:179]

    for i in range(0,30):
        indices.append(0)
    for i in range(30,60):
        indices.append(1)
    for i in range(60,90):
        indices.append(2)

    # shuffle the training pairs with a random permutation
    randomize = numpy.arange(len(train))
    numpy.random.shuffle(randomize)
    shuffled_output = [train[j] for j in randomize]

    for i in range(epochs):
        for e in range(len(shuffled_output)):
            network.train_manual_epochs(shuffled_output[e][1], shuffled_output[e][0], i, e == 0)

    fin = []
    for i in range(len(test)):
        fin.append(network.query(test[i]))

    error = 0
    for elem in range(len(fin)):
        if numpy.argmax(fin[elem]) != indices[elem]:
            error += 1

    print("Seeds error rate = " + str(error / len(fin) * 100) + "%")

    parameters = parameters_as_string(hidden_nodes, learning_rate, momentum, epochs, bias)
    calculate_results_table(3, indices, fin, 'Seeds result table \n' + parameters)
    print_plot(network.sampling_iteration, network.errors_for_plot,
               'Dataset: seeds\nError value vs. number of epochs\n' + parameters)
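The hard-coded slices above rely on the seeds file being ordered by class, 70 rows per class. As an illustrative sketch only (not part of the original code), the same kind of split can be built generically while keeping targets and label indices aligned:

def split_per_class(samples, n_classes=3, test_per_class=30):
    # Hypothetical helper: split a class-ordered sample list into a training
    # set of [target, features] pairs and a test set with matching labels.
    per_class = len(samples) // n_classes
    train, test, test_labels = [], [], []
    for c in range(n_classes):
        block = samples[c * per_class:(c + 1) * per_class]
        target = [1 if k == c else 0 for k in range(n_classes)]
        for row in block[:per_class - test_per_class]:
            train.append([target, row])
        for row in block[per_class - test_per_class:]:
            test.append(row)
            test_labels.append(c)
    return train, test, test_labels

# example usage: train, test, indices = split_per_class(input_list)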
Example #3
def task_SISE():
    epochs = 200
    hidden_nodes = 5
    learning_rate = 0.2
    momentum = 0.9
    bias = True
    network = NeuralNetwork(input_nodes=1,
                            hidden_nodes=hidden_nodes,
                            output_nodes=1,
                            learning_rate=learning_rate,
                            momentum=momentum,
                            bias=bias,
                            epochs=epochs)

    input_list = list(range(1, 100))
    output = []
    for var in input_list:
        output.append(math.sqrt(var))

    c = list(zip(input_list, output))

    # shuffle in the same way
    random.shuffle(c)

    input_list, output = zip(*c)

    input_norm = normalize(input_list)
    output_norm = normalize(output)
    for e in range(epochs):
        for i in range(len(input_norm)):
            network.train_manual_epochs([input_norm[i]], [output_norm[i]],
                                        e)  # inputs need to be arrays

    fin = []
    query_list = []
    for i in range(len(input_norm)):
        query_list.append(input_norm[i])
        fin.append(network.query([query_list[i]]))

    numpy.set_printoptions(suppress=True)  # avoid e-05 notation
    print(output)

    results = denormalize(output, fin)
    for var in results:
        print(var)

    result_tab = numpy.zeros(shape=(len(input_norm), len(input_norm)))

    for i in range(len(fin)):
        result_tab[numpy.argmax(query_list[i])][numpy.argmax(fin[i])] += 1

    print('Ex 1 results table')
    print(result_tab)
    parameters = parameters_as_string(hidden_nodes, learning_rate, momentum,
                                      epochs, bias)
    funs.print_plot(network.sampling_iteration, network.errors_for_plot,
                    'Ex1 error plot ' + parameters)
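normalize and denormalize are used here without their definitions. The sketches below show one plausible reading, assuming normalize scales a sequence by its maximum and denormalize maps the raw network outputs back to the range of the original targets; the real helpers may differ.

import numpy

def normalize(values):
    # assumed behaviour: scale into [0, 1] by the maximum value
    top = max(values)
    return [v / top for v in values]

def denormalize(reference, outputs):
    # assumed behaviour: rescale raw network outputs by the maximum of the
    # original (unnormalized) target sequence
    top = max(reference)
    return [[float(o) * top for o in numpy.ravel(out)] for out in outputs]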
Example #4
def task_2():
    epochs = 200
    hidden_nodes = 10
    learning_rate = 0.2
    momentum = 0.9
    bias = True
    network = NeuralNetwork(input_nodes=4,
                            hidden_nodes=hidden_nodes,
                            output_nodes=4,
                            learning_rate=learning_rate,
                            momentum=momentum,
                            bias=bias,
                            epochs=epochs)

    input_list = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
    output = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]

    x = numpy.zeros(epochs * 4, dtype=int)
    for i in range(epochs):
        x[i] = 0
        x[i + epochs] = 1
        x[i + epochs * 2] = 2
        x[i + epochs * 3] = 3

    random.shuffle(x)

    for e in range(len(x)):
        network.train_manual_epochs(input_list[(x[e])], output[(x[e])], e)

    fin = []
    query_list = []
    for i in range(64):
        query_list.append(input_list[x[i]])
        fin.append(network.query(query_list[i]))

    numpy.set_printoptions(suppress=True)  # avoid e-05 notation
    print(output)

    for elem in range(len(fin)):
        print(fin[elem])

    result_tab = numpy.zeros(shape=(len(input_list), len(input_list)))

    for i in range(len(fin)):
        result_tab[numpy.argmax(query_list[i])][numpy.argmax(fin[i])] += 1

    print('Ex 2 results table')
    print(result_tab)
    parameters = parameters_as_string(hidden_nodes, learning_rate, momentum,
                                      epochs, bias)
    funs.print_plot(network.sampling_iteration, network.errors_for_plot,
                    'Ex2 error plot ' + parameters)
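A quick sanity check for this 4-input/4-output identity task, not present in the original code, is to query each one-hot pattern once after training and confirm that the argmax of the output reproduces the pattern's own index:

import numpy

def check_identity_mapping(network, patterns):
    # illustrative check: each one-hot input should map back to its own index
    for idx, pattern in enumerate(patterns):
        answer = numpy.argmax(network.query(pattern))
        print(pattern, "->", answer, "OK" if answer == idx else "MISMATCH")

# example usage: check_identity_mapping(network, input_list)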
Example #5
def task_SISE():
    epochs = 2000
    hidden_nodes = 5
    learning_rate = 0.2
    momentum = 0.9
    bias = False
    network = NeuralNetwork(input_nodes=1,
                            hidden_nodes=hidden_nodes,
                            output_nodes=1,
                            learning_rate=learning_rate,
                            momentum=momentum,
                            bias=bias,
                            epochs=epochs)

    input_list = list(range(1, 100))
    output = []
    for var in input_list:
        output.append(math.sqrt(var))

    c = list(zip(input_list, output))

    # shuffle in the same way
    random.shuffle(c)

    input_list, output = zip(*c)

    input_norm = normalize(input_list)
    output_norm = normalize(output)
    for e in range(epochs):
        for i in range(len(input_norm)):
            network.train_manual_epochs([input_norm[i]], [output_norm[i]],
                                        e)  # inputs need to be arrays

    fin = []
    query_list = []
    for i in range(len(input_norm)):
        query_list.append(input_norm[i])
        fin.append(network.query([query_list[i]]))

    numpy.set_printoptions(suppress=True)  # avoid e-05 notation
    print(output)

    results = denormalize(output, fin)
    for e in range(len(results)):
        print(input_list[e], results[e])

    result_tab = numpy.zeros(shape=(len(input_norm), len(input_norm)))

    for i in range(len(fin)):
        result_tab[numpy.argmax(query_list[i])][numpy.argmax(fin[i])] += 1

    print('Results table')
    print(result_tab)
    parameters = parameters_as_string(hidden_nodes, learning_rate, momentum,
                                      epochs, bias)
    funs.print_plot(network.sampling_iteration, network.errors_for_plot,
                    'Error plot ' + parameters)

    flat_results = [item for sublist in results for item in sublist]
    flat_input = list(input_list)
    flat_output = list(output)
    title = "Expected(green) and calculated(red) values"
    pyplot.figure(title)
    pyplot.plot(flat_input, flat_results, 'r.')
    pyplot.plot(flat_input, flat_output, 'g.')
    pyplot.grid(axis='both',
                color='black',
                which='major',
                linestyle='--',
                linewidth=1)
    pyplot.xlabel("x")
    pyplot.ylabel("sqrt(x)")
    pyplot.suptitle(title)
    pyplot.savefig('plotValues.png')
    pyplot.show()
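For a regression target such as sqrt(x), the argmax-based results table above carries little information; a mean squared / mean absolute error over the denormalized predictions is a more telling summary. The sketch below is illustrative only and assumes each entry of results is a one-element list, as produced by the denormalize call above.

def regression_error(expected, predicted):
    # compare expected sqrt values with the denormalized network outputs
    diffs = [float(p[0]) - e for e, p in zip(expected, predicted)]
    mse = sum(d * d for d in diffs) / len(diffs)
    mae = sum(abs(d) for d in diffs) / len(diffs)
    return mse, mae

# example usage: mse, mae = regression_error(output, results)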
def task_MNIST():
    numpy.set_printoptions(suppress=True)  # avoid e-05 notation
    hidden_nodes = 10
    epochs = 3
    learning_rate = 0.05
    momentum = 0.2
    bias = True
    network = NeuralNetwork(input_nodes=784,
                            hidden_nodes=hidden_nodes,
                            output_nodes=10,
                            learning_rate=learning_rate,
                            momentum=momentum,
                            bias=bias,
                            epochs=epochs,
                            error_sampling_rate=(1 / epochs))

    # load the mnist training data CSV file into a list
    training_data_file = open("mnist_train.csv", 'r')
    training_data_list = training_data_file.readlines()
    training_data_file.close()
    # train the neural network
    # go through all records in the training data set
    for e in range(epochs):
        print("Epoch:" + str(e))
        collect_data_for_plot = True
        for record in training_data_list:
            # split the record by the ',' commas
            all_values = record.split(',')
            # scale and shift the inputs
            inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
            # create the target output values (all 0.01, except the desired label, which is 0.99)
            targets = numpy.zeros(10) + 0.01
            # all_values[0] is the target label for this record
            targets[int(all_values[0])] = 0.99
            network.train_manual_epochs(inputs.tolist(), targets, e,
                                        collect_data_for_plot)
            collect_data_for_plot = False

    # load the mnist test data CSV file into a list
    test_data_file = open("mnist_test.csv", 'r')
    test_data_list = test_data_file.readlines()
    test_data_file.close()

    correct = 0
    # test the neural network
    # go through all the records in the test data set
    fin = []
    values = []
    for record in test_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')
        # correct answer is first value
        correct_label = int(all_values[0])
        values.append(correct_label)
        # print(correct_label, "correct label")
        # scale and shift the inputs
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01

        # query the network
        outputs = network.query(inputs.tolist())
        fin.append(outputs)
        # the index of the highest value corresponds to the label
        label = numpy.argmax(outputs)
        # print(label, "network's answer")
        # count correct answers
        if label == correct_label:
            # network's answer matches the correct answer
            correct += 1

    print("MNIST accuracy = " + str(correct / len(fin) * 100) + "%")

    parameters = parameters_as_string(hidden_nodes, learning_rate, momentum,
                                      epochs, bias)
    calculate_results_table(10, values, fin,
                            'MNIST result table\n' + parameters)
    print_plot(network.sampling_iteration, network.errors_for_plot,
               'MNIST error plot \n' + parameters)
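calculate_results_table is used by the Seeds and MNIST tasks but its definition is not included in these examples. One plausible sketch, assuming it builds and prints an n_classes x n_classes confusion matrix from the true labels and the raw network outputs, is:

import numpy

def calculate_results_table(n_classes, true_labels, outputs, title):
    # Hypothetical reconstruction: rows are true classes, columns are the
    # classes predicted via argmax over the network outputs.
    table = numpy.zeros((n_classes, n_classes), dtype=int)
    for label, out in zip(true_labels, outputs):
        table[label][numpy.argmax(out)] += 1
    print(title)
    print(table)
    return table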