Example #1
# Imports this snippet relies on (NeuralNetwork and _get_labelname come from the surrounding project):
import contextlib
import os

from keras.preprocessing.image import ImageDataGenerator


def _get_predictions(neural_network: NeuralNetwork, nr_of_elements, nr_of_dimensions):
    """
    Precict ``nr_of_elements`` random elements by ``neural_network``
    :param neural_network: The NeuralNetwork to predict with
    :param nr_of_elements: Amount of elements to predict
    :param nr_of_dimensions: Number of dimensions to reduce preditions to
    :return:
    """
    should_reduce = nr_of_dimensions < neural_network.classcount
    if should_reduce:
        neural_network.save_dimension_reducer(nr_of_dimensions)
    data = {
        "x": [],
        "y": [],
        "true_label": [],
        "filename": [],
    }
    if nr_of_dimensions == 3:
        data["z"] = []
    with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
        generator = ImageDataGenerator(rescale=1 / 255).flow_from_directory(
            neural_network.directory,
            color_mode="rgb",
            batch_size=32,
            shuffle=True,
            target_size=(neural_network.img_width, neural_network.img_height),
            follow_links=True,
            class_mode='categorical',
        )

    class_indices = {v: k for k, v in generator.class_indices.items()}
    for batch_index in range(nr_of_elements // 32):  # 32 == batch_size set on the generator above
        elements = generator.next()
        true_labels = [_get_labelname(class_indices, label) for label in elements[1]]
        predicted_labels = neural_network.network.predict(elements[0])
        dimensions_reduced = neural_network.dimension_reducer.transform(predicted_labels) \
            if should_reduce else predicted_labels

        data["x"].extend(dimensions_reduced[:, 0])
        data["y"].extend(dimensions_reduced[:, 1])
        if nr_of_dimensions == 3:
            data["z"].extend(dimensions_reduced[:, 2])
        data["true_label"].extend(true_labels)
        filenames = [generator.filenames[generator.index_array[file_index + 32 * batch_index]] for file_index in
                     range(32)]
        data["filename"].extend(filenames)
    return data
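
For context, a minimal sketch of how the returned dictionary could feed a scatter plot; matplotlib is an assumption here, not necessarily what the project uses:

import matplotlib.pyplot as plt

def plot_predictions(data):
    # One scatter series per true label so clusters are distinguishable.
    for label in set(data["true_label"]):
        xs = [x for x, l in zip(data["x"], data["true_label"]) if l == label]
        ys = [y for y, l in zip(data["y"], data["true_label"]) if l == label]
        plt.scatter(xs, ys, label=label, s=10)
    plt.legend()
    plt.show()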
Example #2
def main():
    dataset = Uji()
    OPTIMIZERS = [
        tf.train.RMSPropOptimizer, tf.train.AdamOptimizer,
        tf.train.GradientDescentOptimizer, tf.train.AdagradOptimizer,
        tf.train.AdadeltaOptimizer
    ]
    LEARNING_RATES = [
        0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7
    ]
    TRAINING_EPOCHS = [10, 20, 50, 100, 200, 300, 400, 500, 700, 1000]
    RESULTS = []

    for optimizer in OPTIMIZERS:
        print(optimizer)
        for learning_rate in LEARNING_RATES:
            print("Learning rate: {}".format(learning_rate))
            with NeuralNetwork(input_nodes=dataset.feature_number,
                               hidden_nodes=200,
                               output_nodes=dataset.label_number,
                               learning_rate=learning_rate,
                               batch_size=100,
                               training_epochs=TRAINING_EPOCHS[-1],
                               dropout=0.6,
                               optimizer=optimizer,
                               debug=False) as nn:
                accuracy_results = nn.fit(dataset, TRAINING_EPOCHS)
                for accuracy, training_epoch in zip(accuracy_results,
                                                    TRAINING_EPOCHS):
                    RESULTS.append(
                        tuple([
                            accuracy, optimizer, learning_rate, training_epoch
                        ]))

    RESULTS.sort(key=lambda row: row[0][0], reverse=True)

    with open(os.path.join(BASE_DIR, 'parametr_stats_by_accuracy.txt'),
              'w') as fh:
        for snapshot_results, optimizer, learning_rate, training_epochs in RESULTS:
            fh.write(
                "Accuracy {0} Cost {1} Optimizer {2} Learning Rate {3}, Training Epoches: {4}\n"
                .format(snapshot_results[0], snapshot_results[1], optimizer,
                        learning_rate, training_epochs))

    RESULTS.sort(key=lambda row: row[0][1])

    with open(os.path.join(BASE_DIR, 'parametr_stats_by_cost.txt'), 'w') as fh:
        for snapshot_results, optimizer, learning_rate, training_epochs in RESULTS:
            fh.write(
                "Accuracy {0} Cost {1} Optimizer {2} Learning Rate {3}, Training Epoches: {4}\n"
                .format(snapshot_results[0], snapshot_results[1], optimizer,
                        learning_rate, training_epochs))
Example #3
def main():
    # dataset = Mnist()
    dataset = Uji()
    # joblib.dump(dataset, os.path.join(OBJECT_DIR, "Mnist"))
    joblib.dump(dataset, os.path.join(OBJECT_DIR, "Uji"))
    # Configuration ~ 64 %
    # input_nodes=dataset.feature_number, hidden_nodes=200, output_nodes=dataset.label_number,
    #                  learning_rate=0.01, batch_size=100, training_epochs=350, dropout=0.6,
    #                  optimizer=tf.train.AdamOptimizer, debug=False
    with NeuralNetwork(input_nodes=dataset.feature_number, hidden_nodes=200, output_nodes=dataset.label_number,
                       learning_rate=0.3, batch_size=100, training_epochs=500, dropout=0.6,
                       optimizer=tf.train.AdagradOptimizer, debug=False) as nn:
        nn.fit(dataset)
        nn.save_model()
Example #4
def parse_input(args):
    """
    Parse commandline arguments and create plots
    :param args: Commandline arguments
    """
    filename = args.network.replace('.h5', '')
    directory = args.directory
    nr_of_clusters_to_plot = args.nr_of_clusters_to_plot
    cluster_plot_dimensions = args.cluster_plot_dimension

    network = NeuralNetwork.load(f'{filename}.h5', directory)
    history = None
    if os.path.exists(f'{filename}.json'):
        with open(f'{filename}.json') as f:
            # The history file may hold a single-quoted repr of a dict; normalize it to valid JSON first.
            history = json.loads(f.read().strip('"').replace("'", '"'))
    filename = filename.split(os.sep)[-1]
    if history is not None:
        _plot_accuracy(network, history, filename)
    _plot_clusters(network, nr_of_clusters_to_plot, cluster_plot_dimensions, filename)
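
For reference, an argparse setup that would produce the attributes parse_input reads; the argument names are inferred from the attribute accesses above, so treat them as assumptions:

import argparse

parser = argparse.ArgumentParser(description='Plot accuracy and clusters for a trained network')
parser.add_argument('network', help='Path to the .h5 network file')
parser.add_argument('directory', help='Directory containing the image data')
parser.add_argument('--nr-of-clusters-to-plot', type=int, default=10)
parser.add_argument('--cluster-plot-dimension', type=int, default=2)

parse_input(parser.parse_args())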
Example #5
def evaluate(args):
    results_dt_path = results_path.format(args.evaluate)

    x, y = load_data(args.evaluate)
    n_classes = y.shape[1]

    rs = np.random.RandomState(25)

    # defaults = get_defaults(x, n_classes, rs)

    with open(path.join(results_dt_path, 'best.csv'), 'r') as best:
        content = best.read()

    bests = {}
    for line in content.split('\n'):
        b = line.split(',')
        if len(b) > 1:
            bests[b[0]] = float(b[1])

    bests['n_neurons'] = int(bests['n_neurons'])
    bests['n_layers'] = int(bests['n_layers'])

    r = bests['r']
    a = bests['alpha']
    b = bests['beta']
    bs = int(bests['batchsize'] * x.shape[0])

    w = [weights(bests['n_neurons'], x.shape[1] + 1, rs)]
    w += [weights(bests['n_neurons'], bests['n_neurons'] + 1, rs) for _ in range(bests['n_layers'] - 1)]
    w += [weights(n_classes, bests['n_neurons'] + 1, rs)]

    # Number of folds so that each test fold holds 20% of the data:
    # k = ceil(N / (0.2 * N)) = 5
    k = int(np.ceil(x.shape[0] / (x.shape[0] * 0.2)))

    model = NeuralNetwork(w, r, a, b)
    scores = cross_validate(model, x, y, k, bs, rs)
    mean = np.mean(scores)
    std = np.std(scores)
    print("Mean F1-Score = {} +- {}".format(round(mean, 3), round(std, 3)))
Example #6
from src.loss_function import SquareLoss, CrossEntropy
from src.neural_network import NeuralNetwork
from src.layers import Conv2D, Dense, Activation
import numpy as np
# loss = SquareLoss()

# print(loss.loss(np.random.normal(1, 0.2, size=(256, 10)), np.ones((256, 10))))
# print(type(SquareLoss), type(CrossEntropy))

# X was left unassigned in the original snippet; a placeholder batch of inputs (shape is a guess):
X = np.random.normal(size=(256, 10))

model = NeuralNetwork(SquareLoss())
# The original passes the Conv2D class itself; it almost certainly needs to be
# instantiated, but the constructor signature in src.layers is not shown here.
model.add(Conv2D)

Example #7
    if args.randomseed:
        seed = Game.generate_seed()

    if args.multimap:
        seed = []

        for i in range(50):
            seed.append(Game.generate_seed())

        generations = 100

        if args.generations:
            generations = int(args.generations)

        nn = NeuralNetwork(False, seed, generations, True)
        winner = nn.run()

        if not args.noviz:
            nn.visualize_results()

        if args.save:
            with open(args.save, 'wb') as f:
                pickle.dump(winner, f)

    if args.load:
        genome = pickle.load(open(args.load, 'rb'))

        try:
            seed = pickle.load(open(args.load + "_seed", 'rb'))
Example #8
def run(x, y, w, r, a, b, bs, rs):
    model = NeuralNetwork(w, r, a, b)
    return cross_validate(model, x, y, k_fold, bs, rs)
Example #9
import numpy as np
import time

from src.neural_network import NeuralNetwork

layers = [2, 10, 10, 5]  # presumably: 2 inputs, two hidden layers of 10 neurons, 5 outputs

network = NeuralNetwork(layers)

print(network.feed_forward([10, 10]))
Example #10
                    str(time.time()),
                    type=str,
                    help='Desired Filename')
parser.add_argument('-r',
                    '--learningrate',
                    default=0.007,
                    type=float,
                    help='Learning Rate')
args = parser.parse_args()

filename = args.filename
learningRate = args.learningrate

layers = [64, 20, 10]  # 64 inputs (8x8 optdigits features), one hidden layer of 20, 10 digit classes

network = NeuralNetwork(layers, learningRate)
print(network.activationFunctions)
print(network.trainMethod)
print(network.inputSize)
print(network.outputSize)

train_inputs = []
train_outputs = []

with open('test_files/handwritten_digits/optdigits_train.txt', 'r') as f:
    for line in f:
        array = list(map(int, line.split(",")))
        value = array[-1]
        output = np.zeros(10)
        assert value < 10
        output[value] = 1.0
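
The loop above is cut off before the arrays are stored; a plausible continuation (an assumption, the original file may differ) would append each sample's 64 features and its one-hot target:

        train_inputs.append(array[:-1])  # all but the trailing class label
        train_outputs.append(output)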
Example #11
def run_network(properties, train_data, test_data, iteration):
    """
    Creates and runs a neural network using the data in properties,
    creates a confusion matrix and returns a dictionary with the metrics
    of the neural network and the las pair of predicted and expected
    classes
    """
    neural_network = NeuralNetwork(properties, Sigmoid())

    precision = []
    recall = []
    f1 = []
    cost = []
    epoch = []

    expected, predictions = predict(neural_network, test_data)

    precision.append(precision_score(
        expected, predictions, average='weighted'))
    recall.append(recall_score(expected, predictions, average='weighted'))
    f1.append(f1_score(expected, predictions, average='weighted'))
    cost.append(calculate_cost(expected, predictions))
    epoch.append(0)

    for i in range(1, properties["epoch"]+1):
        if i % properties["sampling_rate"] == 0:
            print("Epoch {:4}:".format(i))
            expected, predictions = predict(neural_network, test_data)
            epoch_precision = precision_score(
                expected, predictions, average='weighted')
            epoch_recall = recall_score(
                expected, predictions, average='weighted')
            epoch_f1 = f1_score(expected, predictions, average='weighted')
            epoch_cost = calculate_cost(expected, predictions)

            print("{:>14} {:.4}".format('Precision:', epoch_precision))
            print("{:>14} {:.4}".format('Recall:', epoch_recall))
            print("{:>14} {:.4}".format('F1:', epoch_cost))
            print("{:>14} {:.4}".format('Cost:', epoch_cost))

            precision.append(epoch_precision)
            recall.append(epoch_recall)
            f1.append(epoch_f1)
            cost.append(epoch_cost)
            epoch.append(i)

        train(neural_network, train_data)
    print("Finished")
    print("Calculating confusion matrix")
    classes = np.array(["setosa", "versicolor", "virginica"])

    title = "Iteration {}".format(iteration)
   
    plot_iteraton(
        title,
        (precision, recall, f1, cost, epoch),
        (expected, predictions, classes))

    metrics = {
        "iteration:": iteration,
        "precision": precision,
        "recall": recall,
        "f1": f1,
        "cost": cost,
        "last_expected": expected,
        "last_predicted": predictions
    }

    return metrics
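
The confusion matrix computation itself is delegated to plot_iteraton above; a minimal sketch of the same idea with sklearn, assuming expected and predictions hold integer class indices:

from sklearn.metrics import confusion_matrix

def print_confusion(expected, predictions, classes):
    # Rows are true classes, columns are predicted classes.
    cm = confusion_matrix(expected, predictions, labels=list(range(len(classes))))
    print("{:>12}".format("") + "".join("{:>12}".format(c) for c in classes))
    for name, row in zip(classes, cm):
        print("{:>12}".format(name) + "".join("{:>12}".format(v) for v in row))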
Example #12
def main(args):
	np.set_printoptions(precision=precision)

	network_path = args.files[0]
	initial_weights_path = args.files[1]
	dataset_path = args.files[2]

	r, n_inputs, n_neurons, n_outputs = load_network(network_path)
	initial_weights = load_weights(initial_weights_path)
	x, y = load_benchmark(dataset_path)
	epsilon = 1e-6
	n = x.shape[0]

	model = NeuralNetwork(deepcopy(initial_weights), r, 0.99, 0)

	print("Parâmetro de regularização lambda={}\n".format(round(r, 3)))
	print("Inicializando rede com a seguinte estrutura de neurônios por camadas: {}\n".format([n_inputs] + n_neurons + [n_outputs]))

	for i in range(len(initial_weights)):
		print("Theta{} inicial (pesos de cada neurônio, incluindo bias, armazenados nas linhas):\n{}".format(i + 1, str_matrix(initial_weights[i], '\t')))

	print("Conjunto de treinamento")
	for i in range(x.shape[0]):
		print("\tExemplo {}".format(i + 1))
		print("\t\tx: {}".format(x[i, :]))
		print("\t\ty: {}".format(y[i, :]))

	print("\n--------------------------------------------")
	print("Calculando erro/custo J da rede")

	for i in range(x.shape[0]):
		print("\tProcessando exemplo de treinamento {}".format(i + 1))
		print("\tPropagando entrada {}".format(x[i, :]))

		f = model.forward_propagation(x[i, :])
		cost = model.cost_x(y[i, :], f)

		print("\t\ta1: {}\n".format(model.a[0]))

		for l in range(1, model.n_layers + 1):
			print("\t\tz{}: {}".format(l + 1, model.z[l]))
			print("\t\ta{}: {}\n".format(l + 1, model.a[l]))

		print("\t\tf(x[{}]): {}".format(i + 1, f))

		print("\tSaida predita para o exemplo {}: {}".format(i + 1, f))
		print("\tSaida esperada para o exemplo {}: {}".format(i + 1, y[i, :]))
		print("\tJ do exemplo {}: {}\n".format(i + 1, cost))

	print("J total do dataset (com regularizacao): {}\n".format(model.cost(x, y)))

	print("\n--------------------------------------------")
	print("Rodando backpropagation")

	for i in range(n):
		print("\tCalculando gradientes com base no exemplo {}".format(i + 1))

		model.g = [np.zeros(model.w[i].shape) for i in range(model.n_layers)]
		model.m = [np.zeros(model.w[i].shape) for i in range(model.n_layers)]

		pred = model.forward_propagation(x[i, :])
		model.d[model.last_layer] = pred - y[i, :]
		model.update_deltas(x[i, :])

		for d in range(model.last_layer, -1, -1):
			print("\t\tdelta{}: {}".format(d + 2, model.d[d]))

		model.accumulate_gradients()

		for t in range(model.last_layer, -1, -1):
			print("\t\tGradientes de Theta{} com base no exemplo {}:\n{}".format(t + 1, i + 1, str_matrix(model.g[t], '\t\t\t')))

	print("\tDataset completo processado. Calculando gradientes regularizados")

	model.final_gradients(n)

	for t in range(model.n_layers):
		print("\t\tGradientes finais para Theta{} (com regularizacao):\n{}".format(t + 1, str_matrix(model.g[t], '\t\t\t')))

	print("\n--------------------------------------------")
	print("Rodando verificacao numerica de gradientes (epsilon={})".format(epsilon))

	backprop_gradients = deepcopy(model.g)
	model.g = [np.zeros(model.w[i].shape) for i in range(model.n_layers)]

	for t in range(model.n_layers):

		for i in range(model.g[t].shape[0]):
			for j in range(model.g[t].shape[1]):
				w = model.w[t][i, j]

				model.w[t][i, j] = w + epsilon
				c1 = model.cost(x, y)

				model.w[t][i, j] = w - epsilon
				c2 = model.cost(x, y)

				model.g[t][i, j] += (c1 - c2) / (2 * epsilon)
				model.w[t][i, j] = w

		print("\tGradiente numerico de Theta{}:\n{}".format(t + 1, str_matrix(model.g[t], '\t\t')))

	print("\n--------------------------------------------")
	print("Verificando corretude dos gradientes com base nos gradientes numericos:")
	for t in range(model.n_layers):
		errors = np.sum(np.abs(model.g[t] - backprop_gradients[t]))
		print("\tErro entre gradiente via backprop e gradiente numerico para Theta{}: {}".format(t + 1, errors))