def test_crossover():
    """Verify that Population.cross produces two children from two parents.

    Builds two dense parent networks of different depths, crosses them,
    and checks both children exist.
    """
    parent_a = net.Network(
        1, 2,
        [11, 9, 7, 5, 3],
        ['relu', 'relu', 'relu', 'softmax', 'softmax'],
    )
    parent_b = net.Network(
        1, 2,
        [10, 8, 6],
        ['relu', 'softmax', 'softmax'],
    )
    print("---------------------------------------")
    print("---------------------------------------")
    crosser = pop.Population(0, 1, 2)
    child_a, child_b = crosser.cross(parent_a, parent_b)
    # A test must assert, not just print: crossover has to yield two children.
    assert child_a is not None, "cross() returned no first child"
    assert child_b is not None, "cross() returned no second child"
    print("Child A:\n", child_a)
    print("---------------------------------------")
    print("Child B:\n", child_b)
def create_network1():
    """Build a small network: binary leaves -> sum layer -> product layer -> root sum.

    Returns:
        (leaves, net): the leaf-node handle and the constructed network.

    NOTE(review): relies on module-level names `network`, `np`, `cuda` and
    `parameters` being in scope — confirm against the enclosing file.
    """
    net = network.Network(is_cuda=cuda.is_available())
    leaves = net.AddBinaryNodes(2)

    # Sum layer over the leaves; weight matrix is transposed to (inputs x nodes).
    sum1 = net.AddSumNodes(3)
    weights1 = np.array([[2, 8, 0, 0],
                         [1, 9, 0, 0],
                         [0, 0, 4, 6]], dtype='float32').T
    # Edge-handle return values were unused locals; call for the side effect only.
    net.AddSumEdges(leaves, sum1, weights=weights1, parameters=parameters)

    # Product layer; mask selects which sum nodes feed each product node.
    prod1 = net.AddProductNodes(2)
    mask1 = np.array([[1, 0],
                      [0, 1],
                      [1, 1]])
    net.AddProductEdges(sum1, prod1, mask=mask1)

    # Root: a single sum node mixing the two product nodes.
    sum_final = net.AddSumNodes(1)
    weights_final = np.array([[.3, .7]], dtype='float32').T
    net.AddSumEdges(prod1, sum_final, weights=weights_final, parameters=parameters)

    return (leaves, net)
def test_network_creation(self):
    """Verify that the Network constructor produces an object."""
    nn = network.Network(
        1, 1,  # Network dimensions
        [5, 4, 10, 3, 9],
        ['softmax', 'relu', 'tanh', 'sigmoid', 'softmax'])
    # A test must assert, not just print (matches the sibling test_creation).
    assert nn is not None, "Network was not created successfully"
    print("Network Creation is Online...")
def test_creation():
    """Verify that the Network constructor produces an object."""
    nn = net.Network(
        1, 1,  # Network dimensions
        [5, 4, 10, 3, 9],
        ['softmax', 'relu', 'tanh', 'sigmoid', 'softmax'])
    # PEP 8: comparisons against None use identity (`is not`), not equality.
    assert nn is not None, "Network was not created successfully"
    print("Network Creation is Online...")
def test_crossover(self):
    """Verify that Population.cross produces two children from two parents."""
    parent_a = network.Network(
        1, 2,
        [11, 9, 7, 5, 3],
        ['relu', 'relu', 'relu', 'softmax', 'softmax'],
    )
    parent_b = network.Network(
        1, 2,
        [10, 8, 6],
        ['relu', 'softmax', 'softmax'],
    )
    print("---------------------------------------")
    print("---------------------------------------")
    # Renamed from `pop` so the local cannot shadow a same-named module import.
    crosser = population.Population(0, 1, 2)
    child_a, child_b = crosser.cross(parent_a, parent_b)
    # A test must assert, not just print.
    assert child_a is not None, "cross() returned no first child"
    assert child_b is not None, "cross() returned no second child"
    print("Child A:\n", child_a)
    print("---------------------------------------")
    print("Child B:\n", child_b)
def mnist_model():
    """Build the small conv + dense classifier used for MNIST.

    Two conv/pool stages feed a (7, 7, 8) flatten, followed by three
    dense layers ending in 10 linear outputs, trained with cross-entropy.
    """
    # Note that the layer input/output sizes must match.
    stack = [
        layers.ConvolutionalLayer(1, 8),
        layers.MaxPoolLayer(),
        layers.ConvolutionalLayer(8, 8),
        layers.MaxPoolLayer(),
        layers.FlattenLayer((7, 7, 8)),
        layers.DenseLayer(32, 7 * 7 * 8, activations.Relu),
        layers.DenseLayer(32, 32, activations.Relu),
        layers.DenseLayer(10, 32, activations.Linear),
    ]
    return network.Network(stack, losses.CrossEntropy())
from src import mnist_loader
from src import network

if __name__ == '__main__':
    # Guard the training run so importing this module has no side effects
    # (matches the project's other training scripts).
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    net = network.Network([784, 30, 10])
    # 30, 10, 3.0 — presumably epochs, mini-batch size, learning rate;
    # confirm against network.Network.SGD's signature.
    net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
#!/usr/bin/env python3 import numpy as np import os.path import sys from torch import optim sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from src import param from src import network parameters = param.Param() net = network.Network() input1234 = net.AddBinaryNodes(4) # with size: 4 x = np.array([[0, 1, 0, 0], [1, 0, 1, 1]]) # print(input1234.val) prod12 = net.AddProductNodes(3) prod34 = net.AddProductNodes(3) # with size: 3 mask12 = np.array([[1, 1, 0],
from src import mnist_loader, network

if __name__ == '__main__':
    # Load the standard MNIST train/validation/test split.
    train_set, valid_set, eval_set = mnist_loader.load_data_wrapper()
    # Fully-connected net: 784 inputs, eight hidden layers of 50, 10 outputs.
    model = network.Network([784, 50, 50, 50, 50, 50, 50, 50, 50, 10])
    model.SGD(train_set, 300, 1000, 10.0, test_data=eval_set)
#!/usr/bin/env python3 import numpy as np import os.path import sys sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from src import param from src import network parameters = param.Param() network_config = network.Network() list_prob = [ np.array([ [.7, .4], # associated w/ r.v. X1 [.2, .3], [.1, .3] ]), np.array([ [.7, .2], # associated w/ r.v. X2 [.3, .8] ]), np.array([ [.1, .6], # associated w/ r.v. X3 [.9, .4] ]) ]
from __future__ import absolute_import
import src.network as network
import src.network_batch_backprop as network_batch_b
import src.mnist_loader as mnist_loader
import src.cnn_network as cnn_network
import src.graph_plot as graph
# import src.mnist_data

x_train, y_train, x_validation, y_validation, x_test, y_test = mnist_loader.load_data_wrapper(
)

# # Using Mini Batch for Training the Model
print("*" * 100)
print("Training Mini Batch BackPropagation")
print("*" * 100)
net = network.Network([784, 20, 40, 10])
net.SGD(x_train, y_train, 50, 10, 3.0, x_test, y_test)

# # #Using Batch Propagation to train the network
print("*" * 100)
print("Training Batch BackPropagation")
print("*" * 100)
net = network_batch_b.Network_batch([784, 20, 40, 10])
net.SGD(x_train, y_train, 500, 3.0, x_test, y_test)
print("*" * 100)

# #Using CNN Encoder with adding some noise to training data
print("*" * 100)
print("Training CNN")
print("*" * 100)
# parameters are epoch, batch_size, n_factor(what percentage of noise we need to add in data)
# Renamed from `network` so the result does not shadow the `src.network`
# module imported at the top of the file.
cnn_net = cnn_network.CNN_network(30, 128, 10)
print("Accuracy on Test Data : ", cnn_net.save_weights()[1] * 100)
def init_network(self):
    """Build an empty network with fixed layer sizes and store it on self.net."""
    layer_sizes = [784, 30, 10]
    self.net = network.Network(layer_sizes)
    return ("made a network yo")
        # NOTE(review): this chunk begins mid-method; `new` and `file_path`
        # are defined above the visible source.
        new.save(file_path)

    def clear(self):
        # Reset the canvas to blank white and trigger a repaint.
        self.image.fill(Qt.white)
        self.update()

    def guess(self):
        # Grab the current drawing (was: "Obtener imagen").
        image = self.image.scaled(28, 28)  # Scale image
        n = loader.qimage_to_ndarray(image)
        drawing, percentage = self.network.evaluate_drawing(n)
        message = ("I'm %s%% sure it's a %s!" %
                   (int(percentage[0]), loader.image_classifier(int(drawing))))
        QMessageBox.about(self, "Result", message)


if __name__ == '__main__':
    # Load Data
    train, cv, test = loader.load_data(11000)
    # Create neural network
    net = network.Network([784, 100, 10])
    # training_data, test, cv  epochs, batch_size, learning, lambda
    net.train(train, test, cv, 8, 20, 0.4, 10)
    app = QApplication(sys.argv)
    window = Window(net)
    window.show()
    app.exec()
import sys
from torch import optim, cuda

# NOTE(review): `os` and `np` are used below but not imported in this visible
# chunk — presumably imported above the excerpt.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src import param
from src import network


def Near(a, b):
    """Return True where a and b differ by less than 1e-4 (elementwise for arrays)."""
    return np.abs(a - b) < 0.0001


parameters = param.Param()
net = network.Network(is_cuda=cuda.is_available())

# Single Gaussian leaf with fixed mean and standard deviation.
mean = np.array([-.2], dtype='float32')
std = np.array([1.3], dtype='float32')
leaf1 = net.AddGaussianNodes(mean, std, parameters=parameters)
parameters.register(net)

# Two scalar observations for the leaf.
# NOTE(review): mask semantics (0 vs 1 = conditioned/observed) are not visible
# here — confirm against ComputeProbability's definition.
x = np.array([[0.7], [-.8]])
x_cond_mask = np.array([[0.], [0.]])
p0 = net.ComputeProbability(val_dict={leaf1: x}, cond_mask_dict={leaf1: x_cond_mask})
print(p0)