def testCreateNetwork(self):
        # Generate the phenotype
        inputNode = NodeGene(0,1,1,"SENSOR")
        outputNode = NodeGene(1,1,1,"OUTPUT")
        conn = ConnectionGene(1,0,1,True,1)

        connections = [conn]
        nodes = [inputNode,outputNode]


        genes = Genome(connections,nodes)


        network = Network(genes)

        assert network.fire([1]) == [1]
Example #2
def analysis(layers,
             learningRate,
             batchSize,
             iteration,
             probabilistic,
             training_labels,
             training_images,
             testing_labels,
             testing_images,
             saveName="LastNN.pkl"):
    print("\n" + str(layers) +
          " - Batch {} - Rate {} - Probabilitic {}".format(
              batchSize, learningRate, probabilistic))
    net = Network.NeuralNetwork(layers)

    print("Start backpropagation")
    net.backpropagation(training_images, training_labels, learningRate,
                        batchSize, probabilistic, iteration)

    numberTrain, outputTrain = test(net, training_images, training_labels)
    numberTest, outputTest = test(net, testing_images, testing_labels)
    print("Test")
    print("On the training set : {} / {}".format(numberTrain,
                                                 len(training_labels)))
    print("On the testing set : {} / {}".format(numberTest,
                                                len(testing_labels)))

    save.save(net, saveName)

    return net, outputTrain, outputTest
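
The test() helper used by analysis() is not shown in this example. Judging from how its return value is unpacked, a minimal sketch might look like the following; the net.forward() method name and one-hot labels are assumptions, not the project's confirmed API:

import numpy as np

def test(net, images, labels):
    """Return (number of correct predictions, list of raw outputs)."""
    outputs = [net.forward(image) for image in images]  # hypothetical forward()
    correct = sum(1 for out, label in zip(outputs, labels)
                  if np.argmax(out) == np.argmax(label))
    return correct, outputs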
Example #3
    def buildNetwork(self):
        inputs = []
        for i in range(len(self.menus) - 3):
            if self.menus[i].get() == '':
                self.errorLabel.grid(row=len(self.labels) + 3,
                                     column=1,
                                     pady=5,
                                     sticky='w')
                return
            inputs.append(int(self.menus[i].get()))

        # HyperParameters

        try:
            learnRate = float(self.menus[-1].get())
        except ValueError:
            self.errorLabel.grid(row=len(self.labels) + 3,
                                 column=1,
                                 pady=5,
                                 sticky='w')
            self.menus[-1].set('')
            return  # learnRate would be undefined below, so stop here
        activationFunc = self.menus[-3].get()
        errorFunc = self.menus[-2].get()

        self.errorLabel.grid_remove()
        Globals.NN = Network.Network(inputs, learnRate, activationFunc,
                                     errorFunc)

        Node.drawNodes(self.canvas)
Example #4
def main():

    parse_args()
    seed = torch.seed() % 20
    log_dir = cfg.TENSORBOARD_DIR + cfg.MODEL.NAME + "/" + str(seed)
    logger = Logger(log_dir)
    model_dir = cfg.MODEL.DIR + cfg.MODEL.NAME + "/" + str(seed)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)

    model = Network(cfg)
    dump_input = torch.rand((1, cfg.DATASET.NUM_JOINTS, cfg.DEFAULT_FRAMES))
    logger.add_graph(model, (dump_input, ))  # Log Model Architecture

    # Toggle to get a model summary
    summary(model, dump_input.shape[1:], batch_size=32, device="cpu")
    trainer = ResTCN_trainer(model)

    optimizer = trainer.optimizer
    print("------STARTING TRAINING-------")

    for epoch in range(cfg.EPOCHS):

        training_log = trainer.train()
        print("-" * 50)
        print("Epoch: {} & Loss: {}".format(epoch, training_log["Loss"]))
        print("-" * 50)
        logger.log_scalars(training_log, logger.step)
        logger.step += 1

        if epoch % cfg.SAVE_FREQUENCY == 0:
            perf_indicator = trainer.cal_accuracy('train')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': cfg.MODEL.NAME + str(seed),
                    'state_dict': model.state_dict(),
                    'perf': perf_indicator,
                    'optimizer': optimizer.state_dict(),
                },
                output_dir=model_dir)
Example #5
def neural_network(dataset_train, dataset_test):
    # Collect the target column of the dataset
    classiOutput = set()
    for esempio in dataset_train:
        classiOutput.add(esempio[-1])

    # Select the targets without duplicates
    listaClassi = list(classiOutput)

    # Build the network topology
    lng = len(dataset_train[0])
    numeroInput = lng - 1
    # Create the Network object
    rete = Network(numeroInput, listaClassi, math.tanh)
    # Add the hidden layer
    rete.add_hidden_layer(4)
    # The output layer will have 3 units
    rete.add_output_layer()
    rete.train(dataset_train)

    # Classification
    giuste = 0
    sbagliate = 0
    label_learning = []
    for test in dataset_test:
        output_class = rete.classify(test[:-1])  # feed the input features to the network
        label_learning.append(output_class)
        if output_class == test[-1]:
            giuste += 1
        else:
            sbagliate += 1

    rete.close_report()
    return label_learning
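
Based on how neural_network() indexes its rows (features followed by a trailing class label), a hypothetical call might look like this; the values are invented purely for illustration:

train = [[5.1, 3.5, 1.4, 'A'], [6.2, 2.9, 4.3, 'B'], [5.9, 3.0, 5.1, 'C']]
test_set = [[5.0, 3.4, 1.5, 'A'], [6.0, 3.0, 4.8, 'B']]
predicted_labels = neural_network(train, test_set)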
Example #6
 def test_XOR(self):
     """
     Tests the XOR gate on a neural network
     :return:
     """
     inputs = [[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]]
     answers = [[0], [1], [0], [1]]
     XOR = Network(3, 2, 3, 1)
     XOR.training(300000, 0.5, inputs, answers)
     # Assertions must match the answers above: [1, 1, 1] -> 0, [1, 0, 1] -> 1.
     self.assertLess(XOR.forward_propagation(inputs[0])[0], 0.5)
     self.assertGreater(XOR.forward_propagation(inputs[1])[0], 0.5)
     self.assertLess(XOR.forward_propagation(inputs[2])[0], 0.5)
     self.assertGreater(XOR.forward_propagation(inputs[3])[0], 0.5)
Example #7
 def test_nand(self):
     """
     Tests the NAND gate on a neural network
     :return:
     """
     inputs = [[0, 0], [0, 1], [1, 1], [1, 0]]
     # NAND is false only when both inputs are true.
     answers = [[1], [1], [0], [1]]
     NAND = Network(1, 1, 2, 1)
     NAND.training(50000, 0.1, inputs, answers)
     self.assertGreater(NAND.forward_propagation(inputs[0])[0], 0.5)
     self.assertGreater(NAND.forward_propagation(inputs[1])[0], 0.5)
     self.assertLess(NAND.forward_propagation(inputs[2])[0], 0.5)
     self.assertGreater(NAND.forward_propagation(inputs[3])[0], 0.5)
Example #8
 def __init__(self, graphWin, player_id, popName=False, playerSize=50):
     playerStartingPoint = [30, 540]
     self.player_id = player_id
     Rectangle.__init__(
         self, Point(playerStartingPoint[0], playerStartingPoint[1]),
         Point(playerStartingPoint[0] + playerSize,
               playerStartingPoint[1] + playerSize))
     self.graphWin = graphWin
     self.speed = 0
     self.status = 'nothing'
     self.alive = True
     self.setFill(
         color_rgb(random.randint(0, 255), random.randint(0, 255),
                   random.randint(0, 255)))
     self.setOutline(color_rgb(0, 0, 0))
     if self.graphWin.renderization:
         self.draw(graphWin)
     if self.graphWin.gameMode == 'aitrain':
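         # Note: popName defaults to False; the concatenation below assumes a
         # string population name is passed whenever gameMode is 'aitrain'.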
         self.brain = Network(popName + str(player_id))
Example #9
    def optimize(network: Network, x, y, epochs=5, batch_size=10, alpha=0.03,
                 validation_split=0.1, metrics=None):
        if metrics is None:
            metrics = [Accuracy()]  # avoid sharing a mutable default across calls
        error_history = []
        metrics_history = []

        # split the data into train set and validation set
        data = split_data(x, y, validation_split, test_set_name="validation")

        data_valid = {
            "x": data["validation"]["x"],
            "y": data["validation"]["y"]
        }

        num_train_examples = len(data["train"]["y"])

        for epoch in range(epochs):
            i = 0

            # Shuffle train data
            indices = np.arange(num_train_examples)
            np.random.shuffle(indices)
            data_train = {
                "x": data["train"]["x"][indices],
                "y": data["train"]["y"][indices]
            }

            while i < num_train_examples:
                data_batch = {
                    "x": data_train["x"][i: i + batch_size],
                    "y": data_train["y"][i: i + batch_size]
                }
                i += batch_size

                # forward propagation
                tensors = network.forward(data_batch)

                # error
                error_history.append(tensors[-1][0].elements)

                # backward propagation
                deltas = network.backprop(tensors)
                deltas.reverse()

                # parameter update
                for layer, tensorlist in zip(network.layers, deltas):
                    if tensorlist is not None:
                        for tensor in tensorlist:
                            tensor.elements = alpha * tensor.elements
                        layer.update_parameter(tensorlist)

            # Evaluate with validation set
            epoch_metrics = [None] * len(metrics)
            prediction = network.predict(data_valid).elements
            for m in range(len(metrics)):
                epoch_metrics[m] = metrics[m].score(prediction, data_valid["y"])
            metrics_history.append(epoch_metrics)

            # Print progress update
            sys.stdout.write(
                '\r' + "Epoch %i / " % (epoch + 1) + str(epochs) + " Metrics: " + str(epoch_metrics))
            sys.stdout.flush()

        return error_history, metrics_history
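
A hypothetical call, assuming x and y are NumPy arrays and Network/Accuracy come from this project:

errors, metric_scores = optimize(network, x, y, epochs=10, batch_size=32, alpha=0.01)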
Example #10
import mnist_loader
from NeuralNetwork import Network

train_data, validate_data, test_data = mnist_loader.load_data_wrapper()

net = Network(configurationFileName='NetConfig')

print('Done: {0} of {1}'.format(net.check(test_data), len(test_data)))
Example #11
from NeuralNetwork import Network

nn = Network('lastmodel')

nn.train(10000)
Example #12
from NeuralNetwork import Network
zero = [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0]

one = [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0]

two = [0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1]

three = [1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1]

exampleZero = [zero, [0, 0]]
exampleOne = [one, [0, 1]]
exampleTwo = [two, [1, 0]]
exampleThree = [three, [1, 1]]
examples = [exampleZero, exampleOne, exampleTwo, exampleThree]

network = Network()
network.addLayer(10, 20)
network.addLayer(2, None)
network.train(examples)

print(network.process(zero))
print(network.process(one))
print(network.process(two))
print(network.process(three))
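
The two output units encode the digit in binary: [0, 0] -> 0 through [1, 1] -> 3. A small helper to decode a thresholded output could look like this (hypothetical, not part of the original Network API):

def decode(output, threshold=0.5):
    # Threshold each unit, then read the two bits as a binary number.
    bits = [1 if value > threshold else 0 for value in output]
    return bits[0] * 2 + bits[1]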
Example #13
NUM_TRAIN_BATCHES = 1500

LEN_TEST_TEXT = 2000  # Number of test characters of text to generate after training the network
ckpt_filename = 'model'
midi_filename = 'midiOut.txt'

## Initialize the network
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)

net = Network(in_size=in_size,
              lstm_size=lstm_size,
              num_layers=num_layers,
              out_size=out_size,
              session=sess,
              learning_rate=0.003,
              name="char_rnn_network")

sess.run(tf.global_variables_initializer())

saver = tf.train.Saver(tf.global_variables())

## 1) TRAIN THE NETWORK
if ckpt_file == "":
    last_time = time.time()

    batch = np.zeros((batch_size, time_steps, in_size))
    batch_y = np.zeros((batch_size, time_steps, in_size))
Example #14
	def addWeakClassifier(self, layers):
		"""
		Adds a neural network to the AdaBoost ensemble
		"""
		self.weakClassifiers.append(Network.NeuralNetwork(layers))
Example #15
def main():
    topology = [2, 3, 3, 1]  # input layer, two hidden layers, output layer
    net = Network(topology)
    neuron.eta = 0.09
    neuron.alpha = 0.015
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    outputs = [[0], [1], [1], [0]]
    while True:
        err = 0
        for i in range(len(inputs)):
            net.setInput(inputs[i])
            net.feedForward()
            net.backPropagate(outputs[i])
            err = err + net.getError(outputs[i])
        print("Error: ", err)
        if err < 1.5:
            break
    while True:
        a = int(input("First input: "))
        b = int(input("Second input: "))
        net.setInput([a, b])
        net.feedForward()
        print(net.getThResults())
Example #16
def neural_network_factory(filename: str) -> Tuple:
    with open(filename) as file:
        config = json.load(file)
    """ Dataset """
    dataset_name: str = config["dataset"]["name"]
    case_fraction: float = config["dataset"]["case_fraction"]
    validation_fraction: float = config["dataset"]["validation_fraction"]
    test_fraction: float = config["dataset"]["test_fraction"]
    cases = dataset_factory(dataset_name)
    """ Arcitechture """
    input_size: int = config["arcitechture"]["input_size"]
    layer_specification: List = config["arcitechture"]["layer_specification"]
    weight_range = (config["arcitechture"]["weight_range"]["from"],
                    config["arcitechture"]["weight_range"]["to"])
    bias_range = (config["arcitechture"]["bias_range"]["from"],
                  config["arcitechture"]["bias_range"]["to"])
    activation_functions = list(
        map(activation_factory,
            config["arcitechture"]["activation_functions"]))
    """ Training """
    optimizer: Callable = optimizer_factory(config["training"]["optimizer"])
    epochs = config["training"]["epochs"]
    minibatch_size = config["training"]["minibatch_size"]
    loss_function = loss_factory(config["training"]["loss_function"])
    learning_rate = config["training"]["learning_rate"]
    test_frequency = config["training"]["test_frequency"]
    """ Visualization """
    visualization_on = config["visualization"]["on"]
    display_weights = config["visualization"]["display_weights"]
    display_biases = config["visualization"]["display_biases"]
    display_layers = config["visualization"]["display_layers"]
    map_size = config["visualization"]["map_batch_size"]
    dendrogram_layers = config["visualization"]["dendrogram_layers"]

    # Create network
    network = Network(input_size=input_size,
                      dimensions=layer_specification,
                      activations=activation_functions,
                      loss_function=loss_function,
                      optimizer=optimizer,
                      learning_rate=learning_rate,
                      minibatch_size=minibatch_size,
                      epochs=epochs,
                      weight_range=weight_range,
                      bias_range=bias_range,
                      test_frequency=test_frequency,
                      display_weights=display_weights,
                      display_layers=display_layers,
                      display_biases=display_biases,
                      visualization_on=visualization_on,
                      dendrogram_layers=dendrogram_layers,
                      map_size=map_size)

    # Create casemanager
    cases = CaseManager(
        cases=cases,
        validation_fraction=validation_fraction,
        test_fraction=test_fraction,
        case_fraction=case_fraction,
    )
    return network, cases
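
A minimal config file consistent with the keys read above could be generated like this; every value is illustrative, and the misspelled "arcitechture" key is kept because the factory really does index it:

import json

sample_config = {
    "dataset": {"name": "mnist", "case_fraction": 1.0,
                "validation_fraction": 0.1, "test_fraction": 0.1},
    "arcitechture": {
        "input_size": 784,
        "layer_specification": [128, 10],
        "weight_range": {"from": -0.1, "to": 0.1},
        "bias_range": {"from": 0.0, "to": 0.0},
        "activation_functions": ["relu", "softmax"],
    },
    "training": {"optimizer": "adam", "epochs": 10, "minibatch_size": 64,
                 "loss_function": "cross_entropy", "learning_rate": 0.001,
                 "test_frequency": 1},
    "visualization": {"on": False, "display_weights": [], "display_biases": [],
                      "display_layers": [], "map_batch_size": 0,
                      "dendrogram_layers": []},
}

with open("config.json", "w") as f:
    json.dump(sample_config, f, indent=2)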
Example #17
f.close()

print("Loading test label data")
f = open("../test/t10k-labels.idx1-ubyte", "rb")
Y_test, nlabels_test = loadLabelFile(f)
f.close()

X = np.array(X)
y = np.array(y)

nn = Network(layers=[FullConnectedLayer(nNodes=784,
                                        keep_prob=0.5),
                     SoftmaxLayer(10)],
             mini_batch_size=1024,
             num_iterations=50,
             learning_rate=0.01,
             momentum_rate=0.9,
             rmsprop_rate=0.999,
             l2_regularization=0.7,
             verbose=False)

nn.fit(X, y)

X_test = np.array(X_test)
Y_test = np.array(Y_test)

test_accuracy = nn.validate(X_test, Y_test) / nImages_test
print("Test error : ", 1 - test_accuracy)
print("Test acc.  : %.02f" % (test_accuracy * 100))
print("--------------------------------")
Example #18
THIRD_LAYER_INPUT = 10
FOURTH_LAYER_INPUT = 15
LABELS_NUM = 2
NUM_OF_SAMPLES = 100
x = np.random.random((NUM_OF_SAMPLES, FIRST_LAYER_INPUT)).T
#x = np.array([[1, 0], [2, 1], [4, 1], [5, 2], [1, 4], [2, 3], [0.9, 1],
#              [2, 5], [1, 5], [6, 3]]).T
#NUM_OF_SAMPLES = 100
y_arr = []
for i in range(NUM_OF_SAMPLES):
    if i % 2 == 0:
        y_arr.append(np.array([1, 0]))
    else:
        y_arr.append(np.array([0, 1]))
y = np.array(y_arr).T
n = Network()
n.add_layer(Layer(FIRST_LAYER_INPUT, SECOND_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(SECOND_LAYER_INPUT, THIRD_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(THIRD_LAYER_INPUT, FOURTH_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(FOURTH_LAYER_INPUT, LABELS_NUM, None, softmax_layer=True))
curr_batch = np.random.permutation(range(NUM_OF_SAMPLES))
batch_x = np.array([x.T[ind] for ind in curr_batch]).T
batch_y = np.array([y.T[ind] for ind in curr_batch]).T
l = n.get_layer(0)

# Softmax test functions

l_sm = Layer(FIRST_LAYER_INPUT, LABELS_NUM, None, softmax_layer=True)

W_EXAMPLE = np.array([[2, 0], [0, 1]]).T
B_EXAMPLE = np.array([1, 1])
Example #19
    ]
    plt.scatter(coord_y_pos, coord_x_pos, alpha=0.2)
    plt.scatter(coord_y_neg, coord_x_neg, alpha=0.2)
    plt.savefig('spirals/plot_{}.png'.format(ind))


mat = scipy.io.loadmat('SwissRollData.mat')
labels = mat['Ct']
training = mat['Yt']
labels_validation = mat['Cv']
samples_validation = mat['Yv']
x = training
y = labels
x_validation = samples_validation
y_validation = labels_validation
n = Network()
n.add_layer(Layer(2, 10, TANH_ACTIVATION))
#n.add_layer(Layer(10, 10, TANH_ACTIVATION))
#n.add_layer(Layer(10, 10, TANH_ACTIVATION))
n.add_layer(Layer(10, 10, TANH_ACTIVATION))
n.add_layer(Layer(10, 2, None, softmax_layer=True))
epochs = [20]
batch_sizes = [100]
learning_rates = [0.5]
for epoch, batch_size, learning_rate in product(epochs, batch_sizes,
                                                learning_rates):
    print('epochs {} batch_size {} learning_rate {}'.format(
        epoch, batch_size, learning_rate))
    n, obj = stochastic_gradient_descent(n,
                                         x,
                                         y,
Example #20
import numpy as np
import random
import math
from NeuralNetwork import Network

n_inputs, hidden_layers_length, n_neurons, n_outputs = 2, 5, 4, 2
network = Network(n_inputs, hidden_layers_length, n_neurons, n_outputs)


print("\ninputlayer: \n" + str(network.inputLayer.weights))
for i in range(hidden_layers_length):
    print("\nhiddenLayers" + str(i+1) +":\n" + str(network.hiddenLayers[i].weights))
print("\noutputLayer: \n" + str(network.outputLayer.weights))
def main():
    result_list = [[None], [None]]
    topology = [2, 3, 3, 1]  # input layer, two hidden layers, output layer
    net1 = Network(topology)
    net2 = Network(topology)
    neuron.eta = 0.09
    neuron.alpha = 0.015
    inputs1 = [[0, 0], [0, 1]]
    outputs1 = [[0], [1]]
    inputs2 = [[1, 0], [1, 1]]
    outputs2 = [[1], [0]]
    threads = []
    nets = [net1, net2]
    outputs = [outputs1, outputs2]
    inputs = [inputs1, inputs2]
    for i in range(2):
        t = threading.Thread(target=TrainNet,
                             args=(inputs[i], [nets[i]], outputs[i], i,
                                   result_list))
        threads.append(t)
        t.start()
    for i in range(2):
        threads[i].join()
    net1 = result_list[0]
    net2 = result_list[1]
    while True:
        a = int(input("First input: "))
        b = int(input("Second input: "))
        if a > 0:
            net2.setInput([a, b])
            net2.feedForward()
            print(net2.getThResults())
        else:
            net1.setInput([a, b])
            net1.feedForward()
            print(net1.getThResults())
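
TrainNet is not shown in this example. Judging from the thread arguments and from how main() reads result_list after join(), a sketch consistent with the single-threaded example above might be (the names and the error threshold are inferred, not confirmed):

def TrainNet(inputs, nets, outputs, index, result_list):
    # Hypothetical worker: train the net it was handed, then publish it
    # back through result_list so main() can pick it up after join().
    net = nets[0]
    while True:
        err = 0
        for i in range(len(inputs)):
            net.setInput(inputs[i])
            net.feedForward()
            net.backPropagate(outputs[i])
            err += net.getError(outputs[i])
        if err < 0.75:  # assumed threshold; each net sees only half the patterns
            break
    result_list[index] = net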
Example #22
import mnist_loader
from NeuralNetwork import Network

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = Network([784, 30, 10])
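# The positional arguments below presumably mean 30 epochs, mini-batches of
# size 10, and a learning rate of 3.0, in the classic network.py style.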
net.train(training_data, 30, 10, 3.0, test_data=test_data)
net.save_configuration()

print("Done")
Example #23
    data_part = 5  # only one fifth of the whole dataset to speed up training

    for i in range(len(labels_full) // batch_size // data_part):
        images.append(images_full[i * batch_size:(i + 1) * batch_size])
        labels.append(labels_full[i * batch_size:(i + 1) * batch_size])

    y = []

    for batch in labels:
        y.append([])
        for label in batch:
            y[-1].append([1.0 if i == label else 0.0 for i in range(10)])

    y = np.array(y)

    network = Network(SIZES)

    network.epoch(images, y, batch_training_size)  # from dynamic parameters
    '''
    for i, el in enumerate(l3):
        print(labels[0][i], "=", np.argmax(el), " predictions: ", el)
    '''

    testing_images, testing_labels = mndata.load_testing()
    correct = 0.0
    for i, (image, label) in enumerate(zip(testing_images, testing_labels)):
        prediction = network.run(image)
        if label == prediction:
            correct += 1.0
        correct_rate = correct / (i + 1.0)
        print("{} = {} (correct {}%)".format(label, prediction,