def main():
    topology = [2, 3, 3, 1]
    net = Network(topology)
    neuron.eta = 0.09
    neuron.alpha = 0.015
    inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
    outputs = [[0], [1], [1], [0]]
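    # train on the XOR truth table above until the summed error over the four patterns drops below 1.5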
    while True:
        err = 0
        for i in range(len(inputs)):
            net.setInput(inputs[i])
            net.feedForward()
            net.backPropagate(outputs[i])
            err = err + net.getError(outputs[i])
        print("Error: ", err)
        if err < 1.5:
            break
    while True:
        a = int(input("First input: "))
        b = int(input("Second input: "))
        net.setInput([a, b])
        net.feedForward()
        print(net.getThResults())
Example #2
    def buildNetwork(self):
        inputs = []
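        # read the integer entries from all but the last three menus; show the error label and abort if any is empty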
        for i in range(len(self.menus) - 3):
            if self.menus[i].get() == '':
                self.errorLabel.grid(row=len(self.labels) + 3,
                                     column=1,
                                     pady=5,
                                     sticky='w')
                return
            inputs.append(int(self.menus[i].get()))

        # HyperParameters

        try:
            learnRate = float(self.menus[-1].get())
        except ValueError:
            self.errorLabel.grid(row=len(self.labels) + 3,
                                 column=1,
                                 pady=5,
                                 sticky='w')
            self.menus[-1].set('')
            return
        activationFunc = self.menus[-3].get()
        errorFunc = self.menus[-2].get()

        self.errorLabel.grid_remove()
        Globals.NN = Network.Network(inputs, learnRate, activationFunc,
                                     errorFunc)

        Node.drawNodes(self.canvas)
def main():
    result_list = [[None], [None]]
    topology = [2, 3, 3, 1]
    net1 = Network(topology)
    net2 = Network(topology)
    neuron.eta = 0.09
    neuron.alpha = 0.015
    inputs1 = [[0, 0], [0, 1]]
    outputs1 = [[0], [1]]
    inputs2 = [[1, 0], [1, 1]]
    outputs2 = [[1], [0]]
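    # each network learns half of the XOR truth table; one thread trains each net in parallel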
    threads = []
    nets = [net1, net2]
    outputs = [outputs1, outputs2]
    inputs = [inputs1, inputs2]
    for i in range(2):
        t = threading.Thread(target=TrainNet,
                             args=(inputs[i], [nets[i]], outputs[i], i,
                                   result_list))
        threads.append(t)
        t.start()
    for i in range(2):
        threads[i].join()
    net1 = result_list[0]
    net2 = result_list[1]
    while True:
        a = int(input("First input: "))
        b = int(input("Second input: "))
        if a > 0:
            net2.setInput([a, b])
            net2.feedForward()
            print(net2.getThResults())
        else:
            net1.setInput([a, b])
            net1.feedForward()
            print(net1.getThResults())
 def test_XOR(self):
     """
     Tests the XOR gate on a neural network
     :return:
     """
     inputs = [[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]]
     answers = [[0], [1], [0], [1]]
     XOR = Network(3, 2, 3, 1)
     XOR.training(300000, 0.5, inputs, answers)
     self.assertEqual(XOR.forward_propagation(inputs[0])[0] < 0.5, True)
     self.assertEqual(XOR.forward_propagation(inputs[1])[0] > 0.5, True)
     self.assertEqual(XOR.forward_propagation(inputs[2])[0] < 0.5, True)
     self.assertEqual(XOR.forward_propagation(inputs[3])[0] > 0.5, True)
 def test_nand(self):
     """
     Tests the NAND gate on a neural network
     :return:
     """
     inputs = [[0, 0], [0, 1], [1, 1], [1, 0]]
     answers = [[1], [1], [0], [1]]
     NAND = Network(1, 1, 2, 1)
     NAND.training(50000, 0.1, inputs, answers)
     self.assertEqual(NAND.forward_propagation(inputs[0])[0] > 0.5, True)
     self.assertEqual(NAND.forward_propagation(inputs[1])[0] > 0.5, True)
     self.assertEqual(NAND.forward_propagation(inputs[2])[0] < 0.5, True)
     self.assertEqual(NAND.forward_propagation(inputs[3])[0] > 0.5, True)
Example #6
 def __init__(self, graphWin, player_id, popName=False, playerSize=50):
     playerStartingPoint = [30, 540]
     self.player_id = player_id
     Rectangle.__init__(
         self, Point(playerStartingPoint[0], playerStartingPoint[1]),
         Point(playerStartingPoint[0] + playerSize,
               playerStartingPoint[1] + playerSize))
     self.graphWin = graphWin
     self.speed = 0
     self.status = 'nothing'
     self.alive = True
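     # give each player a random fill colour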
     self.setFill(
         color_rgb(random.randint(0, 255), random.randint(0, 255),
                   random.randint(0, 255)))
     self.setOutline(color_rgb(0, 0, 0))
     if self.graphWin.renderization:
         self.draw(graphWin)
     if self.graphWin.gameMode == 'aitrain':
         self.brain = Network(popName + str(player_id))
Example #7
def main():

    parse_args()
    seed = torch.seed() % 20
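    # per-run log and checkpoint directories are keyed by model name and seed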
    log_dir = cfg.TENSORBOARD_DIR + cfg.MODEL.NAME + "/" + str(seed)
    logger = Logger(log_dir)
    model_dir = cfg.MODEL.DIR + cfg.MODEL.NAME + "/" + str(seed)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)

    model = Network(cfg)
    dump_input = torch.rand((1, cfg.DATASET.NUM_JOINTS, cfg.DEFAULT_FRAMES))
    logger.add_graph(model, (dump_input, ))  # Log Model Architecture

    # Toggle to get model summary
    summary(model, dump_input.shape[1:], batch_size=32, device="cpu")
    trainer = ResTCN_trainer(model)

    optimizer = trainer.optimizer
    print("------STARTING TRAINING-------")

    for epoch in range(cfg.EPOCHS):

        training_log = trainer.train()
        print("-" * 50)
        print("Epoch: {} & Loss: {}".format(epoch, training_log["Loss"]))
        print("-" * 50)
        logger.log_scalars(training_log, logger.step)
        logger.step += 1

        if epoch % cfg.SAVE_FREQUENCY == 0:
            perf_indicator = trainer.cal_accuracy('train')
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': cfg.MODEL.NAME + str(seed),
                    'state_dict': model.state_dict(),
                    'perf': perf_indicator,
                    'optimizer': optimizer.state_dict(),
                },
                output_dir=model_dir)
Example #8
from NeuralNetwork import Network
zero = [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0]

one = [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0]

two = [0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1]

three = [1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1]

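# each 20-pixel digit pattern is paired with its 2-bit binary label (0 through 3)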
exampleZero = [zero, [0, 0]]
exampleOne = [one, [0, 1]]
exampleTwo = [two, [1, 0]]
exampleThree = [three, [1, 1]]
examples = [exampleZero, exampleOne, exampleTwo, exampleThree]

network = Network()
network.addLayer(10, 20)
network.addLayer(2, None)
network.train(examples)

print(network.process(zero))
print(network.process(one))
print(network.process(two))
print(network.process(three))
Example #9
from NeuralNetwork import Network

nn = Network('lastmodel')

nn.train(10000)
Example #10
def neural_network_factory(filename: str) -> Tuple:
    with open(filename) as file:
        config = json.load(file)
    """ Dataset """
    dataset_name: str = config["dataset"]["name"]
    case_fraction: float = config["dataset"]["case_fraction"]
    validation_fraction: float = config["dataset"]["validation_fraction"]
    test_fraction: float = config["dataset"]["test_fraction"]
    cases = dataset_factory(dataset_name)
    """ Arcitechture """
    input_size: int = config["arcitechture"]["input_size"]
    layer_specification: List = config["arcitechture"]["layer_specification"]
    weight_range = (config["arcitechture"]["weight_range"]["from"],
                    config["arcitechture"]["weight_range"]["to"])
    bias_range = (config["arcitechture"]["bias_range"]["from"],
                  config["arcitechture"]["bias_range"]["to"])
    activation_functions = list(
        map(lambda x: activation_factory(x),
            config["arcitechture"]["activation_functions"]))
    """ Training """
    optimizer: Callable = optimizer_factory(config["training"]["optimizer"])
    epochs = config["training"]["epochs"]
    minibatch_size = config["training"]["minibatch_size"]
    loss_function = loss_factory(config["training"]["loss_function"])
    learning_rate = config["training"]["learning_rate"]
    test_frequency = config["training"]["test_frequency"]
    """ Visualization """
    visualization_on = config["visualization"]["on"]
    display_weights = config["visualization"]["display_weights"]
    display_biases = config["visualization"]["display_biases"]
    display_layers = config["visualization"]["display_layers"]
    map_size = config["visualization"]["map_batch_size"]
    dendrogram_layers = config["visualization"]["dendrogram_layers"]

    # Create network
    network = Network(input_size=input_size,
                      dimensions=layer_specification,
                      activations=activation_functions,
                      loss_function=loss_function,
                      optimizer=optimizer,
                      learning_rate=learning_rate,
                      minibatch_size=minibatch_size,
                      epochs=epochs,
                      weight_range=weight_range,
                      bias_range=bias_range,
                      test_frequency=test_frequency,
                      display_weights=display_weights,
                      display_layers=display_layers,
                      display_biases=display_biases,
                      visualization_on=visualization_on,
                      dendrogram_layers=dendrogram_layers,
                      map_size=map_size)

    # Create casemanager
    cases = CaseManager(
        cases=cases,
        validation_fraction=validation_fraction,
        test_fraction=test_fraction,
        case_fraction=case_fraction,
    )
    return network, cases
Example #11
THIRD_LAYER_INPUT = 10
FOURTH_LAYER_INPUT = 15
LABELS_NUM = 2
NUM_OF_SAMPLES = 100
x = np.random.random((NUM_OF_SAMPLES, FIRST_LAYER_INPUT)).T
#x = np.array([[1, 0], [2, 1], [4, 1], [5, 2], [1, 4], [2, 3], [0.9, 1],
#              [2, 5], [1, 5], [6, 3]]).T
#NUM_OF_SAMPLES = 100
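# give the samples alternating one-hot labels so the two classes are evenly split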
y_arr = []
for i in range(NUM_OF_SAMPLES):
    if i % 2 == 0:
        y_arr.append(np.array([1, 0]))
    else:
        y_arr.append(np.array([0, 1]))
y = np.array(y_arr).T
n = Network()
n.add_layer(Layer(FIRST_LAYER_INPUT, SECOND_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(SECOND_LAYER_INPUT, THIRD_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(THIRD_LAYER_INPUT, FOURTH_LAYER_INPUT, TANH_ACTIVATION))
n.add_layer(Layer(FOURTH_LAYER_INPUT, LABELS_NUM, None, softmax_layer=True))
curr_batch = np.random.permutation(range(NUM_OF_SAMPLES))
batch_x = np.array([x.T[ind] for ind in curr_batch]).T
batch_y = np.array([y.T[ind] for ind in curr_batch]).T
l = n.get_layer(0)

# Softmax test functions

l_sm = Layer(FIRST_LAYER_INPUT, LABELS_NUM, None, softmax_layer=True)

W_EXAMPLE = np.array([[2, 0], [0, 1]]).T
B_EXAMPLE = (np.array([1, 1]))
Example #12
f.close()

print("Loading test label data")
f = open("../test/t10k-labels.idx1-ubyte", "rb")
Y_test, nlabels_test = loadLabelFile(f)
f.close()

X = np.array(X)
y = np.array(y)

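# a single 784-unit fully connected layer with dropout (keep_prob=0.5) followed by a 10-way softmax output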
nn = Network(layers=[FullConnectedLayer(nNodes=784, keep_prob=0.5),
                     SoftmaxLayer(10)],
             mini_batch_size=1024,
             num_iterations=50,
             learning_rate=0.01,
             momentum_rate=0.9,
             rmsprop_rate=0.999,
             l2_regularization=0.7,
             verbose=False)

nn.fit(X, y)

X_test = np.array(X_test)
Y_test = np.array(Y_test)

test_accuracy = nn.validate(X_test, Y_test) / nImages_test
print("Test error : ", 1 - test_accuracy)
print("Test acc.  : %.02f" % (test_accuracy * 100))
print("--------------------------------")
Example #13
    data_part = 5  # only one fifth of the whole dataset to speed up training

    for i in range(len(labels_full) // batch_size // data_part):
        images.append(images_full[i * batch_size:(i + 1) * batch_size])
        labels.append(labels_full[i * batch_size:(i + 1) * batch_size])

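    # one-hot encode each label into a 10-element target vector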
    y = []

    for batch in labels:
        y.append([])
        for label in batch:
            y[-1].append([1.0 if i == label else 0.0 for i in range(10)])

    y = np.array(y)

    network = Network(SIZES)

    network.epoch(images, y, batch_training_size)  # from dynamic parameters
    '''
    for i, el in enumerate(l3):
        print(labels[0][i], "=", np.argmax(el), " predictions: ", el)
    '''

    testing_images, testing_labels = mndata.load_testing()
    correct = 0.0
    for i, (image, label) in enumerate(zip(testing_images, testing_labels)):
        prediction = network.run(image)
        if label == prediction:
            correct += 1.0
        correct_rate = correct / (i + 1.0)
        print("{} = {} (correct {}%)".format(label, prediction,
Example #14
NUM_TRAIN_BATCHES = 1500

LEN_TEST_TEXT = 2000  # Number of test characters of text to generate after training the network
ckpt_filename = 'model'
midi_filename = 'midiOut.txt'

## Initialize the network
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)

net = Network(in_size=in_size,
              lstm_size=lstm_size,
              num_layers=num_layers,
              out_size=out_size,
              session=sess,
              learning_rate=0.003,
              name="char_rnn_network")

sess.run(tf.global_variables_initializer())

saver = tf.train.Saver(tf.global_variables())

## 1) TRAIN THE NETWORK
if ckpt_file == "":
    last_time = time.time()

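    # pre-allocate input and target batches of shape (batch_size, time_steps, in_size)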
    batch = np.zeros((batch_size, time_steps, in_size))
    batch_y = np.zeros((batch_size, time_steps, in_size))
Example #15
import numpy as np
import random
import math
from NeuralNetwork import Network

n_inputs, hidden_layers_length, n_neurons, n_outputs = 2, 5, 4, 2
network = Network(n_inputs, hidden_layers_length, n_neurons, n_outputs)


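# print the weight matrices of the input, hidden and output layers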
print("\ninputlayer: \n" + str(network.inputLayer.weights))
for i in range(hidden_layers_length):
    print("\nhiddenLayers" + str(i+1) +":\n" + str(network.hiddenLayers[i].weights))
print("\noutputLayer: \n" + str(network.outputLayer.weights))