Example #1
def testIdentity(trainDataFile):
    trainData = np.genfromtxt(trainDataFile)

    numInput = 8
    numHidden = 3
    numOutput = 8
    seed = 3
    learningRate = 0.3
    maxEpochs = 5000
    momentum = 0.0

    print("Generating %d-%d-%d neural network " % (numInput, numHidden, numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
    nn.train(trainData, maxEpochs, learningRate, momentum, showHidden=True)
    print("Training complete")

    accTrain = nn.accuracy(trainData)

    print("\nAccuracy on train data = %0.4f " % accTrain)

    numHidden = 4
    print("\nGenerating %d-%d-%d neural network " % (numInput, numHidden, numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
    nn.train(trainData, maxEpochs, learningRate, momentum, showHidden=True)
    print("Training complete")

    accTrain = nn.accuracy(trainData)

    print("\nAccuracy on train data = %0.4f " % accTrain)
Example #2
 def reproduction(self):
     # combine neural networks of the fittest snakes from the previous generation
     self.new_population = []
     for network in self.results:
         partner = random.choice(self.results)
         # create weights1 array for new snake
         new_weights1 = np.zeros(
             (network['weights1'].shape[0], network['weights1'].shape[1]))
         for row in range(new_weights1.shape[0]):
             for col in range(new_weights1.shape[1]):
                 gene1 = network['weights1'][row, col]
                 gene2 = partner['weights1'][row, col]
                 new_weights1[row, col] = random.choice([gene1, gene2])
         # create weights2 array for new snake
         new_weights2 = np.zeros(
             (network['weights2'].shape[0], network['weights2'].shape[1]))
         for row in range(new_weights2.shape[0]):
             for col in range(new_weights2.shape[1]):
                 gene1 = network['weights2'][row, col]
                 gene2 = partner['weights2'][row, col]
                 new_weights2[row, col] = random.choice([gene1, gene2])
         # create new neural network for new population
         self.new_population.append(
             NeuralNetwork(self.inputs, self.hidden_nodes, self.outputs))
         self.new_population[-1].weights1 = new_weights1
         self.new_population[-1].weights2 = new_weights2
     self.population = self.new_population
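
A note on Example #2: the nested loops implement uniform crossover, picking each weight gene from one of the two parent networks at random. A vectorized sketch of the same idea (a minimal illustration assuming plain NumPy arrays of equal shape; not part of the original code):

import numpy as np

def uniform_crossover(weights_a, weights_b, rng=None):
    # Choose each gene from parent A or parent B with equal probability.
    rng = rng or np.random.default_rng()
    mask = rng.random(weights_a.shape) < 0.5
    return np.where(mask, weights_a, weights_b)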
Example #3
def main():
    x = np.array([[x] for x in np.arange(2, 30)])
    y = func(x)
    noised_y = add_noise(y)
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        noised_y,
                                                        test_size=0.3,
                                                        random_state=0)
    network = NeuralNetwork(input_size=x_train.shape[1],
                            output_size=y_train.shape[1],
                            hidden_size=2)

    start_time = time.time()
    train_errors, test_errors, rates = network.fit(x_train=x_train,
                                                   y_train=y_train,
                                                   x_test=x_test,
                                                   y_test=y_test,
                                                   rate=0.5,
                                                   bonus=0.005,
                                                   iterations=10000)
    print("--- %s seconds ---\n" % (time.time() - start_time))

    x_network = x
    y_network = [network.predict(xi) for xi in x]
    print("Train:")
    view = View()
    view.current_error(network, x_train, y_train)
    print("\nTest:")
    view.current_error(network, x_test, y_test)
    view.error_sum_per_epoch(train_errors, test_errors)
    view.func_graphics(x, y, x_train, y_train, x_test, y_test, x_network,
                       y_network)
    view.rates(rates)
    view.show()
Example #4
def testTennisOrIris(trainDataFile, testDataFile, attrDataFile):
    data = Preprocessor(trainDataFile, testDataFile, attrDataFile)
    data.loadData()
    trainData = data.getMatrix(data.getTrainData())
    testData = data.getMatrix(data.getTestData())
 
    numInput = data.getNumInput()
    numOutput = len(data.getClasses())
    numHidden = 3
    seed = 4 
    learningRate = 0.1
    maxEpochs = 5000
    momentum = 0.0

    print("Generating neural network: %d-%d-%d" % (numInput, numHidden,numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
    nn.train(trainData, maxEpochs, learningRate, momentum)
    print("Training complete")

 #   accTrain = nn.accuracy(trainData)
    accTest = nn.accuracy(testData)

 #   print("\nAccuracy on train data = %0.4f " % accTrain)
   
    print("Accuracy on test data   = %0.4f " % accTest)
Example #5
def get_trained_stock_neural_network(stock_prices_training_set,
                                     hidden_neuron_count=30,
                                     training_func=None,
                                     threshold=.001,
                                     max_iterations=1000,
                                     activation='HyperbolicTangent',
                                     normalization='Statistical',
                                     learning_rate=0.2,
                                     momentum=0.2):
    number_of_inputs, number_of_outputs = len(
        stock_prices_training_set[0][0]), len(stock_prices_training_set[0][1])

    layers = (
        InputLayer(number_of_inputs,
                   number_of_inputs_per_neuron=1,
                   activation_function='Identity'),
        HiddenLayer(hidden_neuron_count,
                    number_of_inputs_per_neuron=number_of_inputs,
                    activation_function=activation,
                    learning_rate=learning_rate,
                    momentum=momentum),
        OutputLayer(  # Identity activation: the output layer passes the weighted sum through unchanged
            number_of_outputs,
            number_of_inputs_per_neuron=hidden_neuron_count,
            activation_function='Identity'))

    return (training_func
            or train)(NeuralNetwork(layers,
                                    allowed_error_threshold=threshold,
                                    max_number_of_iterations=max_iterations,
                                    normalization_class=normalization),
                      stock_prices_training_set)
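
Example #5 infers the layer sizes from the first training pair, so stock_prices_training_set is expected to be a sequence of (input_vector, target_vector) pairs. A purely hypothetical illustration of that layout (window length and values invented for the sketch):

# Hypothetical data layout only: each entry pairs an input price window
# with the target price(s) the network should predict.
stock_prices_training_set = [
    ([10.1, 10.3, 10.2, 10.5, 10.4], [10.6]),
    ([10.3, 10.2, 10.5, 10.4, 10.6], [10.7]),
]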
Example #6
def main():
    network = NeuralNetwork(2, 5, 1)
    inputs = []
    labels = []
    size = 100
    for i in range(size):
        x = random.randint(0, 800)
        y = random.randint(0, 800)
        labelVal = 1 if x >= y else 0
        inputs.append([x, y])
        labels.append([labelVal])
        # print(inputs)

    for i in range(1000):
        for j in range(size):
            network.train(inputs[j], labels[j])

    # # for i in range(len(network.input_layer.neurons)):
    # #     print(network.input_layer.neurons[i].weight)
    # # for i in range(len(network.hidden_layer.neurons)):
    # #     print(network.hidden_layer.neurons[i].weight)
    # print("--------------------")
    print(network.test([800, 0]))
    print(network.test([0, 800]))
    print(network.test([800, 0]))
    print(network.test([600, 400]))

    print('-----------------')
    for i in range(len(network.input_layer.neurons)):
        print(network.input_layer.neurons[i].weight)
    print('-----------------')
    for i in range(len(network.hidden_layer.neurons)):
        print(network.hidden_layer.neurons[i].weight)
Example #7
def main():
    training_data, validation_data, test_data = load_data_wrapper()
    network = NeuralNetwork([784, 16, 16, 10])

    layers = create_layers(network)
    win, wth = initialize_screen(network.sizes)
    draw_network(win, layers)  # draw initial network with random weights

    epochs = 30
    batch_size = 10
    learning_rate = 3.0

    txt = Text(Point(wth / 2, 830), "Initial Weights")
    txt.draw(win)
    # main training loop
    for i in range(epochs):  # for each iteration of training
        biases, weights = network.train_iteration(training_data, batch_size,
                                                   learning_rate, i,
                                                   test_data=test_data)
        txt.setText("Iteration: {0}".format(i))
        for j in range(1, len(layers)):
            layers[j].update_layer(weights[j - 1], biases[j - 1])
        draw_network(win, layers)

    win.getMouse()
    win.close()
Example #8
def donut_norm_test():
    from network import act, NeuralNetwork, NetData

    net = NeuralNetwork([2, 10, 1], act.sigmoid, *NetData.donut_norm_set())
    net.train(learning_rate=0.1, repeat=50000, show_error=True)
    net.show_final_plot()
    net.save("net/donut_norm.txt")
Example #9
def train(input_x, t, epsilon, gamma, weights, epochs):
    local_weights = weights
    new_weights = local_weights.copy()
    for f in range(0, epochs):
        for k in range(len(input_x)):
            for i in range(0, len(local_weights)):
                l = local_weights.copy()
                l[i] = local_weights[i] + epsilon
                y1 = NeuralNetwork(l).feedforward(input_x[k])
                l[i] = local_weights[i] - epsilon
                y2 = NeuralNetwork(l).feedforward(input_x[k])
                # central-difference gradient estimate: (L(w+eps) - L(w-eps)) / (2 * eps)
                new_weights[i] = local_weights[i] - gamma * (
                    error_loss(y1, t[k]) - error_loss(y2, t[k])) / (2 * epsilon)

            local_weights = new_weights.copy()
    return local_weights
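
Example #9 trains without backpropagation: each weight is perturbed by +epsilon and -epsilon, and the resulting loss difference gives a central-difference estimate of the gradient. As a standalone illustration of that estimator (not part of the original code), for a scalar function f:

def central_difference(f, x, eps=1e-5):
    # df/dx is approximately (f(x + eps) - f(x - eps)) / (2 * eps)
    return (f(x + eps) - f(x - eps)) / (2 * eps)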
Example #10
def testIrisNoisy(trainDataFile, testDataFile, attrDataFile):
    data = Preprocessor(trainDataFile, testDataFile, attrDataFile)
    data.loadData()
    testData = data.getMatrix(data.getTestData()) 
    numInput = data.getNumInput() 
    numOutput = len(data.getClasses())
    numHidden = 3
    seed = 4 
    learningRate = 0.1
    maxEpochs = 5000
    momentum = 0.0
 
    for rate in range(0, 21, 2):
        noisyData = addNoise(data.getTrainData(), rate, data.getClasses())
        trainData = data.getMatrix(noisyData) 
        print("\nNoise Rate (%): " + str(rate)) 
        print("Generating neural network: %d-%d-%d" % (numInput, numHidden,numOutput)) 
        nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
        nn.train(trainData, maxEpochs, learningRate, momentum, showEpochs=False, vRatio=0.85)
        print("Training complete")

        accTrain = nn.accuracy(trainData)
        accTest = nn.accuracy(testData)

        accValidTrain = nn.accuracy(trainData, validationOn=True)
        accValidTest = nn.accuracy(testData, validationOn=True)
        print("w/o validation set:")
        print("Accuracy on train data = %0.4f " % accTrain)
        print("Accuracy on test data   = %0.4f " % accTest)
    
        print("w/ validation set:")
        print("Accuracy on train data = %0.4f " % accValidTrain)
        print("Accuracy on test data   = %0.4f " % accValidTest)
Example #11
def test():
    a = NeuralNetwork()
    a.add_node()
    a.add_node()
    a.calculate_move()
    a.calculate_move()
    a.add_node()
    a.calculate_move()
Example #12
 def test_init_shape(self):
     network = NeuralNetwork((3, 4, 5))
     self.assertEqual(len(network.weights), 2)
     self.assertEqual(len(network.biases), 2)
     self.assertEqual(network.weights[0].shape, (4, 3))
     self.assertEqual(network.weights[1].shape, (5, 4))
     self.assertEqual(network.biases[0].shape, (4, ))
     self.assertEqual(network.biases[1].shape, (5, ))
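
Example #12 only pins down the shapes the constructor must produce: one (fan_out, fan_in) weight matrix and one (fan_out,) bias vector per layer transition. A minimal constructor sketch that would satisfy these assertions, assuming nothing about the real implementation beyond what the test checks:

import numpy as np

class NeuralNetwork:
    def __init__(self, sizes):
        # One weight matrix and one bias vector per consecutive pair of layer sizes.
        self.weights = [np.random.randn(fan_out, fan_in)
                        for fan_in, fan_out in zip(sizes[:-1], sizes[1:])]
        self.biases = [np.zeros(fan_out) for fan_out in sizes[1:]]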
Example #13
def network_3(input_num, output_num, lr):
    layers = [FullyConnectedLayer(input_num, 14), ReLU()]
    for i in range(27):
        layers.append(FullyConnectedLayer(14, 14))
        layers.append(ReLU())
    layers.append(FullyConnectedLayer(14, output_num))
    layers.append(SoftmaxLayerWithCrossEntropyLoss())
    return NeuralNetwork(name="14-14X28-4", lr=lr, layers=layers)
Example #14
def xor_test():
    from network import act, NeuralNetwork, NetData

    net = NeuralNetwork([2, 2, 1], act.sigmoid, *NetData.xor_set)
    net.train(repeat=30000, show_error=True)
    net.test(show_w_plot=True)
    net.show_final_plot()
    net.save("net/xor.txt")
Example #15
def network_2(input_num, output_num, lr):
    layers = [FullyConnectedLayer(input_num, 28), ReLU()]
    for i in range(5):
        layers.append(FullyConnectedLayer(28, 28))
        layers.append(ReLU())
    layers.append(FullyConnectedLayer(28, output_num))
    layers.append(SoftmaxLayerWithCrossEntropyLoss())
    return NeuralNetwork(name="14-28X6-4", lr=lr, layers=layers)
Example #16
def compare():
    rate = 0.1
    it = 10000
    x = np.array([[x] for x in np.arange(2, 30)])
    y = func(x)
    noised_y = add_noise(y)
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        noised_y,
                                                        test_size=0.3,
                                                        random_state=0)
    network = NeuralNetwork(input_size=x_train.shape[1],
                            output_size=y_train.shape[1],
                            hidden_size=2)
    network_r = NeuralNetwork(input_size=x_train.shape[1],
                              output_size=y_train.shape[1],
                              hidden_size=2,
                              weights=network.get_weights())
    start_time = time.time()
    tr_e, ts_e, rt = network.fit(x_train=x_train,
                                 y_train=y_train,
                                 x_test=x_test,
                                 y_test=y_test,
                                 rate=rate,
                                 iterations=it)
    tr_e_r, ts_e_r, rt_r = network_r.fit(x_train=x_train,
                                         y_train=y_train,
                                         x_test=x_test,
                                         y_test=y_test,
                                         rate=rate,
                                         iterations=it,
                                         bonus=0.005)
    print("--- %s seconds ---\n" % (time.time() - start_time))

    x_network = x
    y_network = [network.predict(xi) for xi in x]
    y_network_r = [network_r.predict(xi) for xi in x]
    view = View()
    view.func_graphics(x, y, x_train, y_train, x_test, y_test, x_network,
                       y_network, "Learning with static rate")
    view.func_graphics(x, y, x_train, y_train, x_test, y_test, x_network,
                       y_network_r, "Learning with dynamic rate")
    view.error_sum_per_epoch(tr_e, tr_e_r, "static rate", "dynamic rate")
    view.func(x_network, y_network, x_network, y_network_r)
    view.rates(rt_r)
    view.show()
Example #17
 def encode(self, x):
     """
     Encode given input.
     """
     if not self.encoding_network:
         self.encoding_network = NeuralNetwork(self.input_dim,
                                               self.network_config,
                                               self.input_tensor)
         self.encoding_network.stack(*self.encoding_layes)
     self.encoding_network.compute(x)
Example #18
 def encode(self, x):
     """
     Encode given input.
     """
     if not self.encoding_network:
         self.encoding_network = NeuralNetwork(self.input_dim,
                                               self.input_tensor)
         for layer in self.encoding_layes:
             self.encoding_network.stack_layer(layer, no_setup=True)
     return self.encoding_network.compute(x)
Example #19
def func(actor, mini_batch):
    for i in range(5):
        net = NeuralNetwork([2, 6, 1], actor, *NetData.and_norm_set(), last_sigmoid=True)
        net.train(learning_rate=0.1, repeat=200, print_num=1, mini_batch=mini_batch)
        print()

print("relu-minibatch")
func(act.relu, True)

print("sigmoid-non-minibatch")
func(act.sigmoid, False)
Example #20
 def decode(self, x):
     """
     Decode given representation.
     """
     if not self.rep_dim:
         raise Exception("rep_dim must be set to decode.")
     if not self.decoding_network:
         self.decoding_network = NeuralNetwork(self.rep_dim)
         for layer in self.decoding_layers:
             self.decoding_network.stack_layer(layer, no_setup=True)
     return self.decoding_network.compute(x)
Example #21
def network_1(input_num, output_num, lr):
    return NeuralNetwork(name="14-100-40-4",
                         lr=lr,
                         layers=[
                             FullyConnectedLayer(input_num, 100),
                             ReLU(),
                             FullyConnectedLayer(100, 40),
                             ReLU(),
                             FullyConnectedLayer(40, output_num),
                             SoftmaxLayerWithCrossEntropyLoss()
                         ])
Example #22
 def decode(self, x):
     """
     Decode given representation.
     """
     if not self.rep_dim:
         raise Exception("rep_dim must be set to decode.")
     if not self.decoding_network:
         self.decoding_network = NeuralNetwork(self.rep_dim,
                                               self.network_config)
         self.decoding_network.stack(*self.decoding_layers)
     self.decoding_network.compute(x)
Example #23
	def __init__(self, game):
		# class initialisation - create first generation
		self.game = game
		self.current_generation = 1
		self.results = []
		self.population = []
		self.highscore = 0
		self.dinosaurs = 0
		self.unicorns = 0
		self.cur_snake = 0
		for i in range(POPULATION_SIZE):
			self.population.append(NeuralNetwork())
Example #24
def main():
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=True)

    train_data = [(x, t) for x, t in zip(x_train, t_train)]
    test_data = [(x, t) for x, t in zip(x_test, t_test)]

    labels = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)

    nn = NeuralNetwork(784, (100,), labels)

    manager = NetworkEvaluator(nn, train_data, test_data)

    manager.train(num_epoch=10, plot=True)
Example #25
 def __init__(self, game, population_size):
     # class initialisation - create first generation
     self.game = game
     self.current_generation = 1
     self.population_size = population_size
     self.population = []
     self.results = []
     self.inputs = INPUTS
     self.hidden_nodes = HIDDEN_NODES
     self.outputs = OUTPUTS
     for i in range(population_size):
         self.population.append(
             NeuralNetwork(self.inputs, self.hidden_nodes, self.outputs))
Example #26
def main():
    print('LEARNING_RATE', LEARNING_RATE)
    print('MOMENTUM_RATE', MOMENTUM_RATE)
    print('teaching')
    network = NeuralNetwork((FIRST_LAYER, SECOND_LAYER, OUTPUT_LAYER),
                            learning_rate=LEARNING_RATE,
                            momentum=MOMENTUM_RATE)
    network.teach(TRAINING_DATA, 1000)

    print('checking')
    for item in TEST_DATA:
        print(format_output(network.calculate(item['input'])),
              format_output(item['output']))
Example #27
def main():
    i_num, h_num, o_num, learning_rate = 3, 5, 2, 0.1
    nn = NeuralNetwork(i_num, h_num, o_num, learning_rate)
    training_data = get_training_data()
    vec = np.array(training_data)
    q = vec[:, 0:i_num]
    o = vec[:, i_num:].T
    for i in range(10000):
        for record in training_data:
            input_list, target = record[0:i_num], [record[i_num:]]
            nn.train(inputs_list=input_list, targets_list=target)
        if i % 1000 == 0:
            print(i, nn.calc_loss(q, o))
    print(nn.query(q))
Example #28
    def __init__(self, target_dot):
        self.max_steps = MAX_STEPS
        self.step = 0
        self.is_dead = False
        self.nn = NeuralNetwork()
        self.position = np.array([PLAYER_START_X_POSITION, PLAYER_START_Y_POSITION], dtype=np.float32)
        self.size = np.array([PLAYER_WIDTH, PLAYER_HEIGHT], dtype=np.float32)
        # How badly this player got stuck
        self.stuck = 0
        self.prev_move = ''
        self.color = PLAYER_COLOR
        self.update_input_neurons(target_dot)

        self.is_arrived = False
        self.fitness = 0.0
Example #29
def bool_func(data,
              num_on_hidden=2,
              num_epochs=200,
              learning_rate=0.1,
              display_loss=False,
              label=''):
    x = data.drop(['F'], axis=1).values
    y = np.array([data['F'].values]).T
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)

    nw = NeuralNetwork(x_train, y_train, num_on_hidden)
    loss = nw.train(num_epochs, learning_rate, display_loss, label)
    pred = nw.test(x_test)
    pred = (pred > 0.5).astype("int").ravel()
    score = accuracy_score(y_test, pred) * 100

    print_info(data, loss, num_on_hidden, num_epochs, learning_rate, score)
Example #30
    def fit(self, X: np.ndarray, y: np.ndarray):
        X_ = check_array(X)

        y_ = check_array(y, ensure_2d=False)
        if len(y_.shape) == 1:
            y_ = y_.reshape(-1, 1)

        if X_.shape[0] != y_.shape[0]:
            raise ValueError('inconsistent # of samples')

        in_size = X_.shape[1]
        out_size = 1  # binary classification only!

        ### build neural network
        if self.hidden_size is not None:
            hidden_size = self.hidden_size
        else:
            hidden_size = out_size

        random_state = copy(self.random_state)

        layers = list()
        layer_input = in_size
        # hidden layers
        for _ in range(self.n_hidden):
            layers.append(Layer(
                input_size=layer_input,
                output_size=hidden_size,
                activation=self.hidden_activation,
                random_state=random_state,
            ))
            layer_input = hidden_size  # to make sure layers fit together

        # output layer
        layers.append(
            Layer(layer_input, out_size, self.output_activation,
                  random_state=random_state)
        )

        self.net_ = NeuralNetwork(layers)

        # train
        self.net_, _ = train(self.net_, X_, y_, self.loss, self.lr,
                             self.n_epochs)

        return self