Example #1
def testIdentity(trainDataFile):
    trainData = np.genfromtxt(trainDataFile)

    numInput = 8
    numHidden = 3
    numOutput = 8
    seed = 3
    learningRate = 0.3
    maxEpochs = 5000
    momentum = 0.0

    print("Generating %d-%d-%d neural network " % (numInput, numHidden, numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
    nn.train(trainData, maxEpochs, learningRate, momentum, showHidden=True)
    print("Training complete")

    accTrain = nn.accuracy(trainData)

    print("\nAccuracy on train data = %0.4f " % accTrain)

    numHidden = 4
    print("\nGenerating %d-%d-%d neural network " % (numInput, numHidden, numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
    nn.train(trainData, maxEpochs, learningRate, momentum, showHidden=True)
    print("Training complete")

    accTrain = nn.accuracy(trainData)

    print("\nAccuracy on train data = %0.4f " % accTrain)
Example #2
def train(net: NeuralNetwork,
          inputs: Tensor,
          targets: Tensor,
          num_epochs: int = 5000,
          iterator: DataIterator = BatchIterator(),
          loss: Loss = CrossEntropy(),
          optimizer: Optimizer = MBGD(),
          showGraph: bool = False) -> None:
    losses = []
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for batch in iterator(inputs, targets):
            for X, Y in zip(batch.inputs, batch.targets):
                predicted = net.forward(X)
                epoch_loss += loss.loss(predicted, Y)
                grad = loss.grad(predicted, Y)
                net.backwards(grad)
                optimizer.step(net)

        print(epoch, epoch_loss)
        losses.append(epoch_loss)
        if epoch_loss < 300:
            pass  # no-op placeholder; no early stopping is actually applied
    if showGraph:
        plt.plot(losses)
        plt.show()
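
The loop above only assumes that iterator(inputs, targets) yields batches exposing .inputs and .targets. A minimal BatchIterator consistent with that usage (a sketch, not necessarily the project's actual class):

from collections import namedtuple
import numpy as np

Batch = namedtuple("Batch", ["inputs", "targets"])

class BatchIterator:
    def __init__(self, batch_size=32, shuffle=True):
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __call__(self, inputs, targets):
        # walk the data in fixed-size slices, optionally in random order
        starts = np.arange(0, len(inputs), self.batch_size)
        if self.shuffle:
            np.random.shuffle(starts)
        for start in starts:
            end = start + self.batch_size
            yield Batch(inputs[start:end], targets[start:end])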
Example #3
def testTennisOrIris(trainDataFile, testDataFile, attrDataFile):
    data = Preprocessor(trainDataFile, testDataFile, attrDataFile)
    data.loadData()
    trainData = data.getMatrix(data.getTrainData())
    testData = data.getMatrix(data.getTestData())
 
    numInput = data.getNumInput()
    numOutput = len(data.getClasses())
    numHidden = 3
    seed = 4 
    learningRate = 0.1
    maxEpochs = 5000
    momentum = 0.0

    print("Generating neural network: %d-%d-%d" % (numInput, numHidden,numOutput))
    nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
    nn.train(trainData, maxEpochs, learningRate, momentum)
    print("Training complete")

 #   accTrain = nn.accuracy(trainData)
    accTest = nn.accuracy(testData)

 #   print("\nAccuracy on train data = %0.4f " % accTrain)
   
    print("Accuracy on test data   = %0.4f " % accTest)
Example #4
def main():
    training_data, validation_data, test_data = load_data_wrapper()
    network = NeuralNetwork([784, 16, 16, 10])

    layers = create_layers(network)
    win, wth = initialize_screen(network.sizes)
    draw_network(win, layers)  # draw initial network with random weights

    epochs = 30
    batch_size = 10
    learning_rate = 3.0

    txt = Text(Point(wth / 2, 830), "Initial Weights")
    txt.draw(win)
    # main training loop
    for i in range(epochs):  # for each iteration of training
        biases, weights = network.train_iteration(
            training_data, batch_size, learning_rate, i, test_data=test_data)
        txt.setText("Iteration: {0}".format(i))
        for j in range(1, len(layers)):
            layers[j].update_layer(weights[j - 1], biases[j - 1])
        draw_network(win, layers)

    win.getMouse()
    win.close()
Example #5
def main():
    x = np.array([[x] for x in np.arange(2, 30)])
    y = func(x)
    noised_y = add_noise(y)
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        noised_y,
                                                        test_size=0.3,
                                                        random_state=0)
    network = NeuralNetwork(input_size=x_train.shape[1],
                            output_size=y_train.shape[1],
                            hidden_size=2)

    start_time = time.time()
    train_errors, test_errors, rates = network.fit(x_train=x_train,
                                                   y_train=y_train,
                                                   x_test=x_test,
                                                   y_test=y_test,
                                                   rate=0.5,
                                                   bonus=0.005,
                                                   iterations=10000)
    print("--- %s seconds ---\n" % (time.time() - start_time))

    x_network = x
    y_network = [network.predict(xi) for xi in x]
    print("Train:")
    view = View()
    view.current_error(network, x_train, y_train)
    print("\nTest:")
    view.current_error(network, x_test, y_test)
    view.error_sum_per_epoch(train_errors, test_errors)
    view.func_graphics(x, y, x_train, y_train, x_test, y_test, x_network,
                       y_network)
    view.rates(rates)
    view.show()
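
func and add_noise are defined elsewhere in that project; a hypothetical stand-in pair, purely for illustration, could be:

import numpy as np

def func(x):
    # any smooth 1-D target works for this curve-fitting demo
    return np.sin(x) / x

def add_noise(y, scale=0.05):
    # additive Gaussian noise on the targets
    return y + np.random.normal(0.0, scale, size=y.shape)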
Example #6
def test():
    a = NeuralNetwork()
    a.add_node()
    a.add_node()
    a.calculate_move()
    a.calculate_move()
    a.add_node()
    a.calculate_move()
Example #7
def xor_test():
        from network import act, NeuralNetwork, NetData

        net = NeuralNetwork([2,2,1], act.sigmoid, *NetData.xor_set)
        net.train(repeat=30000, show_error=True)
        net.test(show_w_plot=True)
        net.show_final_plot()
        net.save("net/xor.txt")
Example #8
 def encode(self, x):
     """
     Encode given input.
     """
     if not self.encoding_network:
         self.encoding_network = NeuralNetwork(self.input_dim,
                                               self.network_config,
                                               self.input_tensor)
         self.encoding_network.stack(*self.encoding_layes)
     self.encoding_network.compute(x)
Example #9
 def encode(self, x):
     """
     Encode given input.
     """
     if not self.encoding_network:
         self.encoding_network = NeuralNetwork(self.input_dim,
                                               self.input_tensor)
         for layer in self.encoding_layes:
             self.encoding_network.stack_layer(layer, no_setup=True)
     return self.encoding_network.compute(x)
Example #10
 def decode(self, x):
     """
     Decode given representation.
     """
     if not self.rep_dim:
         raise Exception("rep_dim must be set to decode.")
     if not self.decoding_network:
         self.decoding_network = NeuralNetwork(self.rep_dim)
         for layer in self.decoding_layers:
             self.decoding_network.stack_layer(layer, no_setup=True)
     return self.decoding_network.compute(x)
Example #11
 def decode(self, x):
     """
     Decode given representation.
     """
     if not self.rep_dim:
         raise Exception("rep_dim must be set to decode.")
     if not self.decoding_network:
         self.decoding_network = NeuralNetwork(self.rep_dim,
                                               self.network_config)
         self.decoding_network.stack(*self.decoding_layers)
     self.decoding_network.compute(x)
Example #12
        def func(actor, mini_batch):
                for i in range(5):
                        net = NeuralNetwork([2,6,1], actor, *NetData.and_norm_set(), last_sigmoid=True)
                        net.train(learning_rate=0.1, repeat=200, print_num=1, mini_batch=mini_batch)
                        print()

        print("relu-minibatch")
        func(act.relu, True)

        print("sigmoid-non-minibatch")
        func(act.sigmoid, False)
Example #13
def main():
    print('LEARNING_RATE', LEARNING_RATE)
    print('MOMENTUM_RATE', MOMENTUM_RATE)
    print('teaching')
    network = NeuralNetwork((FIRST_LAYER, SECOND_LAYER, OUTPUT_LAYER),
                            learning_rate=LEARNING_RATE,
                            momentum=MOMENTUM_RATE)
    network.teach(TRAINING_DATA, 1000)

    print('checking')
    for item in TEST_DATA:
        print(format_output(network.calculate(item['input'])),
              format_output(item['output']))
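
format_output is not shown here; judging from the related Alerion/ai example further down, which prints each activation with '%.2f', a plausible stand-in is:

def format_output(values):
    # round each activation for side-by-side comparison with the expected output
    return ['%.2f' % v for v in values]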
Example #14
def testIrisNoisy(trainDataFile, testDataFile, attrDataFile):
    data = Preprocessor(trainDataFile, testDataFile, attrDataFile)
    data.loadData()
    testData = data.getMatrix(data.getTestData()) 
    numInput = data.getNumInput() 
    numOutput = len(data.getClasses())
    numHidden = 3
    seed = 4 
    learningRate = 0.1
    maxEpochs = 5000
    momentum = 0.0
 
    for rate in range(0, 21, 2):
        noisyData = addNoise(data.getTrainData(), rate, data.getClasses())
        trainData = data.getMatrix(noisyData) 
        print("\nNoise Rate (%): " + str(rate)) 
        print("Generating neural network: %d-%d-%d" % (numInput, numHidden,numOutput)) 
        nn = NeuralNetwork(numInput, numHidden, numOutput, seed)
        nn.train(trainData, maxEpochs, learningRate, momentum, showEpochs=False, vRatio=0.85)
        print("Training complete")

        accTrain = nn.accuracy(trainData)
        accTest = nn.accuracy(testData)

        accValidTrain = nn.accuracy(trainData, validationOn=True)
        accValidTest = nn.accuracy(testData, validationOn=True)
        print("w/o validation set:")
        print("Accuracy on train data = %0.4f " % accTrain)
        print("Accuracy on test data   = %0.4f " % accTest)
    
        print("w/ validation set:")
        print("Accuracy on train data = %0.4f " % accValidTrain)
        print("Accuracy on test data   = %0.4f " % accValidTest)
Example #15
def donut_norm_test():
        from network import act, NeuralNetwork, NetData

        net = NeuralNetwork([2,10,1], act.sigmoid, *NetData.donut_norm_set())
        net.train(learning_rate=0.1, repeat=50000, show_error=True)
        net.show_final_plot()
        net.save("net/donut_norm.txt")
Example #16
class AutoEncoder(NeuralNetwork):
    """
    Autoencoder.
    Must call stack_encoders before stack_decoders.
    Parameters:
        rep_dim - dimension of representation
    """
    def __init__(self, input_dim, rep_dim=None, config=None, input_tensor=None):
        super(AutoEncoder, self).__init__(input_dim, config=config, input_tensor=input_tensor)

        self.rep_dim = rep_dim
        self.encoding_layes = []
        self.decoding_layers = []
        self.encoding_network = None
        self.decoding_network = None

    def _cost_func(self, y):
        return T.sum((self.input_variables[0] - y)**2)

    @property
    def cost(self):
        return self._cost_func(self.output)

    @property
    def test_cost(self):
        return self._cost_func(self.test_output)

    def stack_encoders(self, *layers):
        """
        Stack encoding layers; this must be done before stacking decoding layers.
        """
        self.stack(*layers)
        self.encoding_layes.extend(layers)

    def stack_decoders(self, *layers):
        """
        Stack decoding layers.
        """
        self.stack(*layers)
        self.decoding_layers.extend(layers)

    def encode(self, x):
        """
        Encode given input.
        """
        if not self.encoding_network:
            self.encoding_network = NeuralNetwork(self.input_dim, self.network_config, self.input_tensor)
            self.encoding_network.stack(*self.encoding_layes)
        self.encoding_network.compute(x)

    def decode(self, x):
        """
        Decode given representation.
        """
        if not self.rep_dim:
            raise Exception("rep_dim must be set to decode.")
        if not self.decoding_network:
            self.decoding_network = NeuralNetwork(self.rep_dim, self.network_config)
            self.decoding_network.stack(*self.decoding_layers)
        self.decoding_network.compute(x)
Example #17
    def __init__(self, target_dot):
        self.max_steps = MAX_STEPS
        self.step = 0
        self.is_dead = False
        self.nn = NeuralNetwork()
        self.position = np.array([PLAYER_START_X_POSITION, PLAYER_START_Y_POSITION], dtype=np.float32)
        self.size = np.array([PLAYER_WIDTH, PLAYER_HEIGHT], dtype=np.float32)
        # How badly this player got stuck
        self.stuck = 0
        self.prev_move = ''
        self.color = PLAYER_COLOR
        self.update_input_neurons(target_dot)

        self.is_arrived = False
        self.fitness = 0.0
Example #18
 def reproduction(self):
      # combine neural networks of the fittest snakes from the previous generation
     self.new_population = []
     for network in self.results:
         partner = random.choice(self.results)
         # create weights1 array for new snake
         new_weights1 = np.zeros(
             (network['weights1'].shape[0], network['weights1'].shape[1]))
         for row in range(new_weights1.shape[0]):
             for col in range(new_weights1.shape[1]):
                 gene1 = network['weights1'][row, col]
                 gene2 = partner['weights1'][row, col]
                 new_weights1[row, col] = random.choice([gene1, gene2])
         # create weights2 array for new snake
         new_weights2 = np.zeros(
             (network['weights2'].shape[0], network['weights2'].shape[1]))
         for row in range(new_weights2.shape[0]):
             for col in range(new_weights2.shape[1]):
                 gene1 = network['weights2'][row, col]
                 gene2 = partner['weights2'][row, col]
                 new_weights2[row, col] = random.choice([gene1, gene2])
         # create new neural network for new population
         self.new_population.append(
             NeuralNetwork(self.inputs, self.hidden_nodes, self.outputs))
         self.new_population[-1].weights1 = new_weights1
         self.new_population[-1].weights2 = new_weights2
     self.population = self.new_population
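
The element-wise loops above implement uniform crossover; the same recombination can be written in a few vectorized NumPy lines (an equivalent sketch, not the project's code):

import numpy as np

def uniform_crossover(w_a, w_b):
    # pick each weight from parent A or parent B with equal probability
    mask = np.random.rand(*w_a.shape) < 0.5
    return np.where(mask, w_a, w_b)

# e.g. new_weights1 = uniform_crossover(network['weights1'], partner['weights1'])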
Example #19
def get_trained_stock_neural_network(stock_prices_training_set,
                                     hidden_neuron_count=30,
                                     training_func=None,
                                     threshold=.001,
                                     max_iterations=1000,
                                     activation='HyperbolicTangent',
                                     normalization='Statistical',
                                     learning_rate=0.2,
                                     momentum=0.2):
    number_of_inputs, number_of_outputs = len(
        stock_prices_training_set[0][0]), len(stock_prices_training_set[0][1])

    layers = (
        InputLayer(number_of_inputs,
                   number_of_inputs_per_neuron=1,
                   activation_function='Identity'),
        HiddenLayer(hidden_neuron_count,
                    number_of_inputs_per_neuron=number_of_inputs,
                    activation_function=activation,
                    learning_rate=learning_rate,
                    momentum=momentum),
        OutputLayer(  # Identity activation: the output is just the weighted sum (dot product)
            number_of_outputs,
            number_of_inputs_per_neuron=hidden_neuron_count,
            activation_function='Identity'))

    return (training_func
            or train)(NeuralNetwork(layers,
                                    allowed_error_threshold=threshold,
                                    max_number_of_iterations=max_iterations,
                                    normalization_class='Statistical'),
                      stock_prices_training_set)
Example #20
def train(input_x, t, epsilon, gamma, weights, epochs):
    local_weights = weights
    new_weights = local_weights.copy()
    for f in range(0, epochs):
        for k in range(len(input_x)):
            for i in range(0, len(local_weights)):
                l = local_weights.copy()
                l[i] = local_weights[i] + epsilon
                y1 = NeuralNetwork(l).feedforward(input_x[k])
                l[i] = local_weights[i] - epsilon
                y2 = NeuralNetwork(l).feedforward(input_x[k])
                # central-difference gradient estimate: (E(w+eps) - E(w-eps)) / (2*eps)
                new_weights[i] = local_weights[i] - gamma * (
                    error_loss(y1, t[k]) - error_loss(y2, t[k])) / (2 * epsilon)

            local_weights = new_weights.copy()
    return local_weights
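
This trainer never backpropagates: each partial derivative is estimated with the central finite difference (E(w+eps) - E(w-eps)) / (2*eps). error_loss itself is not shown; a plausible stand-in (an assumption) is the squared error:

def error_loss(y, t):
    # squared error between prediction and target (hypothetical definition)
    return 0.5 * (y - t) ** 2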
Example #21
def network_3(input_num, output_num, lr):
    layers = [FullyConnectedLayer(input_num, 14), ReLU()]
    for i in range(27):
        layers.append(FullyConnectedLayer(14, 14))
        layers.append(ReLU())
    layers.append(FullyConnectedLayer(14, output_num))
    layers.append(SoftmaxLayerWithCrossEntropyLoss())
    return NeuralNetwork(name="14-14X28-4", lr=lr, layers=layers)
Example #22
 def encode(self, x):
     """
     Encode given input.
     """
     if not self.encoding_network:
         self.encoding_network = NeuralNetwork(self.input_dim, self.network_config, self.input_tensor)
         self.encoding_network.stack(*self.encoding_layes)
     self.encoding_network.compute(x)
Example #23
def bool_func(data,
              num_on_hidden=2,
              num_epochs=200,
              learning_rate=0.1,
              display_loss=False,
              label=''):
    x = data.drop(['F'], axis=1).values
    y = np.array([data['F'].values]).T
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33)

    nw = NeuralNetwork(x_train, y_train, num_on_hidden)
    loss = nw.train(num_epochs, learning_rate, display_loss, label)
    pred = nw.test(x_test)
    pred = (pred > 0.5).astype("int").ravel()
    score = accuracy_score(y_test, pred) * 100

    print_info(data, loss, num_on_hidden, num_epochs, learning_rate, score)
Example #24
 def test_init_shape(self):
     network = NeuralNetwork((3, 4, 5))
     self.assertEqual(len(network.weights), 2)
     self.assertEqual(len(network.biases), 2)
     self.assertEqual(network.weights[0].shape, (4, 3))
     self.assertEqual(network.weights[1].shape, (5, 4))
     self.assertEqual(network.biases[0].shape, (4, ))
     self.assertEqual(network.biases[1].shape, (5, ))
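
For this shape test to pass, the constructor must create one weight matrix and one bias vector per layer transition, with weights[i] shaped (next_size, prev_size). A minimal sketch of such an __init__ (an illustration, not the tested implementation):

import numpy as np

class NeuralNetwork:
    def __init__(self, sizes):
        self.sizes = sizes
        # one (out, in) weight matrix and one bias vector per consecutive pair of layer sizes
        self.weights = [np.random.randn(out_size, in_size)
                        for in_size, out_size in zip(sizes[:-1], sizes[1:])]
        self.biases = [np.random.randn(out_size) for out_size in sizes[1:]]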
Example #25
def network_2(input_num, output_num, lr):
    layers = [FullyConnectedLayer(input_num, 28), ReLU()]
    for i in range(5):
        layers.append(FullyConnectedLayer(28, 28))
        layers.append(ReLU())
    layers.append(FullyConnectedLayer(28, output_num))
    layers.append(SoftmaxLayerWithCrossEntropyLoss())
    return NeuralNetwork(name="14-28X6-4", lr=lr, layers=layers)
Example #26
def main():
    network = NeuralNetwork(2, 5, 1)
    input = []
    label = []
    size = 100
    for i in range(size):
        x = random.randint(0, 800)
        y = random.randint(0, 800)
        labelVal = 1 if x >= y else 0
        input.append([x, y])
        label.append([labelVal])
        #print(input)

    for i in range(1000):
        for j in range(size):
            network.train(input[j], label[j])

    # # for i in range(len(network.input_layer.neurons)):
    # #     print(network.input_layer.neurons[i].weight)
    # # for i in range(len(network.hidden_layer.neurons)):
    # #     print(network.hidden_layer.neurons[i].weight)
    # print("--------------------")
    print(network.test([800, 0]))
    print(network.test([0, 800]))
    print(network.test([800, 0]))
    print(network.test([600, 400]))

    print('-----------------')
    for i in range(len(network.input_layer.neurons)):
        print(network.input_layer.neurons[i].weight)
    print('-----------------')
    for i in range(len(network.hidden_layer.neurons)):
        print(network.hidden_layer.neurons[i].weight)
Example #27
 def encode(self, x):
     """
     Encode given input.
     """
     if not self.encoding_network:
         self.encoding_network = NeuralNetwork(self.input_dim, self.input_tensor)
         for layer in self.encoding_layes:
             self.encoding_network.stack_layer(layer, no_setup=True)
     return self.encoding_network.compute(x)
Example #28
 def test_output(self):
     network = NeuralNetwork((3, 4, 5), init=NeuralNetwork.Init.ZERO)
     change_to_weights = [
         np.array([[0.3, 0., 0.], [0., 0., 0.], [0., 0., 0.], [0., 0.,
                                                               0.]]),
         np.array([[0.4, 0., 0., 0.], [0.9, 0., 0., 0.], [0., 0., 0., 0.],
                   [0., 0., 0., 0.], [0., 0., 0., 0.]])
     ]
     change_to_biases = [
         np.array([0., 0., 0., 0.]),
         np.array([0., 0., 0., 0., 0.9])
     ]
     network.update_weights_and_biases(change_to_weights, change_to_biases)
     self.assertTrue(
         np.array_equal(
             network.feed_forward((-2, -4, -6)),
             np.array((0.5353751667092154, 0.5790584231739951, 0.5, 0.5,
                       0.7109495026250039))))
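
The expected vector can be reproduced by hand, assuming logistic sigmoid activations in both layers and the zero-initialized parameters updated as above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

a1 = sigmoid(np.array([0.3 * -2, 0.0, 0.0, 0.0]))          # hidden activations
z2 = np.array([0.4 * a1[0], 0.9 * a1[0], 0.0, 0.0, 0.9])   # output pre-activations (last gets the 0.9 bias)
print(sigmoid(z2))  # ~ [0.5354, 0.5791, 0.5, 0.5, 0.7109]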
Example #29
 def decode(self, x):
     """
     Decode given representation.
     """
     if not self.rep_dim:
         raise Exception("rep_dim must be set to decode.")
     if not self.decoding_network:
         self.decoding_network = NeuralNetwork(self.rep_dim, self.network_config)
         self.decoding_network.stack(*self.decoding_layers)
     self.decoding_network.compute(x)
Example #30
 def decode(self, x):
     """
     Decode given representation.
     """
     if not self.rep_dim:
         raise Exception("rep_dim must be set to decode.")
     if not self.decoding_network:
         self.decoding_network = NeuralNetwork(self.rep_dim)
         for layer in self.decoding_layers:
             self.decoding_network.stack_layer(layer, no_setup=True)
     return self.decoding_network.compute(x)
Example #31
def network_1(input_num, output_num, lr):
    return NeuralNetwork(name="14-100-40-4",
                         lr=lr,
                         layers=[
                             FullyConnectedLayer(input_num, 100),
                             ReLU(),
                             FullyConnectedLayer(100, 40),
                             ReLU(),
                             FullyConnectedLayer(40, output_num),
                             SoftmaxLayerWithCrossEntropyLoss()
                         ])
Example #32
	def __init__(self, game):
		# class initialisation - create first generation
		self.game = game
		self.current_generation = 1
		self.results = []
		self.population = []
		self.highscore = 0
		self.dinosaurs = 0
		self.unicorns = 0
		self.cur_snake = 0
		for i in range(POPULATION_SIZE):
			self.population.append(NeuralNetwork())
Example #33
def compare():
    rate = 0.1
    it = 10000
    x = np.array([[x] for x in np.arange(2, 30)])
    y = func(x)
    noised_y = add_noise(y)
    x_train, x_test, y_train, y_test = train_test_split(x,
                                                        noised_y,
                                                        test_size=0.3,
                                                        random_state=0)
    network = NeuralNetwork(input_size=x_train.shape[1],
                            output_size=y_train.shape[1],
                            hidden_size=2)
    network_r = NeuralNetwork(input_size=x_train.shape[1],
                              output_size=y_train.shape[1],
                              hidden_size=2,
                              weights=network.get_weights())
    start_time = time.time()
    tr_e, ts_e, rt = network.fit(x_train=x_train,
                                 y_train=y_train,
                                 x_test=x_test,
                                 y_test=y_test,
                                 rate=rate,
                                 iterations=it)
    tr_e_r, ts_e_r, rt_r = network_r.fit(x_train=x_train,
                                         y_train=y_train,
                                         x_test=x_test,
                                         y_test=y_test,
                                         rate=rate,
                                         iterations=it,
                                         bonus=0.005)
    print("--- %s seconds ---\n" % (time.time() - start_time))

    x_network = x
    y_network = [network.predict(xi) for xi in x]
    y_network_r = [network_r.predict(xi) for xi in x]
    view = View()
    view.func_graphics(x, y, x_train, y_train, x_test, y_test, x_network,
                       y_network, "Learning with static rate")
    view.func_graphics(x, y, x_train, y_train, x_test, y_test, x_network,
                       y_network_r, "Learning with dynamic rate")
    view.error_sum_per_epoch(tr_e, tr_e_r, "static rate", "dynamic rate")
    view.func(x_network, y_network, x_network, y_network_r)
    view.rates(rt_r)
    view.show()
Example #34
 def __init__(self, game, population_size):
     # class initialisation - create first generation
     self.game = game
     self.current_generation = 1
     self.population_size = population_size
     self.population = []
     self.results = []
     self.inputs = INPUTS
     self.hidden_nodes = HIDDEN_NODES
     self.outputs = OUTPUTS
     for i in range(population_size):
         self.population.append(
             NeuralNetwork(self.inputs, self.hidden_nodes, self.outputs))
Example #35
def main():
    (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=True)

    train_data = [(x, t) for x, t in zip(x_train, t_train)]
    test_data = [(x, t) for x, t in zip(x_test, t_test)]

    labels = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)

    nn = NeuralNetwork(784, (100,), labels)

    manager = NetworkEvaluator(nn, train_data, test_data)

    manager.train(num_epoch=10, plot=True)
Example #36
File: test.py Project: Alerion/ai
            0,4,0,1,4,
            0,0,2,0,4,
            4,4,4,4,0
        ],
        'output': [0, 0, 0, 0, 1]
    }
]

#Example of output
#Check study
#['0.96', '0.02', '0.01', '0.02', '0.03'] ['1.00', '0.00', '0.00', '0.00', '0.00']
#['0.02', '0.97', '0.02', '0.02', '0.01'] ['0.00', '1.00', '0.00', '0.00', '0.00']
#['0.02', '0.01', '0.96', '0.02', '0.02'] ['0.00', '0.00', '1.00', '0.00', '0.00']
#['0.02', '0.02', '0.02', '0.96', '0.02'] ['0.00', '0.00', '0.00', '1.00', '0.00']
#['0.03', '0.01', '0.03', '0.02', '0.96'] ['0.00', '0.00', '0.00', '0.00', '1.00']
#Check study
#['0.94', '0.02', '0.01', '0.01', '0.04'] ['1.00', '0.00', '0.00', '0.00', '0.00']
#['0.03', '0.95', '0.02', '0.01', '0.01'] ['0.00', '1.00', '0.00', '0.00', '0.00']
#['0.02', '0.02', '0.95', '0.02', '0.02'] ['0.00', '0.00', '1.00', '0.00', '0.00']
#['0.03', '0.01', '0.05', '0.87', '0.02'] ['0.00', '0.00', '0.00', '1.00', '0.00']
#['0.03', '0.01', '0.05', '0.02', '0.95'] ['0.00', '0.00', '0.00', '0.00', '1.00']


if __name__ == "__main__":
    network = NeuralNetwork((5 * 7, 5 * 7 / 2, 5))
    network.teach(data, max_retries=1000)
    print 'Check study'
    network.test(data)
    print 'Check working on other data'
    network.test(test_data)
Example #37
from csv import reader
from datetime import datetime
    
# Squares data
x = np.array([[i] for i in range(1,76)], dtype=float)
y = np.array([[i**2] for i in range(1,76)], dtype=float)

# Calculate to display data later
ymax = np.amax(y, axis=0)

# Normalize
x = x/np.amax(x, axis=0)
y = y/ymax


N = NeuralNetwork(1, 5, 1)
T = Trainer(N)
T.train(x, y)
testX = np.array([[i] for i in range(76,101)], dtype=float)
testY = np.array([[i**2] for i in range(76,101)], dtype=float)

# Normalize test data
testX /= np.amax(testX, axis=0)
testYMax = np.amax(testY, axis=0)
testY /= testYMax

yHat = N.forward(testX)

for i in range(len(yHat)):
    print testY[i]*testYMax, yHat[i]*testYMax
Example #38
File: main.py Project: Alerion/ai
        'input': (
            0,1,1,1,1,
            0,0,0,0,1,
            0,0,0,1,0,
            0,0,1,0,0,
            0,1,0,0,0,
            1,0,0,0,0,
            1,1,1,1,1
        )
    },
]


def format(l):
    return ['%.2f' % i for i in l]

if __name__ == "__main__":
    FIRST_LAYER = 5 * 7
    SECOND_LAYER = 14
    OUTPUT_LAYER = 3
    print u'LEARNING_RATE', LEARNING_RATE
    print u'MOMENTUM_RATE', MOMENTUM_RATE
    print u'The current mean squared error is displayed during computation'
    raw_input(u'<ENTER>')
    network = NeuralNetwork((FIRST_LAYER, SECOND_LAYER, OUTPUT_LAYER), learning_rate=LEARNING_RATE, momentum=MOMENTUM_RATE)
    network.teach(data, 10000)

    print u'Training check'
    for item in data:
        print format(network.calculate(item['input'])), format(item['output'])
Example #39
File: plots.py Project: Alerion/ai
from network import NeuralNetwork
import matplotlib.pyplot as plt

if __name__ == "__main__":
    network = NeuralNetwork((4, 3, 4))

    x = []
    y = []
    xd = []
    yd = []
    for i in range(-50, 50):
        val = i / 10.

        xd.append(val)
        yd.append(network.activate_derivative(network.activate(val)))
    plt.plot(xd, yd, 'b-')
    plt.show()