Ejemplo n.º 1
0
    def test_feed_forward(self):
        """Feeding forward should sum each input's weighted activation."""
        input_layer = Layer(0, 'input')
        input_layer.add_nodes(2, 'input')
        input_layer.set_activation_type('sigmoid')

        hidden_layer = Layer(1, 'hidden')
        hidden_layer.add_nodes(1, 'hidden')

        src1, src2 = input_layer.nodes[0], input_layer.nodes[1]
        src1.set_value(.25)
        src2.set_value(.5)

        target = hidden_layer.nodes[0]
        target.add_input_connection(Connection(src1, target, .25))
        target.add_input_connection(Connection(src2, target, .5))

        hidden_layer.feed_forward()

        #   Each input contributes activation(value) * weight.
        expected = sigmoid(.25) * .25 + sigmoid(.5) * .5
        self.assertAlmostEqual(expected, target.get_value())
Ejemplo n.º 2
0
    def test_get_node(self):
        """get_node should locate a node by id even after a deletion shifts positions."""
        layer = Layer(0, 'input')
        layer.add_nodes(6, 'input')

        #   Drop the node at list position 3, leaving a gap in the sequence.
        del layer.nodes[3]

        found = layer.get_node(4)
        self.assertEqual(found, layer.nodes[3])
Ejemplo n.º 3
0
    def test_add_nodes(self):
        """add_nodes should append nodes tagged with the requested node type."""
        layer = Layer(0, 'input')

        for node_type in ('input', 'copy'):
            layer.add_nodes(1, node_type)

        self.assertEqual(2, len(layer.nodes))
        #   Second node carries the 'copy' tag; first one does not.
        self.assertEqual('copy', layer.nodes[1].node_type)
        self.assertNotEqual('copy', layer.nodes[0].node_type)
Ejemplo n.º 4
0
    def test_total_nodes(self):
        """total_nodes should count every node, optionally filtered by type."""
        layer = Layer(0, 'input')
        layer.add_nodes(2, 'input')
        layer.add_nodes(2, 'copy')
        layer.add_node(BiasNode())

        #   2 input + 2 copy + 1 bias = 5 total.
        self.assertEqual(5, layer.total_nodes())
        self.assertEqual(2, layer.total_nodes('input'))
        self.assertEqual(2, layer.total_nodes('copy'))
        #   No hidden nodes were added.
        self.assertEqual(0, layer.total_nodes('hidden'))
Ejemplo n.º 5
0
    def test_set_activation_type(self):
        """set_activation_type should propagate to each node and reject unknown types."""
        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')

        #   Hidden nodes default to sigmoid activation.
        self.assertEqual('sigmoid', layer.nodes[0].get_activation_type())

        layer.set_activation_type('linear')

        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        #   failUnlessRaises is a deprecated alias (removed in Python 3.12);
        #   assertRaises is the supported spelling.
        self.assertRaises(ValueError, layer.set_activation_type, 'fail')
Ejemplo n.º 6
0
    def test_unconnected_nodes(self):
        """unconnected_nodes should report only nodes lacking an input connection."""
        #   The original built an unused throwaway Connection here; removed.
        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')

        #   Connect only the first node; the second stays unconnected.
        layer.nodes[0].add_input_connection(Connection(Node(), layer.nodes[0]))
        input_side = layer.unconnected_nodes()

        self.assertEqual(1, input_side[0])
        self.assertNotEqual(0, input_side[0])
Ejemplo n.º 7
0
    def test_values(self):
        """values should return the nodes' raw values as a list, in node order."""
        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')

        for index, value in enumerate((.2, .3)):
            layer.nodes[index].set_value(value)

        result = layer.values()

        self.assertEqual(True, isinstance(result, list))
        self.assertEqual(.2, result[0])
        self.assertEqual(.3, result[1])
    def test_unconnected_nodes(self):
        """unconnected_nodes should report only nodes lacking an input connection."""
        #   The original built an unused throwaway Connection here; removed.
        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')

        #   Connect only the first node; the second stays unconnected.
        layer.nodes[0].add_input_connection(
            Connection(Node(), layer.nodes[0]))
        input_side = layer.unconnected_nodes()

        self.assertEqual(1, input_side[0])
        self.assertNotEqual(0, input_side[0])
    def test_set_activation_type(self):
        """set_activation_type should propagate to each node and reject unknown types."""
        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')

        #   Hidden nodes default to sigmoid activation.
        self.assertEqual('sigmoid', layer.nodes[0].get_activation_type())

        layer.set_activation_type('linear')

        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        #   failUnlessRaises is a deprecated alias (removed in Python 3.12);
        #   assertRaises is the supported spelling.
        self.assertRaises(
            ValueError,
            layer.set_activation_type, 'fail')
Ejemplo n.º 10
0
    def test_activations(self):
        """activations should return each node's activated value as a list."""
        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')
        #   With linear activation the activation equals the raw value.
        layer.set_activation_type('linear')

        for index, value in enumerate((.2, .3)):
            layer.nodes[index].set_value(value)

        result = layer.activations()

        self.assertEqual(True, isinstance(result, list))
        self.assertEqual(.2, result[0])
        self.assertEqual(.3, result[1])
Ejemplo n.º 11
0
    def setUp(self):
        """Assemble a 1-1-1 network with fixed weights and training data."""
        self.net = NeuralNet()

        #   One layer of each kind, one node apiece.
        for position, kind in enumerate(('input', 'hidden', 'output')):
            layer = Layer(position, kind)
            layer.add_nodes(1, kind)
            self.net.layers.append(layer)

        layers = self.net.layers

        #   Specify connections
        hidden_node = layers[1].nodes[0]
        hidden_node.add_input_connection(
            Connection(layers[0].nodes[0], hidden_node, 1.00))

        output_node = layers[2].nodes[0]
        output_node.add_input_connection(
            Connection(layers[1].nodes[0], output_node, .75))

        self.net._epochs = 1
        self.net.copy_levels = 0
        #   Targets are exactly double the inputs.
        self.net._allinputs = [[.1], [.2], [.3], [.4], [.5]]
        self.net._alltargets = [[.2], [.4], [.6], [.8], [1.0]]

        self.net.input_layer = layers[0]
        self.net.output_layer = layers[-1]
Ejemplo n.º 12
0
    def init_layers(self, input_nodes, total_hidden_nodes_list, output_nodes,
                    *recurrent_mods):
        """
        Build the network's layer structure.

        * input_nodes: number of nodes in the input layer
        * total_hidden_nodes_list: a list of node counts, one per hidden
            layer, e.g. [5, 3]
        * output_nodes: number of nodes in the output layer

        After the base input/hidden/output structure is created and fully
        connected, each configuration in recurrent_mods is applied in turn
        to enable recurrent features.  For example, passing
        ElmanSimpleRecurrent() adds copy/context nodes to the input layer
        that automatically transfer values from the lowest hidden layer.
        More than one recurrent scheme can be applied, each one adding to
        the existing network.
        """

        self.layers = []

        #   Input layer: regular input nodes plus a bias node.
        input_layer = Layer(0, NODE_INPUT)
        input_layer.add_nodes(input_nodes, NODE_INPUT)
        input_layer.add_node(BiasNode())
        self.layers.append(input_layer)
        self.input_layer = input_layer

        #   Hidden layers: each carries its own bias node.
        for node_count in total_hidden_nodes_list:
            hidden_layer = Layer(len(self.layers), NODE_HIDDEN)
            hidden_layer.add_nodes(node_count, NODE_HIDDEN)
            hidden_layer.add_node(BiasNode())
            self.layers.append(hidden_layer)

        #   Output layer: no bias node.
        output_layer = Layer(len(self.layers), NODE_OUTPUT)
        output_layer.add_nodes(output_nodes, NODE_OUTPUT)
        self.layers.append(output_layer)
        self.output_layer = output_layer

        self._init_connections()

        #   Layer the recurrent modifications on top of the base network.
        for mod in recurrent_mods:
            mod.apply_config(self)
Ejemplo n.º 13
0
    def init_layers(self, input_nodes, total_hidden_nodes_list,
            output_nodes, *recurrent_mods):
        """
        Create the input, hidden, and output layers of the network.

        * input_nodes: node count for the input layer
        * total_hidden_nodes_list: node counts for each hidden layer,
            e.g. [5, 3]
        * output_nodes: node count for the output layer
        * recurrent_mods: zero or more recurrent configurations applied,
            in order, after the base structure is built and connected.
            For example, ElmanSimpleRecurrent() adds copy/context nodes
            to the input layer that transfer values from the lowest
            hidden layer.  Multiple schemes may be stacked, each one
            adding to the existing network.
        """

        self.layers = []

        #   Per-layer plan: type and node count, input first, output last.
        plan = [(NODE_INPUT, input_nodes)]
        plan += [(NODE_HIDDEN, count) for count in total_hidden_nodes_list]
        plan += [(NODE_OUTPUT, output_nodes)]

        for layer_type, count in plan:
            layer = Layer(len(self.layers), layer_type)
            layer.add_nodes(count, layer_type)
            #   The output layer does not carry a bias node.
            if layer_type != NODE_OUTPUT:
                layer.add_node(BiasNode())
            self.layers.append(layer)

        self.input_layer = self.layers[0]
        self.output_layer = self.layers[-1]

        self._init_connections()

        for mod in recurrent_mods:
            mod.apply_config(self)
    def setUp(self):
        """Wire up a three-layer (input/hidden/output) net for the tests."""
        net = NeuralNet()
        self.net = net

        #   One single-node layer of each kind.
        for number, layer_type in ((0, 'input'), (1, 'hidden'), (2, 'output')):
            new_layer = Layer(number, layer_type)
            new_layer.add_nodes(1, layer_type)
            net.layers.append(new_layer)

        first, second, third = net.layers[0], net.layers[1], net.layers[2]

        #   Specify connections
        second.nodes[0].add_input_connection(
            Connection(
                first.nodes[0],
                second.nodes[0],
                1.00))

        third.nodes[0].add_input_connection(
            Connection(
                second.nodes[0],
                third.nodes[0],
                .75))

        net._epochs = 1
        net.copy_levels = 0
        #   Targets are exactly double the inputs.
        net._allinputs = [[.1], [.2], [.3], [.4], [.5]]
        net._alltargets = [[.2], [.4], [.6], [.8], [1.0]]

        net.input_layer = net.layers[0]
        net.output_layer = net.layers[-1]