Example #1
    def test_get_node(self):

        layer = Layer(0, 'input')
        layer.add_nodes(6, 'input')

        # deleting index 3 removes node number 3; node number 4 now sits
        # at list index 3
        del layer.nodes[3]

        # get_node looks the node up by its node number, not its list index
        node = layer.get_node(4)
        self.assertEqual(node, layer.nodes[3])
Example #2
    def test_unconnected_nodes(self):

        layer = Layer(1, 'hidden')
        conn = Connection(Node(), Node())

        layer.add_nodes(2, 'hidden')

        layer.nodes[0].add_input_connection(Connection(Node(), layer.nodes[0]))
        input_side = layer.unconnected_nodes()

        self.assertEqual(1, input_side[0])
        self.assertNotEqual(0, input_side[0])
Example #3
    def test_values(self):

        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')

        layer.nodes[0].set_value(.2)
        layer.nodes[1].set_value(.3)

        values = layer.values()

        self.assertEqual(True, isinstance(values, list))
        self.assertEqual(.2, values[0])
        self.assertEqual(.3, values[1])
Example #4
    def test_unconnected_nodes(self):

        layer = Layer(1, 'hidden')
        conn = Connection(Node(), Node())

        layer.add_nodes(2, 'hidden')

        layer.nodes[0].add_input_connection(
            Connection(Node(), layer.nodes[0]))
        input_side = layer.unconnected_nodes()

        self.assertEqual(1, input_side[0])
        self.assertNotEqual(0, input_side[0])
Example #5
    def test_set_activation_type(self):

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')

        self.assertEqual('sigmoid', layer.nodes[0].get_activation_type())

        layer.set_activation_type('linear')

        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        self.assertRaises(
            ValueError,
            layer.set_activation_type, 'fail')
Example #6
    def _parse_inputfile_layer(self, config, layer_no):
        """
        This function loads a layer and its nodes from the input file. Note
        that it does not load the connections for those nodes here; that is
        deferred until all the nodes are fully instantiated, because the
        connection objects hold references to node objects.

        """

        layer_id = 'layer %s' % (layer_no)
        layer_nodes = config.get(layer_id, 'nodes').split(" ")
        layer_type = config.get(layer_id, 'layer_type')
        layer = Layer(layer_no, layer_type)

        for node_id in layer_nodes:
            node = self._parse_inputfile_node(config, node_id)
            layer.add_node(node)

        self.layers.append(layer)
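A minimal sketch of the input-file section this parser appears to expect, assuming config behaves like a standard ConfigParser object; only the 'layer %s' section name and the 'nodes' and 'layer_type' options come from the code above, and the node ids are a hypothetical illustration:

    # Hypothetical input-file fragment; option names are taken from
    # _parse_inputfile_layer above, the node ids are made up.
    import configparser

    sample = (
        "[layer 0]\n"
        "layer_type = input\n"
        "nodes = node-0 node-1 node-2\n"
    )

    config = configparser.ConfigParser()
    config.read_string(sample)

    layer_id = 'layer %s' % 0
    print(config.get(layer_id, 'layer_type'))        # 'input'
    print(config.get(layer_id, 'nodes').split(" "))  # ['node-0', 'node-1', 'node-2']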
Example #8
    def test_add_nodes(self):

        layer = Layer(0, 'input')

        layer.add_nodes(1, 'input')
        layer.add_nodes(1, 'copy')

        self.assertEqual(2, len(layer.nodes))
        self.assertEqual('copy', layer.nodes[1].node_type)
        self.assertNotEqual('copy', layer.nodes[0].node_type)
Example #9
    def test_activations(self):

        layer = Layer(1, 'hidden')
        layer.add_nodes(2, 'hidden')
        layer.set_activation_type('linear')

        layer.nodes[0].set_value(.2)
        layer.nodes[1].set_value(.3)

        activations = layer.activations()

        self.assertEqual(True, isinstance(activations, list))
        self.assertEqual(.2, activations[0])
        self.assertEqual(.3, activations[1])
Example #10
    def test_set_activation_type(self):

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')

        self.assertEqual('sigmoid', layer.nodes[0].get_activation_type())

        layer.set_activation_type('linear')

        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        self.assertRaises(ValueError, layer.set_activation_type, 'fail')
Example #11
    def test__init__(self):

        self.assertEqual('input', Layer(0, 'input').layer_type)
        self.assertEqual('hidden', Layer(1, 'hidden').layer_type)
        self.assertEqual('output', Layer(2, 'output').layer_type)

        layer = Layer(0, 'input')
        self.assertEqual('linear', layer.default_activation_type)
        layer = Layer(0, 'hidden')
        self.assertEqual('sigmoid', layer.default_activation_type)
        layer = Layer(0, 'output')
        self.assertEqual('linear', layer.default_activation_type)

        self.assertRaises(ValueError, Layer, 0, 'test')
        self.assertRaises(ValueError, Layer, 1, 'input')

        layer = Layer(0, 'input')
Example #12
    def setUp(self):

        self.net = NeuralNet()

        layer = Layer(0, 'input')
        layer.add_nodes(1, 'input')
        self.net.layers.append(layer)

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')
        self.net.layers.append(layer)

        layer = Layer(2, 'output')
        layer.add_nodes(1, 'output')
        self.net.layers.append(layer)

        #   Specify connections
        self.net.layers[1].nodes[0].add_input_connection(
            Connection(
                self.net.layers[0].nodes[0],
                self.net.layers[1].nodes[0],
                1.00))

        self.net.layers[2].nodes[0].add_input_connection(
            Connection(
                self.net.layers[1].nodes[0],
                self.net.layers[2].nodes[0],
                .75))

        self.net._epochs = 1
        self.net.copy_levels = 0
        self.net._allinputs = [[.1], [.2], [.3], [.4], [.5]]
        self.net._alltargets = [[.2], [.4], [.6], [.8], [1.0]]

        self.net.input_layer = self.net.layers[0]
        self.net.output_layer = self.net.layers[-1]
Example #13
    def setUp(self):

        self.net = NeuralNet()

        layer = Layer(0, 'input')
        layer.add_nodes(1, 'input')
        self.net.layers.append(layer)

        layer = Layer(1, 'hidden')
        layer.add_nodes(1, 'hidden')
        self.net.layers.append(layer)

        layer = Layer(2, 'output')
        layer.add_nodes(1, 'output')
        self.net.layers.append(layer)

        #   Specify connections
        self.net.layers[1].nodes[0].add_input_connection(
            Connection(self.net.layers[0].nodes[0],
                       self.net.layers[1].nodes[0], 1.00))

        self.net.layers[2].nodes[0].add_input_connection(
            Connection(self.net.layers[1].nodes[0],
                       self.net.layers[2].nodes[0], .75))

        self.net._epochs = 1
        self.net.copy_levels = 0
        self.net._allinputs = [[.1], [.2], [.3], [.4], [.5]]
        self.net._alltargets = [[.2], [.4], [.6], [.8], [1.0]]

        self.net.input_layer = self.net.layers[0]
        self.net.output_layer = self.net.layers[-1]
Example #14
    def init_layers(self, input_nodes, total_hidden_nodes_list,
            output_nodes, *recurrent_mods):
        """
        This function initializes the layers.
        The parameters are:

        * input_nodes: the number of nodes in the input layer
        * total_hidden_nodes_list: a list of the number of nodes in each
            hidden layer.  For example, [5, 3]
        * output_nodes: the number of nodes in the output layer

        The initial network is created, and then a series of modifications can
        be made to enable recurrent features.  recurrent_mods are
        configurations for modifications to the neural network that is created
        within init_layers.

        For example, if
            init_layers(input_nodes, total_hidden_nodes_list, output_nodes,
                            ElmanSimpleRecurrent())
        was used, then the initial network structure of input, hidden, and
        output nodes would be created.  After that, the additional copy or
        context nodes that would automatically transfer values from the lowest
        hidden layer would be added to the input layer.

        More than one recurrent scheme can be applied, each one adding to the
        existing network.

        """

        self.layers = []

        #   Input layer
        layer = Layer(len(self.layers), NODE_INPUT)
        layer.add_nodes(input_nodes, NODE_INPUT)

        layer.add_node(BiasNode())

        self.layers.append(layer)
        self.input_layer = layer

        for hid in total_hidden_nodes_list:
            layer = Layer(len(self.layers), NODE_HIDDEN)
            layer.add_nodes(hid, NODE_HIDDEN)

            layer.add_node(BiasNode())

            self.layers.append(layer)

        layer = Layer(len(self.layers), NODE_OUTPUT)
        layer.add_nodes(output_nodes, NODE_OUTPUT)

        self.layers.append(layer)
        self.output_layer = layer

        self._init_connections()

        for recurrent_mod in recurrent_mods:
            recurrent_mod.apply_config(self)
Example #15
    def init_layers(self, input_nodes, total_hidden_nodes_list, output_nodes,
                    *recurrent_mods):
        """
        This function initializes the layers.
        The parameters are:

        * input_nodes: the number of nodes in the input layer
        * total_hidden_nodes_list: a list of the number of nodes in each
            hidden layer.  For example, [5, 3]
        * output_nodes: the number of nodes in the output layer

        The initial network is created, and then a series of modifications can
        be made to enable recurrent features.  recurrent_mods are
        configurations for modifications to the neural network that is created
        within init_layers.

        For example, if
            init_layers(input_nodes, total_hidden_nodes_list, output_nodes,
                            ElmanSimpleRecurrent())
        was used, then the initial network structure of input, hidden, and
        output nodes would be created.  After that, the additional copy or
        context nodes that would automatically transfer values from the lowest
        hidden layer would be added to the input layer.

        More than one recurrent scheme can be applied, each one adding to the
        existing network.

        """

        self.layers = []

        #   Input layer
        layer = Layer(len(self.layers), NODE_INPUT)
        layer.add_nodes(input_nodes, NODE_INPUT)

        layer.add_node(BiasNode())

        self.layers.append(layer)
        self.input_layer = layer

        for hid in total_hidden_nodes_list:
            layer = Layer(len(self.layers), NODE_HIDDEN)
            layer.add_nodes(hid, NODE_HIDDEN)

            layer.add_node(BiasNode())

            self.layers.append(layer)

        layer = Layer(len(self.layers), NODE_OUTPUT)
        layer.add_nodes(output_nodes, NODE_OUTPUT)

        self.layers.append(layer)
        self.output_layer = layer

        self._init_connections()

        for recurrent_mod in recurrent_mods:
            recurrent_mod.apply_config(self)
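For orientation, a minimal usage sketch of init_layers, based only on the signature and the docstring's own example call; the import paths assume a pyneurgen-style package layout and the layer sizes are illustrative:

    # Hypothetical usage; import paths and sizes are assumptions, not taken
    # from the code above.
    from pyneurgen.neuralnet import NeuralNet
    from pyneurgen.recurrent import ElmanSimpleRecurrent

    net = NeuralNet()

    # 2 input nodes, hidden layers of 5 and 3 nodes, 1 output node, plus
    # Elman-style context nodes copied up from the lowest hidden layer.
    net.init_layers(2, [5, 3], 1, ElmanSimpleRecurrent())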
Example #16
    def test_add_node(self):

        layer = Layer(0, 'input')
        layer.default_activation_type = 'linear'
        node = Node()
        layer.add_node(node)

        self.assertEqual(1, layer.total_nodes())
        self.assertEqual(0, layer.nodes[0].node_no)
        self.assertEqual('linear', layer.nodes[0].get_activation_type())

        layer.default_activation_type = 'sigmoid'
        node = Node()
        layer.add_node(node)

        self.assertEqual(2, layer.total_nodes())
        self.assertEqual(1, layer.nodes[1].node_no)
        self.assertEqual('sigmoid', layer.nodes[1].get_activation_type())

        node = BiasNode()
        layer.add_node(node)

        self.assertEqual(3, layer.total_nodes())
        self.assertEqual(2, layer.nodes[2].node_no)

        node = Node()
        node.set_activation_type('tanh')
        layer.add_node(node)

        self.assertEqual('tanh', layer.nodes[3].get_activation_type())
Example #17
    def test_feed_forward(self):

        layer0 = Layer(0, 'input')
        layer0.add_nodes(2, 'input')
        layer0.set_activation_type('sigmoid')

        layer1 = Layer(1, 'hidden')
        layer1.add_nodes(1, 'hidden')

        inode1 = layer0.nodes[0]
        inode2 = layer0.nodes[1]

        inode1.set_value(.25)
        inode2.set_value(.5)

        node = layer1.nodes[0]
        node.add_input_connection(
            Connection(inode1, node, .25))
        node.add_input_connection(
            Connection(inode2, node, .5))

        layer1.feed_forward()

        self.assertAlmostEqual(
            sigmoid(.25) * .25 + sigmoid(.5) * .5, node.get_value())
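A quick cross-check of the arithmetic in the assertion above; this only restates the formula the test already asserts, namely that the hidden node's value is the weighted sum of the input nodes' sigmoid activations:

    import math

    def sigmoid(x):
        # standard logistic function, assumed to match the library's sigmoid()
        return 1.0 / (1.0 + math.exp(-x))

    expected = sigmoid(.25) * .25 + sigmoid(.5) * .5
    print(round(expected, 6))   # 0.451774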
Example #18
    def test_total_nodes(self):

        layer = Layer(0, 'input')
        layer.add_nodes(2, 'input')
        layer.add_nodes(2, 'copy')
        layer.add_node(BiasNode())

        self.assertEqual(5, layer.total_nodes())
        self.assertEqual(2, layer.total_nodes('input'))
        self.assertEqual(2, layer.total_nodes('copy'))
        self.assertEqual(0, layer.total_nodes('hidden'))