Example #1
    def test_forwardFork(self):
        g = gt.Graph()
        g.add_vertex(3)
        g.add_edge(g.vertex(0), g.vertex(1))
        g.add_edge(g.vertex(0), g.vertex(2))
        n = Net(1, 2, g)

        # Init activation functions
        for v in n.g.vertices():
            n.activation[v] = Identity()

        # All weights and biases == 0, so the output is 0 for any input
        assert_allclose(n.forward([1.0]), [0.0, 0.0])

        # Init weights
        for e in n.g.edges():
            n.weightProp[e] = 1.0
        assert_allclose(n.forward([0.0]), [0.0, 0.0])
        assert_allclose(n.forward([1.0]), [1.0, 1.0])
        assert_allclose(n.forward([12.0]), [12.0, 12.0])
        assert_allclose(n.forward([-12.0]), [-12.0, -12.0])

        # Init bias
        for v in n.g.vertices():
            n.biasProp[v] = 1.0
        assert_allclose(n.forward([0.0]), [1.0, 1.0])
        assert_allclose(n.forward([1.0]), [2.0, 2.0])
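
For reference, the expected values above follow from a plain affine pass through the fork: with Identity activations each output vertex computes weight * input + bias, and the asserts imply the bias is applied only at the non-input vertices (otherwise forward([0.0]) would yield [2.0, 2.0] rather than [1.0, 1.0]). A minimal standalone sketch of that arithmetic, independent of Net's internals (fork_forward is a hypothetical helper, not part of the library):

import numpy as np

# Hypothetical hand computation for the fork: both edges leave vertex 0,
# so each output vertex sees identity(w * x + b).
def fork_forward(x, w, b):
    return np.array([w * x + b, w * x + b])

assert np.allclose(fork_forward(12.0, w=1.0, b=0.0), [12.0, 12.0])
assert np.allclose(fork_forward(1.0, w=1.0, b=1.0), [2.0, 2.0])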
Example #2
import numpy as np

def mlp(sizes, weightGenerator=np.random.random, biasGenerator=np.random.random,
        activationFunction=LogSigmoid(-1, 1)):
    r"""Generate a Multilayer Perceptron (MLP) and return it as a Net

    :param sizes: Size of each layer
    :type sizes: list of integers
    :param functor weightGenerator: Functor for generating weights
    :param functor biasGenerator: Functor for generating bias values
    :param activationFunction: Activation function - the same for all neurons
    :type activationFunction: Activation function
    :returns: Net object with the requested architecture
    :rtype: :class:`~gtnn.network.Net`

    """
    n = Net(sizes[0], sizes[-1])
    layerId = n.addVertexProperty("layerId", "short")
    lastLayer = []
    presentLayer = []

    # Create all layers
    for layerIdx, size in enumerate(sizes):
        layerProp = n.addLayer()  # TODO: test layer
        for i in range(size):
            v = n.g.add_vertex()
            layerProp[v] = True
            layerId[v] = layerIdx
            n.activation[v] = activationFunction
            n.biasProp[v] = biasGenerator()

            presentLayer.append(v)
            for src in lastLayer:
                e = n.g.add_edge(src, v)
                n.weightProp[e] = weightGenerator()

        lastLayer = list(presentLayer)
        presentLayer = list()

    # Create a subgraph
    n.prepare()
    return n
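
A minimal usage sketch for mlp (hypothetical layer sizes and constant generators, chosen here for determinism; forward takes one value per first-layer neuron, as in the tests):

# Hypothetical usage: a 2-3-1 perceptron with constant generators so
# repeated runs produce the same weights and biases.
net = mlp([2, 3, 1], weightGenerator=lambda: 0.5, biasGenerator=lambda: 0.0)
out = net.forward([1.0, -1.0])  # one activation per output-layer neuron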
Example #3

    def test_backwardFork(self):
        g = gt.Graph()
        g.add_vertex(3)
        g.add_edge(g.vertex(0), g.vertex(1))
        g.add_edge(g.vertex(0), g.vertex(2))
        n = Net(1, 2, g)

        # Init activation functions
        for v in n.g.vertices():
            n.activation[v] = Identity()

        # All weights == 0, so no error reaches vertex 0
        assert_allclose(n.errorProp.a, [0, 0, 0])

        n.backward([1, 1])
        assert_allclose(n.errorProp.a, [0, 1, 1])

        n.backward([-100, -100])
        assert_allclose(n.errorProp.a, [0, -100, -100])

        # Init weights
        for e in n.g.edges():
            n.weightProp[e] = 1.0

        # Tests
        n.backward([1, 1])
        assert_allclose(n.errorProp.a, [2, 1, 1])

        n.backward([30, 30])
        assert_allclose(n.errorProp.a, [60, 30, 30])

        n.backward([0, 0])
        assert_allclose(n.errorProp.a, [0, 0, 0])

        n.backward([-10, -8])
        assert_allclose(n.errorProp.a, [-18, -8, -10])

        n.backward([3, 57])
        assert_allclose(n.errorProp.a, [60, 57, 3])
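
The expected vectors above are consistent with error accumulation at the fork's source: each output vertex stores its injected error, and vertex 0 receives the weighted sum of its successors' errors (with zero weights, nothing reaches vertex 0; note also that the asserts imply backward's argument order maps to the output vertices in reverse). A standalone sketch of the sum (fork_backward is a hypothetical helper, not library API):

# Hypothetical check of the accumulation at vertex 0:
# error(v0) = w01 * error(v1) + w02 * error(v2).
def fork_backward(e1, e2, w01=1.0, w02=1.0):
    return w01 * e1 + w02 * e2

assert fork_backward(30, 30) == 60
assert fork_backward(-8, -10) == -18
assert fork_backward(57, 3) == 60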
Example #4

    def test_construc(self):
        # Diamond topology (0 -> 1 -> 3, 0 -> 2 -> 3) with an extra
        # input vertex 4 feeding vertex 0.
        g = gt.Graph()
        g.add_vertex(4)
        g.add_edge(g.vertex(0), g.vertex(1))
        g.add_edge(g.vertex(0), g.vertex(2))
        g.add_edge(g.vertex(1), g.vertex(3))
        g.add_edge(g.vertex(2), g.vertex(3))
        g.add_vertex(1)
        g.add_edge(g.vertex(4), g.vertex(0))
        n = Net(1, 1, g)
        for e in g.edges():
            n.weightProp[e] = 1
        for v in g.vertices():
            n.activation[v] = LogSigmoid(-1, 1)
        n.prepare()
        # Smoke test: the calls should run without raising; no values
        # are asserted.
        n.backward([1])
        n.forward([1])
        n.backward([2])
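
As a sanity check on the diamond, here is a hypothetical hand computation using Identity activations in place of LogSigmoid so the numbers stay simple: with unit weights and zero biases, the parallel paths 0 -> 1 -> 3 and 0 -> 2 -> 3 sum at vertex 3, so the network doubles its input.

# Hypothetical: diamond with Identity activations, unit weights, zero
# biases. v4 = x, v0 = v4, v1 = v2 = v0, v3 = v1 + v2 = 2 * x.
def diamond_identity_forward(x):
    v0 = x           # edge 4 -> 0
    v1, v2 = v0, v0  # edges 0 -> 1 and 0 -> 2
    return v1 + v2   # edges 1 -> 3 and 2 -> 3 join at vertex 3

assert diamond_identity_forward(1.0) == 2.0
assert diamond_identity_forward(-3.0) == -6.0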