예제 #1
0
    def test_grad(self):
        """Gradient of f = (a+b)*(b+c): df/da = b+c, df/db = (a+b)+(b+c), df/dc = a+b."""
        const_a, const_b, const_c = (graph.Constant(v) for v in (1, 2, 4))

        left = graph.Sum(const_a, const_b)          # a + b = 3
        right = graph.Sum(const_b, const_c)         # b + c = 6
        product = graph.MultiplyByScalar(left, right)

        gradients = graph.Grad(product, [const_a, const_b, const_c])

        # [6, 9, 3] == [b+c, (a+b)+(b+c), a+b]
        self.assertSequenceEqual(graph.run(gradients), [6, 9, 3])
예제 #2
0
파일: conv.py 프로젝트: lychanl/WhatTheFood
    def __init__(self,
                 x,
                 filters,
                 filter_size,
                 step,
                 bias=True,
                 activation=None,
                 padding='VALID',
                 *activation_args,
                 **activation_kwargs):
        """Build a 2D convolution layer as a graph of nodes.

        Args:
            x: input graph node, or a Layer whose output is used.
            filters: number of output channels.
            filter_size: side length of the square kernel.
            step: stride passed to graph.Convolution.
            bias: when True, add a per-filter bias after the convolution.
            activation: optional callable applied to the result.
            padding: padding mode forwarded to graph.Convolution.
            *activation_args, **activation_kwargs: forwarded to activation.
        """
        # Accept either a raw graph node or another Layer's output.
        if isinstance(x, Layer):
            x = x.output

        # Square kernel shaped (height, width, in_channels, out_channels).
        # NOTE(review): assumes x.shape is (H, W, C) — confirm against graph.Convolution.
        self.kernel = graph.Variable(
            (filter_size, filter_size, x.shape[2], filters))

        y = graph.Convolution(x, self.kernel, step, padding)

        self.bias = graph.Variable((filters, )) if bias else None

        if bias:
            y = graph.Sum(y, self.bias)

        if activation:
            y = activation(y, *activation_args, **activation_kwargs)

        # BUG FIX: the variable list used to always contain self.bias, so with
        # bias=False a None entry was registered as a trainable variable.
        # Include the bias only when it exists, mirroring Dense.__init__.
        super(Convolution, self).__init__(
            (x, ), y,
            (self.kernel, self.bias) if bias else (self.kernel, ))
예제 #3
0
    def test_batched(self):
        """A batched placeholder applies the elementwise sum to every batch row."""
        batched_in = graph.Placeholder(shape=(2,), batched=True)
        offset = graph.Constant([1, 2])
        total = graph.Sum(batched_in, offset)

        one_row = graph.run(total, {batched_in: np.array([[1, 2]])})
        np.testing.assert_array_equal(one_row, np.array([[2, 4]]))

        two_rows = graph.run(total, {batched_in: np.array([[1, 2], [3, 4]])})
        np.testing.assert_array_equal(two_rows, np.array([[2, 4], [4, 6]]))
예제 #4
0
    def test_placeholder(self):
        """The same graph re-evaluates correctly with different placeholder feeds."""
        feed = graph.Placeholder(shape=(), batched=False)
        two = graph.Constant(2)
        total = graph.Sum(feed, two)

        for value, expected in ((3, 5), (4, 6)):
            self.assertEqual(graph.run(total, {feed: value}), expected)
예제 #5
0
    def __init__(self,
                 x,
                 n,
                 bias=True,
                 activation=None,
                 *activation_args,
                 **activation_kwargs):
        """Fully-connected layer: y = activation(x @ kernel [+ bias])."""
        # Unwrap a Layer into its underlying graph output node.
        if isinstance(x, Layer):
            x = x.output

        # Dense expects a flat (rank-1) input vector.
        assert len(x.shape) == 1

        self.kernel = graph.Variable((x.shape[0], n))
        self.bias = graph.Variable((n, )) if bias else None

        y = graph.Matmul(x, self.kernel)
        if bias:
            y = graph.Sum(y, self.bias)
        if activation:
            y = activation(y, *activation_args, **activation_kwargs)

        # Register the bias as trainable only when it was created.
        trainable = (self.kernel, self.bias) if self.bias else (self.kernel, )
        super(Dense, self).__init__((x, ), y, trainable)
예제 #6
0
    def test_grad_simple(self):
        """Gradient of e = (a+b)*c: de/da = c, de/db = c, de/dc = a+b."""
        one = graph.Constant(1)
        two = graph.Constant(2)
        four = graph.Constant(4)

        total = graph.Sum(one, two)                    # a + b = 3
        scaled = graph.MultiplyByScalar(total, four)   # (a + b) * c

        grads = graph.run(graph.Grad(scaled, [one, two, four]))
        self.assertSequenceEqual(grads, [4, 4, 3])
예제 #7
0
    def test_grad_placeholder_variable(self):
        """Gradients flow through placeholders and variables, not just constants."""
        const = graph.Constant(1)
        feed = graph.Placeholder(batched=False, shape=())
        var = graph.Variable(shape=())
        var.value = 4

        scaled = graph.MultiplyByScalar(graph.Sum(const, feed), var)
        grads = graph.Grad(scaled, [const, feed, var])

        # Same expression as test_grad_simple, so the same gradient values.
        self.assertSequenceEqual(graph.run(grads, {feed: 2}), [4, 4, 3])
예제 #8
0
    def test_non_trivial(self):
        """Summing two constant nodes evaluates to their arithmetic sum."""
        lhs, rhs = graph.Constant(2), graph.Constant(3)
        self.assertEqual(graph.run(graph.Sum(lhs, rhs)), 5)