Example #1
    def test_size_changes(self):
        x, y, z = T.matrices('xyz')
        e = T.dot(x, y)
        op = OpFromGraph([x, y], [e], mode='FAST_RUN')
        # the same OpFromGraph instance is applied at two different input sizes:
        # the inner call multiplies (3, 4) x (4, 5), the outer (2, 3) x (3, 5)
        f = op(x, op(y, z))
        fn = function([x, y, z], f)
        xv = numpy.ones((2, 3), dtype=config.floatX)
        yv = numpy.ones((3, 4), dtype=config.floatX) * 3
        zv = numpy.ones((4, 5), dtype=config.floatX) * 5
        res = fn(xv, yv, zv)
        assert res.shape == (2, 5)
        assert numpy.all(180.0 == res)
        res = fn(xv, yv, zv)
        assert res.shape == (2, 5)
        assert numpy.all(180.0 == res)
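For orientation, here is a minimal self-contained version of the pattern used above (a sketch assuming a standard Theano install; the names `matmul_op`, `a`, `b` are illustrative). `OpFromGraph` packages a sub-graph as a reusable Op that can then be applied to fresh inputs:

import numpy
import theano.tensor as T
from theano import config, function
from theano.compile.builders import OpFromGraph

x, y = T.matrices('xy')
matmul_op = OpFromGraph([x, y], [T.dot(x, y)])  # wrap the sub-graph as an Op

a, b = T.matrices('ab')
fn = function([a, b], matmul_op(a, b))  # apply the new Op to fresh inputs
av = numpy.ones((2, 3), dtype=config.floatX)
bv = numpy.ones((3, 4), dtype=config.floatX)
assert fn(av, bv).shape == (2, 4)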
Example #2
    def test_straightforward(self):
        x, y, z = T.matrices('xyz')
        e = x + y * z
        op = OpFromGraph([x, y, z], [e], mode='FAST_RUN')
        # op(x, y, z) = 1 + 3*5 = array of 16; op(y, z, x) = 3 + 5*1 = array of 8
        f = op(x, y, z) - op(y, z, x)
        fn = function([x, y, z], f)
        xv = numpy.ones((2, 2), dtype=config.floatX)
        yv = numpy.ones((2, 2), dtype=config.floatX) * 3
        zv = numpy.ones((2, 2), dtype=config.floatX) * 5
        fn(xv, yv, zv)
        assert numpy.all(8.0 == fn(xv, yv, zv))
        assert numpy.all(8.0 == fn(xv, yv, zv))
Example #3
    def test_infer_shape(self):
        # The inlined case does not need a separate infer_shape test,
        # since the OpFromGraph node is removed from the graph during
        # the optimization phase.
        x = T.matrix('x')
        y = T.matrix('y')
        o1 = x + y
        o2 = x * y
        op_graph = OpFromGraph([x, y], [o1, o2])

        q = T.matrix('q')
        p = T.matrix('p')
        self._compile_and_check([q, p], op_graph(q, p), [
            np.ones([3, 4], dtype=config.floatX),
            np.ones([3, 4], dtype=config.floatX)
        ], OpFromGraph)
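`_compile_and_check` is a helper inherited from Theano's `InferShapeTester` test mixin (in `theano.tests.unittest_tools`): it compiles the given outputs, evaluates them on the supplied arrays, and checks that the shapes reported by the Op's `infer_shape` implementation match the actual runtime output shapes; the final `OpFromGraph` argument tells it which Op class to look for in the compiled graph.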
Example #4
    def test_connection_pattern(self):
        # Basic case
        x, y, z = T.matrices('xyz')
        out1 = x * y
        out2 = y * z

        op1 = OpFromGraph([x, y, z], [out1, out2])
        results = op1.connection_pattern(None)
        expect_result = [[True, False],
                         [True, True],
                         [False, True]]
        assert results == expect_result

        # Graph with ops that don't have a 'full' connection pattern
        # and with ops that have multiple outputs
        m, n, p, q = T.matrices('mnpq')
        o1, o2 = op1(m, n, p)
        out1, out2 = op1(o1, q, o2)
        op2 = OpFromGraph([m, n, p, q], [out1, out2])

        results = op2.connection_pattern(None)
        expect_result = [[True, False],
                         [True, True],
                         [False, True],
                         [True, True]]
        assert results == expect_result

        # Inner graph where some computation doesn't rely on explicit inputs
        srng = RandomStreams(seed=234)
        rv_u = srng.uniform((2, 2))
        x, y = T.matrices('xy')
        out1 = x + rv_u
        out2 = y + 3
        out3 = 3 + rv_u
        op3 = OpFromGraph([x, y], [out1, out2, out3])

        results = op3.connection_pattern(None)
        expect_result = [[True, False, False],
                         [False, True, False],
                         [True, False, True]]
        assert results == expect_result
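How to read the expected patterns: `connection_pattern` returns one row per input and one column per output, and `results[i][j]` is True exactly when output `j` depends on input `i`. In the basic case `y` feeds both `x * y` and `y * z`, so its row is `[True, True]`, while `x` and `z` each reach only one output. In the last case there are three rows for only two explicit inputs: the random stream's state is captured as a hidden third input, and its row `[True, False, True]` marks the two outputs that use `rv_u`. Theano's gradient machinery consults this pattern to skip disconnected input/output pairs.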
Example #5
    def test_shared(self):
        x, y, z = T.matrices('xyz')
        s = shared(numpy.random.rand(2, 2).astype(config.floatX))
        e = x + y * z + s
        op = OpFromGraph([x, y, z], [e])
        # (1 + 3*5 + s) - (3 + 5*1 + s) = array of 8; the shared term cancels
        f = op(x, y, z) - op(y, z, x)

        fn = function([x, y, z], f)
        xv = numpy.ones((2, 2), dtype=config.floatX)
        yv = numpy.ones((2, 2), dtype=config.floatX) * 3
        zv = numpy.ones((2, 2), dtype=config.floatX) * 5
        assert numpy.allclose(8.0, fn(xv, yv, zv))
        assert numpy.allclose(8.0, fn(xv, yv, zv))
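The inner graph here closes over the shared variable `s` without listing it as an explicit input; `OpFromGraph` picks it up as a hidden input automatically. Because both applications of `op` see the same `s`, the `+ s` terms cancel in the subtraction and the result is the same array of 8s as in the version without a shared variable.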
Example #6
    def local_transform(fgraph, node):
        if node in nodes_seen:
            return False

        # importing Scan into module scope would be circular
        from theano.compile.builders import OpFromGraph
        from theano.scan.op import Scan

        if isinstance(node.op, (Scan, OpFromGraph)):
            # recurse on the inner graph
            (
                new_inner_inputs,
                new_outer_inputs,
                new_inner_outputs,
            ) = _map_variables_inner(
                wrapped_replacer,
                inner_inputs=node.op.inputs,
                outer_inputs=node.inputs,
                inner_outputs=node.op.outputs,
                containing_op=node.op,
            )
            # reinstantiate the op
            if isinstance(node.op, Scan):
                new_op = Scan(
                    new_inner_inputs,
                    new_inner_outputs,
                    node.op.info,
                    # FIXME: infer this someday?
                    typeConstructor=None,
                )
            elif isinstance(node.op, OpFromGraph):
                new_op = OpFromGraph(
                    new_inner_inputs, new_inner_outputs, **node.op.kwargs
                )
            # make a new node to replace the old one
            new_node = new_op.make_node(*new_outer_inputs)
            nodes_seen.add(new_node)
            return new_node.outputs
        else:
            nodes_seen.add(node)
            replacements = [wrapped_replacer(o) for o in node.outputs]

            # Add inputs to replacement graphs as inputs to this `fgraph`
            for i in gof.graph.inputs(replacements):
                fgraph.add_input(i)

            return replacements
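This closure (apparently from Theano's `map_variables` utility, judging by the surrounding names) shows the standard pattern for a graph transformation that must see through ops with inner graphs: `Scan` and `OpFromGraph` both embed a complete sub-graph, so the replacer is applied recursively through `_map_variables_inner` and a fresh op is rebuilt from the rewritten inner inputs and outputs. `OpFromGraph` keeps its constructor keyword arguments in `node.op.kwargs`, which is what makes the re-instantiation on the `elif` branch possible; ordinary nodes are handled by replacing their outputs directly and registering any inputs of the replacement graphs on the enclosing `fgraph`.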
Example #7
    def test_shared_grad(self):
        x, y, z = T.matrices('xyz')
        s = shared(numpy.random.rand(2, 2).astype(config.floatX))
        e = x + y * z + s
        op = OpFromGraph([x, y, z], [e])
        f = op(x, y, z)
        f = f - T.grad(T.sum(f), y)
        fn = function([x, y, z], f)
        xv = numpy.ones((2, 2), dtype=config.floatX)
        yv = numpy.ones((2, 2), dtype=config.floatX) * 3
        zv = numpy.ones((2, 2), dtype=config.floatX) * 5
        assert numpy.allclose(11.0 + s.get_value(), fn(xv, yv, zv))

        # gradient with respect to the shared variable
        f = op(x, y, z)
        f = f - T.grad(T.sum(f), s)
        fn = function([x, y, z], f)
        assert numpy.allclose(15.0 + s.get_value(), fn(xv, yv, zv))
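Worked values: `op(x, y, z)` computes `x + y*z + s = 1 + 3*5 + s = 16 + s`, and `T.grad(T.sum(f), y)` is `z` (an array of 5s), so the first function returns `11 + s`. In the second case the gradient of `T.sum(f)` with respect to the shared variable `s` is an array of ones, giving `15 + s`. Both assertions show that `T.grad` differentiates straight through the `OpFromGraph` node, including with respect to a shared variable captured by the inner graph.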
Example #8
def MvNormalLogp():
    """Compute the log pdf of a multivariate normal distribution.

    This should be used in MvNormal.logp once Theano#5908 is released.

    Parameters
    ----------
    cov: tt.matrix
        The covariance matrix.
    delta: tt.matrix
        Array of deviations from the mean.
    """
    cov = tt.matrix("cov")
    cov.tag.test_value = floatX(np.eye(3))
    delta = tt.matrix("delta")
    delta.tag.test_value = floatX(np.zeros((2, 3)))

    solve_lower = tt.slinalg.Solve(A_structure="lower_triangular")
    solve_upper = tt.slinalg.Solve(A_structure="upper_triangular")
    cholesky = Cholesky(lower=True, on_error="nan")

    n, k = delta.shape
    n, k = f(n), f(k)  # f = floatX: cast the shape scalars to config.floatX
    chol_cov = cholesky(cov)
    diag = tt.nlinalg.diag(chol_cov)
    ok = tt.all(diag > 0)

    chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
    delta_trans = solve_lower(chol_cov, delta.T).T

    result = n * k * tt.log(f(2) * np.pi)
    result += f(2) * n * tt.sum(tt.log(diag))
    result += (delta_trans ** f(2)).sum()
    result = f(-0.5) * result
    logp = tt.switch(ok, result, -np.inf)

    def dlogp(inputs, gradients):
        (g_logp,) = gradients
        cov, delta = inputs

        g_logp.tag.test_value = floatX(1.0)
        n, k = delta.shape

        chol_cov = cholesky(cov)
        diag = tt.nlinalg.diag(chol_cov)
        ok = tt.all(diag > 0)

        chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
        delta_trans = solve_lower(chol_cov, delta.T).T

        inner = n * tt.eye(k) - tt.dot(delta_trans.T, delta_trans)
        g_cov = solve_upper(chol_cov.T, inner)
        g_cov = solve_upper(chol_cov.T, g_cov.T)

        tau_delta = solve_upper(chol_cov.T, delta_trans.T)
        g_delta = tau_delta.T

        g_cov = tt.switch(ok, g_cov, -np.nan)
        g_delta = tt.switch(ok, g_delta, -np.nan)

        return [-0.5 * g_cov * g_logp, -g_delta * g_logp]

    return OpFromGraph([cov, delta], [logp], grad_overrides=dlogp, inline=True)
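This factory wraps the whole log-density graph in an inlined `OpFromGraph` and supplies the gradient analytically via `grad_overrides`: the override receives `(inputs, output_gradients)` and must return one gradient term per input, here `[d logp/d cov, d logp/d delta]`, each scaled by the incoming `g_logp`. The quantity computed is the standard multivariate normal log-density over the `n` rows of `delta`,

logp = -0.5 * (n*k*log(2*pi) + 2*n*sum(log(diag(L))) + ||solve_lower(L, delta.T)||**2),  with L = cholesky(cov),

and the `ok`/`switch` guards return `-inf` (with NaN gradients) whenever `cov` is not positive definite. As a toy illustration of the same mechanism (a sketch, not PyMC3 code; the override simply restates the true derivative of tanh):

import theano.tensor as tt
from theano.compile.builders import OpFromGraph

x = tt.vector('x')

def tanh_grad(inputs, output_grads):
    (inp,) = inputs
    (g_out,) = output_grads
    return [g_out * (1 - tt.tanh(inp) ** 2)]  # d tanh(x) / dx

tanh_op = OpFromGraph([x], [tt.tanh(x)],
                      grad_overrides=tanh_grad, inline=True)
y = tanh_op(x)
g = tt.grad(y.sum(), x)  # calls tanh_grad instead of autodiff on the inner graph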