# Example 1
# 0
 def test_magma_opt_float16(self):
     """Check that float16 linear-algebra ops are rewritten to their GPU
     MAGMA counterparts when the cusolver optimizations are excluded.

     For each (CPU op, expected GPU op) pair, compiles a function on a
     float16 matrix and asserts the optimized graph contains the GPU op.
     """
     ops_to_gpu = [
         (MatrixInverse(), GpuMagmaMatrixInverse),
         (SVD(), GpuMagmaSVD),
         (QRFull(mode="reduced"), GpuMagmaQR),
         (QRIncomplete(mode="r"), GpuMagmaQR),
         # TODO: add support for float16 to Eigh numpy
         # (Eigh(), GpuMagmaEigh),
         (Cholesky(), GpuMagmaCholesky),
     ]
     for op, gpu_op in ops_to_gpu:
         A = theano.tensor.matrix("A", dtype="float16")
         fn = theano.function([A], op(A), mode=mode_with_gpu.excluding("cusolver"))
         # Pass a generator to any() instead of building an intermediate
         # list (C419): it short-circuits on the first match.
         assert any(
             isinstance(node.op, gpu_op) for node in fn.maker.fgraph.toposort()
         )
# Example 2
# 0
from theano.tensor.nlinalg import QRFull

from unification import var

from kanren import eq
from kanren.core import lall
from kanren.graph import applyo
from kanren.constraints import neq

from etuples import etuple, etuplize

from ...theano.meta import mt


# Register a meta-level wrapper for Theano's reduced-mode QR so that it can be
# referenced as `mt.nlinalg.qr_full` when building relational goals.
mt.nlinalg.qr_full = mt(QRFull("reduced"))
# Callable returning `x.owner.inputs`: the inputs of the apply node that
# produced the variable `x`.
owner_inputs = attrgetter("owner.inputs")
# Accessors for a normal RV's apply-node inputs.
# NOTE(review): assumes the size argument is at input index 2 and the RNG at
# index 3 -- presumably matching Theano's NormalRV input layout; confirm.
normal_get_size = toolz.compose(itemgetter(2), owner_inputs)
normal_get_rng = toolz.compose(itemgetter(3), owner_inputs)


def update_name_suffix(x, old_x, suffix):  # pragma: no cover
    """Give `x` the name of `old_x` with `suffix` appended, and return `x`."""
    renamed = old_x.name + suffix
    x.name = renamed
    return x


def normal_normal_regression(Y, X, beta, Y_args_tail=None, beta_args=None):
    """Create a goal for a normal-normal regression of the form `Y ~ N(X * beta, sd**2)`."""
    Y_args_tail = Y_args_tail or var()
    beta_args = beta_args or var()