Example 1
def test_Mode_basic():
    db = SequenceDB()
    mode = Mode(linker="py", optimizer=OptimizationQuery(include=None), db=db)

    assert mode.optdb is db

    assert str(mode).startswith("Mode(linker=py, optimizer=OptimizationQuery")
Example 2
def test_jax_BatchedDot():
    # tensor3 . tensor3
    a = tensor3("a")
    a.tag.test_value = (
        np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
    )
    b = tensor3("b")
    b.tag.test_value = (
        np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
    )
    out = aet_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

    # A dimension mismatch should raise a TypeError for compatibility
    inputs = [get_test_value(a)[:-1], get_test_value(b)]
    opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
    jax_mode = Mode(JAXLinker(), opts)
    aesara_jax_fn = function(fgraph.inputs, fgraph.outputs, mode=jax_mode)
    with pytest.raises(TypeError):
        aesara_jax_fn(*inputs)

    # matrix . matrix
    a = matrix("a")
    a.tag.test_value = np.linspace(-1, 1, 5 * 3).astype(config.floatX).reshape((5, 3))
    b = matrix("b")
    b.tag.test_value = np.linspace(1, -1, 5 * 3).astype(config.floatX).reshape((5, 3))
    out = aet_blas.BatchedDot()(a, b)
    fgraph = FunctionGraph([a, b], [out])
    compare_jax_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])
Example 3
    def test_inplace(self):
        """Make sure that in-place optimizations are *not* performed on the output of a ``BroadcastTo``."""
        a = aet.zeros((5, ))
        d = aet.vector("d")
        c = aet.set_subtensor(a[np.r_[0, 1, 3]], d)
        b = broadcast_to(c, (5, ))
        q = b[np.r_[0, 1, 3]]
        e = aet.set_subtensor(q, np.r_[0, 0, 0])

        opts = OptimizationQuery(include=["inplace"])
        py_mode = Mode("py", opts)
        e_fn = function([d], e, mode=py_mode)

        advincsub_node = e_fn.maker.fgraph.outputs[0].owner
        assert isinstance(advincsub_node.op, AdvancedIncSubtensor)
        assert isinstance(advincsub_node.inputs[0].owner.op, BroadcastTo)

        assert advincsub_node.op.inplace is False
Example 4
def optimize_graph(
    fgraph: Union[Variable, FunctionGraph],
    include: Sequence[str] = ["canonicalize"],
    custom_opt=None,
    clone: bool = False,
    **kwargs
) -> Union[Variable, FunctionGraph]:
    """Easily optimize a graph.

    Parameters
    ----------
    fgraph:
        A ``FunctionGraph`` or ``Variable`` to be optimized.
    include:
        String names of the optimizations to be applied.  The default
        optimization is ``"canonicalize"``.
    custom_opt:
        A custom ``Optimization`` to also be applied.
    clone:
        Whether or not to clone the input graph before optimizing.
    **kwargs:
        Keyword arguments passed to the ``aesara.graph.optdb.OptimizationQuery`` object.
    """
    from aesara.compile import optdb

    return_only_out = False
    if not isinstance(fgraph, FunctionGraph):
        fgraph = FunctionGraph(outputs=[fgraph], clone=clone)
        return_only_out = True

    canonicalize_opt = optdb.query(OptimizationQuery(include=include, **kwargs))
    _ = canonicalize_opt.optimize(fgraph)

    if custom_opt:
        custom_opt.optimize(fgraph)

    if return_only_out:
        return fgraph.outputs[0]
    else:
        return fgraph
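
As a quick illustration of the ``optimize_graph`` API documented above, the sketch below applies the default ``"canonicalize"`` rewrites to a small graph. The import path is an assumption (recent Aesara versions expose the helper from ``aesara.graph.opt_utils``); adjust it to match your installation.

import aesara
import aesara.tensor as aet
from aesara.graph.opt_utils import optimize_graph  # assumed import path

x = aet.vector("x")
y = aet.log(aet.exp(x))  # a small example graph to rewrite

# Passing a Variable returns the (possibly rewritten) output Variable;
# passing a FunctionGraph would return the optimized FunctionGraph instead.
y_opt = optimize_graph(y)
aesara.dprint(y_opt)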
Example 5
def test_inplace_optimization():

    out = normal(0, 1)

    assert out.owner.op.inplace is False

    inplace_mode = Mode(
        "py", OptimizationQuery(include=["random_make_inplace"], exclude=[]))

    f = function(
        [],
        out,
        mode=inplace_mode,
    )

    (new_out, ) = f.maker.fgraph.outputs
    assert new_out.type == out.type
    assert isinstance(new_out.owner.op, type(out.owner.op))
    assert new_out.owner.op.inplace is True
    assert all(
        np.array_equal(a.data, b.data)
        for a, b in zip(new_out.owner.inputs[1:], out.owner.inputs[1:]))
Example 6
def set_aesara_flags():
    opts = OptimizationQuery(include=[None], exclude=[])
    py_mode = Mode("py", opts)
    with config.change_flags(mode=py_mode, compute_test_value="warn"):
        yield
Example 7
    dirichlet,
    multivariate_normal,
    normal,
    poisson,
    uniform,
)
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.opt import (
    local_dimshuffle_rv_lift,
    local_rv_size_lift,
    local_subtensor_rv_lift,
)
from aesara.tensor.subtensor import AdvancedSubtensor, AdvancedSubtensor1, Subtensor
from aesara.tensor.type import iscalar, vector

no_mode = Mode("py", OptimizationQuery(include=[], exclude=[]))


def apply_local_opt_to_rv(opt, op_fn, dist_op, dist_params, size, rng):
    dist_params_aet = []
    for p in dist_params:
        p_aet = aet.as_tensor(p).type()
        p_aet.tag.test_value = p
        dist_params_aet.append(p_aet)

    size_aet = []
    for s in size:
        s_aet = iscalar()
        s_aet.tag.test_value = s
        size_aet.append(s_aet)
Example 8
    pareto,
    permutation,
    poisson,
    randint,
    standard_normal,
    triangular,
    truncexpon,
    uniform,
    vonmises,
    wald,
    weibull,
)
from aesara.tensor.type import iscalar, scalar, tensor
from tests.unittest_tools import create_aesara_param

opts = OptimizationQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
py_mode = Mode("py", opts)


def fixed_scipy_rvs(rvs_name):
    def _rvs(*args, size=None, **kwargs):
        res = getattr(stats, rvs_name).rvs(*args, size=size, **kwargs)
        res = np.broadcast_to(
            res,
            size if size is not None else broadcast_shapes(
                *[np.shape(a) for a in args]),
        )
        return res

    return _rvs
Example 9

def register_linker(name, linker):
    """Add a `Linker` which can be referred to by `name` in `Mode`."""
    if name in predefined_linkers:
        raise ValueError(f"Linker name already taken: {name}")
    predefined_linkers[name] = linker


# If a string is passed as the optimizer argument in the constructor
# for Mode, it will be used as the key to retrieve the real optimizer
# in this dictionary
exclude = []
if not config.cxx:
    exclude = ["cxx_only"]
OPT_NONE = OptimizationQuery(include=[], exclude=exclude)
# Even if the merge optimizer ends up being called multiple times, this
# shouldn't impact performance.
OPT_MERGE = OptimizationQuery(include=["merge"], exclude=exclude)
OPT_FAST_RUN = OptimizationQuery(include=["fast_run"], exclude=exclude)
OPT_FAST_RUN_STABLE = OPT_FAST_RUN.requiring("stable")
# We need fast_compile_gpu here.  The GPU does not have all the operations
# that exist in fast_compile, but it does have some that only get
# introduced in fast_run, and we want those optimizations to also run in
# fast_compile+gpu.  We can't tag them just as 'gpu', as that would
# exclude them whenever 'gpu' is excluded.
OPT_FAST_COMPILE = OptimizationQuery(
    include=["fast_compile", "fast_compile_gpu"], exclude=exclude)
OPT_STABILIZE = OptimizationQuery(include=["fast_run"], exclude=exclude)
OPT_STABILIZE.position_cutoff = 1.5000001
OPT_NONE.name = "OPT_NONE"
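
For context, here is a minimal sketch of the string lookup described in the comment above: when ``Mode`` is given a string for its optimizer (or linker) argument, the string is used as a key into the corresponding predefined dictionary. The import below and the exact equivalence are an illustration of the mechanism under that assumption, not verbatim library code.

from aesara.compile.mode import OPT_FAST_RUN, Mode

# "fast_run" is looked up in the predefined optimizers dictionary and
# resolves to the OPT_FAST_RUN query defined above, so these two modes
# describe the same compilation pipeline:
mode_from_string = Mode(linker="py", optimizer="fast_run")
mode_from_query = Mode(linker="py", optimizer=OPT_FAST_RUN)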
Example 10
    multivariate_normal,
    normal,
    poisson,
    uniform,
)
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.opt import (
    lift_rv_shapes,
    local_dimshuffle_rv_lift,
    local_subtensor_rv_lift,
)
from aesara.tensor.subtensor import AdvancedSubtensor, AdvancedSubtensor1, Subtensor
from aesara.tensor.type import iscalar, vector

inplace_mode = Mode(
    "py", OptimizationQuery(include=["random_make_inplace"], exclude=[]))
no_mode = Mode("py", OptimizationQuery(include=[], exclude=[]))


def test_inplace_optimization():

    out = normal(0, 1)

    assert out.owner.op.inplace is False

    f = function(
        [],
        out,
        mode=inplace_mode,
    )