Example No. 1
def test_infix_dot_method():
    X = dmatrix("X")
    y = dvector("y")

    res = X @ y
    exp_res = X.dot(y)
    assert equal_computations([res], [exp_res])

    X_val = np.arange(2 * 3).reshape((2, 3))
    res = X_val @ y
    exp_res = dot(X_val, y)
    assert equal_computations([res], [exp_res])
Example No. 2
def marginal_mixture_default_transform(op, rv):
    def transform_warning():
        warnings.warn(
            f"No safe default transform found for Mixture distribution {rv}. This can "
            "happen when components have different supports or default transforms.\n"
            "If appropriate, you can specify a custom transform for more efficient sampling.",
            MixtureTransformWarning,
            stacklevel=2,
        )

    rng, weights, *components = rv.owner.inputs

    default_transforms = [
        _default_transform(component.owner.op, component)
        for component in components
    ]

    # If there is more than one type of default transform, we do not apply any
    if len({type(transform) for transform in default_transforms}) != 1:
        transform_warning()
        return None

    default_transform = default_transforms[0]

    if default_transform is None:
        return None

    if not isinstance(default_transform, allowed_default_mixture_transforms):
        transform_warning()
        return None

    if isinstance(default_transform, IntervalTransform):
        # If there is more than one component, we need to check that the
        # IntervalTransforms of the components are actually equivalent (e.g., we
        # don't have an Interval(0, 1) and an Interval(0, 2)).
        if len(default_transforms) > 1:
            value = rv.type()
            backward_expressions = [
                transform.backward(value, *component.owner.inputs)
                for transform, component in zip(default_transforms, components)
            ]
            for expr1, expr2 in zip(backward_expressions[:-1],
                                    backward_expressions[1:]):
                if not equal_computations([expr1], [expr2]):
                    transform_warning()
                    return None

        # We need to create a new IntervalTransform that expects the Mixture inputs
        args_fn = default_transform.args_fn

        def mixture_args_fn(rng, weights, *components):
            # We checked that the interval transforms of each component are equivalent,
            # so we can just pass the inputs of the first component
            return args_fn(*components[0].owner.inputs)

        return IntervalTransform(args_fn=mixture_args_fn)

    else:
        return default_transform
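
The IntervalTransform equivalence check above reduces to comparing the components' backward graphs node for node with equal_computations. Below is a minimal sketch of that idea in plain Aesara, using a hypothetical sigmoid-based backward expression in place of PyMC's actual IntervalTransform.backward:

import aesara.tensor as at
from aesara.graph.basic import equal_computations

value = at.dvector("value")

# Hypothetical backward maps of the form lower + (upper - lower) * sigmoid(value)
backward_01_a = 0.0 + (1.0 - 0.0) * at.sigmoid(value)  # Interval(0, 1)
backward_01_b = 0.0 + (1.0 - 0.0) * at.sigmoid(value)  # Interval(0, 1), identical graph
backward_02 = 0.0 + (2.0 - 0.0) * at.sigmoid(value)    # Interval(0, 2)

# Structurally identical graphs compare equal; a different constant does not
assert equal_computations([backward_01_a], [backward_01_b])
assert not equal_computations([backward_01_a], [backward_02])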
Example No. 3
    def test_clone(self):
        x, y, z = matrices("xyz")

        ofg = OpFromGraph([x], [2 * x])

        ofg_clone = ofg.clone()

        assert ofg_clone.fgraph is not ofg.fgraph
        assert ofg_clone.fgraph.outputs != ofg.fgraph.outputs
        assert equal_computations(ofg_clone.fgraph.outputs, ofg.fgraph.outputs)
Example No. 4
    def test_outputs_consistency(self):
        """Make sure that `OpFromGraph.fn` doesn't change the value of `OpFromGraph.inner_outputs`."""

        x = scalar("x")
        op = OpFromGraph([x], [x**2 / x], mode="FAST_RUN")

        # Confirm that the inner-graph is as expected
        assert equal_computations(op.inner_outputs, [x**2 / x],
                                  op.inner_inputs, [x])

        # These outputs of the compiled `op.fgraph` should differ from the
        # original, uncompiled `op.fgraph` outputs
        fn = op.fn
        new_inputs = fn.maker.fgraph.inputs
        new_outputs = fn.maker.fgraph.outputs
        assert not equal_computations(new_outputs, [x**2 / x], new_inputs, [x])

        # The original `op.fgraph` outputs should stay the same, though
        assert equal_computations(op.inner_outputs, [x**2 / x],
                                  op.inner_inputs, [x])
Example No. 5
    def test_both_assert_merge_2_reverse(self):
        # Test case "test_both_assert_merge_2" but in reverse order
        x1 = matrix("x1")
        x2 = matrix("x2")
        x3 = matrix("x3")
        e = dot(x1, assert_op(x2, (x2 > x3).all())) + dot(
            assert_op(x1, (x1 > x3).all()), x2)
        g = FunctionGraph([x1, x2, x3], [e], clone=False)
        MergeOptimizer().optimize(g)

        assert g.outputs[0].owner.op == add
        add_inputs = g.outputs[0].owner.inputs
        assert isinstance(add_inputs[0].owner.op, Dot)
        # Confirm that the `Assert`s are correct
        assert_var_1, assert_var_2 = add_inputs[0].owner.inputs
        assert_ref_1 = assert_op(x2, (x2 > x3).all())
        assert equal_computations([assert_var_1], [assert_ref_1])
        assert_ref_2 = assert_op(x1, (x1 > x3).all())
        assert equal_computations([assert_var_2], [assert_ref_2])
        # Confirm the merge
        assert add_inputs[0] is add_inputs[1]
Example No. 6
    def test_one_assert_merge(self):
        # Merge two nodes, one with an `Assert` and one without.
        x1 = matrix("x1")
        x2 = matrix("x2")
        e = dot(x1, x2) + dot(assert_op(x1, (x1 > x2).all()), x2)
        g = FunctionGraph([x1, x2], [e], clone=False)
        MergeOptimizer().optimize(g)

        assert g.outputs[0].owner.op == add
        add_inputs = g.outputs[0].owner.inputs
        assert isinstance(add_inputs[0].owner.op, Dot)
        # Confirm that the `Assert`s are correct
        assert_var = add_inputs[0].owner.inputs[0]
        assert_ref = assert_op(x1, (x1 > x2).all())
        assert equal_computations([assert_var], [assert_ref])
        # Confirm the merge
        assert add_inputs[0] is add_inputs[1]
Example No. 7
from collections import deque

def forced_replace(out, x, y):
    """
    Check all internal values of the graph that computes the variable ``out``
    for occurrences of values identical to ``x``. If such occurrences are
    encountered, they are replaced with the variable ``y``.

    Parameters
    ----------
    out : Aesara Variable
    x : Aesara Variable
    y : Aesara Variable

    Examples
    --------
    out := sigmoid(wu)*(1-sigmoid(wu))
    x := sigmoid(wu)
    forced_replace(out, x, y) := y*(1-y)

    Notes
    -----
    When a match is found, the traversal does not continue into the inputs of
    the matched subgraph.
    """
    if out is None:
        return None

    # ``visited`` is a set of nodes that are already known and don't need to be
    # checked again, speeding up the traversal of multiply-connected graphs.
    visited = set()

    q = deque([out])
    to_replace = []
    while q:
        graph = q.popleft()
        if graph in visited:
            continue
        visited.add(graph)
        if equal_computations([graph], [x]):
            to_replace.append((graph, y))
        elif graph.owner:
            q.extendleft(graph.owner.inputs)

    if len(to_replace) == 0:
        return out
    return clone_replace(out, replace=to_replace)
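
A minimal usage sketch of the docstring's sigmoid example, assuming the surrounding module provides `equal_computations` and `clone_replace` (`wu` and `y` are illustrative variables):

import aesara.tensor as at
from aesara.graph.basic import equal_computations

wu = at.dvector("wu")
y = at.dvector("y")

s = at.sigmoid(wu)
out = s * (1 - s)

# Every subgraph identical to sigmoid(wu) is swapped for y, giving y * (1 - y)
res = forced_replace(out, at.sigmoid(wu), y)
assert equal_computations([res], [y * (1 - y)])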
Example No. 8
    def test_both_assert_merge_identical(self):
        """Merge two nodes, both have `Assert`s on the same node with the same conditions."""
        x1 = matrix("x1")
        x2 = matrix("x2")
        e = dot(assert_op(x1, (x1 > x2).all()), x2) + dot(
            assert_op(x1, (x1 > x2).all()), x2)
        g = FunctionGraph([x1, x2], [e], clone=False)
        MergeOptimizer().optimize(g)

        assert g.outputs[0].owner.op == add
        add_inputs = g.outputs[0].owner.inputs
        assert isinstance(add_inputs[0].owner.op, Dot)
        # Confirm that the `Assert`s are correct
        assert_var = add_inputs[0].owner.inputs[0]
        assert_ref = assert_op(x1, (x1 > x2).all())
        assert equal_computations([assert_var], [assert_ref])
        # Confirm the merge
        assert add_inputs[0] is add_inputs[1]
Example No. 9
def test_rvs_to_value_vars_nested():
    # Test that calling rvs_to_value_vars in models with nested transformations
    # does not change the original rvs in place. See issue #5172
    with pm.Model() as m:
        one = pm.LogNormal("one", mu=0)
        two = pm.LogNormal("two", mu=at.log(one))

        # We add potentials or deterministics that are not in topological order
        pm.Potential("two_pot", two)
        pm.Potential("one_pot", one)

        before = aesara.clone_replace(m.free_RVs)

        # This call would change the model free_RVs in place in #5172
        res, _ = rvs_to_value_vars(m.potentials, apply_transforms=True)

        after = aesara.clone_replace(m.free_RVs)

        assert equal_computations(before, after)
Example No. 10
def test_clone_get_equiv():
    x = vector("x")
    y = vector("y")
    z = vector("z")
    a = x * y
    a_node = a.owner
    b = a + 1.0

    memo = {a: z}
    _ = clone_get_equiv([x, y], [b], copy_inputs=False, copy_orphans=False, memo=memo)

    assert x in memo
    assert y in memo
    assert memo[a] is z
    # All the outputs of `a` already had replacements/clones in the map, so
    # there is no need to re-clone it (unless another replacement/clone
    # re-introduces `a.owner` somehow).
    assert a_node not in memo
    assert equal_computations([memo[b]], [z + 1.0])
Example No. 11
    def test_clone_inner_graph(self):
        r1, r2, r3 = MyVariable(1), MyVariable(2), MyVariable(3)
        o1 = MyOp(r1, r2)
        o1.name = "o1"

        # Inner graph
        igo_in_1 = MyVariable(4)
        igo_in_2 = MyVariable(5)
        igo_out_1 = MyOp(igo_in_1, igo_in_2)
        igo_out_1.name = "igo1"

        igo = MyInnerGraphOp([igo_in_1, igo_in_2], [igo_out_1])

        o2 = igo(r3, o1)
        o2.name = "o2"

        o2_node = o2.owner
        o2_node_clone = o2_node.clone(clone_inner_graph=True)

        assert o2_node_clone is not o2_node
        assert o2_node_clone.op.fgraph is not o2_node.op.fgraph
        assert equal_computations(
            o2_node_clone.op.fgraph.outputs, o2_node.op.fgraph.outputs
        )
Example No. 12
def test_unify_Variable():
    x_at = at.vector("x")
    y_at = at.vector("y")

    z_at = x_at + y_at

    # `Variable`, `Variable`
    s = unify(z_at, z_at)
    assert s == {}

    # These `Variable`s have no owners
    v1 = MyType()()
    v2 = MyType()()

    assert v1 != v2

    s = unify(v1, v2)
    assert s is False

    op_lv = var()
    z_pat_et = etuple(op_lv, x_at, y_at)

    # `Variable`, `ExpressionTuple`
    s = unify(z_at, z_pat_et, {})

    assert op_lv in s
    assert s[op_lv] == z_at.owner.op

    res = reify(z_pat_et, s)

    assert isinstance(res, ExpressionTuple)
    assert equal_computations([res.evaled_obj], [z_at])

    z_et = etuple(at.add, x_at, y_at)

    # `ExpressionTuple`, `ExpressionTuple`
    s = unify(z_et, z_pat_et, {})

    assert op_lv in s
    assert s[op_lv] == z_et[0]

    res = reify(z_pat_et, s)

    assert isinstance(res, ExpressionTuple)
    assert equal_computations([res.evaled_obj], [z_et.evaled_obj])

    # `ExpressionTuple`, `Variable`
    s = unify(z_et, x_at, {})
    assert s is False

    # This `Op` doesn't expand into an `ExpressionTuple`
    op1_np = CustomOpNoProps(1)

    q_at = op1_np(x_at, y_at)

    a_lv = var()
    b_lv = var()
    # `Variable`, `ExpressionTuple`
    s = unify(q_at, etuple(op1_np, a_lv, b_lv))

    assert s[a_lv] == x_at
    assert s[b_lv] == y_at
Example No. 13
def is_same_graph(var1, var2, givens=None):
    """
    Return True iff Variables `var1` and `var2` perform the same computation.

    By 'performing the same computation', we mean that they must share the same
    graph, so that for instance this function will return False when comparing
    (x * (y * z)) with ((x * y) * z).

    The current implementation is not efficient since, when possible, it
    verifies equality by calling two different functions that are expected to
    return the same output. The goal is to verify this assumption, to
    eventually get rid of one of them in the future.

    Parameters
    ----------
    var1
        The first Variable to compare.
    var2
        The second Variable to compare.
    givens
        Similar to the `givens` argument of `aesara.function`, it can be used
        to perform substitutions in the computational graph of `var1` and
        `var2`. This argument is associated to neither `var1` nor `var2`:
        substitutions may affect both graphs if the substituted variable
        is present in both.

    Examples
    --------

        ======  ======  ======  ======
        var1    var2    givens  output
        ======  ======  ======  ======
        x + 1   x + 1   {}      True
        x + 1   y + 1   {}      False
        x + 1   y + 1   {x: y}  True
        ======  ======  ======  ======

    """
    use_equal_computations = True

    if givens is None:
        givens = {}

    if not isinstance(givens, dict):
        givens = dict(givens)

    # Get result from the merge-based function.
    rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)

    if givens:
        # We need to build the `in_xs` and `in_ys` lists. To do this, we need
        # to be able to tell whether a variable belongs to the computational
        # graph of `var1` or `var2`.
        # The typical case we want to handle is when `to_replace` belongs to
        # one of these graphs, and `replace_by` belongs to the other one. In
        # other situations, the current implementation of `equal_computations`
        # is probably not appropriate, so we do not call it.
        ok = True
        in_xs = []
        in_ys = []
        # Compute the sets of all variables found in each computational graph.
        inputs_var = list(map(graph_inputs, ([var1], [var2])))
        all_vars = [
            set(vars_between(v_i, v_o))
            for v_i, v_o in ((inputs_var[0], [var1]), (inputs_var[1], [var2]))
        ]

        def in_var(x, k):
            # Return True iff `x` is in the computation graph of `var1`
            # (k == 1) or `var2` (k == 2).
            return x in all_vars[k - 1]

        for to_replace, replace_by in givens.items():
            # Map a substitution variable to the computational graphs it
            # belongs to.
            inside = {
                v: [in_var(v, k) for k in (1, 2)] for v in (to_replace, replace_by)
            }
            if (
                inside[to_replace][0]
                and not inside[to_replace][1]
                and inside[replace_by][1]
                and not inside[replace_by][0]
            ):
                # Substitute variable in `var1` by one from `var2`.
                in_xs.append(to_replace)
                in_ys.append(replace_by)
            elif (
                inside[to_replace][1]
                and not inside[to_replace][0]
                and inside[replace_by][0]
                and not inside[replace_by][1]
            ):
                # Substitute variable in `var2` by one from `var1`.
                in_xs.append(replace_by)
                in_ys.append(to_replace)
            else:
                ok = False
                break
        if not ok:
            # We cannot directly use `equal_computations`.
            use_equal_computations = False
    else:
        in_xs = None
        in_ys = None
    if use_equal_computations:
        rval2 = equal_computations(xs=[var1], ys=[var2], in_xs=in_xs, in_ys=in_ys)
        assert rval2 == rval1
    return rval1
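
A minimal usage sketch matching the docstring's table, assuming Aesara scalar variables:

import aesara.tensor as at

x = at.scalar("x")
y = at.scalar("y")

assert is_same_graph(x + 1, x + 1)
assert not is_same_graph(x + 1, y + 1)
assert is_same_graph(x + 1, y + 1, givens={x: y})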
Example No. 14
def test_CheckAndRaise_equal():
    x, y = at.vectors("xy")
    g1 = assert_op(x, (x > y).all())
    g2 = assert_op(x, (x > y).all())

    assert equal_computations([g1], [g2])
Example No. 15
def test_equal_computations():

    a, b = iscalars(2)

    with pytest.raises(ValueError):
        equal_computations([a], [a, b])

    assert equal_computations([a], [a])
    assert equal_computations([at.as_tensor(1)], [at.as_tensor(1)])
    assert not equal_computations([b], [a])
    assert not equal_computations([at.as_tensor(1)], [at.as_tensor(2)])

    assert equal_computations([2], [2])
    assert equal_computations([np.r_[2, 1]], [np.r_[2, 1]])
    assert equal_computations([np.r_[2, 1]], [at.as_tensor(np.r_[2, 1])])
    assert equal_computations([at.as_tensor(np.r_[2, 1])], [np.r_[2, 1]])

    assert not equal_computations([2], [a])
    assert not equal_computations([np.r_[2, 1]], [a])
    assert not equal_computations([a], [2])
    assert not equal_computations([a], [np.r_[2, 1]])

    assert equal_computations([NoneConst], [NoneConst])

    m = matrix()
    max_argmax1 = max_and_argmax(m)
    max_argmax2 = max_and_argmax(m)
    assert equal_computations(max_argmax1, max_argmax2)
Example No. 16
def broadcast_shape_iter(arrays, **kwargs):
    """Compute the shape resulting from broadcasting arrays.

    Parameters
    ----------
    arrays: Iterable[TensorVariable] or Iterable[Tuple[Variable]]
        An iterable of tensors, or a tuple of shapes (as tuples),
        for which the broadcast shape is computed.
        XXX: Do not call this with a generator/iterator; this function will not
        make copies!
    arrays_are_shapes: bool (Optional)
        Indicates whether or not `arrays` contains shape tuples.
        If you use this approach, make sure that the broadcastable dimensions
        are (scalar) constants with the value ``1``, or simply the integer
        ``1``.

    """
    one = aesara.scalar.ScalarConstant(aesara.scalar.int64, 1)

    arrays_are_shapes = kwargs.pop("arrays_are_shapes", False)
    if arrays_are_shapes:
        max_dims = max(len(a) for a in arrays)

        array_shapes = [(one, ) * (max_dims - len(a)) +
                        tuple(one if getattr(sh, "value", sh) == 1 else sh
                              for sh in a) for a in arrays]
    else:
        max_dims = max(a.ndim for a in arrays)

        array_shapes = [(one, ) * (max_dims - a.ndim) +
                        tuple(one if bcast else sh
                              for sh, bcast in zip(a.shape, a.broadcastable))
                        for a in arrays]

    result_dims = []

    for dim_shapes in zip(*array_shapes):
        non_bcast_shapes = [shape for shape in dim_shapes if shape != one]

        if len(non_bcast_shapes) > 0:
            # Either there's only one non-broadcastable dimension--and that's
            # what determines the dimension size--or there are multiple
            # non-broadcastable dimensions that must be equal
            i_dim = non_bcast_shapes.pop()

            potentially_unequal_dims = [
                dim for dim in non_bcast_shapes
                # TODO FIXME: This is a largely deficient means of comparing graphs
                # (and especially shapes)
                if not equal_computations([i_dim], [dim])
            ]

            if potentially_unequal_dims:
                # In this case, we can't tell whether or not the dimensions are
                # equal, so we'll need to assert their equality and move the error
                # handling to evaluation time.
                assert_dim = Assert("Could not broadcast dimensions")
                eq_condition = aet_all([
                    or_(eq(dim, one), eq(i_dim, dim))
                    for dim in potentially_unequal_dims
                ])
                eq_condition = or_(eq(i_dim, one), eq_condition)
                result_dims.append(assert_dim(i_dim, eq_condition))
            else:
                result_dims.append(i_dim)
        else:
            # Every array was broadcastable in this dimension
            result_dims.append(one)

    return tuple(result_dims)
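
A minimal usage sketch of the shape-tuple mode, assuming plain Python ints are accepted as dimension sizes (non-broadcast sizes pass through unchanged, so the result here is a tuple of ints):

# Broadcasting shapes (1, 3) and (2, 1) yields (2, 3), NumPy-style
dims = broadcast_shape_iter([(1, 3), (2, 1)], arrays_are_shapes=True)
assert dims == (2, 3)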