Example #1
import numpy as np

import aesara
from aesara.tensor import get_vector_length, ivector, specify_shape


def test_get_vector_length():
    # Test `Shape`s
    x = aesara.shared(np.zeros((2, 3, 4, 5)))
    assert get_vector_length(x.shape) == 4

    # Test `SpecifyShape`
    x = specify_shape(ivector(), (10,))
    assert get_vector_length(x) == 10
Example #2
import pytest

from aesara.tensor import get_vector_length
from aesara.tensor.type import TensorType
from aesara.tensor.var import TensorVariable


def test_get_vector_length():
    x = TensorVariable(TensorType("int64", (4,)))
    res = get_vector_length(x)
    assert res == 4

    x = TensorVariable(TensorType("int64", (None,)))
    with pytest.raises(ValueError):
        get_vector_length(x)
Example #3
def change_rv_size(
    rv_var: TensorVariable,
    new_size: PotentialShapeType,
    expand: Optional[bool] = False,
) -> TensorVariable:
    """Change or expand the size of a `RandomVariable`.

    Parameters
    ----------
    rv_var
        The `RandomVariable` output.
    new_size
        The new size.
    expand
        Expand the existing size by `new_size`.

    """
    # Check the dimensionality of the `new_size` kwarg
    new_size_ndim = np.ndim(new_size)
    if new_size_ndim > 1:
        raise ShapeError("The `new_size` must be ≤1-dimensional.",
                         actual=new_size_ndim)
    elif new_size_ndim == 0:
        new_size = (new_size, )

    # Extract the RV node that is to be resized, together with its inputs, name and tag
    if isinstance(rv_var.owner.op, SpecifyShape):
        rv_var = rv_var.owner.inputs[0]
    rv_node = rv_var.owner
    rng, size, dtype, *dist_params = rv_node.inputs
    name = rv_var.name
    tag = rv_var.tag

    if expand:
        if rv_node.op.ndim_supp == 0 and at.get_vector_length(size) == 0:
            size = rv_node.op._infer_shape(size, dist_params)
        new_size = tuple(new_size) + tuple(size)

    # Make sure the new size is a tensor. This dtype-aware conversion helps
    # to not unnecessarily pick up a `Cast` in some cases (see #4652).
    new_size = at.as_tensor(new_size, ndim=1, dtype="int64")

    new_rv_node = rv_node.op.make_node(rng, new_size, dtype, *dist_params)
    rv_var = new_rv_node.outputs[-1]
    rv_var.name = name
    for k, v in tag.__dict__.items():
        rv_var.tag.__dict__.setdefault(k, v)

    if config.compute_test_value != "off":
        compute_test_value(new_rv_node)

    return rv_var
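A minimal usage sketch (my own illustration, not part of the original module); it assumes Aesara's `normal` random variable and the `change_rv_size` defined above are both in scope:

from aesara.tensor.random.basic import normal

rv = normal(0, 1, size=(3,))                            # a (3,)-sized normal RV
resized = change_rv_size(rv, new_size=(5,))             # size replaced: now (5,)
expanded = change_rv_size(rv, new_size=2, expand=True)  # size prepended: now (2, 3)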
Example #4
# `dist_op`, `dist_params`, and `size` are supplied by `pytest.mark.parametrize`
# in the original test module; `apply_local_opt_to_rv` is a helper defined there.
def test_local_rv_size_lift(dist_op, dist_params, size):
    rng = shared(np.random.RandomState(1233532), borrow=False)

    new_out, f_inputs, dist_st, f_opt = apply_local_opt_to_rv(
        local_rv_size_lift,
        lambda rv: rv,
        dist_op,
        dist_params,
        size,
        rng,
    )

    assert aet.get_vector_length(new_out.owner.inputs[1]) == 0
Example #5
def reshape(x, newshape, ndim=None):
    """Return `x` reshaped to `newshape`, inferring `ndim` when possible."""
    if ndim is None:
        newshape = at.as_tensor_variable(newshape)
        if newshape.ndim != 1:
            raise TypeError(
                "New shape in reshape must be a vector or a list/tuple of"
                f" scalars. Got {newshape} after conversion to a vector.")
        try:
            ndim = get_vector_length(newshape)
        except ValueError:
            raise ValueError(
                f"The length of the provided shape ({newshape}) cannot "
                "be automatically determined, so Aesara is not able "
                "to know what the number of dimensions of the reshaped "
                "variable will be. You can provide the 'ndim' keyword "
                "argument to 'reshape' to avoid this problem.")
    op = Reshape(ndim)
    rval = op(x, newshape)
    return rval
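A short sketch of how the `ndim` handling above plays out (an illustration, assuming the usual `import aesara.tensor as at` alias used in the body):

x = at.matrix("x")
y = reshape(x, (2, -1))    # constant shape: `ndim` is inferred as 2
s = at.lvector("s")        # symbolic shape whose length is unknown
z = reshape(x, s, ndim=2)  # `ndim` must be passed explicitly in this case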
Example #6
def specify_shape(
    x: Union[np.ndarray, Number, Variable],
    shape: Union[int, List[Union[int, Variable]], Tuple[Union[int, Variable]],
                 Variable],
):
    """Specify a fixed shape for a `Variable`."""

    if not isinstance(x, Variable):
        x = at.as_tensor_variable(x)

    if np.ndim(shape) == 0:
        shape = at.as_tensor_variable([shape])

    try:
        _ = get_vector_length(shape)
    except ValueError:
        raise ValueError("Shape must have fixed dimensions")

    if isinstance(shape, Constant):
        shape = tuple(shape.data)

    return _specify_shape(x, shape)
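A quick illustration (not from the original source) of the three cases handled above, again assuming the `at` alias used in the body:

x = at.vector("x")
y = specify_shape(x, 10)           # a scalar shape is promoted to the vector [10]
z = specify_shape(x, (10,))        # tuples/lists of fixed length pass through
specify_shape(x, at.ivector("s"))  # raises ValueError: the shape's length is not fixed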
Example #7
def normalize_size_param(size):
    """Create an Aesara value for a ``RandomVariable`` ``size`` parameter."""
    if size is None:
        size = constant([], dtype="int64")
    elif isinstance(size, int):
        size = as_tensor_variable([size], ndim=1)
    elif not isinstance(size, (np.ndarray, Variable, Sequence)):
        raise TypeError(
            "Parameter size must be None, an integer, or a sequence with integers."
        )
    else:
        size = cast(as_tensor_variable(size, ndim=1), "int64")

        if not isinstance(size, Constant):
            # This should help ensure that the length of non-constant `size`s
            # will be available after certain types of cloning (e.g. the kind
            # `Scan` performs)
            size = specify_shape(size, (get_vector_length(size), ))

    assert size.dtype in int_dtypes

    return size
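A sketch of how the branches above behave (illustrative only; `ivector` from `aesara.tensor` is assumed for the last line):

normalize_size_param(None)          # -> empty int64 constant vector (no size given)
normalize_size_param(3)             # -> the length-1 vector [3]
normalize_size_param((2, 3))        # -> an int64 vector with a known length of 2
normalize_size_param(ivector("s"))  # raises ValueError: the length cannot be determined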
Example #8
# A classmethod excerpted from its `Op` class body; `cls` is the `Op` subclass.
def new(cls, rstate, ndim, dtype, size):
    v_size = as_tensor_variable(size)
    if ndim is None:
        ndim = get_vector_length(v_size)
    op = cls(TensorType(dtype, (False,) * ndim))
    return op(rstate, v_size)
Example #9
def test_get_vector_length():
    # The length of a shared 1-d value is recoverable by `get_vector_length`.
    x = aesara.shared(np.array((2, 3, 4, 5)))
    assert get_vector_length(x) == 4
Example #10
def _infer_ndim_bcast(ndim, shape, *args):
    """
    Infer the number of dimensions from the shape or the other arguments.

    Returns
    -------
    (int, variable, tuple) triple, where the variable is an integer vector,
    and the tuple contains Booleans
        The first element returned is the inferred number of dimensions.
        The second element is the shape inferred (combining symbolic and
        constant information from shape and args).
        The third element is a broadcasting pattern corresponding to that shape.

    """

    # Find the minimum value of ndim required by the *args
    if args:
        args_ndim = max(arg.ndim for arg in args)
    else:
        args_ndim = 0

    if isinstance(shape, (tuple, list)):
        # there is a convention that -1 means the corresponding shape of a
        # potentially-broadcasted symbolic arg
        #
        # This case combines together symbolic and non-symbolic shape
        # information
        shape_ndim = len(shape)
        if ndim is None:
            ndim = shape_ndim
        else:
            if shape_ndim != ndim:
                raise ValueError(
                    "ndim should be equal to len(shape), but "
                    f"ndim = {ndim}, len(shape) = {shape_ndim}, shape = {shape}"
                )

        bcast = []
        pre_v_shape = []
        for i, s in enumerate(shape):
            if hasattr(s, "type"):  # s is symbolic
                bcast.append(False)  # todo - introspect further
                pre_v_shape.append(s)
            else:
                if s >= 0:
                    pre_v_shape.append(tensor.as_tensor_variable(s))
                    bcast.append(s == 1)
                elif s == -1:
                    n_a_i = 0
                    for a in args:
                        # ndim: _   _   _   _   _   _
                        # ashp:         s0  s1  s2  s3
                        #           i
                        if i >= ndim - a.ndim:
                            n_a_i += 1
                            a_i = i + a.ndim - ndim
                            if not a.broadcastable[a_i]:
                                pre_v_shape.append(a.shape[a_i])
                                bcast.append(False)
                                break
                    else:
                        if n_a_i == 0:
                            raise ValueError(
                                "Auto-shape of -1 must overlap "
                                "with the shape of one of the broadcastable "
                                "inputs")
                        else:
                            pre_v_shape.append(tensor.as_tensor_variable(1))
                            bcast.append(True)
                else:
                    ValueError("negative shape", s)
        # post-condition: shape may still contain both symbolic and
        # non-symbolic things
        if len(pre_v_shape) == 0:
            v_shape = tensor.constant([], dtype="int64")
        else:
            v_shape = tensor.stack(pre_v_shape)

    elif shape is None:
        # The number of drawn samples will be determined automatically,
        # but we need to know ndim
        if not args:
            raise TypeError("_infer_ndim_bcast cannot infer shape without"
                            " either shape or args")
        template = reduce(lambda a, b: a + b, args)
        v_shape = template.shape
        bcast = template.broadcastable
        ndim = template.ndim
    else:
        v_shape = tensor.as_tensor_variable(shape)
        if v_shape.ndim != 1:
            raise TypeError(
                "shape must be a vector or list of scalars, got '%s'" % v_shape)

        if ndim is None:
            ndim = tensor.get_vector_length(v_shape)
        bcast = [False] * ndim

    if v_shape.ndim != 1:
        raise TypeError("shape must be a vector or list of scalars, got '%s'" %
                        v_shape)

    if v_shape.dtype not in aesara.tensor.integer_dtypes:
        raise TypeError("shape must be an integer vector or list",
                        v_shape.dtype)

    if args_ndim > ndim:
        raise ValueError(
            "ndim should be at least as big as required by args value",
            (ndim, args_ndim),
            args,
        )

    assert ndim == len(bcast)
    return ndim, tensor.cast(v_shape, "int64"), tuple(bcast)
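An illustrative sketch of the three branches above (hypothetical calls, using the same Theano-style `tensor` module alias as the body):

a = tensor.matrix("a")               # 2-d argument with unknown shape
_infer_ndim_bcast(None, (5, 3))      # ndim=2, shape vector [5, 3], bcast (False, False)
_infer_ndim_bcast(None, (-1, 3), a)  # the -1 entry is taken from `a`'s first dimension
_infer_ndim_bcast(None, None, a)     # ndim, shape, and bcast all come from `a` itself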