Example #1
0
def unravel_index(indices, dims, order="C", ndim=None):
    """
    Converts a flat index or array of flat indices into a tuple
    of coordinate arrays.

    This method is similar to the NumPy version, except for the
    additional ``ndim`` parameter. This parameter is required if
    the length of ``dims`` cannot be determined automatically.

    Parameters
    ----------
    indices : Theano or NumPy array
        An integer array whose elements are indices into the flattened
        version of an array of dimensions ``dims``.
    dims : tuple of ints
        The shape of the array to use for unraveling ``indices``.
    order : {'C', 'F'}, optional
        Determines whether the indices should be viewed as indexing in
        row-major (C-style) or column-major (Fortran-style) order.
    ndim : int, optional
        Specifies the number of dimensions, i.e., the length of
        ``dims``. This is required if the dimensions cannot be determined
        automatically from ``dims`` itself.

    Returns
    -------
    unraveled_coords : tuple of ndarray
        Each array in the tuple has the same shape as the ``indices``
        array.

    Raises
    ------
    ValueError
        If ``ndim`` is not given and the length of ``dims`` cannot be
        determined symbolically.

    See Also
    --------
    ravel_multi_index

    """
    if ndim is None:
        try:
            ndim = basic.get_vector_length(dims)
        except ValueError as e:
            # Chain explicitly (PEP 3134) so the traceback shows that this
            # error is a direct consequence of the failed length inference.
            raise ValueError(
                "The length of the provided dimension list (%s) cannot "
                "be automatically determined, so Theano is not able "
                "to know what the number of dimensions of the unraveled "
                "index will be. You can provide the 'ndim' keyword "
                "argument to 'unravel_index' to avoid this problem." % str(dims)
            ) from e

    res = UnravelIndex(ndim=ndim, order=order)(indices, dims)
    if ndim == 1:
        # With a single output dimension, the Op returns a lone variable
        # rather than a list, so wrap it to keep the tuple contract.
        return (res,)
    else:
        return tuple(res)
Example #2
0
def unravel_index(indices, dims, order='C', ndim=None):
    """
    Convert a flat index (or array of flat indices) into a tuple of
    coordinate arrays, one per dimension of ``dims``.

    Behaves like the NumPy function of the same name, with one extra
    keyword: ``ndim``. It must be supplied whenever the length of
    ``dims`` cannot be inferred symbolically.

    Parameters
    ----------
    indices : Theano or NumPy array
        Integer indices into the flattened version of an array whose
        shape is ``dims``.
    dims : tuple of ints
        Shape of the array used to unravel ``indices``.
    order : {'C', 'F'}, optional
        Whether indices are interpreted in row-major (C-style) or
        column-major (Fortran-style) order.
    ndim : int, optional
        The number of dimensions (i.e. ``len(dims)``); required when it
        cannot be determined from ``dims`` automatically.

    Returns
    -------
    unraveled_coords : tuple of ndarray
        One array per dimension, each shaped like ``indices``.

    See Also
    --------
    ravel_multi_index

    """
    if ndim is None:
        try:
            ndim = basic.get_vector_length(dims)
        except ValueError:
            raise ValueError(
                "The length of the provided dimension list (%s) cannot "
                "be automatically determined, so Theano is not able "
                "to know what the number of dimensions of the unraveled "
                "index will be. You can provide the 'ndim' keyword "
                "argument to 'unravel_index' to avoid this problem." % str(dims))

    unraveled = UnravelIndex(ndim=ndim, order=order)(indices, dims)
    # A one-dimensional unravel yields a lone variable, not a list; wrap
    # it so callers always receive a tuple.
    return (unraveled,) if ndim == 1 else tuple(unraveled)
Example #3
0
    def make_node(self, indices, dims):
        """Build the `Apply` node: validate inputs and allocate one int64
        output tensor per entry of ``dims``."""
        indices = basic.as_tensor_variable(indices)
        dims = basic.as_tensor_variable(dims)

        # Both inputs must be integer-typed to serve as indices/shapes.
        for var in (indices, dims):
            if var.dtype not in basic.int_dtypes:
                raise TypeError(
                    f"'{var.dtype}' object cannot be interpreted as an index")
        if dims.ndim != 1:
            raise TypeError("dims must be a 1D array")

        # One coordinate output per dimension, each shaped like `indices`.
        n_outputs = basic.get_vector_length(dims)
        outputs = [
            basic.TensorType(
                dtype="int64", broadcastable=(False,) * indices.ndim
            )()
            for _ in range(n_outputs)
        ]
        return Apply(self, [indices, dims], outputs)
Example #4
0
    def _infer_shape(self, size, dist_params, param_shapes=None):
        """Compute the output shape given the size and distribution parameters.

        Parameters
        ----------
        size : TensorVariable
            The size parameter specified for this `RandomVariable`.
        dist_params : list of TensorVariable
            The symbolic parameter for this `RandomVariable`'s distribution.
        param_shapes : list of tuples of TensorVariable (optional)
            The shapes of the `dist_params` as given by `ShapeFeature`'s
            via `Op.infer_shape`'s `input_shapes` argument.  This parameter's
            values are essentially more accurate versions of ``[d.shape for d
            in dist_params]``.

        Outputs
        -------
        shape : tuple of `ScalarVariable`

        """

        size_len = get_vector_length(size)

        if self.ndim_supp == 0 and size_len > 0:
            # In this case, we have a univariate distribution with a non-empty
            # `size` parameter, which means that the `size` parameter
            # completely determines the shape of the random variable.  More
            # importantly, the `size` parameter may be the only correct source
            # of information for the output shape, in that we would be misled
            # by the `dist_params` if we tried to infer the relevant parts of
            # the output shape from those.
            return size

        # Broadcast the parameters so their shapes line up dimension-wise.
        param_shapes = params_broadcast_shapes(
            param_shapes or [p.shape for p in dist_params], self.ndims_params
        )

        def slice_ind_dims(p, ps, n):
            # Remove the `n` right-most (support) dimensions of parameter
            # `p` (shape `ps`), keeping only the independent-variate
            # dimensions.  Broadcastable dims are pinned to a constant 1.
            shape = tuple(ps)

            if n == 0:
                return (p, shape)

            ind_slice = (slice(None),) * (p.ndim - n) + (0,) * n
            ind_shape = [
                s if b is False else constant(1, "int64")
                for s, b in zip(shape[:-n], p.broadcastable[:-n])
            ]
            return (
                p[ind_slice],
                ind_shape,
            )

        # These are versions of our actual parameters with the anticipated
        # dimensions (i.e. support dimensions) removed so that only the
        # independent variate dimensions are left.
        params_ind_slice = tuple(
            slice_ind_dims(p, ps, n)
            for p, ps, n in zip(dist_params, param_shapes, self.ndims_params)
        )

        if len(params_ind_slice) == 1:
            ind_param, ind_shape = params_ind_slice[0]
            ndim_ind = len(ind_shape)
            shape_ind = ind_shape
        elif len(params_ind_slice) > 1:
            # If there are multiple parameters, the dimensions of their
            # independent variates should broadcast together.
            p_slices, p_shapes = zip(*params_ind_slice)

            shape_ind = theano.tensor.extra_ops.broadcast_shape_iter(
                p_shapes, arrays_are_shapes=True
            )

            ndim_ind = len(shape_ind)
        else:
            # FIX: `shape_ind` was previously left unbound on this path
            # (no distribution parameters), which raised a `NameError`
            # below whenever `ndim_shape > 0`.
            ndim_ind = 0
            shape_ind = ()

        if self.ndim_supp == 0:
            # Univariate case; the early return above handled a non-empty
            # `size`, so `size` is empty here.
            shape_supp = tuple()
            shape_reps = tuple(size)

            if ndim_ind > 0:
                shape_reps = shape_reps[:-ndim_ind]

            ndim_reps = len(shape_reps)
        else:
            # Multivariate case: the support shape comes from the
            # distribution parameters themselves.
            shape_supp = self._shape_from_params(
                dist_params,
                param_shapes=param_shapes,
            )

            ndim_reps = size_len
            shape_reps = size

        ndim_shape = self.ndim_supp + ndim_ind + ndim_reps

        if ndim_shape == 0:
            # Scalar output: represent the shape as an empty int64 constant.
            shape = constant([], dtype="int64")
        else:
            # replications + independent-variate dims + support dims
            shape = tuple(shape_reps) + tuple(shape_ind) + tuple(shape_supp)

        return shape