Example #1
 def broadcast_to(self, shape, **kw):
     """
     Return a view of the array broadcast to another shape. See
     numpy.broadcast_to.
     """
     numpy.broadcast_shapes(self.shape,
                            shape)  # raises if not broadcastable
     d = {
         name: broadcast_to(x, shape + self.dtype[name].shape, **kw)
         for name, x in self._dict.items()
     }
     return self._array(shape, self.dtype, d)
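The numpy.broadcast_shapes call at the top is used purely for validation: it raises a ValueError when the target shape is incompatible, before any per-field work happens. A minimal standalone sketch of that behaviour with plain NumPy (no StructuredArray involved):

import numpy as np

print(np.broadcast_shapes((3,), (2, 3)))   # compatible -> (2, 3)

try:
    np.broadcast_shapes((4,), (2, 3))      # 4 vs 3 cannot be reconciled
except ValueError as exc:
    print("not broadcastable:", exc)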
Example #2
    def _set_values(cls, lower, upper, size, shape, initval):
        if size is None:
            size = shape

        lower = np.asarray(lower)
        lower = floatX(np.where(lower == None, -np.inf, lower))
        upper = np.asarray(upper)
        upper = floatX(np.where(upper == None, np.inf, upper))

        if initval is None:
            _size = np.broadcast_shapes(to_tuple(size), np.shape(lower),
                                        np.shape(upper))
            _lower = np.broadcast_to(lower, _size)
            _upper = np.broadcast_to(upper, _size)
            initval = np.where(
                (_lower == -np.inf) & (_upper == np.inf),
                0,
                np.where(
                    _lower == -np.inf,
                    _upper - 1,
                    np.where(_upper == np.inf, _lower + 1,
                             (_lower + _upper) / 2),
                ),
            )

        lower = as_tensor_variable(floatX(lower))
        upper = as_tensor_variable(floatX(upper))
        return lower, upper, initval
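The nested np.where picks an initial value strictly inside the bounds: 0 when both sides are unbounded, upper - 1 or lower + 1 when only one side is bounded, and the midpoint otherwise. A standalone NumPy sketch of the same logic, with made-up bounds:

import numpy as np

lower = np.array([-np.inf, -np.inf, 0.0, 2.0])
upper = np.array([np.inf, 1.0, np.inf, 6.0])

size = np.broadcast_shapes(lower.shape, upper.shape)
lo = np.broadcast_to(lower, size)
up = np.broadcast_to(upper, size)

initval = np.where(
    (lo == -np.inf) & (up == np.inf),
    0,
    np.where(lo == -np.inf, up - 1, np.where(up == np.inf, lo + 1, (lo + up) / 2)),
)
print(initval)  # [0. 0. 1. 4.]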
Example #3
 def __mul__(self, other):
     if isinstance(other, gpuarray.GPUArray):
         result = type(self)(np.broadcast_shapes(self.shape, other.shape),
                             gpuarray._get_common_dtype(self, other))
         return self._elwise_multiply(other, result)
     else:
         return super().__mul__(other)
Example #4
def copy_from_usm_ndarray_to_usm_ndarray(dst, src):
    if type(dst) is not dpt.usm_ndarray or type(src) is not dpt.usm_ndarray:
        raise TypeError

    if dst.ndim == src.ndim and dst.shape == src.shape:
        copy_same_shape(dst, src)
        return

    try:
        common_shape = np.broadcast_shapes(dst.shape, src.shape)
    except ValueError:
        raise ValueError

    if dst.size < src.size:
        raise ValueError

    if len(common_shape) > dst.ndim:
        ones_count = len(common_shape) - dst.ndim
        for k in range(ones_count):
            if common_shape[k] != 1:
                raise ValueError
        common_shape = common_shape[ones_count:]

    if src.ndim < len(common_shape):
        new_src_strides = (0, ) * (len(common_shape) - src.ndim) + src.strides
        src_same_shape = dpt.usm_ndarray(common_shape,
                                         dtype=src.dtype,
                                         buffer=src,
                                         strides=new_src_strides)
    else:
        src_same_shape = src
        src_same_shape.shape = common_shape

    copy_same_shape(dst, src_same_shape)
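The zero-stride trick above is how broadcasting is realised at the buffer level: the prepended axes advance by 0 bytes, so every index along them reads the same data. NumPy's broadcast_to builds the same kind of view; a small sketch with plain ndarrays (no dpctl required):

import numpy as np

src = np.arange(3.0)                                   # shape (3,), strides (8,)
common_shape = np.broadcast_shapes((2, 3), src.shape)  # (2, 3)

view = np.broadcast_to(src, common_shape)              # read-only view, no copy
print(view.shape, view.strides)                        # (2, 3) (0, 8)

dst = np.empty(common_shape)
dst[...] = view                                        # same-shape copy, as in copy_same_shape
print(dst)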
Example #5
    def __init__(self, ir: array, ishape: Shape, dim: int, name: str = "Conv"):
        """ND convolution on last `N` axis.

        Parameters
        ----------
        ir : array
            The impulse responses. Must be at least of `ndim==dim`.
        ishape : tuple of int
            The shape of the input images. Images are on the last two axis.
        dim : int
            The last `dim` axis where convolution apply.
        """
        super().__init__(
            ishape=ishape,
            oshape=np.broadcast_shapes(ishape, ir.shape[:-dim] + ishape[-dim:]),
            name=name,
        )

        self.dim = dim
        self.imp_resp = ir
        self.freq_resp = udft.ir2fr(ir, self.ishape[-dim:])

        self.margins = ir.shape[-dim:]
        self._slices = [slice(None) for _ in range(len(ishape) - dim)]
        for idx in reversed(range(dim)):
            self._slices.append(
                slice(
                    ir.shape[idx] // 2,
                    ishape[idx] - ir.shape[idx] // 2 + ir.shape[idx] % 2,
                )
            )
Example #6
def _make_casadi_types_broadcastable(x1, x2):
    def shape_2D(object: Union[float, int, Iterable, _onp.ndarray]) -> Tuple:
        shape = _onp.shape(object)
        if len(shape) == 0:
            return (1, 1)
        elif len(shape) == 1:
            return (1, shape[0])
        elif len(shape) == 2:
            return shape
        else:
            raise ValueError(
                "CasADi can't handle arrays with >2 dimensions, unfortunately."
            )

    x1_shape = shape_2D(x1)
    x2_shape = shape_2D(x2)
    shape = _onp.broadcast_shapes(x1_shape, x2_shape)

    x1_tiled = _cas.repmat(
        x1,
        shape[0] // x1_shape[0],
        shape[1] // x1_shape[1],
    )
    x2_tiled = _cas.repmat(
        x2,
        shape[0] // x2_shape[0],
        shape[1] // x2_shape[1],
    )

    return x1_tiled, x2_tiled
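Because CasADi matrices are always 2-D, broadcasting is emulated by computing the common 2-D shape and explicitly tiling each operand up to it; the integer divisions are exact whenever np.broadcast_shapes succeeds. The same idea with NumPy's tile, using two toy arrays:

import numpy as np

x1 = np.array([[1.0, 2.0, 3.0]])   # shape (1, 3)
x2 = np.array([[10.0], [20.0]])    # shape (2, 1)

shape = np.broadcast_shapes(x1.shape, x2.shape)   # (2, 3)

x1_tiled = np.tile(x1, (shape[0] // x1.shape[0], shape[1] // x1.shape[1]))
x2_tiled = np.tile(x2, (shape[0] // x2.shape[0], shape[1] // x2.shape[1]))

# Explicit tiling reproduces what NumPy broadcasting does implicitly.
assert np.array_equal(x1_tiled + x2_tiled, x1 + x2)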
Example #7
def broadcasted_axes(*dfs):
    """
    Helper function which, from a collection of arrays, series, frames and other
    values, retrieves the axes of the series and frames that result from
    broadcasting operations. It checks whether the indexes and columns of the
    given series and frames, respectively, are aligned. Using this function
    makes it possible to subsequently use pure numpy operations and keep the
    axes in the background.

    """
    axes = []
    shape = (1, )

    if set(map(type, dfs)) == {tuple}:
        dfs = sum(dfs, ())

    for df in dfs:
        shape = np.broadcast_shapes(shape, np.asarray(df).shape)
        if isinstance(df, (pd.Series, pd.DataFrame)):
            if len(axes):
                assert (axes[-1] == df.axes[-1]).all(), (
                    'Series or DataFrames '
                    'are not aligned. Please make sure that all indexes and '
                    'columns of Series and DataFrames going into the linear '
                    'expression are equally sorted.')
            axes = df.axes if len(df.axes) > len(axes) else axes
    return axes, shape
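The running `shape` accumulates the broadcast of everything seen so far, starting from (1,), so the final value is the shape a pure-numpy evaluation of the expression would produce. A minimal sketch of that accumulation:

import numpy as np

shape = (1,)
for operand_shape in [(4,), (3, 1), (1, 4)]:
    shape = np.broadcast_shapes(shape, operand_shape)
print(shape)   # (3, 4)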
Example #8
def broadcast_arrays(*arrays, **kw):
    """
    Version of numpy.broadcast_arrays that works with StructuredArray and JAX
    arrays.
    """
    shapes = [a.shape for a in arrays]
    shape = numpy.broadcast_shapes(*shapes)
    return [broadcast_to(a, shape, **kw) for a in arrays]
Example #9
def test_string_comparisons_empty(op, ufunc, sym, dtypes):
    arr = np.empty((1, 0, 1, 5), dtype=dtypes[0])
    arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1])

    expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool)
    assert_array_equal(op(arr, arr2), expected)
    assert_array_equal(ufunc(arr, arr2), expected)
    assert_array_equal(np.compare_chararrays(arr, arr2, sym, False), expected)
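broadcast_shapes treats zero-length dimensions like any other, pairing 0 against 1, so the expected result of comparing these two empty arrays is itself empty. A quick check of the shape the test relies on:

import numpy as np

shape = np.broadcast_shapes((1, 0, 1, 5), (100, 1, 0, 1))
print(shape)                        # (100, 0, 0, 5)
print(np.empty(shape, bool).size)   # 0 -- the comparison result has no elements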
Example #10
    def _check_shapes(
        self,
        x0_shape: ShapeType,
        x1_shape: Optional[ShapeType] = None,
    ) -> ShapeType:
        """Checks input argument shapes and computes the broadcast batch shape of both
        inputs.

        This function checks the shapes of the inputs to the :meth:`__call__` method and
        it computes the `bcast_batch_shape` mentioned in the docstring.

        Parameters
        ----------
        x0_shape :
            Shape of the first input to the covariance function.
        x1_shape :
            Shape of the (optional) second input to the covariance function.

        Returns
        -------
        broadcast_batch_shape :
            The `batch_shape` after broadcasting the inputs to a common shape.

        Raises
        ------
        ValueError
            If one of the input shapes is not of the form ``batch_shape_{0,1} +``
            :attr:`input_shape`.
        ValueError
            If the inputs cannot be broadcast to a common shape.
        """

        err_msg = (
            "The shape of the input array `{argname}` must match the `input_shape` "
            f"`{self.input_shape}` of the kernel along its last dimension, but an "
            "array with shape `{shape}` was given.")

        if x0_shape[len(x0_shape) - self._input_ndim:] != self.input_shape:
            raise ValueError(err_msg.format(argname="x0", shape=x0_shape))

        broadcast_batch_shape = x0_shape[:len(x0_shape) - self._input_ndim]

        if x1_shape is not None:
            if x1_shape[len(x1_shape) - self._input_ndim:] != self.input_shape:
                raise ValueError(err_msg.format(argname="x1", shape=x1_shape))

            try:
                broadcast_batch_shape = np.broadcast_shapes(
                    broadcast_batch_shape,
                    x1_shape[:len(x1_shape) - self._input_ndim],
                )
            except ValueError as ve:
                err_msg = (
                    f"The input arrays `x0` and `x1` with shapes {x0_shape} and "
                    f"{x1_shape} can not be broadcast to a common shape.")
                raise ValueError(err_msg) from ve

        return broadcast_batch_shape
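Only the leading batch axes take part in the broadcast; the trailing input_shape axes are stripped off first and must match exactly. A small numeric sketch of that split, assuming an input_shape of (2,):

import numpy as np

input_shape = (2,)
input_ndim = len(input_shape)

x0_shape = (5, 1, 2)   # batch shape (5, 1) + input_shape
x1_shape = (3, 2)      # batch shape (3,)  + input_shape

batch0 = x0_shape[: len(x0_shape) - input_ndim]
batch1 = x1_shape[: len(x1_shape) - input_ndim]

print(np.broadcast_shapes(batch0, batch1))   # (5, 3)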
Example #11
 def branch_lengths(self) -> np.ndarray:
     batch_shape = np.broadcast_shapes(
         self.heights.shape[:-1], self.topology.parent_indices.shape[:-1]
     )
     heights_b = np.broadcast_to(self.heights, batch_shape + self.heights.shape[-1:])
     parent_indices_b = np.broadcast_to(
         self.topology.parent_indices,
         batch_shape + self.topology.parent_indices.shape[-1:],
     )
     return (
         np.take_along_axis(heights_b, parent_indices_b, axis=-1)
         - heights_b[..., :-1]
     )
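After the broadcast to a common batch shape, take_along_axis gathers each node's parent height so the subtraction is purely elementwise. A minimal unbatched sketch with hypothetical heights and parent indices for a four-node tree (three non-root nodes):

import numpy as np

heights = np.array([0.0, 0.0, 1.0, 2.5])   # node heights, root last
parent_indices = np.array([2, 2, 3])       # parent of nodes 0, 1 and 2

batch_shape = np.broadcast_shapes(heights.shape[:-1], parent_indices.shape[:-1])
heights_b = np.broadcast_to(heights, batch_shape + heights.shape[-1:])
parents_b = np.broadcast_to(parent_indices, batch_shape + parent_indices.shape[-1:])

branch_lengths = np.take_along_axis(heights_b, parents_b, axis=-1) - heights_b[..., :-1]
print(branch_lengths)   # [1.  1.  1.5]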
Example #12
    def __pow__(self, other) -> "Tensor":
        # With power there are some limitations: the exponent must not be
        # differentiable. Future versions of my library will correct for this.
        if isinstance(other, Tensor):
            common_shape = np.broadcast_shapes(self.shape, other.shape)
            x, y = broadcast(self, common_shape), broadcast(other, common_shape)

            output = Tensor(
                x.data ** y.data, x.requires_grad or y.requires_grad, Pow(x, y),
            )
            return output

        return self ** Tensor(other)
Example #13
    def __mul__(self, other) -> "Tensor":
        if isinstance(other, Tensor):
            common_shape = np.broadcast_shapes(self.shape, other.shape)
            x, y = broadcast(self, common_shape), broadcast(other, common_shape)

            output = Tensor(
                x.data * y.data,
                x.requires_grad or y.requires_grad,
                Mul(x, y),  # Pass the multiplication operation
            )
            return output

        return self * Tensor(other)
Example #14
 def __add__(self, other, sub=False, rsub=False):
     """Add an array with an array or an array with a scalar."""
     if isinstance(other, gpuarray.GPUArray):
         # add another vector
         result = type(self)(np.broadcast_shapes(self.shape, other.shape),
                             gpuarray._get_common_dtype(self, other))
         return self._axpbyz(-1 if rsub else 1, other, -1 if sub else 1,
                             result)
     else:
         if sub:
             return super().__sub__(other)
         elif rsub:
             return super().__rsub__(other)
         else:
             return super().__add__(other)
Example #15
    def __sub__(self, other) -> "Tensor":
        if isinstance(other, Tensor):
            common_shape = np.broadcast_shapes(self.shape, other.shape)
            x, y = (
                broadcast(self, common_shape),
                broadcast(other, common_shape),
            )  # Same pattern here, like with addition

            output = Tensor(
                x.data - y.data,
                x.requires_grad or y.requires_grad,
                Sub(x, y),  # Pass the subtraction operation
            )
            return output

        return self - Tensor(other)
Example #16
    def sample(
            self,
            l: ArrayLike,
            m: ArrayLike,
            frequency: u.Quantity,  # noqa: E741
            frame: Union[AltAzFrame, RADecFrame],
            output_type: OutputType,
            *,
            out: Optional[np.ndarray] = None) -> np.ndarray:
        _check_out(out, output_type)
        l_ = _asarray(l)
        m_ = _asarray(m)
        l_, m_ = np.broadcast_arrays(l_, m_)
        # numba seems to trigger a FutureWarning when it checks the writeable
        # flag on these broadcast arrays. Suppress it by making them explicitly
        # readonly. Explicitly take views so that we don't modify the input
        # arrays (which could otherwise happen if the transformations above are
        # no-ops).
        l_ = l_.view()
        m_ = m_.view()
        l_.flags.writeable = False
        m_.flags.writeable = False
        if isinstance(frame, RADecFrame):
            transform = frame.lm_to_hv()
            # Broadcast transform with l, m
            shape = np.broadcast_shapes(transform.shape[:-2], l_.shape)
            l_, m_ = self._transform_lm(
                np.broadcast_to(transform, shape + (2, 2)),
                np.broadcast_to(l_, shape), np.broadcast_to(m_, shape))
        elif not isinstance(frame, AltAzFrame):
            raise TypeError(
                f'frame must be RADecFrame or AltAzFrame, not {type(frame)}')
        l_ = _asarray(l_, np.float32)
        m_ = _asarray(m_, np.float32)

        if output_type in {OutputType.JONES_XY, OutputType.JONES_HV}:
            return self._sample_altaz_jones(l_,
                                            m_,
                                            frequency,
                                            frame,
                                            output_type,
                                            out=out)
        else:
            jones = self._sample_altaz_jones(l_, m_, frequency, frame,
                                             output_type)
            return self._finalize(jones, output_type, out=out)
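In the RADecFrame branch, a stack of 2x2 transforms is broadcast against the l/m grid before being applied pointwise. A self-contained sketch of that broadcast pattern; the actual _transform_lm is internal, so a plain einsum stands in for it here:

import numpy as np

transform = np.tile(np.eye(2), (4, 1, 1, 1))   # shape (4, 1, 2, 2): batch of 2x2 matrices
l = np.linspace(-0.1, 0.1, 3)                  # shape (3,)
m = np.zeros(3)

shape = np.broadcast_shapes(transform.shape[:-2], l.shape)   # (4, 3)
T = np.broadcast_to(transform, shape + (2, 2))
lm = np.stack([np.broadcast_to(l, shape), np.broadcast_to(m, shape)], axis=-1)

l_new, m_new = np.moveaxis(np.einsum('...ij,...j->...i', T, lm), -1, 0)
print(l_new.shape, m_new.shape)   # (4, 3) (4, 3)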
Example #17
def align_shape(*polys: PolyLike) -> Tuple[ndpoly, ...]:
    """
    Align polynomial by shape.

    Args:
        polys:
            Polynomial to make adjustment to.

    Returns:
        Same as ``polys``, but internal adjustments made to make them
        compatible for further operations.

    Examples:
        >>> q0, q1 = numpoly.variable(2)
        >>> poly1 = 4*q0
        >>> poly2 = numpoly.polynomial([[2*q0+1, 3*q0-q1]])
        >>> poly1.shape
        ()
        >>> poly2.shape
        (1, 2)
        >>> poly1, poly2 = numpoly.align_shape(poly1, poly2)
        >>> poly1
        polynomial([[4*q0, 4*q0]])
        >>> poly2
        polynomial([[2*q0+1, -q1+3*q0]])
        >>> poly1.shape
        (1, 2)
        >>> poly2.shape
        (1, 2)

    """
    # return tuple(numpoly.broadcast_arrays(*polys))
    polys_ = [numpoly.aspolynomial(poly) for poly in polys]
    common = numpy.ones(
        numpy.broadcast_shapes(*[poly.shape for poly in polys_]), dtype=int)

    for idx, poly in enumerate(polys_):
        if poly.shape != common.shape:
            polys_[idx] = poly.from_attributes(
                exponents=poly.exponents,
                coefficients=tuple(coeff * common
                                   for coeff in poly.coefficients),
                names=poly.indeterminants,
            )
    return tuple(polys_)
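Multiplying by an integer array of ones with the common shape is a compact way to broadcast the coefficients; the ones array acts as a shape carrier. The same trick on plain NumPy arrays:

import numpy as np

a = np.array(4.0)                 # shape ()
b = np.arange(6.0).reshape(2, 3)  # shape (2, 3)

common = np.ones(np.broadcast_shapes(a.shape, b.shape), dtype=int)
a_aligned = a * common            # now shape (2, 3), value 4.0 everywhere
b_aligned = b * common            # values unchanged, already the common shape
print(a_aligned.shape, b_aligned.shape)   # (2, 3) (2, 3)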
Example #18
    def __add__(self, other) -> "Tensor":
        # Add tensors together; these element-wise functions broadcast the
        # operands to a common shape first.
        if isinstance(other, Tensor):
            common_shape = np.broadcast_shapes(self.shape, other.shape)
            x, y = (
                broadcast(self, common_shape),
                broadcast(other, common_shape),
            )  # This broadcasting is essential to make sure gradients are accumulated and scattered properly

            output = Tensor(  # If either operand required gradients, the output will too.
                x.data + y.data,
                x.requires_grad or y.requires_grad,
                Add(x, y),  # Create the output tensor and pass the addition operation.
            )
            return output

        return self + Tensor(other)  # If it's not a tensor, make it one.
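Broadcasting both operands to the common shape before the elementwise op, as these Tensor methods do, produces exactly the values NumPy's implicit broadcasting would; making it explicit just keeps the shapes (and therefore the gradient bookkeeping) uniform. A plain-NumPy check of that equivalence:

import numpy as np

a = np.arange(3.0)                  # shape (3,)
b = np.arange(6.0).reshape(2, 3)    # shape (2, 3)

common_shape = np.broadcast_shapes(a.shape, b.shape)
x = np.broadcast_to(a, common_shape)
y = np.broadcast_to(b, common_shape)

assert np.array_equal(x + y, a + b)   # explicit and implicit broadcasting agree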
Example #19
 def sample(self,
            l: ArrayLike,
            m: ArrayLike,
            frequency: u.Quantity,
            frame: Union[AltAzFrame, RADecFrame],
            output_type: OutputType,
            *,
            out: Optional[np.ndarray] = None) -> np.ndarray:
     l_ = np.asarray(l)
     m_ = np.asarray(m)
     if output_type != OutputType.UNPOLARIZED_POWER:
         raise NotImplementedError(
             'Only UNPOLARIZED_POWER is currently implemented')
     in_shape = np.broadcast_shapes(l_.shape, m_.shape, frame.shape)
     out_shape = frequency.shape + in_shape
     if out is not None:
         if out.shape != out_shape:
             raise ValueError(
                 f'out must have shape {out_shape}, not {out.shape}')
         if out.dtype != np.float32:
             raise TypeError(
                 f'out must have dtype float32, not {out.dtype}')
         if not out.flags.c_contiguous:
             raise ValueError('out must be C contiguous')
     else:
         out = np.empty(out_shape, np.float32)
     frequency_Hz = frequency.to_value(u.Hz).astype(np.float32,
                                                    copy=False,
                                                    casting='same_kind')
     samples = self._interp_samples(frequency_Hz)
     # Create view with l/m axis flattened to 1D for benefit of numba
     l_view = np.broadcast_to(l_, in_shape).ravel()
     m_view = np.broadcast_to(m_, in_shape).ravel()
     out_view = out.view()
     out_view.shape = frequency.shape + l_view.shape
     _sample_impl(l_view, m_view, samples, self._step, out_view)
     return out
Example #20
def multiply(
    x1: PolyLike,
    x2: PolyLike,
    out: Optional[ndpoly] = None,
    where: numpy.typing.ArrayLike = True,
    **kwargs: Any,
) -> ndpoly:
    """
    Multiply arguments element-wise.

    Args:
        x1, x2:
            Input arrays to be multiplied. If ``x1.shape != x2.shape``, they
            must be broadcastable to a common shape (which becomes the shape of
            the output).
        out:
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or
            `None`, a freshly-allocated array is returned. A tuple (possible
            only as a keyword argument) must have length equal to the number of
            outputs.
        where:
            This condition is broadcast over the input. At locations where the
            condition is True, the `out` array will be set to the ufunc result.
            Elsewhere, the `out` array will retain its original value. Note
            that if an uninitialized `out` array is created via the default
            ``out=None``, locations within it where the condition is False will
            remain uninitialized.
        kwargs:
            Keyword args passed to numpy.ufunc.

    Returns:
        The product of `x1` and `x2`, element-wise. This is a scalar if
        both `x1` and `x2` are scalars.

    Examples:
        >>> poly = numpy.arange(9.0).reshape((3, 3))
        >>> q0q1q2 = numpoly.variable(3)
        >>> numpoly.multiply(poly, q0q1q2)
        polynomial([[0.0, q1, 2.0*q2],
                    [3.0*q0, 4.0*q1, 5.0*q2],
                    [6.0*q0, 7.0*q1, 8.0*q2]])

    """
    x1, x2 = numpoly.align_indeterminants(x1, x2)
    dtype = numpy.find_common_type([x1.dtype, x2.dtype], [])
    shape = numpy.broadcast_shapes(x1.shape, x2.shape)

    where = numpy.asarray(where)
    exponents = numpy.unique(numpy.tile(x1.exponents, (len(x2.exponents), 1)) +
                             numpy.repeat(x2.exponents, len(x1.exponents), 0),
                             axis=0)
    out_ = numpoly.ndpoly(
        exponents=exponents,
        shape=shape,
        names=x1.indeterminants,
        dtype=dtype,
    ) if out is None else out

    seen = set()
    for expon1, coeff1 in zip(x1.exponents, x1.coefficients):
        for expon2, coeff2 in zip(x2.exponents, x2.coefficients):
            key = (expon1 + expon2 + x1.KEY_OFFSET).ravel()
            key = key.view(f"U{len(expon1)}").item()
            if key in seen:
                out_.values[key] += numpy.multiply(coeff1,
                                                   coeff2,
                                                   where=where,
                                                   **kwargs)
            else:
                numpy.multiply(coeff1,
                               coeff2,
                               out=out_.values[key],
                               where=where,
                               **kwargs)
            seen.add(key)

    if out is None:
        out_ = numpoly.clean_attributes(out_)
    return out_
Example #21
def numpy_broadcast_shapes(*args):
    return np.broadcast_shapes(*args)
Example #22
from typing import List, Dict, Any
import numpy as np
import numpy.typing as npt

AR_f8: npt.NDArray[np.float64]
AR_LIKE_f: List[float]
interface_dict: Dict[str, Any]

reveal_type(np.lib.stride_tricks.DummyArray(interface_dict))  # E: numpy.lib.stride_tricks.DummyArray

reveal_type(np.lib.stride_tricks.as_strided(AR_f8))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.lib.stride_tricks.as_strided(AR_LIKE_f))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5)))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]

reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5)))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]

reveal_type(np.broadcast_to(AR_f8, 5))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(np.broadcast_to(AR_LIKE_f, (1, 5)))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.broadcast_to(AR_f8, [4, 6], subok=True))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]

reveal_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)))  # E: tuple[builtins.int]
reveal_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)))  # E: tuple[builtins.int]

reveal_type(np.broadcast_arrays(AR_f8, AR_f8))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
reveal_type(np.broadcast_arrays(AR_f8, AR_LIKE_f))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
Example #23
    def rng_fn(
        cls,
        rng: np.random.RandomState,
        mu: Union[np.ndarray, float],
        sigma: Union[np.ndarray, float],
        init: float,
        steps: int,
        size: Tuple[int],
    ) -> np.ndarray:
        """Gaussian Random Walk generator.

        The init value is drawn from the Normal distribution with the same sigma as the
        innovations.

        Notes
        -----
        Currently does not support custom init distribution

        Parameters
        ----------
        rng: np.random.RandomState
           Numpy random number generator
        mu: array_like
           Random walk mean
        sigma: np.ndarray
            Standard deviation of innovation (sigma > 0)
        init: float
            Initialization value for GaussianRandomWalk
        steps: int
            Length of the random walk; must be at least 1. The returned array has
            ``steps + 1`` values along the last axis, because the first value is the
            initial value.
        size: tuple of int
            The batch shape of random walk time series to generate.

        Returns
        -------
        ndarray
        """

        if steps < 1:
            raise ValueError("Steps must be greater than 0")

        # If size is None then the returned series should be (*implied_dims, 1+steps)
        if size is None:
            # broadcast parameters with each other to find implied dims
            bcast_shape = np.broadcast_shapes(
                np.asarray(mu).shape,
                np.asarray(sigma).shape,
                np.asarray(init).shape,
            )
            dist_shape = (*bcast_shape, int(steps))

        # If size is not None then the returned series should be (*size, 1+steps)
        else:
            dist_shape = (*size, int(steps))

        # Add one dimension to the right, so that mu and sigma broadcast safely along
        # the steps dimension
        innovations = rng.normal(loc=mu[..., None], scale=sigma[..., None], size=dist_shape)
        grw = np.concatenate([init[..., None], innovations], axis=-1)
        return np.cumsum(grw, axis=-1)
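The same shape logic with a plain NumPy generator: the parameters are broadcast against each other to get the implied batch shape, a trailing axis of length steps holds the innovations, and the cumulative sum runs along that last axis. A minimal sketch with made-up mu, sigma and init values:

import numpy as np

rng = np.random.default_rng(0)
mu = np.array([0.0, 1.0])      # two independent walks
sigma = np.array(0.5)
init = np.array([10.0, -10.0])
steps = 4

bcast_shape = np.broadcast_shapes(mu.shape, sigma.shape, init.shape)   # (2,)
dist_shape = (*bcast_shape, steps)

innovations = rng.normal(loc=mu[..., None], scale=sigma[..., None], size=dist_shape)
grw = np.concatenate([np.broadcast_to(init, bcast_shape)[..., None], innovations], axis=-1)
print(np.cumsum(grw, axis=-1).shape)   # (2, 5) -- the init value plus `steps` innovations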
Example #24
def chol_solve_numpy(t, b, diageps=None):
    """
    t (..., n)
    b (..., n, m) or (n,)
    t[0] += diageps
    m = toeplitz(t)
    l = chol(m)
    return solve(l, b)
    pure numpy, object arrays supported
    """

    t = numpy.copy(t, subok=True)
    n = t.shape[-1]

    b = numpy.asanyarray(b)
    vec = b.ndim < 2
    if vec:
        b = b[:, None]
    assert b.shape[-2] == n

    if n == 0:
        shape = numpy.broadcast_shapes(t.shape[:-1], b.shape[:-2])
        shape += (n, ) if vec else b.shape[-2:]
        dtype = numpy.result_type(t.dtype, b.dtype)
        return numpy.empty(shape, dtype)

    if diageps is not None:
        t[..., 0] += diageps

    if numpy.any(t[..., 0] <= 0):
        msg = '1-th leading minor is not positive definite'
        raise numpy.linalg.LinAlgError(msg)

    norm = numpy.copy(t[..., 0, None], subok=True)
    t /= norm
    invLb = numpy.copy(numpy.broadcast_arrays(b, t[..., None])[0], subok=True)
    prevLi = t
    g = numpy.stack([numpy.roll(t, 1, -1), t], -2)

    for i in range(1, n):

        assert numpy.all(g[..., 0, i] > 0)
        rho = -g[..., 1, i, None, None] / g[..., 0, i, None, None]

        if numpy.any(numpy.abs(rho) >= 1):
            msg = '{}-th leading minor is not positive definite'.format(i + 1)
            raise numpy.linalg.LinAlgError(msg)

        gamma = numpy.sqrt((1 - rho) * (1 + rho))
        g[..., :, i:] += g[..., ::-1, i:] * rho
        g[..., :, i:] /= gamma
        Li = g[..., 0, i:]  # i-th column of L from row i
        invLb[..., i:, :] -= invLb[..., i - 1, None, :] * prevLi[..., i:, None]
        invLb[..., i, :] /= Li[..., 0, None]
        prevLi[..., i:] = Li
        g[..., 0, i:] = numpy.roll(g[..., 0, i:], 1, -1)

    invLb /= numpy.sqrt(norm[..., None])
    if vec:
        invLb = numpy.squeeze(invLb, -1)
    return invLb
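A quick sanity check, assuming the chol_solve_numpy function above is in scope: its result should agree with a dense Toeplitz Cholesky solve done with SciPy.

import numpy as np
from scipy.linalg import cholesky, solve_triangular, toeplitz

t = np.array([4.0, 1.0, 0.5])       # first row/column of the Toeplitz matrix
b = np.arange(6.0).reshape(3, 2)

L = cholesky(toeplitz(t), lower=True)
expected = solve_triangular(L, b, lower=True)

assert np.allclose(chol_solve_numpy(t, b), expected)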
Example #25
 def __init__(self, *arrays):
     self.shape = numpy.broadcast_shapes(*(a.shape for a in arrays))
Example #26
def share_figure(
    x,
    probability,
    choices=None,
    weights=1,
    xlabel=None,
    bins=None,
    pct_bins=20,
    figsize=(12, 4),
    style='stacked',
    discrete=None,
    xlim=None,
    xscale=None,
    xmajorticks=None,
    xminorticks=None,
    include_nests=False,
    exclude_alts=None,
    format='figure',
    **kwargs,
):
    """
	Generate a figure of variables over a range of variable values.

	Parameters
	----------
	x : array-like, 1-d
		An array giving values for some variable.
	probability : array-like, 2-d
		The pre-calculated probability array for all cases in this analysis.
		First dimension must be the same shape as `x`.  The second dimension
		represents the alternatives (or similar).
	choices : array-like, optional
		The observed choices array for all cases in this analysis. If provided,
		the first dimension must be the same shape as `x`.  The second dimension
		represents the alternatives (or similar).
	weights : array-like, 1-d, optional
		The case weights for all cases in this analysis. If provided,
		the shape must be the same shape as `x`.
	xlabel : str, optional
		A label to use for the x-axis of the resulting figure.  If not given,
		the value of `x.name` is used if it exists.  Set to `False` to omit the
		x-axis label.
	bins : int, optional
		The number of equal-sized bins to use.
	pct_bins : int or array-like, default 20
		The number of equal-mass bins to use.
	style : {'stacked', 'dataframe', 'many'}
		The type of output to generate.
	discrete : bool, default False
		Whether to treat the data values explicitly as discrete (vs continuous)
		data.  This will change the styling and automatic bin generation.  If
		there are very few unique values, the data will be assumed to be
		discrete anyhow.
	xlim : 2-tuple, optional
		Explicitly set the range of values shown on the x axis of generated
		figures.  This can truncate long tails.  The actual histogram bins
		are not changed.
	include_nests : bool, default False
		Whether to include nests in the figure.
	exclude_alts : Collection, optional
		Alternatives to exclude from the figure.
	filter : str, optional
		A filter that will be used to select only a subset of cases.
	format : {'figure', 'svg', 'png'}, default 'figure'
		How to return the result if it is a figure. The default is to return
		the raw matplotlib Figure instance; set to `svg` to get an SVG
		rendering as an xmle.Elem.

	Returns
	-------
	Figure, DataFrame, or Elem
	"""

    if style not in {'stacked', 'dataframe', 'many'}:
        raise ValueError("style must be in {'stacked', 'dataframe', 'many'}")

    if include_nests and style == 'stacked' and exclude_alts is None:
        import warnings
        warnings.warn(
            "including nests in a stacked figure is likely to give "
            "misleading results unless constituent alternatives are omitted")

    if exclude_alts is None:
        exclude_alts = set()

    if xlabel is None:
        try:
            xlabel = x.name
        except AttributeError:
            pass

    filter_ = slice(None)

    h_pr = {}
    h_ch = {}

    discrete_values = None
    if discrete:
        discrete_values = numpy.unique(x)
    elif discrete is None:
        from .histograms import seems_like_discrete_data
        discrete, discrete_values = seems_like_discrete_data(
            x, return_uniques=True)

    pr = numpy.asarray(probability)
    if choices is not None:
        ch = numpy.asarray(choices)
    else:
        ch = None
    wt = numpy.asarray(weights)

    x_discrete_labels = None if discrete_values is None else [
        str(i) for i in discrete_values
    ]

    if bins is None:
        if isinstance(x.dtype, pandas.CategoricalDtype):
            discrete_values = numpy.arange(len(x_discrete_labels))
            bins = numpy.arange(len(x_discrete_labels) + 1)
            x = x.cat.codes
        elif isinstance(pct_bins, int):
            bins = numpy.percentile(x, numpy.linspace(0, 100, pct_bins + 1))
        else:
            bins = numpy.percentile(x, pct_bins)

    try:
        columns = probability.columns
    except AttributeError:
        columns = None
    else:
        columns = dict(enumerate(columns))

    # check for correct array shapes, raise helpful message if not compatible
    pr_w_shape = numpy.broadcast_shapes(pr[:, 0].shape, wt.shape)
    if x.shape != pr_w_shape:
        raise ValueError(f"incompatible shapes, "
                         f"x.shape={x.shape}, "
                         f"pr.shape={pr.shape}, "
                         f"wt.shape={wt.shape}, "
                         f"(pr[:,i]*wt).shape={pr_w_shape}")
    if ch is not None:
        ch_w_shape = numpy.broadcast_shapes(ch[:, 0].shape, wt.shape)
        if x.shape != ch_w_shape:
            raise ValueError(f"incompatible shapes, "
                             f"x.shape={x.shape}, "
                             f"ch.shape={ch.shape}, "
                             f"wt.shape={wt.shape}, "
                             f"(ch[:,i]*wt).shape={ch_w_shape}")

    for i in range(pr.shape[1]):

        h_pr[i], _ = numpy.histogram(
            x,
            weights=pr[:, i] * wt,
            bins=bins,
        )
        if ch is not None:
            h_ch[i], _ = numpy.histogram(
                x,
                weights=ch[:, i] * wt,
                bins=bins,
            )

    h_pr = pandas.DataFrame(h_pr)
    h_pr.index = pandas.IntervalIndex.from_breaks(bins)  # bins[:-1]
    h_pr.rename(columns=columns, inplace=True)
    _denominator, _ = numpy.histogram(
        x,
        weights=pr.sum(1) * wt,
        bins=bins,
    )
    h_pr_share = (h_pr / _denominator.reshape(-1, 1))
    if ch is not None:
        _denominator_ch, _ = numpy.histogram(
            x,
            weights=ch.sum(1) * wt,
            bins=bins,
        )
        h_ch = pandas.DataFrame(h_ch)
        h_ch.index = h_pr.index
        h_ch.rename(columns=columns, inplace=True)
        h_ch_share = (h_ch / _denominator_ch.reshape(-1, 1))
    else:
        h_ch_share = None

    if discrete:
        x_placement = numpy.arange(len(bins) - 1)
        x_alignment = 'center'
        bin_widths = 0.8
    else:
        x_placement = bins[:-1]
        x_alignment = 'edge'
        bin_widths = bins[1:] - bins[:-1]

    if xlabel is False:
        xlabel = None

    if xlim is None:
        xlim = (bins[0], bins[-1])

    if style == 'dataframe':

        if ch is not None:
            result = pandas.concat(
                {
                    'Modeled Shares': h_pr_share,
                    'Observed Shares': h_ch_share,
                },
                axis=1,
                sort=False)
        else:
            result = pandas.concat({
                'Modeled Shares': h_pr_share,
            },
                                   axis=1,
                                   sort=False)
        result['Count', '*'] = h_pr.sum(1)

        if xlabel:
            result.index.name = xlabel

    elif style == 'stacked':

        fig, (ax0, ax1) = plt.subplots(1, 2, figsize=figsize)

        bottom0 = 0
        bottom1 = 0

        for i in h_pr_share.columns:
            ax0.bar(
                x_placement,
                height=h_pr_share[i],
                bottom=bottom0,
                width=bin_widths,
                align=x_alignment,
                label=i,
            )
            bottom0 = h_pr_share[i].fillna(0).values + bottom0
            ax1.bar(
                x_placement,
                height=h_ch_share[i],
                bottom=bottom1,
                width=bin_widths,
                align=x_alignment,
            )
            bottom1 = h_ch_share[i].fillna(0).values + bottom1

        ax0.set_ylim(0, 1)
        if not discrete:
            ax0.set_xlim(*xlim)
            if xscale:
                if isinstance(xscale, str):
                    ax0.set_xscale(xscale)
                elif isinstance(xscale, dict):
                    ax0.set_xscale(**xscale)
                else:
                    raise ValueError(
                        f"xscale must be str or dict, not {type(xscale)}")
            if xmajorticks is not None:
                ax0.set_xticks(xmajorticks)
                ax0.set_xticklabels(xmajorticks)
            if xminorticks is not None:
                ax0.set_xticks(xminorticks, minor=True)
        if x_discrete_labels is not None:
            ax0.set_xticks(numpy.arange(len(x_discrete_labels)))
            ax0.set_xticklabels(x_discrete_labels)
        ax0.set_title('Modeled Shares')

        ax1.set_ylim(0, 1)
        if not discrete:
            ax1.set_xlim(*xlim)
            if xscale:
                if isinstance(xscale, str):
                    ax1.set_xscale(xscale)
                elif isinstance(xscale, dict):
                    ax1.set_xscale(**xscale)
                else:
                    raise ValueError(
                        f"xscale must be str or dict, not {type(xscale)}")
            if xmajorticks is not None:
                ax1.set_xticks(xmajorticks)
                ax1.set_xticklabels(xmajorticks)
            if xminorticks is not None:
                ax1.set_xticks(xminorticks, minor=True)
        if x_discrete_labels is not None:
            ax1.set_xticks(numpy.arange(len(x_discrete_labels)))
            ax1.set_xticklabels(x_discrete_labels)
        ax1.set_title('Observed Shares')
        if xlabel:
            ax0.set_xlabel(xlabel)
            ax1.set_xlabel(xlabel)

        fig.legend(loc='center right', )

        # fig.tight_layout(pad=0.5)
        if format == 'svg':
            result = plot_as_svg_xhtml(fig, **kwargs)
            fig.clf()
            plt.close(fig)
        elif format == 'png':
            from .png import make_png
            result = make_png(fig, **kwargs)
            fig.clf()
            plt.close(fig)
        else:
            result = fig

    else:

        fig, axes = plt.subplots(len(h_pr_share.columns), 1, figsize=figsize)

        shift = 0.4 if discrete else 0

        for n, i in enumerate(h_pr_share.columns):
            x_, y_ = pseudo_bar_data(bins - shift,
                                     h_pr_share[i],
                                     gap=0.2 if discrete else 0)
            axes[n].plot(x_, y_, label='Modeled' if n == 0 else None, lw=1.5)

            x_ch_, y_ch_ = pseudo_bar_data(bins - shift,
                                           h_ch_share[i],
                                           gap=0.2 if discrete else 0)
            axes[n].fill_between(
                x_ch_,
                y_ch_,
                label='Observed' if n == 0 else None,
                step=None,
                facecolor='#ffbe4d',
                edgecolor='#ffa200',
                lw=1.5,
            )
            if not discrete:
                axes[n].set_xlim(*xlim)
                if xscale:
                    if isinstance(xscale, str):
                        axes[n].set_xscale(xscale)
                    elif isinstance(xscale, dict):
                        axes[n].set_xscale(**xscale)
                    else:
                        raise ValueError(
                            f"xscale must be str or dict, not {type(xscale)}")
                if xmajorticks is not None:
                    axes[n].set_xticks(xmajorticks)
                    axes[n].set_xticklabels(xmajorticks)
                if xminorticks is not None:
                    axes[n].set_xticks(xminorticks, minor=True)
            if x_discrete_labels is not None:
                axes[n].set_xticks(numpy.arange(len(x_discrete_labels)))
                axes[n].set_xticklabels(x_discrete_labels)
            axes[n].set_ylabel(i)

            # axes[n].legend(
            # 	# loc='center right',
            # )

        legnd = axes[0].legend(loc='lower center',
                               ncol=2,
                               borderaxespad=0,
                               bbox_to_anchor=(0.5, 1.08))

        if xlabel:
            axes[-1].set_xlabel(xlabel)
        #fig.tight_layout(pad=0.5)
        if format == 'svg':
            result = plot_as_svg_xhtml(fig,
                                       bbox_extra_artists=[legnd],
                                       **kwargs)
            fig.clf()
            plt.close(fig)
        elif format == 'png':
            from .png import make_png
            result = make_png(fig, **kwargs)
            fig.clf()
            plt.close(fig)
        else:
            result = fig

    return result