Example #1
 def __init__(
     self,
     input_dim: IntArgType,
     output_dim: IntArgType = 1,
 ):
     self._input_dim = np.int_(_utils.as_numpy_scalar(input_dim))
     self._output_dim = np.int_(_utils.as_numpy_scalar(output_dim))
Example #2
def _apply(
    op_registry: _BinaryOperatorRegistryType,
    op1: LinearOperator,
    op2: LinearOperator,
    fallback_operator: Optional[
        Callable[
            [LinearOperator, LinearOperator],
            Union[LinearOperator, NotImplementedType],
        ]
    ] = None,
) -> Union[LinearOperator, NotImplementedType]:
    if np.ndim(op1) == 0:
        key1 = np.number
        op1 = utils.as_numpy_scalar(op1)
    else:
        key1 = type(op1)

    if np.ndim(op2) == 0:
        key2 = np.number
        op2 = utils.as_numpy_scalar(op2)
    else:
        key2 = type(op2)

    key = (key1, key2)

    if key in op_registry:
        res = op_registry[key](op1, op2)
    else:
        res = NotImplemented

    if res is NotImplemented and fallback_operator is not None:
        res = fallback_operator(op1, op2)

    return res
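The `_apply` helper above implements double dispatch over a registry of binary operators: scalar operands are normalized to the shared `np.number` key, the `(key1, key2)` type pair selects a specialized implementation, and an optional fallback handles unregistered combinations. Below is a minimal self-contained sketch of the same pattern, with a toy `Scaled` class and registry standing in for the `LinearOperator` machinery (all names here are hypothetical, not the library's API):

import numbers
from typing import Callable, Dict, Tuple


class Scaled:
    """Toy stand-in for a LinearOperator subclass."""

    def __init__(self, factor: float):
        self.factor = factor


# Registry mapping (type, type) keys to specialized implementations.
_mul_registry: Dict[Tuple[type, type], Callable] = {
    (numbers.Number, Scaled): lambda s, op: Scaled(s * op.factor),
    (Scaled, Scaled): lambda a, b: Scaled(a.factor * b.factor),
}


def apply_binary(registry, op1, op2):
    # Normalize scalars to a shared key, mirroring the np.number key above.
    key1 = numbers.Number if isinstance(op1, numbers.Number) else type(op1)
    key2 = numbers.Number if isinstance(op2, numbers.Number) else type(op2)
    impl = registry.get((key1, key2))
    return impl(op1, op2) if impl is not None else NotImplemented


print(apply_binary(_mul_registry, 2.0, Scaled(3.0)).factor)  # 6.0
print(apply_binary(_mul_registry, Scaled(2.0), 4.0))         # NotImplemented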
Example #3
 def __init__(
     self,
     input_dim: IntArgType,
     output_dim: IntArgType,
     dtype: DTypeArgType,
 ):
     self._input_dim = np.int_(_utils.as_numpy_scalar(input_dim))
     self._output_dim = np.int_(_utils.as_numpy_scalar(output_dim))
     self._dtype = np.dtype(dtype)
Example #4
 def __init__(
     self,
     input_shape: ShapeLike,
     constant: ScalarLike = 0.0,
     exponent: IntLike = 1.0,
 ):
     self.constant = _utils.as_numpy_scalar(constant)
     self.exponent = _utils.as_numpy_scalar(exponent)
     super().__init__(input_shape=input_shape)
Example #5
 def __init__(
     self,
     input_dim: IntArgType,
     constant: ScalarArgType = 0.0,
     exponent: IntArgType = 1.0,
 ):
     self.constant = _utils.as_numpy_scalar(constant)
     self.exponent = _utils.as_numpy_scalar(exponent)
     super().__init__(input_dim=input_dim, output_dim=1)
Example #6
 def __init__(
     self,
     input_shape: ShapeLike,
     lengthscale: ScalarLike = 1.0,
     alpha: ScalarLike = 1.0,
 ):
     self.lengthscale = _utils.as_numpy_scalar(lengthscale)
     self.alpha = _utils.as_numpy_scalar(alpha)
     if not self.alpha > 0:
         raise ValueError(f"Scale mixture alpha={self.alpha} must be positive.")
     super().__init__(input_shape=input_shape)
Example #7
 def __init__(
     self,
     input_dim: IntArgType,
     lengthscale: ScalarArgType = 1.0,
     alpha: ScalarArgType = 1.0,
 ):
     self.lengthscale = _utils.as_numpy_scalar(lengthscale)
     self.alpha = _utils.as_numpy_scalar(alpha)
     if not self.alpha > 0:
         raise ValueError(
             f"Scale mixture alpha={self.alpha} must be positive.")
     super().__init__(input_dim=input_dim, output_dim=1)
Example #8
    def __init__(
        self,
        input_dim: IntArgType,
        lengthscale: ScalarArgType = 1.0,
        nu: ScalarArgType = 1.5,
    ):
        self.lengthscale = _utils.as_numpy_scalar(lengthscale)
        if not self.lengthscale > 0:
            raise ValueError(
                f"Lengthscale l={self.lengthscale} must be positive.")
        self.nu = _utils.as_numpy_scalar(nu)
        if not self.nu > 0:
            raise ValueError(f"Hyperparameter nu={self.nu} must be positive.")

        super().__init__(input_dim=input_dim)
Example #9
    def __init__(
        self,
        support: _ValueType,
    ):
        if np.isscalar(support):
            support = _utils.as_numpy_scalar(support)

        self._support = support

        support_floating = self._support.astype(
            np.promote_types(self._support.dtype, np.float_))

        if config.matrix_free:
            cov = lambda: (linops.Scaling(
                0.0,
                shape=(self._support.size, self._support.size),
                dtype=support_floating.dtype,
            ) if self._support.ndim > 0 else _utils.as_numpy_scalar(
                0.0, support_floating.dtype))
        else:
            cov = lambda: np.broadcast_to(
                _utils.as_numpy_scalar(0.0, support_floating.dtype),
                shape=((self._support.size, self._support.size)
                       if self._support.ndim > 0 else ()),
            )

        var = lambda: np.broadcast_to(
            _utils.as_numpy_scalar(0.0, support_floating.dtype),
            shape=self._support.shape,
        )

        super().__init__(
            shape=self._support.shape,
            dtype=self._support.dtype,
            parameters={"support": self._support},
            sample=self._sample,
            in_support=lambda x: np.all(x == self._support),
            pmf=lambda x: np.float_(1.0
                                    if np.all(x == self._support) else 0.0),
            cdf=lambda x: np.float_(1.0
                                    if np.all(x >= self._support) else 0.0),
            mode=lambda: self._support,
            median=lambda: support_floating,
            mean=lambda: support_floating,
            cov=cov,
            var=var,
            std=var,
        )
Example #10
    def __init__(
        self,
        support: _ValueType,
        random_state: RandomStateArgType = None,
    ):
        if np.isscalar(support):
            support = _utils.as_numpy_scalar(support)

        self._support = support

        support_floating = self._support.astype(
            np.promote_types(self._support.dtype, np.float_))

        super().__init__(
            shape=self._support.shape,
            dtype=self._support.dtype,
            random_state=random_state,
            parameters={"support": self._support},
            sample=self._sample,
            in_support=lambda x: np.all(x == self._support),
            pmf=lambda x: np.float_(1.0
                                    if np.all(x == self._support) else 0.0),
            cdf=lambda x: np.float_(1.0
                                    if np.all(x >= self._support) else 0.0),
            mode=lambda: self._support,
            median=lambda: support_floating,
            mean=lambda: support_floating,
            cov=lambda: np.zeros_like(  # pylint: disable=unexpected-keyword-arg
                support_floating,
                shape=((self._support.size, self._support.size)
                       if self._support.ndim > 0 else ()),
            ),
            var=lambda: np.zeros_like(support_floating),
        )
Example #11
    def _ensure_numpy_float(
            cls,
            name: str,
            value: Any,
            force_scalar: bool = False) -> Union[np.float_, np.ndarray]:
        if np.isscalar(value):
            if not isinstance(value, np.float_):
                try:
                    value = _utils.as_numpy_scalar(value, dtype=np.float_)
                except TypeError as err:
                    raise TypeError(
                        f"The function `{name}` specified via the constructor of "
                        f"`{cls.__name__}` must return a scalar value that can be "
                        f"converted to a `np.float_`, which is not possible for "
                        f"{value} of type {type(value)}.") from err
        elif not force_scalar:
            try:
                value = np.asarray(value, dtype=np.float_)
            except TypeError as err:
                raise TypeError(
                    f"The function `{name}` specified via the constructor of "
                    f"`{cls.__name__}` must return a value that can be converted "
                    f"to a `np.ndarray` of type `np.float_`, which is not possible "
                    f"for {value} of type {type(value)}.") from err
        else:
            raise TypeError(
                f"The function `{name}` specified via the constructor of "
                f"`{cls.__name__}` must return a scalar value, but {value} of type "
                f"{type(value)} is not scalar.")

        assert isinstance(value, (np.float_, np.ndarray))

        return value
Example #12
    def _wrapper(*args, **kwargs):
        res = fun(*args, **kwargs)

        if np.isscalar(res):
            return _utils.as_numpy_scalar(res, dtype=dtype)

        return np.asarray(res, dtype=dtype)
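The `_wrapper` above closes over `fun` and `dtype` from an enclosing scope and coerces every return value to the requested dtype, using `as_numpy_scalar` for scalar results. A hedged sketch of what such an enclosing decorator factory might look like (the name `enforce_output_dtype` is made up, and plain NumPy indexing stands in for `_utils.as_numpy_scalar`):

import functools

import numpy as np


def enforce_output_dtype(fun, dtype):
    """Hypothetical factory returning a dtype-coercing wrapper around fun."""

    @functools.wraps(fun)
    def _wrapper(*args, **kwargs):
        res = fun(*args, **kwargs)

        if np.isscalar(res):
            # Indexing a 0-d array with () unwraps it to a NumPy scalar,
            # playing the role of as_numpy_scalar here.
            return np.asarray(res, dtype=dtype)[()]

        return np.asarray(res, dtype=dtype)

    return _wrapper


half = enforce_output_dtype(lambda x: x / 2, dtype=np.float32)
print(type(half(3)))  # <class 'numpy.float32'>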
Example #13
 def _dense_entropy(self) -> np.float_:
     return _utils.as_numpy_scalar(
         scipy.stats.multivariate_normal.entropy(
             mean=self.dense_mean.ravel(),
             cov=self.dense_cov,
         ),
         dtype=np.float_,
     )
Example #14
    def __init__(self, input_shape: ShapeLike, sigma_sq: ScalarLike = 1.0):

        if sigma_sq < 0:
            raise ValueError(
                f"Noise level sigma_sq={sigma_sq} must be non-negative.")

        self.sigma_sq = _utils.as_numpy_scalar(sigma_sq)

        super().__init__(input_shape=input_shape)
Example #15
    def __init__(self, kernel: Kernel, scalar: ScalarLike):

        if not isinstance(kernel, Kernel):
            raise TypeError("`kernel` must be a `Kernel`")

        if np.ndim(scalar) != 0:
            raise TypeError("`scalar` must be a scalar.")

        self._kernel = kernel
        self._scalar = utils.as_numpy_scalar(scalar)

        super().__init__(input_shape=kernel.input_shape,
                         output_shape=kernel.output_shape)
Example #16
    def _univariate_sample(
        self, size: ShapeType = ()) -> Union[np.floating, np.ndarray]:
        sample = scipy.stats.norm.rvs(loc=self._mean,
                                      scale=self.std,
                                      size=size,
                                      random_state=self.random_state)

        if np.isscalar(sample):
            sample = _utils.as_numpy_scalar(sample, dtype=self.dtype)
        else:
            sample = sample.astype(self.dtype)

        assert sample.shape == size

        return sample
Example #17
    def quantile(self, p: FloatArgType) -> _ValueType:
        """Quantile function.

        The quantile function :math:`Q \\colon [0, 1] \\to \\mathbb{R}` of a random
        variable :math:`X` is defined as
        :math:`Q(p) = \\inf\\{ x \\in \\mathbb{R} \\colon p \\le F_X(x) \\}`, where
        :math:`F_X \\colon \\mathbb{R} \\to [0, 1]` is the :meth:`cdf` of the random
        variable. From the definition it follows that the quantile function always
        returns values of the same dtype as the random variable. For instance, for a
        discrete distribution over the integers, the returned quantiles will also be
        integers. This means that, in general, :math:`Q(0.5)` is not equal to the
        :attr:`median` as it is defined in this class. See
        https://en.wikipedia.org/wiki/Quantile_function for more details and examples.
        """
        if self.__shape != ():
            raise NotImplementedError(
                "The quantile function is only defined for scalar random variables."
            )

        if self.__quantile is None:
            raise NotImplementedError

        try:
            p = _utils.as_numpy_scalar(p, dtype=np.floating)
        except TypeError as exc:
            raise TypeError(
                "The given argument `p` can not be cast to a `np.floating` object."
            ) from exc

        quantile = self.__quantile(p)

        if quantile.shape != self.__shape:
            raise ValueError(
                f"The quantile function should return values of the same shape as the "
                f"random variable, i.e. {self.__shape}, but it returned a value with "
                f"{quantile.shape}."
            )

        if quantile.dtype != self.__dtype:
            raise ValueError(
                f"The quantile function should return values of the same dtype as the "
                f"random variable, i.e. `{self.__dtype.name}`, but it returned a value "
                f"with dtype `{quantile.dtype.name}`."
            )

        return quantile
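The docstring's definition Q(p) = inf{x : p <= F(x)} can be made concrete with a small standalone computation. The sketch below (plain NumPy, independent of this class) shows a discrete distribution whose dtype-preserving quantile at p = 0.5 differs from the conventional median, exactly as the docstring warns:

import numpy as np

# Discrete distribution on the integers 0 and 1, each with probability 1/2.
support = np.array([0, 1])
cdf_values = np.array([0.5, 1.0])  # F(0) = 0.5, F(1) = 1.0


def quantile(p: float) -> np.int_:
    """Q(p) = inf{x : p <= F(x)}, evaluated over the discrete support."""
    return support[np.searchsorted(cdf_values, p)]


print(quantile(0.5))  # 0 -- an integer, matching the support's dtype
# The conventional median of this distribution is 0.5, so Q(0.5) != median.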
Example #18
def test_kernel_matrix_against_naive(kernel: kernels.Kernel,
                                     kernmat: np.ndarray, x0: np.ndarray,
                                     x1: np.ndarray):
    """Test the computation of the kernel matrix against a naive computation."""
    if x1 is None:
        x1 = x0
    np.testing.assert_allclose(
        kernmat,
        scipy.spatial.distance.cdist(
            x0,
            x1,
            metric=lambda x0, x1, k=kernel: _utils.as_numpy_scalar(
                k(x0, x1).item()),
        ),
        rtol=10**-12,
        atol=10**-12,
    )
Example #19
def function_evaluation(fun: Callable[[FloatArgType], FloatArgType],
                        action: FloatArgType) -> np.float_:
    """Observe a (noisy) function evaluation of the quadratic objective.

    Parameters
    ----------
    fun :
        Quadratic objective function to optimize.
    action :
        Input to the objective function.
    """
    observation = fun(action)
    try:
        return utils.as_numpy_scalar(observation, dtype=np.floating)
    except TypeError as exc:
        raise TypeError(
            "The given argument `p` can not be cast to a `np.floating` object."
        ) from exc
Example #20
    def _reshape_kernelmatrix(kerneval: np.ndarray,
                              newshape: ShapeArgType) -> np.ndarray:
        """Reshape the evaluation of the covariance function.

        Reshape the given evaluation of the covariance function to the correct shape,
        determined by the inputs x0 and x1. This method is designed to be called by
        subclasses of :class:`Kernel` in their :meth:`__call__` function to ensure
        the returned quantity has the correct shape independent of the implementation of
        the kernel.

        Parameters
        ----------
        kerneval :
            Covariance function evaluated at ``x0`` and ``x1``.
        newshape :
            New shape of the evaluation of the covariance function.
        """
        if newshape[0] == 0:
            return _utils.as_numpy_scalar(kerneval.squeeze())
        else:
            return kerneval.reshape(newshape)
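Since `_reshape_kernelmatrix` only branches on `newshape[0]`, its contract is easy to illustrate standalone. A small sketch with plain NumPy indexing substituted for `_utils.as_numpy_scalar` (values are made up):

import numpy as np


def reshape_kernelmatrix(kerneval, newshape):
    """Standalone version of the reshaping contract described above."""
    if newshape[0] == 0:
        # Collapse to a NumPy scalar, as _utils.as_numpy_scalar would.
        return np.asarray(kerneval.squeeze())[()]
    return kerneval.reshape(newshape)


print(reshape_kernelmatrix(np.arange(6.0), (3, 2)).shape)  # (3, 2)
print(type(reshape_kernelmatrix(np.array([2.5]), (0,))))   # <class 'numpy.float64'>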
Example #21
 def __init__(self, input_dim: IntArgType, constant: ScalarArgType = 0.0):
     self.constant = _utils.as_numpy_scalar(constant)
     super().__init__(input_dim=input_dim)
Example #22
    def __init__(
        self,
        mean: Union[float, np.floating, np.ndarray, linops.LinearOperator],
        cov: Union[float, np.floating, np.ndarray, linops.LinearOperator],
        cov_cholesky: Optional[Union[float, np.floating, np.ndarray,
                                     linops.LinearOperator]] = None,
        random_state: RandomStateArgType = None,
    ):
        # Type normalization
        if np.isscalar(mean):
            mean = _utils.as_numpy_scalar(mean)

        if np.isscalar(cov):
            cov = _utils.as_numpy_scalar(cov)

        if np.isscalar(cov_cholesky):
            cov_cholesky = _utils.as_numpy_scalar(cov_cholesky)

        # Data type normalization
        dtype = np.promote_types(mean.dtype, cov.dtype)

        if not np.issubdtype(dtype, np.floating):
            dtype = np.dtype(np.double)

        mean = mean.astype(dtype, order="C", casting="safe", copy=False)
        cov = cov.astype(dtype, order="C", casting="safe", copy=False)

        # Shape checking
        if not 0 <= mean.ndim <= 2:
            raise ValueError(
                f"Gaussian random variables must either be scalars, vectors, or "
                f"matrices (or linear operators), but the given mean is a {mean.ndim}-"
                f"dimensional tensor.")

        expected_cov_shape = (np.prod(mean.shape), ) * 2 if len(
            mean.shape) > 0 else ()

        if cov.shape != expected_cov_shape:
            raise ValueError(
                f"The covariance matrix must be of shape {expected_cov_shape}, but "
                f"shape {cov.shape} was given.")

        self._mean = mean
        self._cov = cov

        self._compute_cov_cholesky: Callable[[], _ValueType] = None
        self._cov_cholesky = cov_cholesky  # recall: None if not provided

        # Method selection
        univariate = mean.ndim == 0
        dense = isinstance(mean, np.ndarray) and isinstance(cov, np.ndarray)
        cov_operator = isinstance(cov, linops.LinearOperator)

        if univariate:
            # Univariate Gaussian
            sample = self._univariate_sample
            in_support = Normal._univariate_in_support
            pdf = self._univariate_pdf
            logpdf = self._univariate_logpdf
            cdf = self._univariate_cdf
            logcdf = self._univariate_logcdf
            quantile = self._univariate_quantile

            median = lambda: self._mean
            var = lambda: self._cov
            entropy = self._univariate_entropy

            self._compute_cov_cholesky = self._univariate_cov_cholesky

        elif dense or cov_operator:
            # Multi- and matrixvariate Gaussians
            sample = self._dense_sample
            in_support = Normal._dense_in_support
            pdf = self._dense_pdf
            logpdf = self._dense_logpdf
            cdf = self._dense_cdf
            logcdf = self._dense_logcdf
            quantile = None

            median = None
            var = self._dense_var
            entropy = self._dense_entropy

            self._compute_cov_cholesky = self.dense_cov_cholesky

            # Ensure that the Cholesky factor has the same type as the covariance,
            # and, if necessary, promote data types. Check for (in this order): type, shape, dtype.
            if self._cov_cholesky is not None:

                if not isinstance(self._cov_cholesky, type(self._cov)):
                    raise TypeError(
                        f"The covariance matrix is of type `{type(self._cov)}`, so its "
                        f"Cholesky decomposition must be of the same type, but an "
                        f"object of type `{type(self._cov_cholesky)}` was given."
                    )

                if self._cov_cholesky.shape != self._cov.shape:
                    raise ValueError(
                        f"The cholesky decomposition of the covariance matrix must "
                        f"have the same shape as the covariance matrix, i.e. "
                        f"{self._cov.shape}, but shape {self._cov_cholesky.shape} was given"
                    )

                if self._cov_cholesky.dtype != self._cov.dtype:
                    self._cov_cholesky = self._cov_cholesky.astype(
                        self._cov.dtype, casting="safe", copy=False)

            if isinstance(cov, linops.SymmetricKronecker):
                m, n = mean.shape

                if m != n or n != cov.A.shape[0] or n != cov.B.shape[1]:
                    raise ValueError(
                        "Normal distributions with symmetric Kronecker structured "
                        "kernels must have square mean and square kernels factors with "
                        "matching dimensions.")

                if cov.identical_factors:
                    sample = self._symmetric_kronecker_identical_factors_sample

                    # pylint: disable=redefined-variable-type
                    self._compute_cov_cholesky = (
                        self._symmetric_kronecker_identical_factors_cov_cholesky)
            elif isinstance(cov, linops.Kronecker):
                m, n = mean.shape

                if (m != cov.A.shape[0] or m != cov.A.shape[1]
                        or n != cov.B.shape[0] or n != cov.B.shape[1]):
                    raise ValueError(
                        "Kronecker structured kernels must have factors with the same "
                        "shape as the mean.")

                self._compute_cov_cholesky = self._kronecker_cov_cholesky

        else:
            raise ValueError(
                f"Cannot instantiate normal distribution with mean of type "
                f"{mean.__class__.__name__} and kernels of type "
                f"{cov.__class__.__name__}.")

        super().__init__(
            shape=mean.shape,
            dtype=mean.dtype,
            random_state=random_state,
            parameters={
                "mean": self._mean,
                "cov": self._cov
            },
            sample=sample,
            in_support=in_support,
            pdf=pdf,
            logpdf=logpdf,
            cdf=cdf,
            logcdf=logcdf,
            quantile=quantile,
            mode=lambda: self._mean,
            median=median,
            mean=lambda: self._mean,
            cov=lambda: self._cov,
            var=var,
            entropy=entropy,
        )
Example #23
 def __init__(self, input_dim: IntArgType, sigma: ScalarArgType = 1.0):
     self.sigma = _utils.as_numpy_scalar(sigma)
     super().__init__(input_dim=input_dim, output_dim=1)
Example #24
 def _univariate_entropy(self: _ValueType) -> np.float_:
     return _utils.as_numpy_scalar(
         scipy.stats.norm.entropy(loc=self._mean, scale=self.std),
         dtype=np.float_,
     )
Example #25
 def __init__(self, input_shape: ShapeLike, lengthscale: ScalarLike = 1.0):
     self.lengthscale = _utils.as_numpy_scalar(lengthscale)
     super().__init__(input_shape=input_shape)
Example #26
 def expand_array(x, ndim):
     return np.full((ndim, ), _utils.as_numpy_scalar(x))
Example #27
def test_as_numpy_scalar_scalar_is_good(scalar):
    """All sorts of scalars are transformed into a np.generic."""
    as_scalar = pnut.as_numpy_scalar(scalar)
    assert isinstance(as_scalar, np.generic)
    np.testing.assert_allclose(as_scalar, scalar, atol=0.0, rtol=1e-12)
Example #28
def test_as_numpy_scalar_bad_sequence_is_bad(sequence):
    """Sequence types give rise to ValueErrors in `as_numpy_scalar`."""
    with pytest.raises(ValueError):
        pnut.as_numpy_scalar(sequence)
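Together, this test and the one in Example #27 pin down the contract of `as_numpy_scalar`: scalar inputs come back as `np.generic` instances, while sequences raise `ValueError`. A minimal sketch consistent with that contract, not the library's actual implementation:

import numpy as np


def as_numpy_scalar(x, dtype=None):
    """Sketch: convert a scalar input to an np.generic, reject non-scalars."""
    if np.ndim(x) != 0:
        raise ValueError(f"The given input {x} is not a scalar.")

    # Indexing a 0-d array with () unwraps it into a NumPy scalar type.
    return np.asarray(x, dtype=dtype)[()]


assert isinstance(as_numpy_scalar(1.3), np.generic)
assert isinstance(as_numpy_scalar(2, dtype=np.float64), np.float64)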
Example #29
 def __init__(self, input_shape: ShapeLike, constant: ScalarLike = 0.0):
     self.constant = _utils.as_numpy_scalar(constant)
     super().__init__(input_shape=input_shape)
Example #30
 def __init__(self,
              input_dim: IntArgType,
              lengthscale: ScalarArgType = 1.0):
     self.lengthscale = _utils.as_numpy_scalar(lengthscale)
     super().__init__(input_dim=input_dim, output_dim=1)