def _matmul_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    # Matrix multiplication ``norm_rv @ constant_rv`` with a Gaussian left operand.
    if norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[0] == 1):
        # Vector-variate (or single-row) case: if a Cholesky factor of the
        # covariance was already computed, update it for the linear map instead
        # of discarding it.
        if norm_rv.cov_cholesky_is_precomputed:
            cov_cholesky = _utils.linalg.cholesky_update(
                constant_rv.support.T @ norm_rv.cov_cholesky)
        else:
            cov_cholesky = None
        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=constant_rv.support.T @ (norm_rv.cov @ constant_rv.support),
            cov_cholesky=cov_cholesky,
            random_state=_utils.derive_random_seed(norm_rv.random_state, constant_rv.random_state),
        )
    elif norm_rv.ndim == 2 and norm_rv.shape[0] > 1:
        # Matrix-variate case: the map acts on the vectorized covariance via a
        # Kronecker product. This part does not do the Cholesky update,
        # because of performance configurations: currently, there is no way of switching
        # the Cholesky updates off, which might affect (large, potentially sparse)
        # covariance matrices of matrix-variate Normal RVs. See Issue #335.
        cov_update = _linear_operators.Kronecker(
            _linear_operators.Identity(constant_rv.shape[0]), constant_rv.support)
        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=cov_update.T @ (norm_rv.cov @ cov_update),
            random_state=_utils.derive_random_seed(norm_rv.random_state, constant_rv.random_state),
        )
    else:
        raise TypeError(
            "Currently, matrix multiplication is only supported for vector- and "
            "matrix-variate Gaussians.")
def _matmul_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    """Matrix multiplication ``norm_rv @ constant_rv`` with a Gaussian left operand."""
    is_vector_like = norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[0] == 1)
    is_matrix_variate = norm_rv.ndim == 2 and norm_rv.shape[0] > 1

    if is_vector_like:
        support = constant_rv.support
        return _Normal(
            mean=norm_rv.mean @ support,
            cov=support.T @ (norm_rv.cov @ support),
            random_state=_utils.derive_random_seed(
                norm_rv.random_state, constant_rv.random_state
            ),
        )

    if is_matrix_variate:
        # Right multiplication acts on the vectorized covariance through a
        # Kronecker product with the identity.
        kron_map = _linear_operators.Kronecker(
            _linear_operators.Identity(constant_rv.shape[0]), constant_rv.support
        )
        return _Normal(
            mean=norm_rv.mean @ constant_rv.support,
            cov=kron_map.T @ (norm_rv.cov @ kron_map),
            random_state=_utils.derive_random_seed(
                norm_rv.random_state, constant_rv.random_state
            ),
        )

    raise TypeError(
        "Currently, matrix multiplication is only supported for vector- and "
        "matrix-variate Gaussians."
    )
def _sub_dirac_normal(dirac_rv: _Dirac, norm_rv: _Normal) -> _Normal:
    """Difference ``dirac_rv - norm_rv``: a Gaussian whose mean is flipped and shifted."""
    shifted_mean = dirac_rv.support - norm_rv.mean
    new_seed = _utils.derive_random_seed(dirac_rv.random_state, norm_rv.random_state)
    return _Normal(mean=shifted_mean, cov=norm_rv.cov, random_state=new_seed)
def transpose(self, *axes: int) -> "RandomVariable":
    """
    Transpose the random variable.

    Parameters
    ----------
    axes : None, tuple of ints, or n ints
        See documentation of numpy.ndarray.transpose.

    Returns
    -------
    transposed_rv :
        The transposed random variable.
    """
    return RandomVariable(
        # Probe the transposed shape without drawing any samples.
        shape=np.empty(shape=self.shape).transpose(*axes).shape,
        dtype=self.dtype,
        random_state=_utils.derive_random_seed(self.random_state),
        # All properties below are evaluated lazily on the transposed view.
        sample=lambda size: self.sample(size).transpose(*axes),
        mode=lambda: self.mode.transpose(*axes),
        median=lambda: self.median.transpose(*axes),
        mean=lambda: self.mean.transpose(*axes),
        cov=lambda: self.cov,  # NOTE(review): cov passed through untransposed — confirm intended
        var=lambda: self.var.transpose(*axes),
        std=lambda: self.std.transpose(*axes),
        entropy=lambda: self.entropy,  # entropy is a scalar; unaffected by axis order
        as_value_type=self.__as_value_type,
    )
def reshape(self, newshape: ShapeArgType) -> "RandomVariable":
    """Give a new shape to a random variable.

    Parameters
    ----------
    newshape :
        New shape for the random variable. It must be compatible with the
        original shape.
    """
    newshape = _utils.as_shape(newshape)
    return RandomVariable(
        shape=newshape,
        dtype=self.dtype,
        random_state=_utils.derive_random_seed(self.random_state),
        # Samples carry extra leading `size` dimensions, hence `size + newshape`.
        sample=lambda size: self.sample(size).reshape(size + newshape),
        mode=lambda: self.mode.reshape(newshape),
        median=lambda: self.median.reshape(newshape),
        mean=lambda: self.mean.reshape(newshape),
        cov=lambda: self.cov,  # NOTE(review): cov passed through unreshaped — confirm intended
        var=lambda: self.var.reshape(newshape),
        std=lambda: self.std.reshape(newshape),
        entropy=lambda: self.entropy,  # scalar; independent of shape
        as_value_type=self.__as_value_type,
    )
def __abs__(self) -> "RandomVariable":
    """Absolute value of the random variable, defined sample-wise."""

    def sample_abs(size):
        return abs(self.sample(size=size))

    return RandomVariable(
        shape=self.shape,
        dtype=self.dtype,
        random_state=_utils.derive_random_seed(self.random_state),
        sample=sample_abs,
    )
def _sub_normal_dirac(norm_rv: _Normal, dirac_rv: _Dirac) -> _Normal:
    """Difference ``norm_rv - dirac_rv``: shifts the mean; covariance is unchanged."""
    new_seed = _utils.derive_random_seed(norm_rv.random_state, dirac_rv.random_state)
    return _Normal(
        mean=norm_rv.mean - dirac_rv.support,
        cov=norm_rv.cov,
        random_state=new_seed,
    )
def _sub_constant_normal(constant_rv: _Constant, norm_rv: _Normal) -> _Normal:
    """Difference ``constant_rv - norm_rv``: flips and shifts the mean; covariance unchanged."""
    new_mean = constant_rv.support - norm_rv.mean
    new_seed = _utils.derive_random_seed(constant_rv.random_state, norm_rv.random_state)
    return _Normal(mean=new_mean, cov=norm_rv.cov, random_state=new_seed)
def _sub_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    """Difference ``norm_rv - constant_rv``: shifts the mean; covariance unchanged."""
    new_mean = norm_rv.mean - constant_rv.support
    new_seed = _utils.derive_random_seed(norm_rv.random_state, constant_rv.random_state)
    return _Normal(mean=new_mean, cov=norm_rv.cov, random_state=new_seed)
def _sub_constant_normal(constant_rv: _Constant, norm_rv: _Normal) -> _Normal:
    """Difference ``constant_rv - norm_rv``.

    Shifting does not change the covariance, so a precomputed Cholesky factor
    is forwarded unchanged when available.
    """
    if norm_rv.cov_cholesky_is_precomputed:
        cov_cholesky = norm_rv.cov_cholesky
    else:
        cov_cholesky = None

    return _Normal(
        mean=constant_rv.support - norm_rv.mean,
        cov=norm_rv.cov,
        cov_cholesky=cov_cholesky,
        random_state=_utils.derive_random_seed(
            constant_rv.random_state, norm_rv.random_state
        ),
    )
def _add_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    """Sum ``norm_rv + constant_rv``.

    Shifting does not change the covariance, so a precomputed Cholesky factor
    is forwarded unchanged when available.
    """
    if norm_rv.cov_cholesky_is_precomputed:
        cov_cholesky = norm_rv.cov_cholesky
    else:
        cov_cholesky = None

    return _Normal(
        mean=norm_rv.mean + constant_rv.support,
        cov=norm_rv.cov,
        cov_cholesky=cov_cholesky,
        random_state=_utils.derive_random_seed(
            norm_rv.random_state, constant_rv.random_state
        ),
    )
def _dirac_binary_operator(dirac_rv1: Dirac, dirac_rv2: Dirac) -> Dirac:
    """Apply the enclosing binary ``operator`` to the two Dirac supports."""
    combined_support = operator(dirac_rv1.support, dirac_rv2.support)
    combined_seed = _utils.derive_random_seed(
        dirac_rv1.random_state,
        dirac_rv2.random_state,
    )
    return Dirac(support=combined_support, random_state=combined_seed)
def _constant_rv_binary_operator(constant_rv1: Constant, constant_rv2: Constant) -> Constant:
    """Apply the enclosing binary ``operator`` to the two constant supports."""
    combined_support = operator(constant_rv1.support, constant_rv2.support)
    combined_seed = _utils.derive_random_seed(
        constant_rv1.random_state,
        constant_rv2.random_state,
    )
    return Constant(support=combined_support, random_state=combined_seed)
def _mul_normal_dirac(
    norm_rv: _Normal, dirac_rv: _Dirac
) -> Union[_Normal, _Dirac, type(NotImplemented)]:
    """Multiply a Gaussian by a scalar Dirac; non-scalar Diracs are unsupported."""
    if dirac_rv.size != 1:
        return NotImplemented

    new_seed = _utils.derive_random_seed(norm_rv.random_state, dirac_rv.random_state)

    # Multiplying by zero collapses the distribution onto a point mass at zero.
    if dirac_rv.support == 0:
        return _Dirac(support=np.zeros_like(norm_rv.mean), random_state=new_seed)

    return _Normal(
        mean=dirac_rv.support * norm_rv.mean,
        cov=(dirac_rv.support ** 2) * norm_rv.cov,
        random_state=new_seed,
    )
def _rv_binary_op(rv1: _RandomVariable, rv2: _RandomVariable) -> _RandomVariable:
    """Generic binary operation on two random variables, defined via sampling."""
    result_shape, result_dtype, sample_fn = _make_rv_binary_op_result_shape_dtype_sample_fn(
        op_fn, rv1, rv2
    )
    return _RandomVariable(
        shape=result_shape,
        dtype=result_dtype,
        random_state=_utils.derive_random_seed(rv1.random_state, rv2.random_state),
        sample=sample_fn,
    )
def _matmul_dirac_normal(dirac_rv: _Dirac, norm_rv: _Normal) -> _Normal:
    """Matrix multiplication ``dirac_rv @ norm_rv`` with a Gaussian right operand."""
    vector_like = norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[1] == 1)
    if not vector_like:
        raise TypeError(
            "Currently, matrix multiplication is only supported for vector-variate "
            "Gaussians."
        )

    support = dirac_rv.support
    return _Normal(
        mean=support @ norm_rv.mean,
        cov=support @ (norm_rv.cov @ support.T),
        random_state=_utils.derive_random_seed(
            dirac_rv.random_state, norm_rv.random_state
        ),
    )
def _sub_normal(self, other: "Normal") -> "Normal":
    """Difference of two normally distributed random variables.

    Means subtract; covariances add.

    Raises
    ------
    ValueError
        If the operands do not have the same shape.
    """
    if self.shape != other.shape:
        raise ValueError(
            "Subtraction of two normally distributed random variables is only "
            "possible if both operands have the same shape."
        )

    return Normal(
        mean=self._mean - other._mean,
        cov=self._cov + other._cov,
        random_state=_utils.derive_random_seed(self.random_state, other.random_state),
    )
def _mul_normal_constant(
    norm_rv: _Normal, constant_rv: _Constant
) -> Union[_Normal, _Constant, type(NotImplemented)]:
    """Multiply a Gaussian by a scalar constant; non-scalar constants are unsupported."""
    if constant_rv.size != 1:
        return NotImplemented

    new_seed = _utils.derive_random_seed(
        norm_rv.random_state, constant_rv.random_state
    )

    # Multiplying by zero collapses the distribution onto a point mass at zero.
    if constant_rv.support == 0:
        return _Constant(support=np.zeros_like(norm_rv.mean), random_state=new_seed)

    return _Normal(
        mean=constant_rv.support * norm_rv.mean,
        cov=(constant_rv.support ** 2) * norm_rv.cov,
        random_state=new_seed,
    )
def _generic_rv_add(rv1: _RandomVariable, rv2: _RandomVariable) -> _RandomVariable:
    """Addition of two generic random variables; sampling-based, with an exact mean."""
    out_shape, out_dtype, out_sample = _make_rv_binary_op_result_shape_dtype_sample_fn(
        operator.add, rv1, rv2
    )

    def summed_mean():
        # The mean of a sum is the sum of the means.
        return rv1.mean + rv2.mean

    return _RandomVariable(
        shape=out_shape,
        dtype=out_dtype,
        random_state=_utils.derive_random_seed(rv1.random_state, rv2.random_state),
        sample=out_sample,
        mean=summed_mean,
    )
def __getitem__(self, key: ArrayLikeGetitemArgType) -> "RandomVariable":
    """Index or slice the random variable.

    Parameters
    ----------
    key :
        Indices, slices, and/or boolean masks, as accepted by numpy arrays.
    """
    return RandomVariable(
        # Probe the indexed shape without drawing any samples.
        shape=np.empty(shape=self.shape)[key].shape,
        dtype=self.dtype,
        random_state=_utils.derive_random_seed(self.random_state),
        # Properties below are evaluated lazily on the indexed view.
        sample=lambda size: self.sample(size)[key],
        mode=lambda: self.mode[key],
        mean=lambda: self.mean[key],
        var=lambda: self.var[key],
        std=lambda: self.std[key],
        entropy=lambda: self.entropy,  # scalar; unaffected by indexing
        as_value_type=self.__as_value_type,
    )
def _truediv_normal_dirac(norm_rv: _Normal, dirac_rv: _Dirac) -> _Normal:
    """Divide a Gaussian by a scalar Dirac; non-scalar divisors are unsupported.

    Raises
    ------
    ZeroDivisionError
        If the Dirac's support is zero.
    """
    if dirac_rv.size != 1:
        return NotImplemented
    if dirac_rv.support == 0:
        raise ZeroDivisionError

    divisor = dirac_rv.support
    return _Normal(
        mean=norm_rv.mean / divisor,
        cov=norm_rv.cov / (divisor ** 2),
        random_state=_utils.derive_random_seed(
            norm_rv.random_state, dirac_rv.random_state
        ),
    )
def _truediv_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    """Divide a Gaussian by a scalar constant; non-scalar divisors are unsupported.

    Raises
    ------
    ZeroDivisionError
        If the constant's support is zero.
    """
    if constant_rv.size != 1:
        return NotImplemented
    if constant_rv.support == 0:
        raise ZeroDivisionError

    divisor = constant_rv.support
    return _Normal(
        mean=norm_rv.mean / divisor,
        cov=norm_rv.cov / (divisor ** 2),
        random_state=_utils.derive_random_seed(
            norm_rv.random_state, constant_rv.random_state
        ),
    )
def __pos__(self) -> "RandomVariable":
    """Unary plus, applied sample-wise; all location properties are forwarded with ``+``."""
    return RandomVariable(
        shape=self.shape,
        dtype=self.dtype,
        random_state=_utils.derive_random_seed(self.random_state),
        sample=lambda size: +self.sample(size=size),
        in_support=lambda x: self.in_support(+x),
        mode=lambda: +self.mode,
        median=lambda: +self.median,
        mean=lambda: +self.mean,
        cov=lambda: self.cov,  # unary plus leaves dispersion unchanged
        var=lambda: self.var,
        std=lambda: self.std,
        as_value_type=self.__as_value_type,
    )
def reshape(self, newshape: ShapeArgType) -> "Normal":
    """Reshape the normal random variable to ``newshape``.

    The dense mean is reshaped directly; the dense covariance is kept as-is,
    except that a scalar covariance is promoted to a 1x1 matrix when the
    reshaped mean is not scalar.

    Raises
    ------
    ValueError
        If ``newshape`` is incompatible with the variable's size.
    """
    try:
        mean_reshaped = self.dense_mean.reshape(newshape)
    except ValueError as exc:
        raise ValueError(
            f"Cannot reshape this normal random variable to the given shape: "
            f"{newshape}"
        ) from exc

    cov_reshaped = self.dense_cov
    if mean_reshaped.ndim > 0 and cov_reshaped.ndim == 0:
        cov_reshaped = cov_reshaped.reshape(1, 1)

    return Normal(
        mean=mean_reshaped,
        cov=cov_reshaped,
        random_state=_utils.derive_random_seed(self.random_state),
    )
def _matmul_constant_normal(constant_rv: _Constant, norm_rv: _Normal) -> _Normal:
    # Matrix multiplication ``constant_rv @ norm_rv`` with a Gaussian right operand.
    if norm_rv.ndim == 1 or (norm_rv.ndim == 2 and norm_rv.shape[1] == 1):
        # Vector-variate (or single-column) case: if a Cholesky factor of the
        # covariance was already computed, update it for the linear map instead
        # of discarding it.
        if norm_rv.cov_cholesky_is_precomputed:
            cov_cholesky = _utils.linalg.cholesky_update(
                constant_rv.support @ norm_rv.cov_cholesky)
        else:
            cov_cholesky = None
        return _Normal(
            mean=constant_rv.support @ norm_rv.mean,
            cov=constant_rv.support @ (norm_rv.cov @ constant_rv.support.T),
            cov_cholesky=cov_cholesky,
            random_state=_utils.derive_random_seed(constant_rv.random_state, norm_rv.random_state),
        )
    else:
        raise TypeError(
            "Currently, matrix multiplication is only supported for vector-variate "
            "Gaussians.")
def _truediv_normal_constant(norm_rv: _Normal, constant_rv: _Constant) -> _Normal:
    """Divide a Gaussian by a scalar constant; non-scalar divisors are unsupported.

    A precomputed Cholesky factor is rescaled by the divisor so it stays in sync
    with the rescaled covariance.

    Raises
    ------
    ZeroDivisionError
        If the constant's support is zero.
    """
    if constant_rv.size != 1:
        return NotImplemented
    if constant_rv.support == 0:
        raise ZeroDivisionError

    if norm_rv.cov_cholesky_is_precomputed:
        cov_cholesky = norm_rv.cov_cholesky / constant_rv.support
    else:
        cov_cholesky = None

    return _Normal(
        mean=norm_rv.mean / constant_rv.support,
        cov=norm_rv.cov / (constant_rv.support ** 2),
        cov_cholesky=cov_cholesky,
        random_state=_utils.derive_random_seed(
            norm_rv.random_state, constant_rv.random_state
        ),
    )
def transpose(self, *axes: int) -> "Normal":
    """Transpose the normal random variable, permuting mean and covariance axes."""
    # Normalize `axes` to a tuple, mirroring numpy.ndarray.transpose: a single
    # tuple argument, an explicit None, or no argument all select the reversed
    # axis order.
    if len(axes) == 1 and isinstance(axes[0], tuple):
        axes = axes[0]
    elif (len(axes) == 1 and axes[0] is None) or len(axes) == 0:
        axes = tuple(reversed(range(self.ndim)))

    mean_t = self.dense_mean.transpose(*axes).copy()

    # Transpose covariance: viewed as a tensor of shape (self.shape + self.shape),
    # the covariance has one row-axis and one column-axis per mean axis; apply
    # the same permutation to both axis groups.
    cov_axes = axes + tuple(mean_t.ndim + axis for axis in axes)
    cov_t = self.dense_cov.reshape(self.shape + self.shape)
    cov_t = cov_t.transpose(*cov_axes).copy()
    if mean_t.ndim > 0:
        # Flatten back to a 2-D covariance over the vectorized variable.
        cov_t = cov_t.reshape(mean_t.size, mean_t.size)

    return Normal(
        mean=mean_t,
        cov=cov_t,
        random_state=_utils.derive_random_seed(self.random_state),
    )
def __getitem__(self, key: ArrayLikeGetitemArgType) -> "Normal":
    """
    Marginalization in multi- and matrixvariate normal distributions, expressed
    by means of (advanced) indexing, masking and slicing.

    We support all modes of array indexing presented in

    https://numpy.org/doc/1.19/reference/arrays.indexing.html.

    Note that, currently, this method does not work for normal distributions
    other than the multi- and matrixvariate versions.

    Parameters
    ----------
    key : int or slice or ndarray or tuple of None, int, slice, or ndarray
        Indices, slice objects and/or boolean masks specifying which entries to
        keep while marginalizing over all other entries.
    """
    if not isinstance(key, tuple):
        key = (key, )

    # Select entries from mean
    mean = self.dense_mean[key]

    # Select submatrix from covariance matrix: view the covariance as a tensor
    # of shape (self.shape + self.shape) and apply `key` to the row-axis group,
    # then to the column-axis group (skipping the already-indexed row axes).
    cov = self.dense_cov.reshape(self.shape + self.shape)
    cov = cov[key][tuple([slice(None)] * mean.ndim) + key]

    if mean.ndim > 0:
        # Flatten back to a 2-D covariance over the vectorized marginal.
        cov = cov.reshape(mean.size, mean.size)

    return Normal(
        mean=mean,
        cov=cov,
        random_state=_utils.derive_random_seed(self.random_state),
    )
def __pos__(self) -> "Normal":
    """Unary plus: the distribution is unchanged, up to a freshly derived seed."""
    fresh_seed = _utils.derive_random_seed(self.random_state)
    return Normal(mean=+self._mean, cov=self._cov, random_state=fresh_seed)
def __abs__(self) -> "Constant":
    """Absolute value, applied directly to the support point."""
    abs_support = abs(self.support)
    return Constant(
        support=abs_support,
        random_state=_utils.derive_random_seed(self.random_state),
    )