def cov(self, relative=False) -> np.ndarray:
    """Return covariance matrix :math:`\\mathrm{Cov}(d^{(n)}_i, d^{(n)}_j)`

    If no errors have been added, a zero matrix is returned.

    Args:
        relative: "Relative to data", i.e.
            :math:`\\mathrm{Cov}(d^{(n)}_i, d^{(n)}_j) / (d^{(n)}_i \\cdot d^{(n)}_j)`

    Returns:
        ``self.n x self.nbins x self.nbins`` array
    """
    data = self.data()
    # Start from the absolute covariance, replicated once per data point.
    total = np.tile(self.abs_cov, (self.n, 1, 1))
    # Promote the relative covariance to absolute terms by scaling with
    # the outer product of each data row: rel[i,j] * d[k,i] * d[k,j].
    total += np.einsum("ij,ki,kj->kij", self.rel_cov, data, data)
    if self.poisson_errors:
        # Plain Poisson errors would be sqrt(data_i). If the data was
        # normalized from a total of N down to N' (scale = N/N'), the
        # errors transform as
        #   sqrt(data) / scale = sqrt(data/scale) / sqrt(scale)
        #                      = sqrt(data_normalized) / sqrt(scale),
        # and self.data() already holds the normalized values.
        poisson_err = np.sqrt(data) / np.sqrt(self.poisson_errors_scale)
        # Poisson fluctuations are uncorrelated between bins, so the
        # correlation matrix is the identity for every data point.
        unit_corr = np.tile(np.eye(self.nbins), (self.n, 1, 1))
        total += corr2cov(unit_corr, poisson_err)
    if relative:
        return abs2rel_cov(total, data)
    return total
def add_rel_err_corr(self, err, corr) -> None:
    """Add error from relative errors and correlation matrix.

    Args:
        err: see argument of :py:meth:`.add_err_corr`
        corr: see argument of :py:meth:`.add_err_corr`
    """
    # Normalize both inputs to their canonical array forms, then
    # combine them into a covariance matrix and register it.
    parsed_err = self._interpret_input(err, "err")
    parsed_corr = self._interpret_input(corr, "corr")
    rel_cov = corr2cov(parsed_corr, parsed_err)
    self.add_rel_err_cov(rel_cov)
def add_err_corr(self, err, corr) -> None:
    """Add error from errors vector and correlation matrix.

    Args:
        err: ``self.n x self.nbins`` vector of errors for each data
            point and bin or self.nbins vector of uniform errors per
            data point or float (uniform error per bin and datapoint)
        corr: ``self.n x self.nbins x self.nbins`` correlation matrices
            or ``self.nbins x self.nbins`` correlation matrix
    """
    # Normalize both inputs to their canonical array forms, then
    # combine them into an absolute covariance matrix and register it.
    parsed_err = self._interpret_input(err, "err")
    parsed_corr = self._interpret_input(corr, "corr")
    abs_cov = corr2cov(parsed_corr, parsed_err)
    self.add_err_cov(abs_cov)
def test_corr2cov(self):
    # Verify corr2cov reproduces the precomputed covariance matrix
    # for every fixture dataset.
    for name in self.data:
        entry = self.data[name]
        with self.subTest(dataset=name):
            computed = corr2cov(entry["corr"], entry["err"])
            self.assertAllClose(computed, entry["cov"])