def test_independent_uniform(self) -> None:
        # Evenly spaced values (e.g. linspace) trip up the estimator,
        # so the samples are drawn independently at random instead.
        rng = np.random.default_rng(1)
        x = rng.uniform(0.0, 1.0, 1024)
        y = rng.uniform(0.0, 1.0, 1024)

        forward = _estimate_single_mi(x, y, k=8)
        backward = _estimate_single_mi(y, x, k=8)
        # Independent variables have zero mutual information...
        self.assertAlmostEqual(forward, 0, delta=0.04)
        # ...and the estimate should be symmetric in its arguments.
        self.assertAlmostEqual(forward, backward, delta=0.00001)
    def test_bivariate_gaussian(self) -> None:
        # For a bivariate normal with correlation rho, the exact mutual
        # information is -0.5 * log(1 - rho^2). Check the estimator for
        # several (rho, sample size, neighbor count) combinations, each
        # with its own tolerance.
        cases = [
            (0, 40, 3, 0.1),
            (0, 200, 3, 0.06),
            (0, 2000, 3, 0.005),
            (0, 2000, 5, 0.006),
            (0, 2000, 20, 0.003),
            (0.5, 200, 3, 0.05),
            (0.5, 200, 5, 0.02),
            (0.5, 2000, 3, 0.02),
            (-0.9, 200, 3, 0.05),
            (-0.9, 2000, 3, 0.05),
            (-0.9, 2000, 5, 0.02),
        ]
        for (rho, n, k, delta) in cases:
            with self.subTest(rho=rho, n=n, k=k):
                rng = np.random.default_rng(0)
                cov = np.array([[1, rho], [rho, 1]])

                sample = rng.multivariate_normal([0, 0], cov, size=n)

                mi = _estimate_single_mi(sample[:, 0], sample[:, 1], k=k)
                analytical = -0.5 * log(1 - rho**2)
                self.assertAlmostEqual(mi, analytical, delta=delta)
    def test_independent_transformed_uniform(self) -> None:
        # The marginal densities here are far from uniform, but the two
        # variables are independent, so the MI must still come out zero.
        rng = np.random.default_rng(1)
        x = rng.uniform(0.0, 10.0, 1024)
        y = np.exp(rng.uniform(0.0, 1.0, 1024))

        estimate = _estimate_single_mi(x, y, k=8)
        self.assertAlmostEqual(estimate, 0, delta=0.02)
    def test_gamma_exponential(self) -> None:
        # Kraskov et al. mention that this distribution is hard to
        # estimate unless the values are log-transformed first.
        # The analytical result is due to doi:10.1109/18.825848.
        #
        # x1      ~ Gamma(rate, shape)
        # x2 | x1 ~ Exp(t * x1)
        rng = np.random.default_rng(2)
        r, s, t = 1.2, 3.4, 0.56

        x1 = rng.gamma(shape=s, scale=1 / r, size=1000)
        x2 = rng.exponential(x1 * t)

        estimate_raw = _estimate_single_mi(x1, x2)
        estimate_log = _estimate_single_mi(np.log(x1), np.log(x2))

        analytical = psi(s) - np.log(s) + 1 / s
        # The raw-data estimate is rough; the log transform tightens it.
        self.assertAlmostEqual(estimate_raw, analytical, delta=0.04)
        self.assertAlmostEqual(estimate_log, analytical, delta=0.005)
    def test_sum_of_exponentials(self) -> None:
        # With X ~ Exp(a) and W ~ Exp(b) independent, and Y = X + W,
        # the entropy of Y has a known closed form (arXiv:1609.02911),
        # which yields an exact value for I(X; Y).
        for (a, b) in [(1, 2), (0.2, 0.3), (3, 3.1)]:
            with self.subTest(a=a, b=b):
                rng = np.random.default_rng(20200302)
                x = rng.exponential(1 / a, 1000)
                w = rng.exponential(1 / b, 1000)
                y = x + w

                analytical = np.euler_gamma + log((b - a) / a) + psi(b / (b - a))
                estimate = _estimate_single_mi(x, y, k=5)

                self.assertAlmostEqual(estimate, analytical, delta=0.025)