Example 1
    def test_tf_while_var_with_dynamic_shape(self):
        rng = test_util.test_np_rng()
        x = rng.rand(100, 10)
        tensor_x = tf.convert_to_tensor(x, dtype=tf.float32)
        running_var = tfp.experimental.stats.RunningVariance((10, ))

        def _loop_body(i, state):
            if not tf.executing_eagerly():
                sample = tf1.placeholder_with_default(tensor_x[i], shape=None)
            else:
                sample = tensor_x[i]
            return (i + 1, running_var.update(state, sample))

        _, state = tf.while_loop(
            lambda i, _: i < 100,
            _loop_body,
            (tf.constant(0, dtype=tf.int32), running_var.initialize()),
            shape_invariants=(None,
                              tfp.experimental.stats.RunningCovarianceState(
                                  None, tf.TensorShape(None),
                                  tf.TensorShape(None))))
        final_mean, final_var = self.evaluate(
            [state.mean, running_var.finalize(state)])
        self.assertEqual(final_mean.shape, (10, ))
        self.assertEqual(final_var.shape, (10, ))
        self.assertAllClose(final_var, np.var(x, axis=0), rtol=1e-5)
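For contrast with the graph-mode `tf.while_loop` above, here is a minimal eager-style sketch of the same streaming variance, assuming eager execution and the `RunningVariance.from_shape` constructor used in later examples (where `update` returns a new immutable state):

import numpy as np
import tensorflow_probability as tfp

x = np.random.rand(100, 10).astype(np.float32)
running_var = tfp.experimental.stats.RunningVariance.from_shape((10,))
for sample in x:
    running_var = running_var.update(sample)
# variance() with its default ddof matches the population variance np.var.
np.testing.assert_allclose(running_var.variance(), np.var(x, axis=0), rtol=1e-5)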
Example 2
    def test_windowed_mean_corner_cases(self):
        rng = test_util.test_np_rng()
        x = rng.rand(7)
        # Test mean of an empty set
        mean = tfp.stats.windowed_mean(x,
                                       low_indices=[4],
                                       high_indices=4,
                                       axis=0)
        mean = self.evaluate(mean)
        self.assertAllClose(mean, tf.zeros_like(mean))

        # Test mean of a "negative" set.  It's the same
        # as the mean of the same set spelled "positively", but we
        # need to be careful about the inclusive/exclusive semantics of
        # the indices.
        mean_neg = tfp.stats.windowed_mean(x,
                                           low_indices=[3, 5],
                                           high_indices=[1, 2])
        mean_pos = tfp.stats.windowed_mean(x,
                                           low_indices=[1, 2],
                                           high_indices=[3, 5])
        mean_neg, mean_pos = self.evaluate([mean_neg, mean_pos])
        self.assertAllClose(mean_neg, mean_pos)

        # Test default windows: [0, 1), [1, 2), [1, 3), [2, 4), etc
        y = [0., 1., 2., 3.]
        self.assertAllClose([0., 1., 1.5, 2.5],
                            self.evaluate(tfp.stats.windowed_mean(y)))
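A hedged NumPy reading of the default windows noted in the last comment, assuming `high_indices` defaults to `[1, ..., n]` and `low_indices` to `high_indices // 2`:

import numpy as np

y = np.array([0., 1., 2., 3.])
high = np.arange(1, len(y) + 1)  # [1, 2, 3, 4]
low = high // 2                  # [0, 1, 1, 2] -> windows [0,1), [1,2), [1,3), [2,4)
ref = np.array([y[lo:hi].mean() for lo, hi in zip(low, high)])
# ref == [0., 1., 1.5, 2.5], matching the assertion above.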
Example 3
    def test_step_function_sequence(self):
        if tf.executing_eagerly() and not self.use_static_shape:
            # TODO(b/122840816): Modify this test so that it runs in eager mode with
            # dynamic shapes, or document that this is the intended behavior.
            return

        rng = test_util.test_np_rng()
        # x jumps to new random value every 10 steps.  So correlation length = 10.
        x = (rng.randint(-10, 10, size=(1000, 1)) * np.ones(
            (1, 10))).ravel().astype(self.dtype)
        x_ph = tf1.placeholder_with_default(
            x, shape=(1000 * 10, ) if self.use_static_shape else None)
        rxx = tfp.stats.auto_correlation(x_ph,
                                         max_lags=1000 * 10 // 2,
                                         center=True,
                                         normalize=False)
        if self.use_static_shape:
            self.assertAllEqual((1000 * 10 // 2 + 1, ), rxx.shape)
        rxx_ = self.evaluate(rxx)
        rxx_ /= rxx_[0]
        # Expect positive correlation for the first 10 lags, then significantly
        # smaller negative.
        self.assertGreater(rxx_[:10].min(), 0)

        # TODO(b/138375951): Re-enable this assertion once we know why it's
        # failing.
        # self.assertGreater(rxx_[9], 5 * rxx_[10:20].mean())

        # RXX should be decreasing for the first 10 lags.
        diff = np.diff(rxx_)
        self.assertLess(diff[:10].max(), 0)
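A hedged brute-force reference for `auto_correlation` with `center=True, normalize=False`. Scaling conventions for the unnormalized estimate vary between implementations, so, as the test does, compare only after dividing by the zero-lag value:

import numpy as np

def brute_force_auto_correlation(x, max_lags):
    # One common convention: average of lagged products of the centered series.
    x = x - x.mean()
    n = len(x)
    return np.array([(x[:n - k] * x[k:]).mean() for k in range(max_lags + 1)])

# Step-function sequence as in the test: a new random level every 10 steps.
x = (np.random.randint(-10, 10, size=(1000, 1)) * np.ones((1, 10))).ravel()
rxx = brute_force_auto_correlation(x, max_lags=20)
rxx /= rxx[0]  # positive for the first ~10 lags, then it drops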
Example 4
    def test_dynamic_shape_running_covariance(self):
        rng = test_util.test_np_rng()
        x = rng.rand(100, 3, 5, 2)
        running_cov = tfp.experimental.stats.RunningCovariance(
            tf.TensorShape([5, 2]), event_ndims=1)
        state = running_cov.initialize()
        for sample in x:
            if not tf.executing_eagerly():
                sample = tf1.placeholder_with_default(sample, shape=None)
            state = running_cov.update(state, sample, axis=0)
        final_mean, final_cov = self.evaluate(
            [state.mean, running_cov.finalize(state)])
        self.assertAllClose(np.mean(x.reshape(300, 5, 2), axis=0),
                            final_mean,
                            rtol=1e-5)
        self.assertEqual(final_cov.shape, (5, 2, 2))

        # Manual computation with explicit loops, for comparison.
        manual_cov = np.zeros((5, 2, 2))
        x_reshaped = x.reshape((300, 5, 2))
        delta_mean = x_reshaped - np.mean(x_reshaped, axis=0)
        for residual in delta_mean:
            for i in range(5):
                for j in range(2):
                    for k in range(2):
                        manual_cov[i][j][k] += residual[i][j] * residual[i][k]
        manual_cov /= 300
        self.assertAllClose(manual_cov, final_cov, rtol=1e-5)
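The quadruple loop above is an outer product of residuals over the last event dimension, summed over samples; the same quantity in one vectorized line (a sketch on fresh random data):

import numpy as np

x_reshaped = np.random.rand(300, 5, 2)
delta_mean = x_reshaped - np.mean(x_reshaped, axis=0)
# sum_n residual[n, i, j] * residual[n, i, k], then divide by the sample count.
manual_cov = np.einsum('nij,nik->ijk', delta_mean, delta_mean) / 300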
Example 5
    def test_batch_vector_sampaxis02_eventaxis1(self):
        rng = test_util.test_np_rng()
        # x.shape = sample, event, sample, batch
        x = rng.randn(2, 3, 4, 5)
        y = x + 0.1 * rng.randn(2, 3, 4, 5)

        cov = tfp.stats.covariance(x, y, sample_axis=[0, 2], event_axis=[1])
        self.assertAllEqual((3, 3, 5), cov.shape)
        cov = self.evaluate(cov)

        cov_kd = tfp.stats.covariance(x,
                                      y,
                                      sample_axis=[0, 2],
                                      event_axis=[1],
                                      keepdims=True)
        self.assertAllEqual((1, 3, 3, 1, 5), cov_kd.shape)
        cov_kd = self.evaluate(cov_kd)
        self.assertAllClose(cov, cov_kd[0, :, :, 0, :])

        for i in range(5):  # Iterate over batch index.
            # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
            x_i = np.reshape(np.transpose(x[:, :, :, i], [0, 2, 1]),
                             [2 * 4, 3])
            y_i = np.reshape(np.transpose(y[:, :, :, i], [0, 2, 1]),
                             [2 * 4, 3])
            # Will compare with ith batch of covariance.
            cov_i = cov[:, :, i]
            for m in range(3):  # Iterate over row of matrix
                for n in range(3):  # Iterate over column of matrix
                    self.assertAllClose(self._np_cov_1d(x_i[:, m], y_i[:, n]),
                                        cov_i[m, n])
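The `_np_cov_1d` helper is not shown in these snippets. A minimal sketch consistent with its call sites (population covariance of two 1-D samples, since `tfp.stats.covariance` normalizes by the sample count) might be:

def _np_cov_1d(self, x, y):
    # Population (ddof=0) covariance between two 1-D samples.
    return np.cov(x, y, ddof=0)[0, 1]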
Example 6
    def test_batch_vector_sampaxis13_eventaxis2(self):
        rng = test_util.test_np_rng()
        # x.shape = batch, sample, event, sample
        x = rng.randn(4, 10, 2, 10)
        y = x + 0.1 * rng.randn(10, 2, 10)
        x[:, :, 0, :] += 0.1 * rng.randn(4, 10, 10)

        cov = tfp.stats.covariance(x, y, sample_axis=[1, 3], event_axis=[2])
        self.assertAllEqual((4, 2, 2), cov.shape)
        cov = self.evaluate(cov)

        cov_kd = tfp.stats.covariance(x,
                                      y,
                                      sample_axis=[1, 3],
                                      event_axis=[2],
                                      keepdims=True)
        self.assertAllEqual((4, 1, 2, 2, 1), cov_kd.shape)
        cov_kd = self.evaluate(cov_kd)
        self.assertAllClose(cov, cov_kd[:, 0, :, :, 0])

        for i in range(4):  # Iterate over batch index.
            # Get ith batch of samples, and permute/reshape to [n_samples, n_events]
            x_i = np.reshape(np.transpose(x[i, :, :, :], [0, 2, 1]),
                             [10 * 10, 2])
            y_i = np.reshape(np.transpose(y[i, :, :, :], [0, 2, 1]),
                             [10 * 10, 2])
            # Will compare with ith batch of covariance.
            cov_i = cov[i, :, :]
            for m in range(2):  # Iterate over row of matrix
                for n in range(2):  # Iterate over column of matrix
                    self.assertAllClose(self._np_cov_1d(x_i[:, m], y_i[:, n]),
                                        cov_i[m, n])
Example 7
    def test_batch_vector_sampaxis0_eventaxisn1(self):
        rng = test_util.test_np_rng()
        # X and Y are correlated, albeit less so in the first component.
        # They are both 100 samples of 3-batch vectors in R^2.
        x = rng.randn(100, 3, 2)
        y = x + 0.1 * rng.randn(100, 3, 2)
        x[:, :, 0] += 0.1 * rng.randn(100, 3)

        corr = tfp.stats.correlation(x, y, event_axis=-1)
        self.assertAllEqual((3, 2, 2), corr.shape)
        corr = self.evaluate(corr)

        corr_kd = tfp.stats.correlation(x, y, event_axis=-1, keepdims=True)
        self.assertAllEqual((1, 3, 2, 2), corr_kd.shape)
        corr_kd = self.evaluate(corr_kd)
        self.assertAllClose(corr, corr_kd[0, ...])

        for i in range(3):  # Iterate over batch index.
            x_i = x[:, i, :]  # Pick out ith batch of samples.
            y_i = y[:, i, :]
            corr_i = corr[i, :, :]
            for m in range(2):  # Iterate over row of matrix
                for n in range(2):  # Iterate over column of matrix
                    self.assertAllClose(self._np_corr_1d(x_i[:, m], y_i[:, n]),
                                        corr_i[m, n])
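Likewise, `_np_corr_1d` is not shown; since Pearson correlation is invariant to the ddof convention, a plausible sketch is simply:

def _np_corr_1d(self, x, y):
    # Pearson correlation between two 1-D samples.
    return np.corrcoef(x, y)[0, 1]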
Example 8
    def _build_model_and_params(self,
                                num_timesteps,
                                param_batch_shape,
                                num_posterior_draws=10):
        seed = test_util.test_seed_stream()
        rng = test_util.test_np_rng(seed())
        observed_time_series = self._build_tensor(
            rng.randn(*(param_batch_shape + [num_timesteps])))

        # Build an STS model with multiple components
        day_of_week = tfp.sts.Seasonal(
            num_seasons=7,
            observed_time_series=observed_time_series,
            name='day_of_week')
        local_linear_trend = tfp.sts.LocalLinearTrend(
            observed_time_series=observed_time_series,
            name='local_linear_trend')
        model = tfp.sts.Sum(components=[day_of_week, local_linear_trend],
                            observed_time_series=observed_time_series)

        # Sample test params from the prior (faster than posterior samples).
        param_samples = [
            p.prior.sample([num_posterior_draws], seed=seed())
            for p in model.parameters
        ]

        return model, observed_time_series, param_samples
Example 9
    def test_batch_vector_sampaxis1_eventaxis2(self):
        rng = test_util.test_np_rng()
        # x.shape = [2, 5000, 2],
        # 2-batch members, 5000 samples each, events in R^2.
        x0 = rng.randn(5000, 2)
        x1 = 2 * rng.randn(5000, 2)
        x = np.stack((x0, x1), axis=0)

        # chol.shape = [2 (batch), 2x2 (event x event)]
        chol = tfp.stats.cholesky_covariance(x, sample_axis=1)
        chol_kd = tfp.stats.cholesky_covariance(x,
                                                sample_axis=1,
                                                keepdims=True)

        # Make sure static shape of keepdims works
        self.assertAllEqual((2, 2, 2), chol.shape)
        self.assertAllEqual((2, 1, 2, 2), chol_kd.shape)

        chol, chol_kd = self.evaluate([chol, chol_kd])

        # keepdims should not change the numbers in the result.
        self.assertAllEqual(chol, np.squeeze(chol_kd, axis=1))

        # Covariance is trivial since these are independent normals.
        # Tolerance chosen to be 2x the lowest passing atol.
        self.assertAllClose(np.eye(2), chol[0, ...], atol=0.06)
        self.assertAllClose(2 * np.eye(2), chol[1, ...], atol=0.06)
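The property this test leans on is that `cholesky_covariance` returns a Cholesky factor of the sample covariance, so `chol @ chol^T` reconstructs the covariance. A self-contained NumPy check of that relationship:

import numpy as np

x = np.stack([np.random.randn(5000, 2), 2 * np.random.randn(5000, 2)], axis=0)
cov = np.stack([np.cov(batch.T, ddof=0) for batch in x], axis=0)
chol = np.linalg.cholesky(cov)
# A Cholesky factor times its own transpose gives back the covariance.
assert np.allclose(np.matmul(chol, np.swapaxes(chol, -2, -1)), cov)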
Example 10
 def test_random_mean(self):
     rng = test_util.test_np_rng()
     x = rng.rand(100)
     running_mean = tfp.experimental.stats.RunningMean.from_shape(shape=())
     running_mean = consume(running_mean, x)
     mean = self.evaluate(running_mean.mean)
     self.assertAllClose(np.mean(x), mean, rtol=1e-6)
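`consume` is a test fixture that is not shown in these snippets. A plausible sketch matching its call sites here (stream samples into a running statistic one by one, or hand them over as a single chunked `update`) is:

def consume(running_stat, samples, chunk_axis=None):
    # Hypothetical helper: feed `samples` into a streaming statistic.
    if chunk_axis is not None:
        return running_stat.update(samples, axis=chunk_axis)
    for sample in samples:
        running_stat = running_stat.update(sample)
    return running_stat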
Example 11
    def setUp(self):
        super(MutualInformationTest, self).setUp()
        self.seed = tfp_test_util.test_seed()

        self.scores = tfp_test_util.test_np_rng().normal(loc=1.0,
                                                         scale=2.0,
                                                         size=[13, 17])

        batch_size = 1000
        rho = 0.8
        dim = 2
        x, eps = tf.split(value=tf.random.normal(shape=(2 * batch_size, dim),
                                                 seed=self.seed),
                          num_or_size_splits=2,
                          axis=0)
        mean = rho * x
        stddev = tf.sqrt(1. - tf.square(rho))
        y = mean + stddev * eps
        conditional_dist = tfd.MultivariateNormalDiag(
            mean, scale_identity_multiplier=stddev)
        marginal_dist = tfd.MultivariateNormalDiag(tf.zeros(dim), tf.ones(dim))

        # conditional_scores has shape [y_batch_dim, distribution_batch_dim],
        # since `lower_bound_info_nce` requires
        # `scores[i, j] = f(x[i], y[j]) = log p(x[i] | y[j])`.
        self.conditional_scores = conditional_dist.log_prob(y[:, tf.newaxis, :])
        self.marginal_scores = marginal_dist.log_prob(y)[:, tf.newaxis]
        self.optimal_critic = 1 + self.conditional_scores - self.marginal_scores
        self.theoretical_mi = np.float32(-0.5 * np.log(1. - rho**2) * dim)
        # Y is N-D standard normal distributed.
        self.differential_entropy_y = 0.5 * np.log(2 * np.pi * np.e) * dim
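For reference, the closed forms encoded in the last two lines: for jointly Gaussian X and Y with per-dimension correlation rho in d dimensions,

$$ I(X; Y) = -\frac{d}{2}\log\left(1 - \rho^2\right), \qquad h(Y) = \frac{d}{2}\log(2\pi e). $$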
Example 12
 def test_running_variance(self, ddof):
   rng = test_util.test_np_rng()
   x = rng.rand(100)
   var = tfp.experimental.stats.RunningVariance.from_shape()
   var = consume(var, x)
   final_mean, final_var = self.evaluate([var.mean, var.variance(ddof=ddof)])
   self.assertNear(np.mean(x), final_mean, err=1e-6)
   self.assertNear(np.var(x, ddof=ddof), final_var, err=1e-6)
Example 13
 def test_chunking(self):
   rng = test_util.test_np_rng()
   x = rng.rand(100, 10, 5)
   running_mean = tfp.experimental.stats.RunningMean.from_shape(
       shape=(5,))
   running_mean = consume(running_mean, x, chunk_axis=0)
   mean = self.evaluate(running_mean.mean)
   self.assertAllClose(np.mean(x.reshape(1000, 5), axis=0), mean, rtol=1e-6)
Example 14
 def test_manual_dtype(self):
     rng = test_util.test_np_rng()
     x = rng.rand(3, 10)
     cov = tfp.experimental.stats.RunningCovariance.from_shape(
         (10, ), dtype=tf.float64)
     cov = consume(cov, x)
     final_cov = cov.covariance()
     self.assertEqual(final_cov.dtype, tf.float64)
Example 15
    def test_overlapping_axis_raises(self):
        rng = test_util.test_np_rng()
        # x and y are both 100 samples of 3-batch vectors in R^2.
        x = rng.randn(100, 3, 2)
        y = x + 0.1 * rng.randn(100, 3, 2)

        with self.assertRaisesRegex(ValueError, 'overlapped'):
            tfp.stats.covariance(x, y, sample_axis=[0, 1], event_axis=[1, 2])
Example 16
    def test_non_contiguous_event_axis_raises(self):
        rng = test_util.test_np_rng()
        # x and y are both 100 samples of 3-batch vectors in R^2.
        x = rng.randn(100, 3, 2)
        y = x + 0.1 * rng.randn(100, 3, 2)

        with self.assertRaisesRegex(ValueError, 'must be contiguous'):
            tfp.stats.covariance(x, y, sample_axis=1, event_axis=[0, 2])
Example 17
    def testPrecomputedWithMasking(self):
        amplitude = np.array([1., 2.], np.float64)
        length_scale = np.array([[.1], [.2], [.3]], np.float64)
        observation_noise_variance = np.array([[1e-2], [1e-4], [1e-6]],
                                              np.float64)

        rng = test_util.test_np_rng()
        observations_is_missing = np.array([
            [False, True, False, True, False, True],
            [False, False, False, False, False, False],
            [True, True, False, False, True, True],
        ]).reshape((3, 1, 6))
        observation_index_points = np.where(
            observations_is_missing[..., np.newaxis], np.nan,
            rng.uniform(-1., 1., (3, 1, 6, 2)).astype(np.float64))
        observations = np.where(
            observations_is_missing, np.nan,
            rng.uniform(-1., 1., (3, 1, 6)).astype(np.float64))

        index_points = rng.uniform(-1., 1., (5, 2)).astype(np.float64)

        kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
        gprm = tfd.GaussianProcessRegressionModel.precompute_regression_model(
            kernel=kernel,
            index_points=index_points,
            observation_index_points=observation_index_points,
            observations=observations,
            observations_is_missing=observations_is_missing,
            observation_noise_variance=observation_noise_variance,
            validate_args=True)

        self.assertAllNotNan(gprm.mean())
        self.assertAllNotNan(gprm.variance())
        self.assertAllNotNan(gprm.covariance())

        # For each batch member of `gprm`, check that the distribution is the same
        # as a GaussianProcessRegressionModel with no masking but conditioned on
        # only the not-masked-out index points.
        x = gprm.sample(seed=test_util.test_seed())
        for i in range(3):
            observation_index_points_i = tf.gather(
                observation_index_points[i, 0],
                (~observations_is_missing[i, 0]).nonzero()[0])
            observations_i = tf.gather(
                observations[i, 0],
                (~observations_is_missing[i, 0]).nonzero()[0])
            gprm_i = tfd.GaussianProcessRegressionModel.precompute_regression_model(
                kernel=kernel[i],
                index_points=index_points,
                observation_index_points=observation_index_points_i,
                observations=observations_i,
                observation_noise_variance=observation_noise_variance[i, 0],
                validate_args=True)

            self.assertAllClose(gprm.mean()[i], gprm_i.mean())
            self.assertAllClose(gprm.variance()[i], gprm_i.variance())
            self.assertAllClose(gprm.covariance()[i], gprm_i.covariance())
            self.assertAllClose(gprm.log_prob(x)[i], gprm_i.log_prob(x[i]))
Example 18
 def test_sorted_descending_running_covariance(self):
     rng = test_util.test_np_rng()
     x = rng.rand(100, 10)
     x[::-1].sort(axis=0)  # sorts in descending order
     cov = tfp.experimental.stats.RunningCovariance.from_shape((10, ))
     for sample in x:
         cov = cov.update(sample)
     final_cov = self.evaluate(cov.covariance())
     self.assertAllClose(final_cov, np.cov(x.T, ddof=0), rtol=1e-5)
Example 19
    def test_diagonal_of_correlation_matrix_x_with_x_is_one(self):
        rng = test_util.test_np_rng()
        # Some big numbers, to test stability.
        x = np.float32(1e10 * rng.randn(100, 3))

        corr = tfp.stats.correlation(x, sample_axis=0, event_axis=1)
        self.assertAllEqual((3, 3), corr.shape)
        corr = self.evaluate(corr)
        self.assertAllClose([1., 1., 1.], np.diag(corr))
Example 20
 def test_random_mean(self):
     rng = test_util.test_np_rng()
     x = rng.rand(100)
     running_mean = tfp.experimental.stats.RunningMean(shape=())
     state = running_mean.initialize()
     for sample in x:
         state = running_mean.update(state, sample)
     mean = self.evaluate(running_mean.finalize(state))
     self.assertAllClose(np.mean(x), mean, rtol=1e-6)
Example 21
 def _random_batch_psd(self, dim):
   rng = test_util.test_np_rng()
   matrix = rng.random_sample([2, dim, dim])
   matrix = np.matmul(matrix, np.swapaxes(matrix, -2, -1))
   matrix = (matrix + np.diag(np.arange(dim) * .1)).astype(self.dtype)
   masked_shape = (
       matrix.shape if self.use_static_shape else [None] * len(matrix.shape))
   matrix = tf1.placeholder_with_default(matrix, shape=masked_shape)
   return matrix
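The construction above relies on M M^T being positive semi-definite, with the added diagonal nudging eigenvalues upward; a quick NumPy check of that property (random M is almost surely full rank, so the eigenvalues are strictly positive):

import numpy as np

m = np.random.random_sample([2, 4, 4])
m = np.matmul(m, np.swapaxes(m, -2, -1)) + np.diag(np.arange(4) * .1)
assert (np.linalg.eigvalsh(m) > 0).all()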
Example 22
 def test_manual_dtype(self):
     rng = test_util.test_np_rng()
     x = rng.rand(100, 10)
     cov = tfp.experimental.stats.RunningCovariance.from_shape(
         (10, ), dtype=tf.float64)
     for sample in x:
         cov = cov.update(sample)
     final_cov = cov.covariance()
     self.assertEqual(final_cov.dtype, tf.float64)
Example 23
 def test_running_covariance(self, ddof):
     rng = test_util.test_np_rng()
     x = rng.rand(100, 10)
     cov = tfp.experimental.stats.RunningCovariance.from_shape((10, ))
     for sample in x:
         cov = cov.update(sample)
     final_mean, final_cov = self.evaluate(
         [cov.mean, cov.covariance(ddof=ddof)])
     self.assertAllClose(np.mean(x, axis=0), final_mean, rtol=1e-5)
     self.assertAllClose(np.cov(x.T, ddof=ddof), final_cov, rtol=1e-5)
Example 24
 def test_running_covariance(self, ddof):
   rng = test_util.test_np_rng()
   x = rng.rand(100, 10)
   cov = tfp.experimental.stats.RunningCovariance.from_shape((10,))
   cov = consume(cov, x)
   final_mean, final_cov = self.evaluate([cov.mean, cov.covariance(ddof=ddof)])
   self.assertAllClose(np.mean(x, axis=0), final_mean, rtol=1e-5)
   self.assertAllClose(np.cov(x.T, ddof=ddof), final_cov, rtol=1e-5)
   self.assertEqual(cov.event_ndims, 1)
   self.assertEqual(cov.mean.dtype, tf.float32)
Example 25
 def test_axis_2_center_true_max_lags_1(self):
     rng = test_util.test_np_rng()
     x = rng.randn(3, 4, 5).astype(self.dtype)
     if self.dtype in [np.complex64]:
         x = 1j * rng.randn(3, 4, 5).astype(self.dtype)
     self.check_results_versus_brute_force(x,
                                           axis=2,
                                           max_lags=1,
                                           center=True,
                                           normalize=False)
Example 26
 def test_axis_n1_center_false_max_lags_none_normalize_true(self):
     rng = test_util.test_np_rng()
     x = rng.randn(2, 3, 4).astype(self.dtype)
     if self.dtype in [np.complex64]:
         x = 1j * rng.randn(2, 3, 4).astype(self.dtype)
     self.check_results_versus_brute_force(x,
                                           axis=-1,
                                           max_lags=None,
                                           center=False,
                                           normalize=True)
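`check_results_versus_brute_force` belongs to the test harness and is not shown. A hedged sketch of what such a helper might do, assuming `np` and `tfp` are in scope as in the tests and that the unnormalized estimate averages the (n - k) lagged products at lag k:

def check_results_versus_brute_force(self, x, axis, max_lags, center, normalize):
    # Slow per-lag NumPy reference along `axis`, compared against tfp.
    y = np.moveaxis(x, axis, -1)
    if center:
        y = y - y.mean(axis=-1, keepdims=True)
    n = y.shape[-1]
    num_lags = (n - 1) if max_lags is None else max_lags
    rxx = np.stack(
        [(np.conj(y[..., :n - k]) * y[..., k:]).mean(axis=-1)
         for k in range(num_lags + 1)], axis=-1)
    if normalize:
        rxx = rxx / rxx[..., :1]
    expected = np.moveaxis(rxx, -1, axis)
    actual = self.evaluate(tfp.stats.auto_correlation(
        x, axis=axis, max_lags=max_lags, center=center, normalize=normalize))
    self.assertAllClose(expected, actual, rtol=1e-4)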
Example 27
 def test_manual_dtype(self):
     rng = test_util.test_np_rng()
     x = rng.rand(100, 10)
     running_cov = tfp.experimental.stats.RunningCovariance(
         (10, ), dtype=tf.float64)
     state = running_cov.initialize()
     for sample in x:
         state = running_cov.update(state, sample)
     final_cov = running_cov.finalize(state)
     self.assertEqual(final_cov.dtype, tf.float64)
Example 28
 def test_iid_normal_passes(self):
   n_samples = 500
   # five scalar chains taken from iid Normal(0, 1)
   rng = test_util.test_np_rng()
   iid_normal_samples = rng.randn(n_samples, 5)
   rhat_reducer = tfp.experimental.mcmc.PotentialScaleReductionReducer(
       independent_chain_ndims=1)
   rhat = self.evaluate(test_fixtures.reduce(rhat_reducer, iid_normal_samples))
   self.assertAllEqual((), rhat.shape)
   self.assertAllClose(1., rhat, rtol=0.02)
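The statistic being reduced is the Gelman-Rubin potential scale reduction. With n draws per chain, within-chain variance W, and between-chain variance B, one common form is

$$ \hat{R} = \sqrt{\frac{\frac{n-1}{n} W + \frac{1}{n} B}{W}}, $$

which approaches 1 when every chain targets the same distribution, as asserted above.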
Example 29
 def test_sorted_ascending_running_covariance(self):
     rng = test_util.test_np_rng()
     x = rng.rand(100, 10)
     x.sort(axis=0)
     running_cov = tfp.experimental.stats.RunningCovariance((10, ))
     state = running_cov.initialize()
     for sample in x:
         state = running_cov.update(state, sample)
     final_cov = self.evaluate(running_cov.finalize(state))
     self.assertAllClose(final_cov, np.cov(x.T, ddof=0), rtol=1e-5)
Example 30
 def test_random_higher_rank_samples(self):
     rng = test_util.test_np_rng()
     x_orig = rng.rand(100, 10)
     x = tf.convert_to_tensor(x_orig, dtype=tf.float32)
     running_moments = tfp.experimental.stats.RunningCentralMoments.from_shape(
         shape=(10, ), moment=np.arange(5) + 1)
     running_moments = consume(running_moments, x)
     moments = self.evaluate(running_moments.moments())
     self.assertAllClose(stats.moment(x_orig, moment=[1, 2, 3, 4, 5]),
                         moments,
                         rtol=1e-6)
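Here `stats` refers to `scipy.stats`; `scipy.stats.moment` computes central moments, so the first requested moment is identically zero and the comparison effectively exercises moments 2 through 5:

from scipy import stats
import numpy as np

x = np.random.rand(100, 10)
central = stats.moment(x, moment=[1, 2, 3, 4, 5])  # shape [5, 10], axis=0 default
assert np.allclose(central[0], 0.)  # the first central moment is always 0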