Example #1
0
 def _propagate_distr_impl(self, distr0, time_delta, assume_distr=False):
     """Propagate a normal distribution through a constant-drift process.

     The mean is shifted by the process drift times `time_delta` and the
     covariance grows linearly in `time_delta`.

     Args:
         distr0: the initial distribution; must be a `distrs.NormalDistr`
             unless `assume_distr` is True.
         time_delta: the (positive) time increment to propagate over.
         assume_distr: when True, skip the type check and treat `distr0`
             as if it were normal.

     Returns:
         A new `distrs.NormalDistr` with the propagated mean and covariance.

     Raises:
         ValueError: if `distr0` is not normal and `assume_distr` is False.
     """
     if not (assume_distr or isinstance(distr0, distrs.NormalDistr)):
         raise ValueError(
             'Do not know how to propagate a distribution that is not normal'
         )
     drifted_mean = distr0.mean + self._mean * time_delta
     widened_cov = distr0.cov + time_delta * self._cov
     return distrs.NormalDistr(mean=drifted_mean, cov=widened_cov)
Example #2
0
 def _propagate_distr_impl(self, time_delta, distr0):
     """Propagate `distr0` through a mean-reverting process over `time_delta`.

     The new mean is a convex-style combination of the initial mean and the
     long-run mean `self._mean`, weighted by the mean-reversion factor; the
     new covariance is the propagated initial covariance plus the noise
     covariance accumulated over `time_delta`.

     Returns:
         A new `distrs.NormalDistr` with the propagated mean and covariance.
     """
     reversion = self.mean_reversion_factor(time_delta)
     complement = np.eye(self.process_dim) - reversion
     propagated_mean = np.dot(reversion, distr0.mean) + np.dot(complement, self._mean)
     propagated_cov = np.dot(np.dot(reversion, distr0.cov), reversion.T) \
             + self.noise_covariance(time_delta)
     return distrs.NormalDistr(mean=propagated_mean, cov=propagated_cov)
Example #3
0
 def _propagatedistrimpl(self, timedelta, distr0):
     # Propagate distr0 through a mean-reverting process over timedelta and
     # return the resulting normal distribution.
     # NOTE(review): the identifiers in this snippet appear to have lost
     # their underscores (cf. the sibling `_propagate_distr_impl`) -- this
     # looks like a copy/scrape artifact; confirm that names such as
     # `meanreversionfactor`, `processdim` and `noisecovariance` actually
     # exist on the class before relying on this version.
     value0 = distr0.mean
     # Mean-reversion factor over the time step -- presumably the matrix
     # analogue of exp(-theta * dt); verify against its implementation.
     mrf = self.meanreversionfactor(timedelta)
     eyeminusmrf = np.eye(self.processdim) - mrf
     # New mean: weighted combination of the initial mean and the long-run
     # mean (self.__mean is name-mangled to the defining class).
     m = np.dot(mrf, value0) + np.dot(eyeminusmrf, self.__mean)
     # New covariance: initial covariance propagated through the reversion
     # factor, plus the noise covariance accumulated over the step.
     c = np.dot(np.dot(mrf, distr0.cov),
                mrf.T) + self.noisecovariance(timedelta)
     return distrs.NormalDistr(mean=m, cov=c)
Example #4
0
 def _propagate_distr_impl(self, distr0, time_delta, assume_distr=False):
     """Propagate `distr0` through a mean-reverting process over `time_delta`.

     Args:
         distr0: the initial distribution; must be a `distrs.NormalDistr`
             unless `assume_distr` is True.
         time_delta: the time increment to propagate over.
         assume_distr: when True, skip the type check on `distr0`.

     Returns:
         A new `distrs.NormalDistr` whose mean combines the initial mean
         and the long-run mean `self._mean` via the mean-reversion factor,
         and whose covariance is the propagated initial covariance plus
         the accumulated noise covariance.

     Raises:
         ValueError: if `distr0` is not normal and `assume_distr` is False.
     """
     if not (isinstance(distr0, distrs.NormalDistr) or assume_distr):
         raise ValueError('Do not know how to propagate a distribution that is not normal')
     reversion = self.mean_reversion_factor(time_delta)
     remainder = np.eye(self.process_dim) - reversion
     propagated_mean = np.dot(reversion, distr0.mean) + np.dot(remainder, self._mean)
     propagated_cov = np.dot(np.dot(reversion, distr0.cov), reversion.T) + self.noise_covariance(time_delta)
     return distrs.NormalDistr(mean=propagated_mean, cov=propagated_cov)
Example #5
0
 def _propagate_distr_impl(self, distr0, time_delta, assume_distr=False):
     """Propagate a normal distribution through a constant-drift process.

     The mean is shifted by the drift `self._mean` times `time_delta` and
     the covariance grows by `time_delta * self._cov`.

     Args:
         distr0: the initial distribution; must be a `distrs.NormalDistr`
             unless `assume_distr` is True.
         time_delta: the time increment to propagate over.
         assume_distr: when True, skip the type check on `distr0`.

     Returns:
         A new `distrs.NormalDistr` with the propagated mean and covariance.

     Raises:
         ValueError: if `distr0` is not normal and `assume_distr` is False.
     """
     if not isinstance(distr0, distrs.NormalDistr) and not assume_distr:
         raise ValueError('Do not know how to propagate a distribution that is not normal')
     # BUGFIX: removed four leftover (Brownian-bridge-style) lines that
     # referenced undefined names (`value0`, `time0`, `time`,
     # `self.__final_time`, `self.__final_value`, `self.__vol`) -- they
     # raised NameError on every call, and their results (`mean`, `vol`)
     # were either immediately overwritten or never used.
     mean = distr0.mean + self._mean * time_delta
     cov = distr0.cov + time_delta * self._cov
     return distrs.NormalDistr(mean=mean, cov=cov)
Example #6
0
    def testMultivariateNormalDistr(self):
        """Check NormalDistr construction in 1-d and 2-d.

        Verifies the default (standard normal) mean/cov/vol, and that a
        correlated 2-d normal built from a covariance matrix and one built
        from the equivalent volatility (Cholesky) matrix expose identical
        mean, cov and vol.
        """
        stdnormal1d = distrs.NormalDistr(dim=1)
        npt.assert_almost_equal(stdnormal1d.mean, 0.)
        npt.assert_almost_equal(stdnormal1d.cov, 1.)
        npt.assert_almost_equal(stdnormal1d.vol, 1.)

        # FIX: this variable was previously (misleadingly) named
        # `stdnormal1d` even though it holds the 2-d standard normal.
        stdnormal2d = distrs.NormalDistr(dim=2)
        npt.assert_almost_equal(stdnormal2d.mean, npu.colof(2, 0.))
        npt.assert_almost_equal(stdnormal2d.cov, np.eye(2))
        npt.assert_almost_equal(stdnormal2d.vol, np.eye(2))

        sd1 = 3.
        sd2 = 4.
        cor = -.5

        # Construct from a covariance matrix...
        normal2d = distrs.NormalDistr(mean=[1., 2.],
                                      cov=distrs.NormalDistr.makecov2d(
                                          sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(
            normal2d.cov,
            [[sd1 * sd1, cor * sd1 * sd2], [cor * sd1 * sd2, sd2 * sd2]])
        npt.assert_almost_equal(
            normal2d.vol,
            [[sd1, 0.], [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])

        # ...and from the equivalent volatility matrix; the resulting
        # distribution must be identical.
        normal2d = distrs.NormalDistr(mean=[1., 2.],
                                      vol=distrs.NormalDistr.makevol2d(
                                          sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(
            normal2d.cov,
            [[sd1 * sd1, cor * sd1 * sd2], [cor * sd1 * sd2, sd2 * sd2]])
        npt.assert_almost_equal(
            normal2d.vol,
            [[sd1, 0.], [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])
Example #7
0
    def test_multinomial_resample(self):
        """Check multinomial resampling of an empirical distribution at an
        equal, a smaller, and a larger target particle count.

        The hard-coded expected statistics depend on the fixed RNG seeds
        below; do not change the seeds without regenerating the numbers.
        """
        # Seed the global RNG so the sampled particles are reproducible:
        rnd.random_state(np.random.RandomState(seed=42), force=True)

        # Build an empirical approximation of a correlated 2-d normal from
        # 100000 equally weighted samples:
        normal_distr = distrs.NormalDistr(mean=[10., 100.], cov=[[4., -3.], [-3., 9.]])
        particles = normal_distr.sample(size=100000)
        approx_normal_empirical_2d = distrs.EmpiricalDistr(particles=particles, weights=np.ones((100000,)))
        self.assertEqual(approx_normal_empirical_2d.particle_count, 100000)
        npt.assert_almost_equal(approx_normal_empirical_2d.effective_particle_count, 100000.)
        self.assertEqual(approx_normal_empirical_2d.dim, 2)
        npt.assert_almost_equal(approx_normal_empirical_2d.particles, particles)
        npt.assert_almost_equal(approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(approx_normal_empirical_2d.weights, npu.col(*np.ones((100000,))))
        npt.assert_almost_equal(approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100000,))) / 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weight(0), .00001)
        self.assertEqual(approx_normal_empirical_2d.weight_sum, 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]])

        # Re-seed (seed=43) so the resampling draw is reproducible:
        rnd.random_state(np.random.RandomState(seed=43), force=True)

        # Resample at the same particle count (100000):
        resampled_approx_normal_empirical_2d = distrs.multinomial_resample(approx_normal_empirical_2d)
        self.assertEqual(resampled_approx_normal_empirical_2d.particle_count, 100000)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.effective_particle_count, 100000.)
        self.assertEqual(resampled_approx_normal_empirical_2d.dim, 2)
        # The resampled particles should ("almost certainly") be different from the original ones:
        self.assertFalse(np.sum(resampled_approx_normal_empirical_2d.particles) == np.sum(particles))
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.particle(0), npu.col(*particles[1]))
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.weights, npu.col(*np.ones((100000,))))
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100000,))) / 100000.)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.normalised_weight(0), .00001)
        self.assertEqual(resampled_approx_normal_empirical_2d.weight_sum, 100000.)
        # But the stats should be pretty close to those of the original empirical distribution, though not to seven
        # decimal places:
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)

        # With the same seed, sampling from the empirical distribution must
        # reproduce the multinomially resampled particles exactly:
        rnd.random_state(np.random.RandomState(seed=43), force=True)

        resampled_approx_normal_empirical_2d_particles = approx_normal_empirical_2d.sample(size=100000)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d_particles, resampled_approx_normal_empirical_2d.particles)

        # Resample DOWN to 40000 particles:
        subsampled_approx_normal_empirical_2d = distrs.multinomial_resample(approx_normal_empirical_2d, target_particle_count=40000)
        self.assertEqual(subsampled_approx_normal_empirical_2d.particle_count, 40000)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.effective_particle_count, 40000.)
        self.assertEqual(subsampled_approx_normal_empirical_2d.dim, 2)
        # The resampled particles should ("almost certainly") be different from the original ones:
        self.assertFalse(np.sum(subsampled_approx_normal_empirical_2d.particles) == np.sum(particles))
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.particle(0), npu.col(*particles[1]))
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.weights, npu.col(*np.ones((40000,))))
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((40000,))) / 40000.)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.normalised_weight(0), .000025)
        self.assertEqual(subsampled_approx_normal_empirical_2d.weight_sum, 40000.)
        # But the stats should be pretty close to those of the original empirical distribution, though not to seven
        # decimal places:
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)

        # Resample UP to 300000 particles:
        supersampled_approx_normal_empirical_2d = distrs.multinomial_resample(approx_normal_empirical_2d, target_particle_count=300000)
        self.assertEqual(supersampled_approx_normal_empirical_2d.particle_count, 300000)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.effective_particle_count, 300000.)
        self.assertEqual(supersampled_approx_normal_empirical_2d.dim, 2)
        # The resampled particles should ("almost certainly") be different from the original ones:
        self.assertFalse(np.sum(supersampled_approx_normal_empirical_2d.particles) == np.sum(particles))
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.weights, npu.col(*np.ones((300000,))))
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((300000,))) / 300000.)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.normalised_weight(0), 3.3333333333333333e-06)
        self.assertEqual(supersampled_approx_normal_empirical_2d.weight_sum, 300000.)
        # But the stats should be pretty close to those of the original empirical distribution, though not to seven
        # decimal places:
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
Example #8
0
    def test_normal_distr(self):
        """Check NormalDistr moments and seeded sampling in 1-d and 2-d.

        The hard-coded expected samples depend on the fixed RNG seed below;
        do not change the seed without regenerating the numbers.
        """
        # Seed the global RNG so sampling is reproducible:
        rnd.random_state(np.random.RandomState(seed=42), force=True)

        # 1-d standard normal:
        std_normal_1d = distrs.NormalDistr(dim=1)
        npt.assert_almost_equal(std_normal_1d.mean, 0.)
        npt.assert_almost_equal(std_normal_1d.cov, 1.)
        npt.assert_almost_equal(std_normal_1d.vol, 1.)

        # A single draw comes back as a 1x1 matrix:
        sample = std_normal_1d.sample()
        self.assertEqual(np.shape(sample), (1, 1))
        npt.assert_almost_equal(sample, [[ 0.49671415]])

        # Multiple draws come back as one row per draw:
        sample = std_normal_1d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 1))
        npt.assert_almost_equal(sample, [
                [-0.1382643 ],
                [ 0.64768854],
                [ 1.52302986],
                [-0.23415337],
                [-0.23413696],
                [ 1.57921282],
                [ 0.76743473],
                [-0.46947439],
                [ 0.54256004],
                [-0.46341769]])

        # 2-d standard normal:
        std_normal_2d = distrs.NormalDistr(dim=2)
        npt.assert_almost_equal(std_normal_2d.mean, npu.col_of(2, 0.))
        npt.assert_almost_equal(std_normal_2d.cov, np.eye(2))
        npt.assert_almost_equal(std_normal_2d.vol, np.eye(2))

        sample = std_normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [-0.46572975,  0.24196227],
                [-1.91328024, -1.72491783],
                [-0.56228753, -1.01283112],
                [ 0.31424733, -0.90802408],
                [-1.4123037 ,  1.46564877],
                [-0.2257763 ,  0.0675282 ],
                [-1.42474819, -0.54438272],
                [ 0.11092259, -1.15099358],
                [ 0.37569802, -0.60063869],
                [-0.29169375, -0.60170661]])

        sd1=3.; sd2=4.; cor=-.5

        # Correlated 2-d normal built from a covariance matrix:
        normal_2d = distrs.NormalDistr(mean=[1., 2.], cov=stats.make_cov_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal_2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(normal_2d.cov, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(normal_2d.vol, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])

        sample = normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [-3.09581812,  9.06710684],
                [ 5.00400357, -1.07912958],
                [ 4.10821238, -2.42324481],
                [ 2.58989516, -7.05256838],
                [ 2.07671635,  3.61955714],
                [ 0.38728403,  2.5195548 ],
                [-1.36010204, -0.88681309],
                [ 1.63968707, -1.29329703],
                [-0.61960168,  6.44566548],
                [ 5.53451941, -4.36131646]])

        # The same distribution built from the equivalent volatility
        # (Cholesky) matrix:
        normal_2d = distrs.NormalDistr(mean=[1., 2.], vol=stats.make_vol_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal_2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(normal_2d.cov, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(normal_2d.vol, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])

        sample = normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [ 0.4624506 , -0.26705979],
                [ 1.76344545,  5.54913479],
                [-2.76038957,  4.57609973],
                [ 2.35608833,  1.20642031],
                [-2.1218454 ,  5.16796697],
                [-0.85307657, -0.00850715],
                [ 5.28771297, -1.62048489],
                [-2.12592264,  7.1016208 ],
                [-0.46508111,  6.26189296],
                [ 3.15543223, -0.04269231]])
Example #9
0
    def test_empirical_distr(self):
        """Check EmpiricalDistr construction, weights handling, and weighted
        statistics on trivial, simple, "repeat"-weighted, and sampled data.

        The hard-coded expected statistics in the sampled cases depend on
        the fixed RNG seed below.
        """
        # Seed the global RNG so sampling later in the test is reproducible:
        rnd.random_state(np.random.RandomState(seed=42), force=True)

        # A single particle: zero variance; vol (Cholesky of cov) is
        # undefined because the covariance matrix is not positive definite.
        trivial_empirical_1d = distrs.EmpiricalDistr(particles=[[0.]], weights=[1.])
        self.assertEqual(trivial_empirical_1d.particle_count, 1)
        npt.assert_almost_equal(trivial_empirical_1d.effective_particle_count, 1.)
        self.assertEqual(trivial_empirical_1d.dim, 1)
        npt.assert_almost_equal(trivial_empirical_1d.particles, np.array([[0.]]))
        npt.assert_almost_equal(trivial_empirical_1d.particle(0), np.array([[0.]]))
        npt.assert_almost_equal(trivial_empirical_1d.weights, np.array([[1.]]))
        npt.assert_almost_equal(trivial_empirical_1d.weight(0), 1.)
        npt.assert_almost_equal(trivial_empirical_1d.normalised_weights, np.array([[1.]]))
        npt.assert_almost_equal(trivial_empirical_1d.normalised_weight(0), 1.)
        self.assertEqual(trivial_empirical_1d.weight_sum, 1.)
        self.assertEqual(trivial_empirical_1d.mean, 0.)
        self.assertEqual(trivial_empirical_1d.var_n, 0.)
        npt.assert_almost_equal(trivial_empirical_1d.var_n_minus_1, np.nan)
        self.assertEqual(trivial_empirical_1d.var, 0.)
        self.assertEqual(trivial_empirical_1d.cov_n, 0.)
        npt.assert_almost_equal(trivial_empirical_1d.cov_n_minus_1, np.nan)
        self.assertEqual(trivial_empirical_1d.cov, 0.)
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            trivial_empirical_1d.vol_n
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            trivial_empirical_1d.vol_n_minus_1
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            trivial_empirical_1d.vol

        # Two equally weighted particles at -1 and +1: mean 0, variance 1.
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[.5, .5])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        npt.assert_almost_equal(simple_empirical_1d.particle(0), np.array([[-1.]]))
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))
        npt.assert_almost_equal(simple_empirical_1d.weight(0), .5)
        npt.assert_almost_equal(simple_empirical_1d.normalised_weights, np.array([[.5], [.5]]))
        npt.assert_almost_equal(simple_empirical_1d.normalised_weight(0), .5)
        self.assertEqual(simple_empirical_1d.weight_sum, 1.)
        self.assertEqual(simple_empirical_1d.mean, 0.)
        self.assertEqual(simple_empirical_1d.var_n, 1.)
        # "n minus 1" (unbiased) stats don't make sense as we are not using "repeat"-type weights, meaning that each
        # weight represents the number of occurrences of one observation:
        npt.assert_almost_equal(simple_empirical_1d.var_n_minus_1, np.inf)
        self.assertEqual(simple_empirical_1d.cov_n, 1.)
        # "n minus 1" (unbiased) stats don't make sense as we are not using "repeat"-type weights, meaning that each
        # weight represents the number of occurrences of one observation:
        npt.assert_almost_equal(simple_empirical_1d.cov_n_minus_1, np.inf)
        self.assertEqual(simple_empirical_1d.cov, 1.)
        self.assertEqual(simple_empirical_1d.vol_n, 1.)
        # "n minus 1" (unbiased) stats don't make sense as we are not using "repeat"-type weights, meaning that each
        # weight represents the number of occurrences of one observation:
        self.assertEqual(simple_empirical_1d.vol_n_minus_1, np.inf)
        self.assertEqual(simple_empirical_1d.vol, 1.)

        # The weights can be specified as a one-dimensional array...
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[.5, .5])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        # ...but they come back as a (two-dimensional) column vector:
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))

        # ...alternatively, the weights can be specified as a (two-dimensional) column vector:
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[[.5], [.5]])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        # ...they always come back as a (two-dimensional) column vector:
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))

        # If the particles are specified as a one-dimensional array, they are interpreted as...
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[-1., 1.], weights=[.5, .5])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        # ...multiple one-dimensional particles (each row corresponds to a particle, each column to a dimension):
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))

        # Now we shall be using "repeat"-type weights:
        repeat_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[2., 1.])
        self.assertEqual(repeat_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(repeat_empirical_1d.effective_particle_count, 1.7999999999999998)
        self.assertEqual(repeat_empirical_1d.dim, 1)
        npt.assert_almost_equal(repeat_empirical_1d.particles, np.array([[-1.], [1.]]))
        npt.assert_almost_equal(repeat_empirical_1d.particle(0), np.array([[-1.]]))
        npt.assert_almost_equal(repeat_empirical_1d.weights, np.array([[2.], [1.]]))
        npt.assert_almost_equal(repeat_empirical_1d.weight(0), 2.)
        npt.assert_almost_equal(repeat_empirical_1d.normalised_weights, np.array([[ 0.6666667], [ 0.3333333]]))
        npt.assert_almost_equal(repeat_empirical_1d.normalised_weight(0), 0.6666667)
        self.assertEqual(repeat_empirical_1d.weight_sum, 3.)
        npt.assert_almost_equal(repeat_empirical_1d.mean, -0.33333333)
        npt.assert_almost_equal(repeat_empirical_1d.var_n, 0.88888889)
        npt.assert_almost_equal(repeat_empirical_1d.var_n_minus_1, 1.3333333)
        npt.assert_almost_equal(repeat_empirical_1d.cov_n, 0.88888889)
        npt.assert_almost_equal(repeat_empirical_1d.cov_n_minus_1, 1.3333333)
        npt.assert_almost_equal(repeat_empirical_1d.cov, 0.88888889)
        npt.assert_almost_equal(repeat_empirical_1d.vol_n, 0.94280904)
        npt.assert_almost_equal(repeat_empirical_1d.vol_n_minus_1, 1.15470054)
        npt.assert_almost_equal(repeat_empirical_1d.vol, 0.94280904)

        # Now we shall be using "repeat"-type weights. There are three two-dimensional particles:
        repeat_empirical_2d = distrs.EmpiricalDistr(particles=[[-2., 2.], [0., 0.], [1., -1.]], weights=[2., 1., 1.])
        self.assertEqual(repeat_empirical_2d.particle_count, 3)
        npt.assert_almost_equal(repeat_empirical_2d.effective_particle_count, 2.6666666666666665)
        self.assertEqual(repeat_empirical_2d.dim, 2)
        npt.assert_almost_equal(repeat_empirical_2d.particles, np.array([[-2., 2.], [0., 0.], [1., -1.]]))
        npt.assert_almost_equal(repeat_empirical_2d.particle(0), np.array([[-2.], [2.]]))
        npt.assert_almost_equal(repeat_empirical_2d.weights, np.array([[2.], [1.], [1.]]))
        npt.assert_almost_equal(repeat_empirical_2d.weight(0), 2.)
        npt.assert_almost_equal(repeat_empirical_2d.normalised_weights, np.array([[ 0.5 ], [ 0.25], [ 0.25]]))
        npt.assert_almost_equal(repeat_empirical_2d.normalised_weight(0), .5)
        self.assertEqual(repeat_empirical_2d.weight_sum, 4.)
        npt.assert_almost_equal(repeat_empirical_2d.mean, [[-0.75], [ 0.75]])
        npt.assert_almost_equal(repeat_empirical_2d.var_n, [[ 1.6875], [ 1.6875]])
        npt.assert_almost_equal(repeat_empirical_2d.var_n_minus_1, [[ 2.25], [ 2.25]])
        npt.assert_almost_equal(repeat_empirical_2d.cov_n, [[ 1.6875, -1.6875], [-1.6875,  1.6875]])
        npt.assert_almost_equal(repeat_empirical_2d.cov_n_minus_1, [[ 2.25, -2.25], [-2.25,  2.25]])
        npt.assert_almost_equal(repeat_empirical_2d.cov, [[ 1.6875, -1.6875], [-1.6875,  1.6875]])
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            repeat_empirical_2d.vol_n
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            repeat_empirical_2d.vol_n_minus_1
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            repeat_empirical_2d.vol

        # Approximate a correlated 2-d normal with 100 equally weighted samples:
        normal_distr = distrs.NormalDistr(mean=[10., 100.], cov=[[4., -3.], [-3., 9.]])
        particles = normal_distr.sample(size=100)
        approx_normal_empirical_2d = distrs.EmpiricalDistr(particles=particles, weights=np.ones((100,)))
        self.assertEqual(approx_normal_empirical_2d.particle_count, 100)
        npt.assert_almost_equal(approx_normal_empirical_2d.effective_particle_count, 100.)
        self.assertEqual(approx_normal_empirical_2d.dim, 2)
        npt.assert_almost_equal(approx_normal_empirical_2d.particles, particles)
        npt.assert_almost_equal(approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(approx_normal_empirical_2d.weights, npu.col(*np.ones((100,))))
        npt.assert_almost_equal(approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100,))) / 100.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weight(0), .01)
        self.assertEqual(approx_normal_empirical_2d.weight_sum, 100.)
        npt.assert_almost_equal(approx_normal_empirical_2d.mean, [[ 10.2077457], [ 99.6856645]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n, [[ 3.3516275], [ 6.7649298]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n_minus_1, [[ 3.3854823], [ 6.8332624]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n, [[ 3.3516275, -1.8258307], [-1.8258307,  6.7649298]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n_minus_1, [[ 3.3854823, -1.8442735], [-1.8442735,  6.8332624]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov, [[ 3.3516275, -1.8258307], [-1.8258307,  6.7649298]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n, [[ 1.8307451,  0.       ], [-0.9973157,  2.4021431]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n_minus_1, [[ 1.839968 ,  0.       ], [-1.00234  ,  2.4142446]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol, [[ 1.8307451,  0.       ], [-0.9973157,  2.4021431]])

        # Using more particles more faithfully approximates the mean and covariance of the normal distribution:
        normal_distr = distrs.NormalDistr(mean=[10., 100.], cov=[[4., -3.], [-3., 9.]])
        particles = normal_distr.sample(size=100000)
        approx_normal_empirical_2d = distrs.EmpiricalDistr(particles=particles, weights=np.ones((100000,)))
        self.assertEqual(approx_normal_empirical_2d.particle_count, 100000)
        npt.assert_almost_equal(approx_normal_empirical_2d.effective_particle_count, 100000.)
        self.assertEqual(approx_normal_empirical_2d.dim, 2)
        npt.assert_almost_equal(approx_normal_empirical_2d.particles, particles)
        npt.assert_almost_equal(approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(approx_normal_empirical_2d.weights, npu.col(*np.ones((100000,))))
        npt.assert_almost_equal(approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100000,))) / 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weight(0), .00001)
        self.assertEqual(approx_normal_empirical_2d.weight_sum, 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.mean, [[ 9.9863195], [ 100.0145412]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n, [[ 3.9901799], [ 9.0390325]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902198], [ 9.0391229]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n, [[ 3.9901799, -3.0120428], [-3.0120428,  9.0390325]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902198, -3.0120729], [-3.0120729,  9.0391229]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov, [[ 3.9901799, -3.0120428], [-3.0120428,  9.0390325]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n, [[ 1.9975435,  0.       ], [-1.5078735,  2.6010287]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975535,  0.       ], [-1.507881 ,  2.6010417]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol, [[ 1.9975435,  0.       ], [-1.5078735,  2.6010287]])
Example #10
0
def run(observable,
        obss=None,
        times=None,
        obs_covs=None,
        true_values=None,
        df=None,
        fun=None,
        return_df=False):
    """Feed a sequence of observations through observable(s) and collect results.

    Parameters
    ----------
    observable : an observable, a callable producing one from an observation,
        or an iterable of either (one per observation).
    obss : the observations; a string/int resolves to a column of `df`.
    times : observation times; a string/int resolves to a column of `df`.
        If None and `obss` is a pandas Series, the Series index is used;
        otherwise missing times default to consecutive integers.
    obs_covs : observation covariances; a string/int resolves to a column of
        `df`. Must not be given when an observation is already a distribution.
    true_values : true state values; a string/int resolves to a column of `df`.
    df : optional pandas DataFrame against which string/int arguments above
        are resolved.
    fun : optional transform applied to each observation before filtering.
    return_df : if True, also build a per-step diagnostics DataFrame.

    Returns
    -------
    FilterRunResult holding the last observation result, the cumulative
    log-likelihood over accepted observations, and the diagnostics DataFrame
    (None unless `return_df`).
    """
    # Resolve column-name/column-index arguments against df, if one is given.
    if df is not None:
        if obss is not None and (checks.is_string(obss)
                                 or checks.is_int(obss)):
            obss = df[obss]

        if times is None:
            # No explicit times: fall back to the Series index when available.
            if isinstance(obss, pd.Series): times = obss.index.values
        elif (checks.is_string(times) or checks.is_int(times)):
            times = df[times].values

        if isinstance(obss, pd.Series): obss = obss.values

        if obs_covs is not None and (checks.is_string(obs_covs)
                                     or checks.is_int(obs_covs)):
            obs_covs = df[obs_covs].values

        if true_values is not None and (checks.is_string(true_values)
                                        or checks.is_int(true_values)):
            true_values = df[true_values].values

    checks.check_not_none(obss)

    # Promote scalar arguments to (infinite) iterables so they zip with obss.
    if not checks.is_iterable_not_string(observable):
        observable = utils.xconst(observable)
    if not checks.is_iterable_not_string(obss): obss = [obss]
    if not checks.is_iterable_not_string(times): times = utils.xconst(times)
    if not checks.is_iterable_not_string(obs_covs):
        obs_covs = utils.xconst(obs_covs)
    if not checks.is_iterable_not_string(true_values):
        true_values = utils.xconst(true_values)

    obs_result = None
    cumulative_log_likelihood = 0.

    if return_df:
        # One list per diagnostics column; appended to in lockstep each step.
        time = []
        filter_name = []
        filter_type = []
        observable_name = []
        accepted = []
        obs_mean = []
        obs_cov = []
        predicted_obs_mean = []
        predicted_obs_cov = []
        cross_cov = []
        innov_mean = []
        innov_cov = []
        prior_state_mean = []
        prior_state_cov = []
        posterior_state_mean = []
        posterior_state_cov = []
        true_value = []
        log_likelihood = []
        gain = []

    last_time = None

    for an_observable, an_obs, a_time, an_obs_cov, a_true_value in zip(
            observable, obss, times, obs_covs, true_values):
        # Missing time: number steps consecutively from 0.
        if a_time is None:
            if last_time is None: a_time = 0
            else: a_time = last_time + 1
        last_time = a_time

        if checks.is_callable(an_observable):
            an_observable = an_observable(an_obs)
        if fun is not None: an_obs = fun(an_obs)
        if an_obs_cov is not None:
            if isinstance(an_obs, (Obs, distrs.Distr)):
                raise ValueError(
                    'An observation covariance is provided while the observation is given by a distribution --- conflicting arguments'
                )
            an_obs = distrs.NormalDistr(an_obs, an_obs_cov)

        if return_df and len(time) == 0:
            # First step: record the filter's initial state as a leading row
            # whose observation-related columns are None.
            an_initial_state_mean = an_observable.filter.state.state_distr.mean
            an_initial_state_cov = an_observable.filter.state.state_distr.cov
            time.append(an_observable.filter.time)
            filter_name.append(an_observable.filter.name)
            filter_type.append(type(an_observable.filter))
            observable_name.append(None)
            accepted.append(None)
            obs_mean.append(None)
            obs_cov.append(None)
            predicted_obs_mean.append(None)
            predicted_obs_cov.append(None)
            cross_cov.append(None)
            innov_mean.append(None)
            innov_cov.append(None)
            prior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            true_value.append(None)
            log_likelihood.append(None)
            gain.append(None)

        if isinstance(an_obs, Obs):
            a_time, _ = _time_and_obs_distr(an_obs, a_time,
                                            an_observable.filter.time)

        predicted_obs = an_observable.predict(time=a_time,
                                              true_value=a_true_value)
        # Capture the state before and after observing, for the diagnostics.
        a_prior_state_mean = an_observable.filter.state.state_distr.mean
        a_prior_state_cov = an_observable.filter.state.state_distr.cov
        obs_result = an_observable.observe(obs=an_obs,
                                           time=a_time,
                                           true_value=a_true_value,
                                           predicted_obs=predicted_obs)
        if obs_result.accepted:
            cumulative_log_likelihood += obs_result.log_likelihood
        a_posterior_state_mean = an_observable.filter.state.state_distr.mean
        a_posterior_state_cov = an_observable.filter.state.state_distr.cov

        if return_df:
            time.append(obs_result.obs.time)
            filter_name.append(an_observable.filter.name)
            filter_type.append(type(an_observable.filter))
            observable_name.append(an_observable.name)
            accepted.append(obs_result.accepted)
            obs_mean.append(
                npu.to_scalar(obs_result.obs.distr.mean,
                              raise_value_error=False))
            obs_cov.append(
                npu.to_scalar(obs_result.obs.distr.cov,
                              raise_value_error=False))
            predicted_obs_mean.append(
                npu.to_scalar(obs_result.predicted_obs.distr.mean,
                              raise_value_error=False))
            predicted_obs_cov.append(
                npu.to_scalar(obs_result.predicted_obs.distr.cov,
                              raise_value_error=False))
            cross_cov.append(
                npu.to_scalar(obs_result.predicted_obs.cross_cov,
                              raise_value_error=False))
            innov_mean.append(
                npu.to_scalar(obs_result.innov_distr.mean,
                              raise_value_error=False))
            innov_cov.append(
                npu.to_scalar(obs_result.innov_distr.cov,
                              raise_value_error=False))
            prior_state_mean.append(
                npu.to_scalar(a_prior_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(a_prior_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(a_posterior_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(a_posterior_state_cov, raise_value_error=False))
            true_value.append(
                npu.to_scalar(a_true_value, raise_value_error=False))
            log_likelihood.append(
                npu.to_scalar(obs_result.log_likelihood,
                              raise_value_error=False))
            gain.append(
                obs_result.gain if hasattr(obs_result, 'gain') else None)

    df = None
    if return_df:
        df = pd.DataFrame(
            {
                'time': time,
                'filter_name': filter_name,
                'filter_type': filter_type,
                'observable_name': observable_name,
                'accepted': accepted,
                'obs_mean': obs_mean,
                'obs_cov': obs_cov,
                'predicted_obs_mean': predicted_obs_mean,
                'predicted_obs_cov': predicted_obs_cov,
                'cross_cov': cross_cov,
                'innov_mean': innov_mean,
                'innov_cov': innov_cov,
                'prior_state_mean': prior_state_mean,
                'prior_state_cov': prior_state_cov,
                # BUG FIX: these two columns previously pointed at the
                # prior_state_* lists, silently duplicating the prior columns.
                'posterior_state_mean': posterior_state_mean,
                'posterior_state_cov': posterior_state_cov,
                'true_value': true_value,
                'log_likelihood': log_likelihood,
                'gain': gain
            },
            columns=('time', 'filter_name', 'filter_type', 'observable_name',
                     'accepted', 'obs_mean', 'obs_cov', 'predicted_obs_mean',
                     'predicted_obs_cov', 'cross_cov', 'innov_mean',
                     'innov_cov', 'prior_state_mean', 'prior_state_cov',
                     'posterior_state_mean', 'posterior_state_cov',
                     'true_value', 'log_likelihood', 'gain'))

    return FilterRunResult(obs_result, cumulative_log_likelihood, df)
# Example #11
 def _propagate_distr_impl(self, time_delta, distr0):
     """Propagate `distr0` forward by `time_delta`.

     Both the mean shift and the covariance growth scale linearly with
     `time_delta`, using the process's own `_mean` and `_cov`.
     """
     shifted_mean = distr0.mean + time_delta * self._mean
     grown_cov = distr0.cov + self._cov * time_delta
     return distrs.NormalDistr(mean=shifted_mean, cov=grown_cov)
# Example #12
def run(observable,
        obss=None,
        times=None,
        obs_covs=None,
        true_values=None,
        df=None,
        fun=None,
        return_df=False):
    """Feed a sequence of observations through observable(s).

    Parameters
    ----------
    observable : an observable, a callable producing one from an observation,
        or an iterable of either (one per observation).
    obss : the observations; a string resolves to a column of `df`.
    times : observation times; a string resolves to a column of `df`.
    obs_covs : observation covariances; a string resolves to a column of `df`.
        Must not be given when an observation is already a distribution.
    true_values : true state values; a string resolves to a column of `df`.
    df : optional pandas DataFrame against which string arguments above are
        resolved.
    fun : optional transform applied to each observation before filtering.
    return_df : if True, return a per-step diagnostics DataFrame instead of
        the last observation result.

    Returns
    -------
    A diagnostics pandas DataFrame when `return_df` is True; otherwise the
    last observation result.
    """
    # Resolve column-name arguments against df, if one is given.
    if df is not None:
        if obss is not None and checks.is_string(obss):
            obss = df[obss].values
        if times is not None and checks.is_string(times):
            times = df[times].values
        if obs_covs is not None and checks.is_string(obs_covs):
            obs_covs = df[obs_covs].values
        if true_values is not None and checks.is_string(true_values):
            true_values = df[true_values].values

    checks.check_not_none(obss)

    # Promote scalar arguments to (infinite) iterables so they zip with obss.
    if not checks.is_iterable_not_string(observable):
        observable = utils.xconst(observable)
    if not checks.is_iterable_not_string(obss): obss = [obss]
    if not checks.is_iterable_not_string(times): times = utils.xconst(times)
    if not checks.is_iterable_not_string(obs_covs):
        obs_covs = utils.xconst(obs_covs)
    if not checks.is_iterable_not_string(true_values):
        true_values = utils.xconst(true_values)

    obs_result = None

    if return_df:
        # One list per diagnostics column; appended to in lockstep each step.
        time = []
        accepted = []
        obs_mean = []
        obs_cov = []
        predicted_obs_mean = []
        predicted_obs_cov = []
        innov_mean = []
        innov_cov = []
        prior_state_mean = []
        prior_state_cov = []
        posterior_state_mean = []
        posterior_state_cov = []
        log_likelihood = []

    for an_observable, an_obs, a_time, an_obs_cov, a_true_value in zip(
            observable, obss, times, obs_covs, true_values):
        if checks.is_callable(an_observable):
            an_observable = an_observable(an_obs)
        if fun is not None: an_obs = fun(an_obs)
        if an_obs_cov is not None:
            if isinstance(an_obs, (Obs, distrs.Distr)):
                raise ValueError(
                    'An observation covariance is provided while the observation is given by a distribution --- conflicting arguments'
                )
            an_obs = distrs.NormalDistr(an_obs, an_obs_cov)

        if return_df and len(time) == 0:
            # First step: record the filter's initial state as a leading row
            # whose observation-related columns are None.
            an_initial_state_mean = an_observable.filter.state.state_distr.mean
            an_initial_state_cov = an_observable.filter.state.state_distr.cov
            time.append(an_observable.filter.time)
            accepted.append(None)
            obs_mean.append(None)
            obs_cov.append(None)
            predicted_obs_mean.append(None)
            predicted_obs_cov.append(None)
            innov_mean.append(None)
            innov_cov.append(None)
            prior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            log_likelihood.append(None)

        if isinstance(an_obs, Obs):
            a_time, _ = _time_and_obs_distr(an_obs, a_time,
                                            an_observable.filter.time)

        predicted_obs = an_observable.predict(time=a_time,
                                              true_value=a_true_value)
        # Capture the state before and after observing, for the diagnostics.
        a_prior_state_mean = an_observable.filter.state.state_distr.mean
        a_prior_state_cov = an_observable.filter.state.state_distr.cov
        obs_result = an_observable.observe(obs=an_obs,
                                           time=a_time,
                                           true_value=a_true_value,
                                           predicted_obs=predicted_obs)
        a_posterior_state_mean = an_observable.filter.state.state_distr.mean
        a_posterior_state_cov = an_observable.filter.state.state_distr.cov

        if return_df:
            time.append(obs_result.obs.time)
            accepted.append(obs_result.accepted)
            obs_mean.append(
                npu.to_scalar(obs_result.obs.distr.mean,
                              raise_value_error=False))
            obs_cov.append(
                npu.to_scalar(obs_result.obs.distr.cov,
                              raise_value_error=False))
            predicted_obs_mean.append(
                npu.to_scalar(obs_result.predicted_obs.distr.mean,
                              raise_value_error=False))
            predicted_obs_cov.append(
                npu.to_scalar(obs_result.predicted_obs.distr.cov,
                              raise_value_error=False))
            innov_mean.append(
                npu.to_scalar(obs_result.innov_distr.mean,
                              raise_value_error=False))
            innov_cov.append(
                npu.to_scalar(obs_result.innov_distr.cov,
                              raise_value_error=False))
            prior_state_mean.append(
                npu.to_scalar(a_prior_state_mean, raise_value_error=False))
            prior_state_cov.append(
                npu.to_scalar(a_prior_state_cov, raise_value_error=False))
            posterior_state_mean.append(
                npu.to_scalar(a_posterior_state_mean, raise_value_error=False))
            posterior_state_cov.append(
                npu.to_scalar(a_posterior_state_cov, raise_value_error=False))
            log_likelihood.append(
                npu.to_scalar(obs_result.log_likelihood,
                              raise_value_error=False))

    if return_df:
        return pd.DataFrame(
            {
                'time': time,
                'accepted': accepted,
                'obs_mean': obs_mean,
                'obs_cov': obs_cov,
                'predicted_obs_mean': predicted_obs_mean,
                'predicted_obs_cov': predicted_obs_cov,
                'innov_mean': innov_mean,
                'innov_cov': innov_cov,
                'prior_state_mean': prior_state_mean,
                'prior_state_cov': prior_state_cov,
                # BUG FIX: these two columns previously pointed at the
                # prior_state_* lists, silently duplicating the prior columns.
                'posterior_state_mean': posterior_state_mean,
                'posterior_state_cov': posterior_state_cov,
                'log_likelihood': log_likelihood
            },
            columns=('time', 'accepted', 'obs_mean', 'obs_cov',
                     'predicted_obs_mean', 'predicted_obs_cov', 'innov_mean',
                     'innov_cov', 'prior_state_mean', 'prior_state_cov',
                     'posterior_state_mean', 'posterior_state_cov',
                     'log_likelihood'))

    return obs_result