Example #1
    def test_wide_sense_distr(self):
        std_wide_sense_1d = distrs.WideSenseDistr(dim=1)
        npt.assert_almost_equal(std_wide_sense_1d.mean, 0.)
        npt.assert_almost_equal(std_wide_sense_1d.cov, 1.)
        npt.assert_almost_equal(std_wide_sense_1d.vol, 1.)
        
        with self.assertRaises(NotImplementedError):
            std_wide_sense_1d.sample()
        
        std_wide_sense_2d = distrs.WideSenseDistr(dim=2)
        npt.assert_almost_equal(std_wide_sense_2d.mean, npu.col_of(2, 0.))
        npt.assert_almost_equal(std_wide_sense_2d.cov, np.eye(2))
        npt.assert_almost_equal(std_wide_sense_2d.vol, np.eye(2))
        
        with self.assertRaises(NotImplementedError):
            std_wide_sense_2d.sample()
        
        sd1 = 3.
        sd2 = 4.
        cor = -.5

        wide_sense_2d = distrs.WideSenseDistr(mean=[1., 2.], cov=stats.make_cov_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(wide_sense_2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(wide_sense_2d.cov, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(wide_sense_2d.vol, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])
        
        with self.assertRaises(NotImplementedError):
            wide_sense_2d.sample()

        wide_sense_2d = distrs.WideSenseDistr(mean=[1., 2.], vol=stats.make_vol_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(wide_sense_2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(wide_sense_2d.cov, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(wide_sense_2d.vol, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])
        
        with self.assertRaises(NotImplementedError):
            wide_sense_2d.sample()
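
In the assertions above, vol is the lower-triangular factor of cov, i.e. cov == vol @ vol.T, which is what stats.make_cov_2d and stats.make_vol_2d encode for a given pair of standard deviations and a correlation. A minimal plain-NumPy check of that relationship for the sd1 = 3., sd2 = 4., cor = -.5 case used here (not using the library's helpers):

import numpy as np

sd1, sd2, cor = 3., 4., -.5

# Lower-triangular "volatility" factor, exactly as asserted in the test above.
vol = np.array([[sd1, 0.],
                [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])

# Corresponding covariance matrix.
cov = np.array([[sd1 * sd1, cor * sd1 * sd2],
                [cor * sd1 * sd2, sd2 * sd2]])

assert np.allclose(vol @ vol.T, cov)                 # cov is vol times its transpose
assert np.allclose(np.linalg.cholesky(cov), vol)     # vol is the Cholesky factor of cov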
Example #2
    def test_euler_maruyama(self):
        rnd.random_state(np.random.RandomState(seed=42), force=True)

        W = proc.WienerProcess.create_2d(mean1=-.5, mean2=3., sd1=3., sd2=4., cor=.5)
        em = sim.EulerMaruyama(process=W)
        
        t, v = next(em)
        npt.assert_almost_equal(t, 0.)
        npt.assert_almost_equal(v, npu.col(0.0, 0.0))

        t, v = next(em)
        npt.assert_almost_equal(t, 1.)
        npt.assert_almost_equal(v, npu.col(0.9901425, 3.5144667))
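
For a Wiener process with constant drift mu and volatility matrix B, a single Euler-Maruyama step from (t, x) to t + dt is x + mu * dt + B @ (sqrt(dt) * z), with z a vector of independent standard normals, which is what the second (t, v) pair above reflects. A hedged plain-NumPy sketch of one such step; the seed and variate ordering are illustrative only and will not reproduce the library's values:

import numpy as np

rng = np.random.default_rng(42)     # illustrative seed, not the library's rnd state

mu = np.array([[-.5], [3.]])        # drift per unit time (mean1, mean2 above)
sd1, sd2, cor = 3., 4., .5
vol = np.array([[sd1, 0.],
                [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])

t, x, dt_step = 0., np.zeros((2, 1)), 1.
z = rng.standard_normal((2, 1))                           # independent standard normal variate
x = x + mu * dt_step + vol @ (np.sqrt(dt_step) * z)       # one Euler-Maruyama update
t += dt_step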
Example #3
 def testnrow(self):
     r = npu.row(429., 5., 2., 14.)
     self.assertEqual(npu.nrow(r), 1)
     c = npu.col(429., 5., 2., 14.)
     self.assertEqual(npu.nrow(c), 4)
     m = npu.matrixof(3, 5, 0.)
     self.assertEqual(npu.nrow(m), 3)
Example #4
 def test_ncol(self):
     r = npu.row(429., 5., 2., 14.)
     self.assertEqual(npu.ncol(r), 4)
     c = npu.col(429., 5., 2., 14.)
     self.assertEqual(npu.ncol(c), 1)
     m = npu.matrix_of(3, 5, 0.)
     self.assertEqual(npu.ncol(m), 5)
Example #5
 def _propagate_distr_impl(self, distr0, time_delta, assume_distr=False):
     if not isinstance(distr0, distrs.LogNormalDistr) and not assume_distr:
         raise ValueError('Do not know how to propagate a distribution that is not log-normal')
     # Note: the sum of two independent log-normal distributions is only approximately log-normal
     mean = np.log(distr0.mean) + (self._pct_drift - .5 * npu.col([self._pct_cov[i, i] for i in range(self.process_dim)])) * time_delta
     cov = distr0.cov + time_delta * self._pct_cov
     return distrs.LogNormalDistr(mean_of_log=mean, cov_of_log=cov)
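
The update above is moment matching in log-space over the interval time_delta: the log-mean is advanced by (pct_drift - 0.5 * diag(pct_cov)) * time_delta and the covariance grows by pct_cov * time_delta. A hedged plain-NumPy sketch of the same arithmetic, with hypothetical inputs standing in for self._pct_drift, self._pct_cov and distr0:

import numpy as np

pct_drift = np.array([[0.05], [0.03]])
pct_cov = np.array([[0.04, 0.01],
                    [0.01, 0.09]])
mean0 = np.array([[2.9], [4.0]])          # stand-in for distr0.mean
cov0 = np.array([[0.16, -0.08],
                 [-0.08, 0.16]])          # stand-in for distr0.cov
time_delta = 1. / 252.

# Drift-corrected log-mean and propagated covariance, mirroring the method above;
# np.diag(...).reshape(-1, 1) plays the role of npu.col([...]).
mean_of_log = np.log(mean0) + (pct_drift - .5 * np.diag(pct_cov).reshape(-1, 1)) * time_delta
cov_of_log = cov0 + time_delta * pct_cov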
Example #6
 def test_vec_and_unvec(self):
     a = np.array([[  5., 1.,   14., 2., 42.],
                   [132., 2.,  429., 1.,  1.],
                   [  1., 2., 1430., 2.,  2.]])
     b = npu.col(5., 132., 1., 1., 2., 2., 14., 429., 1430., 2., 1., 2., 42., 1., 2.)
     npt.assert_almost_equal(npu.vec(a), b)
     npt.assert_almost_equal(npu.unvec(b, 3), a)
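
The expected b here is consistent with column-major (Fortran-order) stacking: vec stacks the columns of a into a single column, and unvec(b, 3) restores a 3-row matrix. A plain-NumPy sketch of the same mapping, not using the library's npu.vec/npu.unvec:

import numpy as np

a = np.array([[  5., 1.,   14., 2., 42.],
              [132., 2.,  429., 1.,  1.],
              [  1., 2., 1430., 2.,  2.]])

vec_a = a.reshape((-1, 1), order='F')        # stack columns into one column vector
unvec_a = vec_a.reshape((3, -1), order='F')  # inverse: refill 3 rows column by column

assert np.array_equal(unvec_a, a)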
Example #7
File: processes.py Project: cav71/tsa
 def propagate(self, time, variate, time0, value0, state0=None):
     if time == time0: return npu.to_ndim_2(value0, ndim_1_to_col=True, copy=True)
     value0 = npu.to_ndim_2(value0, ndim_1_to_col=True, copy=False)
     variate = npu.to_ndim_2(variate, ndim_1_to_col=True, copy=False)
     time_delta = time - time0
     return value0 * np.exp(
             (self._pct_drift - .5 * npu.col(*np.sum(self._pct_vol**2, axis=1))) * time_delta + \
             np.dot(self._pct_vol, np.sqrt(time_delta) * variate))
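
The return expression is the exact log-normal transition of geometric Brownian motion: each component of value0 is multiplied by exp((pct_drift - 0.5 * row sums of pct_vol**2) * time_delta + pct_vol @ (sqrt(time_delta) * variate)). A hedged plain-NumPy sketch of one such step, with illustrative parameter values:

import numpy as np

rng = np.random.default_rng(0)              # illustrative; the library supplies the variate

pct_drift = np.array([[0.05], [0.03]])
pct_vol = np.array([[0.20, 0.00],
                    [0.06, 0.10]])
value0 = np.array([[100.], [50.]])
time_delta = 1. / 252.

variate = rng.standard_normal((2, 1))
drift_correction = .5 * np.sum(pct_vol ** 2, axis=1).reshape(-1, 1)
value = value0 * np.exp((pct_drift - drift_correction) * time_delta
                        + pct_vol @ (np.sqrt(time_delta) * variate))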
Example #8
File: distrs.py Project: vishalbelsare/tsa
    def __init__(self, mean_of_log=None, cov_of_log=None, vol_of_log=None, dim=None, copy=True):
        if mean_of_log is not None and dim is not None and np.size(mean_of_log) == 1:
            mean_of_log = npu.col_of(dim, npu.to_scalar(mean_of_log))
        
        if mean_of_log is None and vol_of_log is None and cov_of_log is None:
            self._dim = 1 if dim is None else dim
            mean_of_log = npu.col_of(self._dim, 0.)
            cov_of_log = np.eye(self._dim)
            vol_of_log = np.eye(self._dim)
            
        self._dim, self._mean_of_log, self._vol_of_log, self._cov_of_log = None, None, None, None
        
        # TODO We don't currently check whether cov_of_log and vol_of_log are consistent, i.e. that cov_of_log = np.dot(vol_of_log, vol_of_log.T) -- should we?
        
        if mean_of_log is not None:
            self._mean_of_log = npu.to_ndim_2(mean_of_log, ndim_1_to_col=True, copy=copy)
            self._dim = npu.nrow(self._mean_of_log)
        if cov_of_log is not None:
            self._cov_of_log = npu.to_ndim_2(cov_of_log, ndim_1_to_col=True, copy=copy)
            self._dim = npu.nrow(self._cov_of_log)
        if vol_of_log is not None:
            self._vol_of_log = npu.to_ndim_2(vol_of_log, ndim_1_to_col=True, copy=copy)
            self._dim = npu.nrow(self._vol_of_log)
        
        if self._mean_of_log is None: self._mean_of_log = npu.col_of(self._dim, 0.)
        if self._cov_of_log is None and self._vol_of_log is None:
            self._cov_of_log = np.eye(self._dim)
            self._vol_of_log = np.eye(self._dim)
        npc.check_col(self._mean_of_log)
        npc.check_nrow(self._mean_of_log, self._dim)
        if self._cov_of_log is not None:
            npc.check_nrow(self._cov_of_log, self._dim)
            npc.check_square(self._cov_of_log)
        if self._vol_of_log is not None:
            npc.check_nrow(self._vol_of_log, self._dim)

        if self._cov_of_log is None: self._cov_of_log = stats.vol_to_cov(self._vol_of_log)
        if self._vol_of_log is None: self._vol_of_log = stats.cov_to_vol(self._cov_of_log)
            
        npu.make_immutable(self._mean_of_log)
        npu.make_immutable(self._cov_of_log)
        npu.make_immutable(self._vol_of_log)

        mean = np.exp(self._mean_of_log + .5 * npu.col(*[self._cov_of_log[i,i] for i in range(self._dim)]))
        cov = np.array([[np.exp(self._mean_of_log[i,0] + self._mean_of_log[j,0] + .5 * (self._cov_of_log[i,i] + self._cov_of_log[j,j])) * (np.exp(self._cov_of_log[i,j]) - 1.) for j in range(self._dim)] for i in range(self._dim)])
        vol = stats.cov_to_vol(cov)
        
        self._to_string_helper_LogNormalDistr = None
        self._str_LogNormalDistr = None
        
        super().__init__(mean, cov, vol, self._dim, copy)
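
The mean, cov and vol computed at the end of __init__ follow the standard log-normal moment formulas: E[X_i] = exp(mu_i + Sigma_ii / 2) and Cov[X_i, X_j] = exp(mu_i + mu_j + (Sigma_ii + Sigma_jj) / 2) * (exp(Sigma_ij) - 1), where mu and Sigma are the mean and covariance of the log. A plain-NumPy sketch for the standard 1-D case (mu = 0, Sigma = 1), which matches the values asserted in the LogNormalDistr test further below (mean ~= 1.6487213, cov ~= 4.6707743, vol ~= 2.1611974):

import numpy as np

mu = np.array([0.])           # mean of log
sigma = np.array([[1.]])      # cov of log
d = np.diag(sigma)

mean = np.exp(mu + .5 * d)
cov = np.exp(mu[:, None] + mu[None, :] + .5 * (d[:, None] + d[None, :])) * (np.exp(sigma) - 1.)
vol = np.sqrt(cov)            # valid here because cov is 1x1

# mean ~= 1.6487213, cov ~= 4.6707743, vol ~= 2.1611974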
Example #9
File: testdistrs.py Project: ericjohn/tsa
    def testMultivariateNormalDistr(self):
        stdnormal1d = distrs.NormalDistr(dim=1)
        npt.assert_almost_equal(stdnormal1d.mean, 0.)
        npt.assert_almost_equal(stdnormal1d.cov, 1.)
        npt.assert_almost_equal(stdnormal1d.vol, 1.)

        stdnormal2d = distrs.NormalDistr(dim=2)
        npt.assert_almost_equal(stdnormal2d.mean, npu.colof(2, 0.))
        npt.assert_almost_equal(stdnormal2d.cov, np.eye(2))
        npt.assert_almost_equal(stdnormal2d.vol, np.eye(2))

        sd1 = 3.
        sd2 = 4.
        cor = -.5

        normal2d = distrs.NormalDistr(mean=[1., 2.],
                                      cov=distrs.NormalDistr.makecov2d(
                                          sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(
            normal2d.cov,
            [[sd1 * sd1, cor * sd1 * sd2], [cor * sd1 * sd2, sd2 * sd2]])
        npt.assert_almost_equal(
            normal2d.vol,
            [[sd1, 0.], [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])

        normal2d = distrs.NormalDistr(mean=[1., 2.],
                                      vol=distrs.NormalDistr.makevol2d(
                                          sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(
            normal2d.cov,
            [[sd1 * sd1, cor * sd1 * sd2], [cor * sd1 * sd2, sd2 * sd2]])
        npt.assert_almost_equal(
            normal2d.vol,
            [[sd1, 0.], [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])
Example #10
 def propagate(self, time0, value0, time, variate=None, state0=None, random_state=None):
     if time == time0: return npu.to_ndim_2(value0, ndim_1_to_col=True, copy=True)
     value0 = npu.to_ndim_2(value0, ndim_1_to_col=True, copy=False)
     if variate is None:
         if random_state is None: random_state = rnd.random_state()
         variate = random_state.normal(size=self.noise_dim)
     variate = npu.to_ndim_2(variate, ndim_1_to_col=True, copy=False)
     time_delta = time - time0
     if isinstance(time_delta, np.timedelta64):
         time_delta = time_delta.item()
     if isinstance(time_delta, dt.timedelta):
         time_delta = time_delta.total_seconds() / self._time_unit.total_seconds()
     return value0 * np.exp(
             (self._pct_drift - .5 * npu.col(*np.sum(self._pct_vol**2, axis=1))) * time_delta + \
             np.dot(self._pct_vol, np.sqrt(time_delta) * variate))
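
Before applying the same geometric-Brownian-motion step as in Example #7, this overload normalises time_delta: a np.timedelta64 is converted to a datetime.timedelta via .item(), and a datetime.timedelta is then expressed as a float multiple of self._time_unit. A small plain-NumPy/datetime sketch of that conversion, assuming a one-day time unit:

import datetime as dt
import numpy as np

time_unit = dt.timedelta(days=1)            # stand-in for self._time_unit

time_delta = np.timedelta64(90, 'm')        # 90 minutes
if isinstance(time_delta, np.timedelta64):
    time_delta = time_delta.item()          # -> datetime.timedelta(minutes=90)
if isinstance(time_delta, dt.timedelta):
    time_delta = time_delta.total_seconds() / time_unit.total_seconds()

# time_delta == 0.0625 (90 minutes as a fraction of a day)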
Example #11
    def test_vectorized(self):
        func_call_count = 0

        def func(a, b):
            nonlocal func_call_count
            func_call_count += 1
            return a + b
        
        funcv_call_count = 0
        
        @vectorized
        def funcv(a, b):
            nonlocal funcv_call_count
            funcv_call_count += 1
            return a + b
        
        def solver(a, b, f):
            if npu.is_vectorized(f):
                return f(a, b)
            else:
                rc = np.shape(a)[0]
                r = np.empty((rc, 1))
                for i in range(rc):
                    r[i] = f(a[i], b[i])
                return r
            
        self.assertFalse(npu.is_vectorized(func))
        self.assertTrue(npu.is_vectorized(funcv))
        a = npu.col(14., 2., 429.)
        b = npu.col(42., 1., 5.)
        r = solver(a, b, func)
        npt.assert_almost_equal(r, np.array([[56.], [3.], [434.]]))
        self.assertEqual(func_call_count, 3)
        r = solver(a, b, funcv)
        npt.assert_almost_equal(r, np.array([[56.], [3.], [434.]]))
        self.assertEqual(funcv_call_count, 1)
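
The test relies on is_vectorized to tell whether a function can be applied to whole arrays at once or must be called element by element. A minimal sketch of one way such a decorator and predicate could work (tagging the function with an attribute); the library's actual vectorized/npu.is_vectorized may differ:

def vectorized(f):
    # Mark the function as accepting whole arrays rather than scalars.
    f.is_vectorized = True
    return f

def is_vectorized(f):
    return getattr(f, 'is_vectorized', False)

@vectorized
def add(a, b):
    return a + b

assert is_vectorized(add)
assert not is_vectorized(lambda a, b: a + b)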
Example #12
    def test_multinomial_resample(self):
        rnd.random_state(np.random.RandomState(seed=42), force=True)
        
        normal_distr = distrs.NormalDistr(mean=[10., 100.], cov=[[4., -3.], [-3., 9.]])
        particles = normal_distr.sample(size=100000)
        approx_normal_empirical_2d = distrs.EmpiricalDistr(particles=particles, weights=np.ones((100000,)))
        self.assertEqual(approx_normal_empirical_2d.particle_count, 100000)
        npt.assert_almost_equal(approx_normal_empirical_2d.effective_particle_count, 100000.)
        self.assertEqual(approx_normal_empirical_2d.dim, 2)
        npt.assert_almost_equal(approx_normal_empirical_2d.particles, particles)
        npt.assert_almost_equal(approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(approx_normal_empirical_2d.weights, npu.col(*np.ones((100000,))))
        npt.assert_almost_equal(approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100000,))) / 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weight(0), .00001)
        self.assertEqual(approx_normal_empirical_2d.weight_sum, 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]])
        
        rnd.random_state(np.random.RandomState(seed=43), force=True)

        resampled_approx_normal_empirical_2d = distrs.multinomial_resample(approx_normal_empirical_2d)
        self.assertEqual(resampled_approx_normal_empirical_2d.particle_count, 100000)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.effective_particle_count, 100000.)
        self.assertEqual(resampled_approx_normal_empirical_2d.dim, 2)
        # The resampled particles should ("almost certainly") be different from the original ones:
        self.assertFalse(np.sum(resampled_approx_normal_empirical_2d.particles) == np.sum(particles))
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.particle(0), npu.col(*particles[1]))
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.weights, npu.col(*np.ones((100000,))))
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100000,))) / 100000.)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.normalised_weight(0), .00001)
        self.assertEqual(resampled_approx_normal_empirical_2d.weight_sum, 100000.)
        # But the stats should be pretty close to those of the original empirical distribution, though not to seven
        # decimal places:
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]], decimal=1)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        
        rnd.random_state(np.random.RandomState(seed=43), force=True)
        
        resampled_approx_normal_empirical_2d_particles = approx_normal_empirical_2d.sample(size=100000)
        npt.assert_almost_equal(resampled_approx_normal_empirical_2d_particles, resampled_approx_normal_empirical_2d.particles)

        subsampled_approx_normal_empirical_2d = distrs.multinomial_resample(approx_normal_empirical_2d, target_particle_count=40000)
        self.assertEqual(subsampled_approx_normal_empirical_2d.particle_count, 40000)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.effective_particle_count, 40000.)
        self.assertEqual(subsampled_approx_normal_empirical_2d.dim, 2)
        # The resampled particles should ("almost certainly") be different from the original ones:
        self.assertFalse(np.sum(subsampled_approx_normal_empirical_2d.particles) == np.sum(particles))
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.particle(0), npu.col(*particles[1]))
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.weights, npu.col(*np.ones((40000,))))
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((40000,))) / 40000.)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.normalised_weight(0), .000025)
        self.assertEqual(subsampled_approx_normal_empirical_2d.weight_sum, 40000.)
        # But the stats should be pretty close to those of the original empirical distribution, though not to seven
        # decimal places:
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]], decimal=1)
        npt.assert_almost_equal(subsampled_approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)

        supersampled_approx_normal_empirical_2d = distrs.multinomial_resample(approx_normal_empirical_2d, target_particle_count=300000)
        self.assertEqual(supersampled_approx_normal_empirical_2d.particle_count, 300000)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.effective_particle_count, 300000.)
        self.assertEqual(supersampled_approx_normal_empirical_2d.dim, 2)
        # The resampled particles should ("almost certainly") be different from the original ones:
        self.assertFalse(np.sum(supersampled_approx_normal_empirical_2d.particles) == np.sum(particles))
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.weights, npu.col(*np.ones((300000,))))
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((300000,))) / 300000.)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.normalised_weight(0), 3.3333333333333333e-06)
        self.assertEqual(supersampled_approx_normal_empirical_2d.weight_sum, 300000.)
        # But the stats should be pretty close to those of the original empirical distribution, though not to seven
        # decimal places:
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.mean, [[   9.9866994], [ 100.0141095]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.var_n, [[ 3.9902435], [ 9.0362717]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902834], [ 9.036362 ]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.cov_n, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902834, -3.0112521], [-3.0112521,  9.036362 ]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.cov, [[ 3.9902435, -3.011222 ], [-3.011222 ,  9.0362717]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.vol_n, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975694,  0.       ], [-1.5074581,  2.6007561]], decimal=1)
        npt.assert_almost_equal(supersampled_approx_normal_empirical_2d.vol, [[ 1.9975594,  0.       ], [-1.5074505,  2.6007431]], decimal=1)
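
Multinomial resampling draws target_particle_count indices with replacement, with probabilities proportional to the normalised weights, and returns an equally weighted empirical distribution over the selected particles; that is why the resampled weights above are all ones and the moments only agree with the originals to about one decimal place. A hedged plain-NumPy sketch of the core draw (not the library's distrs.multinomial_resample):

import numpy as np

rng = np.random.default_rng(43)             # illustrative seed

particles = rng.normal(size=(1000, 2))      # stand-in for the empirical particles
weights = np.ones(1000)

probs = weights / weights.sum()             # normalised weights
target_particle_count = 400
idx = rng.choice(len(particles), size=target_particle_count, replace=True, p=probs)
resampled_particles = particles[idx]
resampled_weights = np.ones(target_particle_count)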
Example #13
    def test_normal_distr(self):
        rnd.random_state(np.random.RandomState(seed=42), force=True)

        std_normal_1d = distrs.NormalDistr(dim=1)
        npt.assert_almost_equal(std_normal_1d.mean, 0.)
        npt.assert_almost_equal(std_normal_1d.cov, 1.)
        npt.assert_almost_equal(std_normal_1d.vol, 1.)
        
        sample = std_normal_1d.sample()
        self.assertEqual(np.shape(sample), (1, 1))
        npt.assert_almost_equal(sample, [[ 0.49671415]])
        
        sample = std_normal_1d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 1))
        npt.assert_almost_equal(sample, [
                [-0.1382643 ],
                [ 0.64768854],
                [ 1.52302986],
                [-0.23415337],
                [-0.23413696],
                [ 1.57921282],
                [ 0.76743473],
                [-0.46947439],
                [ 0.54256004],
                [-0.46341769]])
        
        std_normal_2d = distrs.NormalDistr(dim=2)
        npt.assert_almost_equal(std_normal_2d.mean, npu.col_of(2, 0.))
        npt.assert_almost_equal(std_normal_2d.cov, np.eye(2))
        npt.assert_almost_equal(std_normal_2d.vol, np.eye(2))
        
        sample = std_normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [-0.46572975,  0.24196227],
                [-1.91328024, -1.72491783],
                [-0.56228753, -1.01283112],
                [ 0.31424733, -0.90802408],
                [-1.4123037 ,  1.46564877],
                [-0.2257763 ,  0.0675282 ],
                [-1.42474819, -0.54438272],
                [ 0.11092259, -1.15099358],
                [ 0.37569802, -0.60063869],
                [-0.29169375, -0.60170661]])

        sd1 = 3.
        sd2 = 4.
        cor = -.5

        normal_2d = distrs.NormalDistr(mean=[1., 2.], cov=stats.make_cov_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal_2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(normal_2d.cov, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(normal_2d.vol, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])

        sample = normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [-3.09581812,  9.06710684],
                [ 5.00400357, -1.07912958],
                [ 4.10821238, -2.42324481],
                [ 2.58989516, -7.05256838],
                [ 2.07671635,  3.61955714],
                [ 0.38728403,  2.5195548 ],
                [-1.36010204, -0.88681309],
                [ 1.63968707, -1.29329703],
                [-0.61960168,  6.44566548],
                [ 5.53451941, -4.36131646]])

        normal_2d = distrs.NormalDistr(mean=[1., 2.], vol=stats.make_vol_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(normal_2d.mean, npu.col(1., 2.))
        npt.assert_almost_equal(normal_2d.cov, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(normal_2d.vol, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])

        sample = normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [ 0.4624506 , -0.26705979],
                [ 1.76344545,  5.54913479],
                [-2.76038957,  4.57609973],
                [ 2.35608833,  1.20642031],
                [-2.1218454 ,  5.16796697],
                [-0.85307657, -0.00850715],
                [ 5.28771297, -1.62048489],
                [-2.12592264,  7.1016208 ],
                [-0.46508111,  6.26189296],
                [ 3.15543223, -0.04269231]])
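
Sampling from NormalDistr with a given cov (or vol) amounts to drawing z ~ N(0, I) and forming mean + vol @ z, where vol is the lower-triangular factor of cov; each row of the returned sample is one draw. A hedged plain-NumPy sketch of that construction (illustrative seed, so the numbers will not match the library's):

import numpy as np

rng = np.random.default_rng(42)

mean = np.array([1., 2.])
sd1, sd2, cor = 3., 4., -.5
vol = np.array([[sd1, 0.],
                [cor * sd2, np.sqrt(1. - cor * cor) * sd2]])

z = rng.standard_normal((10, 2))    # 10 independent standard normal draws
sample = mean + z @ vol.T           # rows are correlated draws with the requested cov

assert np.shape(sample) == (10, 2)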
Example #14
    def test_empirical_distr(self):
        rnd.random_state(np.random.RandomState(seed=42), force=True)
        
        trivial_empirical_1d = distrs.EmpiricalDistr(particles=[[0.]], weights=[1.])
        self.assertEqual(trivial_empirical_1d.particle_count, 1)
        npt.assert_almost_equal(trivial_empirical_1d.effective_particle_count, 1.)
        self.assertEqual(trivial_empirical_1d.dim, 1)
        npt.assert_almost_equal(trivial_empirical_1d.particles, np.array([[0.]]))
        npt.assert_almost_equal(trivial_empirical_1d.particle(0), np.array([[0.]]))
        npt.assert_almost_equal(trivial_empirical_1d.weights, np.array([[1.]]))
        npt.assert_almost_equal(trivial_empirical_1d.weight(0), 1.)
        npt.assert_almost_equal(trivial_empirical_1d.normalised_weights, np.array([[1.]]))
        npt.assert_almost_equal(trivial_empirical_1d.normalised_weight(0), 1.)
        self.assertEqual(trivial_empirical_1d.weight_sum, 1.)
        self.assertEqual(trivial_empirical_1d.mean, 0.)
        self.assertEqual(trivial_empirical_1d.var_n, 0.)
        npt.assert_almost_equal(trivial_empirical_1d.var_n_minus_1, np.nan)
        self.assertEqual(trivial_empirical_1d.var, 0.)
        self.assertEqual(trivial_empirical_1d.cov_n, 0.)
        npt.assert_almost_equal(trivial_empirical_1d.cov_n_minus_1, np.nan)
        self.assertEqual(trivial_empirical_1d.cov, 0.)
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            trivial_empirical_1d.vol_n
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            trivial_empirical_1d.vol_n_minus_1
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            trivial_empirical_1d.vol

        simple_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[.5, .5])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        npt.assert_almost_equal(simple_empirical_1d.particle(0), np.array([[-1.]]))
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))
        npt.assert_almost_equal(simple_empirical_1d.weight(0), .5)
        npt.assert_almost_equal(simple_empirical_1d.normalised_weights, np.array([[.5], [.5]]))
        npt.assert_almost_equal(simple_empirical_1d.normalised_weight(0), .5)
        self.assertEqual(simple_empirical_1d.weight_sum, 1.)
        self.assertEqual(simple_empirical_1d.mean, 0.)
        self.assertEqual(simple_empirical_1d.var_n, 1.)
        # "n minus 1" (unbiased) stats don't make sense as we are not using "repeat"-type weights, meaning that each
        # weight represents the number of occurrences of one observation:
        npt.assert_almost_equal(simple_empirical_1d.var_n_minus_1, np.inf)
        self.assertEqual(simple_empirical_1d.cov_n, 1.)
        # "n minus 1" (unbiased) stats don't make sense as we are not using "repeat"-type weights, meaning that each
        # weight represents the number of occurrences of one observation:
        npt.assert_almost_equal(simple_empirical_1d.cov_n_minus_1, np.inf)
        self.assertEqual(simple_empirical_1d.cov, 1.)
        self.assertEqual(simple_empirical_1d.vol_n, 1.)
        # "n minus 1" (unbiased) stats don't make sense as we are not using "repeat"-type weights, meaning that each
        # weight represents the number of occurrences of one observation:
        self.assertEqual(simple_empirical_1d.vol_n_minus_1, np.inf)
        self.assertEqual(simple_empirical_1d.vol, 1.)

        # The weights can be specified as a one-dimensional array...
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[.5, .5])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        # ...but they come back as a (two-dimensional) column vector:
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))

        # ...alternatively, the weights can be specified as a (two-dimensional) column vector:
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[[.5], [.5]])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        # ...they always come back as a (two-dimensional) column vector:
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))

        # If the particles are specified as a one-dimensional array, they are interpreted as...
        simple_empirical_1d = distrs.EmpiricalDistr(particles=[-1., 1.], weights=[.5, .5])
        self.assertEqual(simple_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(simple_empirical_1d.effective_particle_count, 2.)
        self.assertEqual(simple_empirical_1d.dim, 1)
        # ...multiple one-dimensional particles (each row corresponds to a particle, each column to a dimension):
        npt.assert_almost_equal(simple_empirical_1d.particles, np.array([[-1.], [1.]]))
        npt.assert_almost_equal(simple_empirical_1d.weights, np.array([[.5], [.5]]))

        # Now we shall be using "repeat"-type weights:
        repeat_empirical_1d = distrs.EmpiricalDistr(particles=[[-1.], [1.]], weights=[2., 1.])
        self.assertEqual(repeat_empirical_1d.particle_count, 2)
        npt.assert_almost_equal(repeat_empirical_1d.effective_particle_count, 1.7999999999999998)
        self.assertEqual(repeat_empirical_1d.dim, 1)
        npt.assert_almost_equal(repeat_empirical_1d.particles, np.array([[-1.], [1.]]))
        npt.assert_almost_equal(repeat_empirical_1d.particle(0), np.array([[-1.]]))
        npt.assert_almost_equal(repeat_empirical_1d.weights, np.array([[2.], [1.]]))
        npt.assert_almost_equal(repeat_empirical_1d.weight(0), 2.)
        npt.assert_almost_equal(repeat_empirical_1d.normalised_weights, np.array([[ 0.6666667], [ 0.3333333]]))
        npt.assert_almost_equal(repeat_empirical_1d.normalised_weight(0), 0.6666667)
        self.assertEqual(repeat_empirical_1d.weight_sum, 3.)
        npt.assert_almost_equal(repeat_empirical_1d.mean, -0.33333333)
        npt.assert_almost_equal(repeat_empirical_1d.var_n, 0.88888889)
        npt.assert_almost_equal(repeat_empirical_1d.var_n_minus_1, 1.3333333)
        npt.assert_almost_equal(repeat_empirical_1d.cov_n, 0.88888889)
        npt.assert_almost_equal(repeat_empirical_1d.cov_n_minus_1, 1.3333333)
        npt.assert_almost_equal(repeat_empirical_1d.cov, 0.88888889)
        npt.assert_almost_equal(repeat_empirical_1d.vol_n, 0.94280904)
        npt.assert_almost_equal(repeat_empirical_1d.vol_n_minus_1, 1.15470054)
        npt.assert_almost_equal(repeat_empirical_1d.vol, 0.94280904)

        # Now we shall be using "repeat"-type weights. There are three two-dimensional particles:
        repeat_empirical_2d = distrs.EmpiricalDistr(particles=[[-2., 2.], [0., 0.], [1., -1.]], weights=[2., 1., 1.])
        self.assertEqual(repeat_empirical_2d.particle_count, 3)
        npt.assert_almost_equal(repeat_empirical_2d.effective_particle_count, 2.6666666666666665)
        self.assertEqual(repeat_empirical_2d.dim, 2)
        npt.assert_almost_equal(repeat_empirical_2d.particles, np.array([[-2., 2.], [0., 0.], [1., -1.]]))
        npt.assert_almost_equal(repeat_empirical_2d.particle(0), np.array([[-2.], [2.]]))
        npt.assert_almost_equal(repeat_empirical_2d.weights, np.array([[2.], [1.], [1.]]))
        npt.assert_almost_equal(repeat_empirical_2d.weight(0), 2.)
        npt.assert_almost_equal(repeat_empirical_2d.normalised_weights, np.array([[ 0.5 ], [ 0.25], [ 0.25]]))
        npt.assert_almost_equal(repeat_empirical_2d.normalised_weight(0), .5)
        self.assertEqual(repeat_empirical_2d.weight_sum, 4.)
        npt.assert_almost_equal(repeat_empirical_2d.mean, [[-0.75], [ 0.75]])
        npt.assert_almost_equal(repeat_empirical_2d.var_n, [[ 1.6875], [ 1.6875]])
        npt.assert_almost_equal(repeat_empirical_2d.var_n_minus_1, [[ 2.25], [ 2.25]])
        npt.assert_almost_equal(repeat_empirical_2d.cov_n, [[ 1.6875, -1.6875], [-1.6875,  1.6875]])
        npt.assert_almost_equal(repeat_empirical_2d.cov_n_minus_1, [[ 2.25, -2.25], [-2.25,  2.25]])
        npt.assert_almost_equal(repeat_empirical_2d.cov, [[ 1.6875, -1.6875], [-1.6875,  1.6875]])
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            repeat_empirical_2d.vol_n
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            repeat_empirical_2d.vol_n_minus_1
        with self.assertRaises(np.linalg.LinAlgError):  # Matrix is not positive definite
            repeat_empirical_2d.vol
        
        normal_distr = distrs.NormalDistr(mean=[10., 100.], cov=[[4., -3.], [-3., 9.]])
        particles = normal_distr.sample(size=100)
        approx_normal_empirical_2d = distrs.EmpiricalDistr(particles=particles, weights=np.ones((100,)))
        self.assertEqual(approx_normal_empirical_2d.particle_count, 100)
        npt.assert_almost_equal(approx_normal_empirical_2d.effective_particle_count, 100.)
        self.assertEqual(approx_normal_empirical_2d.dim, 2)
        npt.assert_almost_equal(approx_normal_empirical_2d.particles, particles)
        npt.assert_almost_equal(approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(approx_normal_empirical_2d.weights, npu.col(*np.ones((100,))))
        npt.assert_almost_equal(approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100,))) / 100.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weight(0), .01)
        self.assertEqual(approx_normal_empirical_2d.weight_sum, 100.)
        npt.assert_almost_equal(approx_normal_empirical_2d.mean, [[ 10.2077457], [ 99.6856645]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n, [[ 3.3516275], [ 6.7649298]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n_minus_1, [[ 3.3854823], [ 6.8332624]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n, [[ 3.3516275, -1.8258307], [-1.8258307,  6.7649298]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n_minus_1, [[ 3.3854823, -1.8442735], [-1.8442735,  6.8332624]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov, [[ 3.3516275, -1.8258307], [-1.8258307,  6.7649298]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n, [[ 1.8307451,  0.       ], [-0.9973157,  2.4021431]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n_minus_1, [[ 1.839968 ,  0.       ], [-1.00234  ,  2.4142446]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol, [[ 1.8307451,  0.       ], [-0.9973157,  2.4021431]])

        # Using more particles more faithfully approximates the mean and covariance of the normal distribution:
        normal_distr = distrs.NormalDistr(mean=[10., 100.], cov=[[4., -3.], [-3., 9.]])
        particles = normal_distr.sample(size=100000)
        approx_normal_empirical_2d = distrs.EmpiricalDistr(particles=particles, weights=np.ones((100000,)))
        self.assertEqual(approx_normal_empirical_2d.particle_count, 100000)
        npt.assert_almost_equal(approx_normal_empirical_2d.effective_particle_count, 100000.)
        self.assertEqual(approx_normal_empirical_2d.dim, 2)
        npt.assert_almost_equal(approx_normal_empirical_2d.particles, particles)
        npt.assert_almost_equal(approx_normal_empirical_2d.particle(0), npu.col(*particles[0]))
        npt.assert_almost_equal(approx_normal_empirical_2d.weights, npu.col(*np.ones((100000,))))
        npt.assert_almost_equal(approx_normal_empirical_2d.weight(0), 1.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weights, npu.col(*np.ones((100000,))) / 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.normalised_weight(0), .00001)
        self.assertEqual(approx_normal_empirical_2d.weight_sum, 100000.)
        npt.assert_almost_equal(approx_normal_empirical_2d.mean, [[ 9.9863195], [ 100.0145412]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n, [[ 3.9901799], [ 9.0390325]])
        npt.assert_almost_equal(approx_normal_empirical_2d.var_n_minus_1, [[ 3.9902198], [ 9.0391229]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n, [[ 3.9901799, -3.0120428], [-3.0120428,  9.0390325]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov_n_minus_1, [[ 3.9902198, -3.0120729], [-3.0120729,  9.0391229]])
        npt.assert_almost_equal(approx_normal_empirical_2d.cov, [[ 3.9901799, -3.0120428], [-3.0120428,  9.0390325]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n, [[ 1.9975435,  0.       ], [-1.5078735,  2.6010287]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol_n_minus_1, [[ 1.9975535,  0.       ], [-1.507881 ,  2.6010417]])
        npt.assert_almost_equal(approx_normal_empirical_2d.vol, [[ 1.9975435,  0.       ], [-1.5078735,  2.6010287]])
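
The effective_particle_count values asserted in this test (about 1.8 for weights [2., 1.] and about 2.667 for [2., 1., 1.]) are consistent with the usual effective sample size formula 1 / sum(w_i**2) over normalised weights. A quick plain-NumPy check:

import numpy as np

def effective_particle_count(weights):
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()
    return 1. / np.sum(w ** 2)

print(effective_particle_count([2., 1.]))        # ~1.8
print(effective_particle_count([2., 1., 1.]))    # ~2.6666667
print(effective_particle_count(np.ones(100)))    # 100.0 for equal weights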
Example #15
    def test_log_normal_distr(self):
        rnd.random_state(np.random.RandomState(seed=42), force=True)

        std_log_normal_1d = distrs.LogNormalDistr(dim=1)
        npt.assert_almost_equal(std_log_normal_1d.mean, [[ 1.6487213]])
        npt.assert_almost_equal(std_log_normal_1d.cov, [[ 4.6707743]])
        npt.assert_almost_equal(std_log_normal_1d.vol, [[ 2.1611974]])
        
        sample = std_log_normal_1d.sample(size=1)
        self.assertEqual(np.shape(sample), (1, 1))
        npt.assert_almost_equal(sample, [[ 1.6433127]])
        
        sample = std_log_normal_1d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 1))
        npt.assert_almost_equal(sample, [
                [ 0.87086849],
                [ 1.91111824],
                [ 4.58609939],
                [ 0.79124045],
                [ 0.79125344],
                [ 4.85113557],
                [ 2.15423297],
                [ 0.62533086],
                [ 1.72040554],
                [ 0.62912979]])
        
        std_log_normal_2d = distrs.LogNormalDistr(dim=2)
        npt.assert_almost_equal(std_log_normal_2d.mean, [
                [ 1.6487213],
                [ 1.6487213]])
        npt.assert_almost_equal(std_log_normal_2d.cov, [
                [ 4.6707743,  0.       ],
                [ 0.       ,  4.6707743]])
        npt.assert_almost_equal(std_log_normal_2d.vol, [
                [ 2.1611974,  0.       ],
                [ 0.       ,  2.1611974]])
        
        sample = std_log_normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [ 0.62767689,  1.27374614],
                [ 0.14759544,  0.17818769],
                [ 0.5699039 ,  0.36318929],
                [ 1.36922835,  0.40332037],
                [ 0.2435815 ,  4.33035173],
                [ 0.79789657,  1.06986043],
                [ 0.24056903,  0.58019982],
                [ 1.11730841,  0.31632232],
                [ 1.45600738,  0.54846123],
                [ 0.74699727,  0.54787583]])

        sd1 = .4
        sd2 = .4
        cor = -.5

        log_normal_2d = distrs.LogNormalDistr(mean_of_log=[1., 1.3], cov_of_log=stats.make_cov_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(log_normal_2d.mean_of_log, npu.col(1., 1.3))
        npt.assert_almost_equal(log_normal_2d.cov_of_log, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(log_normal_2d.vol_of_log, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])
        npt.assert_almost_equal(log_normal_2d.mean, [[ 2.9446796], [ 3.9749016]])
        npt.assert_almost_equal(log_normal_2d.cov, [[ 1.5045366, -0.8999087], [-0.8999087,  2.7414445]])
        npt.assert_almost_equal(log_normal_2d.vol, [[ 1.2265956,  0.       ], [-0.7336637,  1.484312 ]])

        sample = log_normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [ 1.42711164,  6.95143797],
                [ 4.62238496,  2.99848502],
                [ 4.32618186,  2.50643161],
                [ 4.10913455,  1.42691268],
                [ 2.94320341,  4.55346303],
                [ 2.50304159,  3.80468825],
                [ 2.24476532,  2.45957906],
                [ 3.18112082,  2.60781028],
                [ 2.01884543,  5.66848303],
                [ 5.34174201,  2.12565878]])

        log_normal_2d = distrs.LogNormalDistr(mean_of_log=[1., 1.3], vol_of_log=stats.make_vol_2d(sd1=sd1, sd2=sd2, cor=cor))
        npt.assert_almost_equal(log_normal_2d.mean_of_log, npu.col(1., 1.3))
        npt.assert_almost_equal(log_normal_2d.cov_of_log, [[sd1*sd1, cor*sd1*sd2], [cor*sd1*sd2, sd2*sd2]])
        npt.assert_almost_equal(log_normal_2d.vol_of_log, [[sd1, 0.], [cor*sd2, np.sqrt(1.-cor*cor)*sd2]])
        npt.assert_almost_equal(log_normal_2d.mean, npu.col(2.9446796, 3.9749016))
        npt.assert_almost_equal(log_normal_2d.cov, [[ 1.5045366, -0.8999087], [-0.8999087,  2.7414445]])
        npt.assert_almost_equal(log_normal_2d.vol, [[ 1.2265956,  0.       ], [-0.7336637,  1.484312 ]])

        sample = log_normal_2d.sample(size=10)
        self.assertEqual(np.shape(sample), (10, 2))
        npt.assert_almost_equal(sample, [
                [ 2.71288329,  2.80448293],
                [ 2.70285608,  5.57387658],
                [ 1.66454464,  4.28346127],
                [ 3.23285936,  3.52238521],
                [ 1.76160691,  4.67441442],
                [ 2.32343609,  2.75776026],
                [ 4.8398479 ,  2.85230385],
                [ 1.67494888,  5.78583855],
                [ 2.06409776,  5.58431178],
                [ 3.6537541 ,  3.15441508]])
Example #16
 def test_col(self):
     col = npu.col(1., 1., 2., 5., 14.)
     npt.assert_almost_equal(col, np.array(([[1.], [1.], [2.], [5.], [14.]])))
Example #17
 def create_2d(mean1, mean2, sd1, sd2, cor):
     return WienerProcess(npu.col(mean1, mean2),
                          stats.make_vol_2d(sd1, sd2, cor))
Example #18
    def testkalmanfiltermultid(self):
        t0 = dt.datetime(2017, 5, 12, 16, 18, 25, 204000)

        process1 = proc.WienerProcess.create_from_cov(mean=3., cov=25.)
        process2 = proc.WienerProcess.create_from_cov(mean=[1., 4.],
                                                      cov=[[36.0, -9.0],
                                                           [-9.0, 25.0]])

        kf = kalman.KalmanFilter(t0,
                                 state_distr=N(mean=[100.0, 120.0, 130.0],
                                               cov=[[250.0, 0.0, 0.0],
                                                    [0.0, 360.0, 0.0],
                                                    [0.0, 0.0, 250.0]]),
                                 process=(process1, process2))

        state_observable = kf.create_observable(
            kalman.KalmanFilterObsModel.create(1.0, np.eye(2)), process1,
            process2)
        coord0_observable = kf.create_observable(
            kalman.KalmanFilterObsModel.create(1.), process1)
        coord1_observable = kf.create_observable(
            kalman.KalmanFilterObsModel.create(npu.row(1., 0.)), process2)
        coord2_observable = kf.create_observable(
            kalman.KalmanFilterObsModel.create(npu.row(0., 1.)), process2)
        sum_observable = kf.create_observable(
            kalman.KalmanFilterObsModel.create(npu.row(1., 1., 1.)), process1,
            process2)
        lin_comb_observable = kf.create_observable(
            kalman.KalmanFilterObsModel.create(npu.row(2., 0., -3.)), process1,
            process2)

        t1 = t0 + dt.timedelta(hours=1)

        predicted_obs1_prior = state_observable.predict(t1)
        npt.assert_almost_equal(
            predicted_obs1_prior.distr.mean,
            npu.col(100.0 + 3.0 / 24.0, 120.0 + 1.0 / 24.0,
                    130.0 + 4.0 / 24.0))
        npt.assert_almost_equal(predicted_obs1_prior.distr.cov,
                                [[250.0 + 25.0 / 24.0, 0.0, 0.0],
                                 [0.0, 360.0 + 36.0 / 24.0, -9.0 / 24.0],
                                 [0.0, -9.0 / 24.0, 250 + 25.0 / 24.0]])
        npt.assert_almost_equal(predicted_obs1_prior.cross_cov,
                                predicted_obs1_prior.distr.cov)

        state_observable.observe(time=t1,
                                 obs=N(mean=[100.35, 121.0, 135.0],
                                       cov=[[100.0, 0.0,
                                             0.0], [0.0, 400.0, 0.0],
                                            [0.0, 0.0, 100.0]]))

        predicted_obs1_posterior = state_observable.predict(t1)
        npt.assert_almost_equal(
            predicted_obs1_posterior.distr.mean,
            npu.col(100.285905044, 120.493895183, 133.623010239))
        npt.assert_almost_equal(
            predicted_obs1_posterior.distr.cov,
            [[71.513353115, 0.0, 0.0], [0.0, 189.888267669, -0.056112925],
             [0.0, -0.056112925, 71.513338130]])
        npt.assert_almost_equal(predicted_obs1_posterior.cross_cov,
                                predicted_obs1_posterior.distr.cov)

        predicted_obs1_0 = coord0_observable.predict(t1)
        npt.assert_almost_equal(predicted_obs1_0.distr.mean, 100.285905044)
        npt.assert_almost_equal(predicted_obs1_0.distr.cov, 71.513353115)
        npt.assert_almost_equal(predicted_obs1_0.cross_cov,
                                npu.row(71.513353115, 0.0, 0.0))

        predicted_obs1_1 = coord1_observable.predict(t1)
        npt.assert_almost_equal(predicted_obs1_1.distr.mean, 120.493895183)
        npt.assert_almost_equal(predicted_obs1_1.distr.cov, 189.888267669)
        npt.assert_almost_equal(predicted_obs1_1.cross_cov,
                                npu.row(0.0, 189.888267669, -0.056112925))

        predicted_obs1_2 = coord2_observable.predict(t1)
        npt.assert_almost_equal(predicted_obs1_2.distr.mean, 133.623010239)
        npt.assert_almost_equal(predicted_obs1_2.distr.cov, 71.513338130)
        npt.assert_almost_equal(predicted_obs1_2.cross_cov,
                                npu.row(0.0, -0.056112925, 71.513338130))

        predicted_obs1_sum = sum_observable.predict(t1)
        npt.assert_almost_equal(predicted_obs1_sum.distr.mean, 354.402810466)
        npt.assert_almost_equal(predicted_obs1_sum.distr.cov, 332.802733064)
        npt.assert_almost_equal(
            predicted_obs1_sum.cross_cov,
            npu.row(71.513353115, 189.832154744, 71.457225204))

        predicted_obs1_lin_comb = lin_comb_observable.predict(t1)
        npt.assert_almost_equal(predicted_obs1_lin_comb.distr.mean,
                                -200.297220628)
        npt.assert_almost_equal(predicted_obs1_lin_comb.distr.cov,
                                929.673455633)
        npt.assert_almost_equal(
            predicted_obs1_lin_comb.cross_cov,
            npu.row(143.026706231, 0.168338776, -214.540014390))

        t2 = t1 + dt.timedelta(minutes=30)

        coord1_observable.observe(time=t2, obs=N(mean=125.25, cov=4.))

        predicted_obs2_1 = coord1_observable.predict(t2)
        npt.assert_almost_equal(predicted_obs2_1.distr.mean, 125.152685704)
        npt.assert_almost_equal(predicted_obs2_1.distr.cov, 3.917796226)
        npt.assert_almost_equal(predicted_obs2_1.cross_cov,
                                npu.row(0.0, 3.917796226, -0.005006475))

        t3 = t2 + dt.timedelta(minutes=30)

        predicted_obs3_prior_sum = sum_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_prior_sum.distr.mean,
                                359.368174232)
        npt.assert_almost_equal(predicted_obs3_prior_sum.distr.cov,
                                149.392502944)
        npt.assert_almost_equal(
            predicted_obs3_prior_sum.cross_cov,
            npu.row(72.555019782, 4.475289751, 72.36219341))

        predicted_obs3_prior0 = coord0_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_prior0.distr.mean,
                                100.410905044)
        npt.assert_almost_equal(predicted_obs3_prior0.distr.cov, 72.555019782)
        npt.assert_almost_equal(predicted_obs3_prior0.cross_cov,
                                npu.row(72.555019782, 0.0, 0.0))
        predicted_obs3_prior1 = coord1_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_prior1.distr.mean,
                                125.173519037)
        npt.assert_almost_equal(predicted_obs3_prior1.distr.cov, 4.667796226)
        npt.assert_almost_equal(predicted_obs3_prior1.cross_cov,
                                npu.row(0.0, 4.667796226, -0.192506475))
        predicted_obs3_prior2 = coord2_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_prior2.distr.mean,
                                133.783750150)
        npt.assert_almost_equal(predicted_obs3_prior2.distr.cov, 72.554699886)
        npt.assert_almost_equal(predicted_obs3_prior2.cross_cov,
                                npu.row(0.0, -0.192506475, 72.554699886))

        sum_observable.observe(time=t3, obs=N(mean=365.00, cov=9.))

        predicted_obs3_posterior_sum = sum_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_posterior_sum.distr.mean,
                                364.679994753)
        npt.assert_almost_equal(predicted_obs3_posterior_sum.distr.cov,
                                8.488612159)
        npt.assert_almost_equal(predicted_obs3_posterior_sum.cross_cov,
                                npu.row(4.122639429, 0.254289862, 4.111682867))
        predicted_obs3_posterior0 = coord0_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_posterior0.distr.mean,
                                102.990681374)
        npt.assert_almost_equal(predicted_obs3_posterior0.distr.cov,
                                39.319665849)
        npt.assert_almost_equal(predicted_obs3_posterior0.cross_cov,
                                npu.row(39.319665849, 0.0, 0.0))
        predicted_obs3_posterior1 = coord1_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_posterior1.distr.mean,
                                125.332643059)
        npt.assert_almost_equal(predicted_obs3_posterior1.distr.cov,
                                4.541349469)
        npt.assert_almost_equal(predicted_obs3_posterior1.cross_cov,
                                npu.row(0.0, 4.541349469, -2.237058941))
        predicted_obs3_posterior2 = coord2_observable.predict(t3)
        npt.assert_almost_equal(predicted_obs3_posterior2.distr.mean,
                                136.356670319)
        npt.assert_almost_equal(predicted_obs3_posterior2.distr.cov,
                                39.495767563)
        npt.assert_almost_equal(predicted_obs3_posterior2.cross_cov,
                                npu.row(0.0, -2.237058941, 39.495767563))
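
The expectations in this test follow the standard Kalman filter predict/update recursions. A hedged plain-NumPy sketch of the first predict and update for the three-dimensional state used above (full-state observation, H = I); it reproduces the prior mean/covariance and the posterior mean asserted at t1, but it is not the library's kalman.KalmanFilter API:

import numpy as np

# Prior state at t0: mean m and covariance P.
m = np.array([[100.], [120.], [130.]])
P = np.diag([250., 360., 250.])

# Predict over dt = 1 hour for the Wiener processes above (drift and process covariance per day).
dt = 1. / 24.
drift = np.array([[3.], [1.], [4.]])
Q = np.array([[25., 0., 0.],
              [0., 36., -9.],
              [0., -9., 25.]])
m_pred = m + drift * dt                      # prior mean at t1
P_pred = P + Q * dt                          # prior covariance at t1

# Update with a noisy full-state observation of mean z and covariance R.
H = np.eye(3)
z = np.array([[100.35], [121.], [135.]])
R = np.diag([100., 400., 100.])
S = H @ P_pred @ H.T + R                     # innovation covariance
K = P_pred @ H.T @ np.linalg.inv(S)          # Kalman gain
m_post = m_pred + K @ (z - H @ m_pred)       # ~ [[100.2859], [120.4939], [133.6230]]
P_post = (np.eye(3) - K @ H) @ P_pred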
Example #19
 def create_2d(pct_drift1, pct_drift2, pct_sd1, pct_sd2, pct_cor):
     return GeometricBrownianMotion(
         npu.col(pct_drift1, pct_drift2),
         stats.make_vol_2d(pct_sd1, pct_sd2, pct_cor))
Example #20
 def create2d(mean1, mean2, sd1, sd2, cor):
     return WienerProcess(npu.col(mean1, mean2),
                          distrs.NormalDistr.makevol2d(sd1, sd2, cor))