def __init__(self, pct_drift=None, pct_vol=None, time_unit=dt.timedelta(days=1)):
    if pct_drift is None and pct_vol is None:
        pct_drift = 0.
        pct_vol = 1.
    self._pct_drift, self._pct_vol = None, None
    if pct_drift is not None:
        self._pct_drift = npu.to_ndim_2(pct_drift, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._pct_drift)
    if pct_vol is not None:
        self._pct_vol = npu.to_ndim_2(pct_vol, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._pct_vol)
    if self._pct_drift is None: self._pct_drift = npu.col_of(process_dim, 0.)
    if self._pct_vol is None: self._pct_vol = np.eye(process_dim)
    npc.check_col(self._pct_drift)
    npc.check_nrow(self._pct_drift, process_dim)
    npc.check_nrow(self._pct_vol, process_dim)
    noise_dim = npu.ncol(self._pct_vol)
    self._pct_cov = stats.vol_to_cov(self._pct_vol)
    npu.make_immutable(self._pct_drift)
    npu.make_immutable(self._pct_vol)
    npu.make_immutable(self._pct_cov)
    self._to_string_helper_GeometricBrownianMotion = None
    self._str_GeometricBrownianMotion = None
    super(GeometricBrownianMotion, self).__init__(
        process_dim=process_dim, noise_dim=noise_dim,
        drift=lambda t, x: self._pct_drift * x,
        diffusion=lambda t, x: x * self._pct_vol,
        time_unit=time_unit)
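# Illustrative sketch (not part of the library): a standalone Euler-Maruyama simulation of the
# one-dimensional SDE encoded by the drift/diffusion lambdas above, dS_t = mu * S_t dt + sigma * S_t dW_t.
# The parameter values, step count and seed below are arbitrary choices for the example.
import numpy as np

def simulate_gbm_euler(s0=100., mu=.05, sigma=.2, n_steps=250, dt_=1./250., random_state=None):
    random_state = np.random.RandomState(42) if random_state is None else random_state
    s = np.empty(n_steps + 1)
    s[0] = s0
    for i in range(n_steps):
        dw = random_state.normal(scale=np.sqrt(dt_))
        # drift(t, x) = mu * x, diffusion(t, x) = sigma * x, as in the constructor above
        s[i + 1] = s[i] + mu * s[i] * dt_ + sigma * s[i] * dw
    return s

path = simulate_gbm_euler()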
def __init__(self, mean=None, vol=None):
    if mean is None and vol is None:
        mean = 0.
        vol = 1.
    self.__mean, self.__vol = None, None
    if mean is not None:
        self.__mean = npu.tondim2(mean, ndim1tocol=True, copy=True)
        processdim = npu.nrow(self.__mean)
    if vol is not None:
        self.__vol = npu.tondim2(vol, ndim1tocol=True, copy=True)
        processdim = npu.nrow(self.__vol)
    if self.__mean is None: self.__mean = npu.colof(processdim, 0.)
    if self.__vol is None: self.__vol = np.eye(processdim)
    npc.checkcol(self.__mean)
    npc.checknrow(self.__mean, processdim)
    npc.checknrow(self.__vol, processdim)
    noisedim = npu.ncol(self.__vol)
    self.__cov = np.dot(self.__vol, self.__vol.T)
    npu.makeimmutable(self.__mean)
    npu.makeimmutable(self.__vol)
    npu.makeimmutable(self.__cov)
    super(WienerProcess, self).__init__(
        processdim=processdim, noisedim=noisedim,
        drift=lambda t, x: self.__mean,
        diffusion=lambda t, x: self.__vol)
def __init__(self, mean=None, vol=None):
    if mean is None and vol is None:
        mean = 0.
        vol = 1.
    self._mean, self._vol = None, None
    if mean is not None:
        self._mean = npu.to_ndim_2(mean, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._mean)
    if vol is not None:
        self._vol = npu.to_ndim_2(vol, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._vol)
    if self._mean is None: self._mean = npu.col_of(process_dim, 0.)
    if self._vol is None: self._vol = np.eye(process_dim)
    npc.check_col(self._mean)
    npc.check_nrow(self._mean, process_dim)
    npc.check_nrow(self._vol, process_dim)
    noise_dim = npu.ncol(self._vol)
    self._cov = np.dot(self._vol, self._vol.T)
    npu.make_immutable(self._mean)
    npu.make_immutable(self._vol)
    npu.make_immutable(self._cov)
    self._to_string_helper_WienerProcess = None
    self._str_WienerProcess = None
    super(WienerProcess, self).__init__(
        process_dim=process_dim, noise_dim=noise_dim,
        drift=lambda t, x: self._mean,
        diffusion=lambda t, x: self._vol)
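# Illustrative sketch (not part of the library): for the constant drift/diffusion above, increments over a
# step dt are distributed N(mean * dt, cov * dt), where cov = vol @ vol.T. The mean and vol values below are
# arbitrary example parameters for a quick Monte Carlo moment check:
import numpy as np

rs = np.random.RandomState(0)
mean = np.array([.1, -.2])               # example drift
vol = np.array([[.3, 0.], [.1, .2]])     # example volatility
cov = vol @ vol.T
dt_ = .01
increments = mean * dt_ + (vol @ rs.normal(size=(2, 100000))).T * np.sqrt(dt_)
print(increments.mean(axis=0))               # approximately mean * dt
print(np.cov(increments, rowvar=False))      # approximately cov * dt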
def test_nrow(self):
    r = npu.row(429., 5., 2., 14.)
    self.assertEqual(npu.nrow(r), 1)
    c = npu.col(429., 5., 2., 14.)
    self.assertEqual(npu.nrow(c), 4)
    m = npu.matrix_of(3, 5, 0.)
    self.assertEqual(npu.nrow(m), 3)
def __init__(self, transition=None, mean=None, vol=None):
    if transition is None and mean is None and vol is None:
        transition = 1.
        mean = 0.
        vol = 1.
    self._transition, self._mean, self._vol = None, None, None
    if transition is not None:
        self._transition = npu.to_ndim_2(transition, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._transition)
    if mean is not None:
        self._mean = npu.to_ndim_2(mean, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._mean)
    if vol is not None:
        self._vol = npu.to_ndim_2(vol, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self._vol)
    if self._transition is None: self._transition = np.eye(process_dim)
    if self._mean is None: self._mean = npu.col_of(process_dim, 0.)
    if self._vol is None: self._vol = np.eye(process_dim)
    npc.check_square(self._transition)
    npc.check_nrow(self._transition, process_dim)
    npc.check_col(self._mean)
    npc.check_nrow(self._mean, process_dim)
    npc.check_nrow(self._vol, process_dim)
    noise_dim = npu.ncol(self._vol)
    self._transition_x_2 = npu.kron_sum(self._transition, self._transition)
    self._transition_x_2_inverse = np.linalg.inv(self._transition_x_2)
    self._cov = np.dot(self._vol, self._vol.T)
    self._cov_vec = npu.vec(self._cov)
    self._cached_mean_reversion_factor = None
    self._cached_mean_reversion_factor_time_delta = None
    self._cached_mean_reversion_factor_squared = None
    self._cached_mean_reversion_factor_squared_time_delta = None
    npu.make_immutable(self._transition)
    npu.make_immutable(self._transition_x_2)
    npu.make_immutable(self._transition_x_2_inverse)
    npu.make_immutable(self._mean)
    npu.make_immutable(self._vol)
    npu.make_immutable(self._cov)
    npu.make_immutable(self._cov_vec)
    self._to_string_helper_OrnsteinUhlenbeckProcess = None
    self._str_OrnsteinUhlenbeckProcess = None
    super(OrnsteinUhlenbeckProcess, self).__init__(
        process_dim=process_dim, noise_dim=noise_dim,
        drift=lambda t, x: -np.dot(self._transition, x - self._mean),
        diffusion=lambda t, x: self._vol)
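# Illustrative note (an assumption about why the Kronecker-sum quantities are precomputed, not taken from the
# library's code): for the OU dynamics above, dX_t = -T (X_t - mu) dt + S dW_t, the stationary covariance P
# solves the Lyapunov equation T P + P T' = S S'. Column-stacking both sides gives
# (T kron-sum T) vec(P) = vec(S S'), which is the role the cached transition_x_2_inverse and cov_vec would
# naturally play. A standalone numpy check with arbitrary example matrices:
import numpy as np

T = np.array([[1.5, .2], [0., .8]])   # example transition matrix (stable)
S = np.array([[.3, 0.], [.1, .2]])    # example volatility
Q = S @ S.T
kron_sum = np.kron(T, np.eye(2)) + np.kron(np.eye(2), T)
vec = lambda a: a.reshape(-1, order='F')             # column-stacking vectorization
P = np.linalg.solve(kron_sum, vec(Q)).reshape((2, 2), order='F')
print(np.allclose(T @ P + P @ T.T, Q))               # True: P satisfies the Lyapunov equation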
def __init__(self, initial_value=None, final_value=None, initial_time=0., final_time=1., vol=None, time_unit=dt.timedelta(days=1)):
    process_dim = 1
    self.__initial_value = None
    self.__final_value = None
    if initial_value is not None:
        self.__initial_value = npu.to_ndim_2(initial_value, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self.__initial_value)
    if final_value is not None:
        self.__final_value = npu.to_ndim_2(final_value, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self.__final_value)
    if self.__initial_value is None: self.__initial_value = npu.col_of(process_dim, 0.)
    if self.__final_value is None: self.__final_value = npu.col_of(process_dim, 0.)
    self.__vol = None
    if vol is not None:
        self.__vol = npu.to_ndim_2(vol, ndim_1_to_col=True, copy=True)
        process_dim = npu.nrow(self.__vol)
    if self.__vol is None: self.__vol = np.eye(process_dim)
    self.__initial_time = initial_time
    self.__final_time = final_time
    npc.check_col(self.__initial_value)
    npc.check_col(self.__final_value)
    npc.check_nrow(self.__initial_value, process_dim)
    npc.check_nrow(self.__final_value, process_dim)
    noise_dim = npu.ncol(self.__vol)
    self.__cov = stats.vol_to_cov(self.__vol)
    npu.make_immutable(self.__initial_value)
    npu.make_immutable(self.__final_value)
    npu.make_immutable(self.__vol)
    npu.make_immutable(self.__cov)
    self._to_string_helper_BrownianBridge = None
    self._str_BrownianBridge = None
    super(BrownianBridge, self).__init__(
        process_dim=process_dim, noise_dim=noise_dim,
        drift=lambda t, x: (self.__final_value - x) / (self.__final_time - t),
        diffusion=lambda t, x: self.__vol,
        time_unit=time_unit)
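# Illustrative sketch (not part of the library): the drift (final_value - x) / (final_time - t) above pulls the
# path towards final_value as t approaches final_time. A standalone Euler simulation of a 1-D bridge with
# arbitrary example parameters:
import numpy as np

def simulate_brownian_bridge(initial_value=0., final_value=1., final_time=1., n_steps=1000, sigma=1., random_state=None):
    random_state = np.random.RandomState(0) if random_state is None else random_state
    dt_ = final_time / n_steps
    x, t = initial_value, 0.
    for _ in range(n_steps - 1):  # stop one step short of final_time to avoid dividing by zero
        drift = (final_value - x) / (final_time - t)
        x += drift * dt_ + sigma * np.sqrt(dt_) * random_state.normal()
        t += dt_
    return x  # close to final_value for fine discretizations

print(simulate_brownian_bridge())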
def __init__(self, particles=None, weights=None, dim=None, use_n_minus_1_stats=False, sampler=None, copy=True):
    self._particles, self._weights, self._dim = None, None, None
    if particles is not None:
        self._particles = npu.to_ndim_2(particles, ndim_1_to_col=True, copy=copy)
        self._dim = npu.ncol(self._particles)
        if weights is None:
            weights = np.ones((npu.nrow(self._particles), 1))
            weights /= float(npu.nrow(self._particles))
    if weights is not None:
        checks.check_not_none(particles)
        self._weights = npu.to_ndim_2(weights, ndim_1_to_col=True, copy=copy)
        self._dim = npu.ncol(self._particles)
    if dim is not None:
        self._dim = dim
    if self._particles is not None:
        npc.check_ncol(self._particles, self._dim)
        if self._weights is not None:
            npc.check_nrow(self._weights, npu.nrow(self._particles))
    npu.make_immutable(self._particles, allow_none=True)
    npu.make_immutable(self._weights, allow_none=True)
    self._use_n_minus_1_stats = use_n_minus_1_stats
    # "n minus 1" (unbiased) stats only make sense when using "repeat"-type weights, meaning that each weight
    # represents the number of occurrences of one observation.
    #
    # See https://stats.stackexchange.com/questions/61225/correct-equation-for-weighted-unbiased-sample-covariance
    self._effective_particle_count = None
    self._weight_sum = None
    self._mean = None
    self._var_n = None
    self._var_n_minus_1 = None
    self._cov_n = None
    self._cov_n_minus_1 = None
    self._vol_n = None
    self._vol_n_minus_1 = None
    self._to_string_helper_EmpiricalDistr = None
    self._str_EmpiricalDistr = None
    super().__init__(do_not_init=True)
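# Illustrative sketch (not part of the library): with "repeat"-type (frequency) weights, the unbiased
# ("n minus 1") weighted variance sum(w * (x - xbar)^2) / (sum(w) - 1) matches the ordinary unbiased variance
# of the sample with each observation repeated w times. This mirrors the formula referenced in the comment
# above; it does not reproduce the class's internal caching. The data values are arbitrary examples.
import numpy as np

x = np.array([1., 2., 4.])
w = np.array([3., 1., 2.])                        # each weight = number of occurrences
xbar = np.sum(w * x) / np.sum(w)
var_n_minus_1 = np.sum(w * (x - xbar) ** 2) / (np.sum(w) - 1.)
expanded = np.repeat(x, w.astype(int))            # [1, 1, 1, 2, 4, 4]
print(np.isclose(var_n_minus_1, np.var(expanded, ddof=1)))  # True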
def __init__(self, transition=None, mean=None, vol=None):
    if transition is None and mean is None and vol is None:
        transition = 1.
        mean = 0.
        vol = 1.
    self.__transition, self.__mean, self.__vol = None, None, None
    if transition is not None:
        self.__transition = npu.tondim2(transition, ndim1tocol=True, copy=True)
        processdim = npu.nrow(self.__transition)
    if mean is not None:
        self.__mean = npu.tondim2(mean, ndim1tocol=True, copy=True)
        processdim = npu.nrow(self.__mean)
    if vol is not None:
        self.__vol = npu.tondim2(vol, ndim1tocol=True, copy=True)
        processdim = npu.nrow(self.__vol)
    if self.__transition is None: self.__transition = np.eye(processdim)
    if self.__mean is None: self.__mean = npu.colof(processdim, 0.)
    if self.__vol is None: self.__vol = np.eye(processdim)
    npc.checksquare(self.__transition)
    npc.checknrow(self.__transition, processdim)
    npc.checkcol(self.__mean)
    npc.checknrow(self.__mean, processdim)
    npc.checknrow(self.__vol, processdim)
    noisedim = npu.ncol(self.__vol)
    self.__transitionx2 = npu.kronsum(self.__transition, self.__transition)
    self.__transitionx2inverse = np.linalg.inv(self.__transitionx2)
    self.__cov = np.dot(self.__vol, self.__vol.T)
    self.__covvec = npu.vec(self.__cov)
    self.__cachedmeanreversionfactor = None
    self.__cachedmeanreversionfactortimedelta = None
    self.__cachedmeanreversionfactorsquared = None
    self.__cachedmeanreversionfactorsquaredtimedelta = None
    npu.makeimmutable(self.__transition)
    npu.makeimmutable(self.__transitionx2)
    npu.makeimmutable(self.__transitionx2inverse)
    npu.makeimmutable(self.__mean)
    npu.makeimmutable(self.__vol)
    npu.makeimmutable(self.__cov)
    npu.makeimmutable(self.__covvec)
    super(OrnsteinUhlenbeckProcess, self).__init__(
        processdim=processdim, noisedim=noisedim,
        drift=lambda t, x: -np.dot(self.__transition, x - self.__mean),
        diffusion=lambda t, x: self.__vol)
def test_kron_sum(self):
    a = np.array([[5., 1.], [2., 5.]])
    b = np.array([[14., 42., 1.], [132., 14., 2.], [5., 2., 42.]])
    c = npu.kron_sum(a, b)
    m = npu.nrow(a)
    self.assertEqual(npu.ncol(a), m)
    n = npu.nrow(b)
    self.assertEqual(npu.ncol(b), n)
    known_kron_sum = np.kron(a, np.eye(n)) + np.kron(np.eye(m), b)
    npt.assert_almost_equal(c, known_kron_sum)
def __init__(self, mean=None, cov=None, vol=None, dim=None, copy=True, do_not_init=False):
    if not do_not_init:
        if mean is not None and dim is not None and np.size(mean) == 1:
            mean = npu.col_of(dim, npu.to_scalar(mean))
        if mean is None and vol is None and cov is None:
            self._dim = 1 if dim is None else dim
            mean = npu.col_of(self._dim, 0.)
            cov = np.eye(self._dim)
            vol = np.eye(self._dim)
        self._dim, self._mean, self._vol, self._cov = None, None, None, None
        # TODO We don't currently check whether cov and vol are consistent, i.e. that cov = np.dot(vol, vol.T) -- should we?
        if mean is not None:
            self._mean = npu.to_ndim_2(mean, ndim_1_to_col=True, copy=copy)
            self._dim = npu.nrow(self._mean)
        if cov is not None:
            self._cov = npu.to_ndim_2(cov, ndim_1_to_col=True, copy=copy)
            self._dim = npu.nrow(self._cov)
        if vol is not None:
            self._vol = npu.to_ndim_2(vol, ndim_1_to_col=True, copy=copy)
            self._dim = npu.nrow(self._vol)
        if self._mean is None: self._mean = npu.col_of(self._dim, 0.)
        if self._cov is None and self._vol is None:
            self._cov = np.eye(self._dim)
            self._vol = np.eye(self._dim)
        npc.check_col(self._mean)
        npc.check_nrow(self._mean, self._dim)
        if self._cov is not None:
            npc.check_nrow(self._cov, self._dim)
            npc.check_square(self._cov)
        if self._vol is not None:
            npc.check_nrow(self._vol, self._dim)
        npu.make_immutable(self._mean)
        if self._cov is not None: npu.make_immutable(self._cov)
        if self._vol is not None: npu.make_immutable(self._vol)
    self._to_string_helper_WideSenseDistr = None
    self._str_WideSenseDistr = None
    super().__init__()
def __init__(self, mean_of_log=None, cov_of_log=None, vol_of_log=None, dim=None, copy=True):
    if mean_of_log is not None and dim is not None and np.size(mean_of_log) == 1:
        mean_of_log = npu.col_of(dim, npu.to_scalar(mean_of_log))
    if mean_of_log is None and vol_of_log is None and cov_of_log is None:
        self._dim = 1 if dim is None else dim
        mean_of_log = npu.col_of(self._dim, 0.)
        cov_of_log = np.eye(self._dim)
        vol_of_log = np.eye(self._dim)
    self._dim, self._mean_of_log, self._vol_of_log, self._cov_of_log = None, None, None, None
    # TODO We don't currently check whether cov_of_log and vol_of_log are consistent, i.e. that
    # cov_of_log = np.dot(vol_of_log, vol_of_log.T) -- should we?
    if mean_of_log is not None:
        self._mean_of_log = npu.to_ndim_2(mean_of_log, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._mean_of_log)
    if cov_of_log is not None:
        self._cov_of_log = npu.to_ndim_2(cov_of_log, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._cov_of_log)
    if vol_of_log is not None:
        self._vol_of_log = npu.to_ndim_2(vol_of_log, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._vol_of_log)
    if self._mean_of_log is None: self._mean_of_log = npu.col_of(self._dim, 0.)
    if self._cov_of_log is None and self._vol_of_log is None:
        self._cov_of_log = np.eye(self._dim)
        self._vol_of_log = np.eye(self._dim)
    npc.check_col(self._mean_of_log)
    npc.check_nrow(self._mean_of_log, self._dim)
    if self._cov_of_log is not None:
        npc.check_nrow(self._cov_of_log, self._dim)
        npc.check_square(self._cov_of_log)
    if self._vol_of_log is not None:
        npc.check_nrow(self._vol_of_log, self._dim)
    if self._cov_of_log is None: self._cov_of_log = stats.vol_to_cov(self._vol_of_log)
    if self._vol_of_log is None: self._vol_of_log = stats.cov_to_vol(self._cov_of_log)
    npu.make_immutable(self._mean_of_log)
    npu.make_immutable(self._cov_of_log)
    npu.make_immutable(self._vol_of_log)
    mean = np.exp(self._mean_of_log + .5 * npu.col(*[self._cov_of_log[i, i] for i in range(self._dim)]))
    cov = np.array([[np.exp(self._mean_of_log[i, 0] + self._mean_of_log[j, 0]
                            + .5 * (self._cov_of_log[i, i] + self._cov_of_log[j, j]))
                     * (np.exp(self._cov_of_log[i, j]) - 1.)
                     for j in range(self._dim)] for i in range(self._dim)])
    vol = stats.cov_to_vol(cov)
    self._to_string_helper_LogNormalDistr = None
    self._str_LogNormalDistr = None
    super().__init__(mean, cov, vol, self._dim, copy)
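# Illustrative check (not part of the library): the mean and cov computed above follow the standard
# multivariate log-normal moment formulas, mean_i = exp(mu_i + Sigma_ii / 2) and
# cov_ij = exp(mu_i + mu_j + (Sigma_ii + Sigma_jj) / 2) * (exp(Sigma_ij) - 1). A standalone Monte Carlo
# sanity check with arbitrary example parameters:
import numpy as np

rs = np.random.RandomState(1)
mu = np.array([.1, -.3])                      # example mean of log
sigma = np.array([[.04, .01], [.01, .09]])    # example cov of log (positive definite)
samples = np.exp(rs.multivariate_normal(mu, sigma, size=500000))
analytic_mean = np.exp(mu + .5 * np.diag(sigma))
analytic_cov = (np.exp(mu[:, None] + mu[None, :] + .5 * (np.diag(sigma)[:, None] + np.diag(sigma)[None, :]))
                * (np.exp(sigma) - 1.))
print(np.allclose(samples.mean(axis=0), analytic_mean, atol=1e-2))
print(np.allclose(np.cov(samples, rowvar=False), analytic_cov, atol=1e-2))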
def test_kron(self):
    a = np.array([[5., 1., 14., 2., 42.],
                  [132., 2., 429., 1., 1.],
                  [1., 2., 1430., 2., 2.]])
    b = np.array([[42., 2.],
                  [5., 1.],
                  [5., 2.],
                  [14., 132.]])
    c = np.kron(a, b)
    n = npu.nrow(a)
    p = npu.ncol(a)
    m = npu.nrow(b)
    q = npu.ncol(b)
    self.assertEqual(npu.nrow(c), m * n)
    self.assertEqual(npu.ncol(c), p * q)
    for i in range(n):
        for j in range(p):
            npt.assert_almost_equal(
                c[i * m:(i + 1) * m, j * q:(j + 1) * q],
                a[i, j] * b)
def __init__(self, mean=None, cov=None, vol=None, dim=None, copy=True):
    if mean is None and vol is None and cov is None:
        self.__dim = 1 if dim is None else dim
        mean = npu.colof(self.__dim, 0.)
        cov = np.eye(self.__dim)
        vol = np.eye(self.__dim)
    self.__dim, self.__mean, self.__vol, self.__cov = None, None, None, None
    # TODO We don't currently check whether cov and vol are consistent, i.e. that cov = np.dot(vol, vol.T) -- should we?
    if mean is not None:
        self.__mean = npu.tondim2(mean, ndim1tocol=True, copy=copy)
        self.__dim = npu.nrow(self.__mean)
    if cov is not None:
        self.__cov = npu.tondim2(cov, ndim1tocol=True, copy=copy)
        self.__dim = npu.nrow(self.__cov)
    if vol is not None:
        self.__vol = npu.tondim2(vol, ndim1tocol=True, copy=copy)
        self.__dim = npu.nrow(self.__vol)
    if self.__mean is None: self.__mean = npu.colof(self.__dim, 0.)
    if self.__cov is None and self.__vol is None:
        self.__cov = np.eye(self.__dim)
        self.__vol = np.eye(self.__dim)
    npc.checkcol(self.__mean)
    npc.checknrow(self.__mean, self.__dim)
    if self.__cov is not None:
        npc.checknrow(self.__cov, self.__dim)
        npc.checksquare(self.__cov)
    if self.__vol is not None:
        npc.checknrow(self.__vol, self.__dim)
    npu.makeimmutable(self.__mean)
    if self.__cov is not None: npu.makeimmutable(self.__cov)
    if self.__vol is not None: npu.makeimmutable(self.__vol)
    super(NormalDistr, self).__init__()
def check_nrow(arg, nrow, message='Unexpected number of rows: actual=%(actual)d, expected=%(expected)d', level=1):
    n = npu.nrow(arg)
    check(n == nrow, message=lambda: message % {'actual': n, 'expected': nrow}, level=level)
    return arg
def predict(self, time, true_value=None):
    self.filter.predict(time, true_value)
    predicted_obs = self._obs_model.predict_obs(time, self._sub_state_distr(self.filter._state_distr), self)
    cc = predicted_obs.cross_cov
    # While cc is the cross-covariance between the "observed" processes and the observation, we need the
    # cross-covariance between the full compound process and the observation. Therefore we enlarge this matrix
    # by inserting columns of zeros at appropriate indices.
    cc_nrow = npu.nrow(cc)
    cross_cov = np.zeros((cc_nrow, self.filter._state_distr.dim))
    col = 0
    for r in self._state_mean_rects:
        size = r[0].stop - r[0].start
        cross_cov[0:cc_nrow, r[0].start:r[0].start + size] = cc[0:cc_nrow, col:col + size]
        col += size
    return filtering.PredictedObs(self, time, predicted_obs.distr, cross_cov)
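# Illustrative sketch (not part of the library): the loop above scatters the columns of cc into a wider zero
# matrix according to a list of column slices. A toy standalone version with made-up slices and dimensions:
import numpy as np

cc = np.arange(6.).reshape(2, 3)                  # 2 x 3 cross-covariance of the "observed" processes
state_col_slices = [slice(1, 3), slice(5, 6)]     # hypothetical positions of those processes in a dim-7 state
cross_cov = np.zeros((cc.shape[0], 7))
col = 0
for s in state_col_slices:
    size = s.stop - s.start
    cross_cov[:, s.start:s.start + size] = cc[:, col:col + size]
    col += size
print(cross_cov)   # columns 1-2 and 5 are filled, all other columns remain zero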
def multivariate_normal(mean=None, cov=None, size=None, ndim=None, random_state=None):
    global _rs
    if ndim is None:
        if mean is not None: ndim = np.size(mean)
        elif cov is not None: ndim = npu.nrow(cov)
        else: ndim = 1
    if ndim is not None:
        if mean is None: mean = npu.ndim_1_of(ndim, 0.)
        if cov is None: cov = np.eye(ndim, ndim)
    mean = npu.to_ndim_1(mean)
    cov = npu.to_ndim_2(cov)
    npc.check_size(mean, ndim)
    npc.check_nrow(cov, ndim)
    npc.check_square(cov)
    if random_state is None: random_state = _rs()
    return random_state.multivariate_normal(mean, cov, size)
def __init__(self, mean=None, dim=None, copy=True):
    if mean is not None and dim is not None and np.size(mean) == 1:
        mean = npu.col_of(dim, npu.to_scalar(mean))
    if mean is None:
        dim = 1 if dim is None else dim
        mean = npu.col_of(dim, 0.)
    self._mean = npu.to_ndim_2(mean, ndim_1_to_col=True, copy=copy)
    if dim is None: dim = npu.nrow(self._mean)
    self._dim = dim
    npc.check_col(self._mean)
    npc.check_nrow(self._mean, self._dim)
    npu.make_immutable(self._mean)
    self._zero_cov = None
    self._to_string_helper_DiracDeltaDistr = None
    self._str_DiracDeltaDistr = None
def multivariate_lognormal(mean_of_log=0., cov_of_log=1., size=None, ndim=None, random_state=None):
    global _rs
    if ndim is None:
        if mean_of_log is not None: ndim = np.size(mean_of_log)
        elif cov_of_log is not None: ndim = npu.nrow(cov_of_log)
        else: ndim = 1
    if ndim is not None:
        if mean_of_log is None: mean_of_log = npu.ndim_1_of(ndim, 0.)
        if cov_of_log is None: cov_of_log = np.eye(ndim, ndim)
    mean_of_log = npu.to_ndim_1(mean_of_log)
    cov_of_log = npu.to_ndim_2(cov_of_log)
    npc.check_size(mean_of_log, ndim)
    npc.check_nrow(cov_of_log, ndim)
    npc.check_square(cov_of_log)
    if random_state is None: random_state = _rs()
    normal = random_state.multivariate_normal(mean_of_log, cov_of_log, size)
    return np.exp(normal)
def isnrow(arg, nrow):
    return npu.nrow(arg) == nrow
def particle_count(self):
    return npu.nrow(self._particles) if self._particles is not None else 0