def __init__(self, mean=None, cov=None, vol=None, dim=None, copy=True):
    """Initialize a (multivariate) normal distribution.

    Parameters:
        mean: mean; a scalar is broadcast to a dim-sized column when dim is given.
        cov: covariance matrix.
        vol: volatility matrix (presumably a matrix square root of cov -- consistency is not checked here).
        dim: dimensionality; inferred from mean/cov/vol when omitted.
        copy: whether to store copies of the input arrays rather than the inputs themselves.
    """
    # A scalar mean together with an explicit dim is broadcast to a dim-sized column vector.
    if mean is not None and dim is not None and np.size(mean) == 1:
        mean = npu.col_of(dim, npu.to_scalar(mean))
    # No distribution parameters at all: default to the standard normal of the requested (or unit) dimension.
    if mean is None and vol is None and cov is None:
        self._dim = 1 if dim is None else dim
        mean = npu.col_of(self._dim, 0.)
        cov = np.eye(self._dim)
        vol = np.eye(self._dim)
    # Reset everything; each attribute is (re)derived below from whichever arguments are present.
    self._dim, self._mean, self._vol, self._cov = None, None, None, None
    # TODO We don't currently check whether cov and vol are consistent, i.e. that cov = np.dot(vol, vol.T) -- should we?
    if mean is not None:
        self._mean = npu.to_ndim_2(mean, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._mean)
    if cov is not None:
        self._cov = npu.to_ndim_2(cov, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._cov)
    if vol is not None:
        self._vol = npu.to_ndim_2(vol, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._vol)
    # Fill in whatever is still missing with defaults of the inferred dimension.
    if self._mean is None:
        self._mean = npu.col_of(self._dim, 0.)
    if self._cov is None and self._vol is None:
        self._cov = np.eye(self._dim)
        self._vol = np.eye(self._dim)
    # Shape validation. NOTE(review): cov is checked square but vol is not -- confirm this is intended.
    npc.check_col(self._mean)
    npc.check_nrow(self._mean, self._dim)
    if self._cov is not None:
        npc.check_nrow(self._cov, self._dim)
        npc.check_square(self._cov)
    if self._vol is not None:
        npc.check_nrow(self._vol, self._dim)
    # Freeze the stored arrays. Note that exactly one of cov/vol may legitimately
    # remain None here (when only the other member of the pair was supplied).
    npu.make_immutable(self._mean)
    if self._cov is not None:
        npu.make_immutable(self._cov)
    if self._vol is not None:
        npu.make_immutable(self._vol)
    # Lazily built string-representation caches.
    self._to_string_helper_NormalDistr = None
    self._str_NormalDistr = None
    super(NormalDistr, self).__init__()
def __init__(self, mean=None, dim=None, copy=True):
    """Initialize a Dirac delta distribution (all mass at a single point).

    Parameters:
        mean: the location of the point mass; a scalar is broadcast to a
            dim-sized column when dim is given. Defaults to a zero column.
        dim: dimensionality; inferred from mean when omitted.
        copy: whether to store a copy of the input array.
    """
    # Broadcast a scalar mean across the requested dimension.
    if mean is not None and dim is not None and np.size(mean) == 1:
        mean = npu.col_of(dim, npu.to_scalar(mean))
    # No mean supplied: default to a zero column of the requested (or unit) dimension.
    if mean is None:
        if dim is None:
            dim = 1
        mean = npu.col_of(dim, 0.)
    self._mean = npu.to_ndim_2(mean, ndim_1_to_col=True, copy=copy)
    # Infer the dimension from the stored mean when it was not given explicitly.
    self._dim = npu.nrow(self._mean) if dim is None else dim
    npc.check_col(self._mean)
    npc.check_nrow(self._mean, self._dim)
    npu.make_immutable(self._mean)
    # Lazily constructed companions: the zero covariance and string caches.
    self._zero_cov = None
    self._to_string_helper_DiracDeltaDistr = None
    self._str_DiracDeltaDistr = None
def run(observable, obss=None, times=None, obs_covs=None, true_values=None, df=None, fun=None, return_df=False):
    """Feed a sequence of observations through a filter via its observable(s).

    Parameters:
        observable: the observable (or an iterable of observables, or a
            callable resolved per-observation by calling it with the
            observation); a single non-iterable value is repeated for every
            observation.
        obss: the observations (required, directly or via df); may be a
            string/int column key into df.
        times: observation times, or a string/int column key into df; when
            omitted entirely, Series index values or auto-incremented times
            are used.
        obs_covs: observation covariances, or a string/int column key into
            df; must not be combined with observations that are themselves
            distributions.
        true_values: true state values, or a string/int column key into df.
        df: optional pandas DataFrame that string/int keys above index into.
        fun: optional callable applied to each raw observation before it is
            observed.
        return_df: when True, also build a per-step diagnostics DataFrame.

    Returns:
        FilterRunResult wrapping the last observation result, the cumulative
        log-likelihood of the accepted observations, and the diagnostics
        DataFrame (None unless return_df is True).
    """
    if df is not None:
        # Resolve column-key arguments against the supplied DataFrame.
        if obss is not None and (checks.is_string(obss) or checks.is_int(obss)):
            obss = df[obss]
        if times is None:
            # Fall back to the observations' own index for the times.
            if isinstance(obss, pd.Series):
                times = obss.index.values
        elif checks.is_string(times) or checks.is_int(times):
            times = df[times].values
        if isinstance(obss, pd.Series):
            obss = obss.values
        if obs_covs is not None and (checks.is_string(obs_covs) or checks.is_int(obs_covs)):
            obs_covs = df[obs_covs].values
        if true_values is not None and (checks.is_string(true_values) or checks.is_int(true_values)):
            true_values = df[true_values].values
    checks.check_not_none(obss)
    # Normalize every argument to an iterable aligned element-wise with obss.
    if not checks.is_iterable_not_string(observable):
        observable = utils.xconst(observable)
    if not checks.is_iterable_not_string(obss):
        obss = [obss]
    if not checks.is_iterable_not_string(times):
        times = utils.xconst(times)
    if not checks.is_iterable_not_string(obs_covs):
        obs_covs = utils.xconst(obs_covs)
    if not checks.is_iterable_not_string(true_values):
        true_values = utils.xconst(true_values)
    obs_result = None
    cumulative_log_likelihood = 0.
    if return_df:
        # One parallel list per diagnostics column.
        time = []
        filter_name = []
        filter_type = []
        observable_name = []
        accepted = []
        obs_mean = []
        obs_cov = []
        predicted_obs_mean = []
        predicted_obs_cov = []
        cross_cov = []
        innov_mean = []
        innov_cov = []
        prior_state_mean = []
        prior_state_cov = []
        posterior_state_mean = []
        posterior_state_cov = []
        true_value = []
        log_likelihood = []
        gain = []
    last_time = None
    for an_observable, an_obs, a_time, an_obs_cov, a_true_value in zip(
            observable, obss, times, obs_covs, true_values):
        if a_time is None:
            # Auto-increment missing times, starting from 0.
            a_time = 0 if last_time is None else last_time + 1
        last_time = a_time
        if checks.is_callable(an_observable):
            an_observable = an_observable(an_obs)
        if fun is not None:
            an_obs = fun(an_obs)
        if an_obs_cov is not None:
            if isinstance(an_obs, (Obs, distrs.Distr)):
                raise ValueError(
                    'An observation covariance is provided while the observation is given by a distribution --- conflicting arguments'
                )
            an_obs = distrs.NormalDistr(an_obs, an_obs_cov)
        if return_df and len(time) == 0:
            # Record the filter's initial state as a synthetic first row.
            an_initial_state_mean = an_observable.filter.state.state_distr.mean
            an_initial_state_cov = an_observable.filter.state.state_distr.cov
            time.append(an_observable.filter.time)
            filter_name.append(an_observable.filter.name)
            filter_type.append(type(an_observable.filter))
            observable_name.append(None)
            accepted.append(None)
            obs_mean.append(None)
            obs_cov.append(None)
            predicted_obs_mean.append(None)
            predicted_obs_cov.append(None)
            cross_cov.append(None)
            innov_mean.append(None)
            innov_cov.append(None)
            prior_state_mean.append(npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            prior_state_cov.append(npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            posterior_state_mean.append(npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            posterior_state_cov.append(npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            true_value.append(None)
            log_likelihood.append(None)
            gain.append(None)
        if isinstance(an_obs, Obs):
            a_time, _ = _time_and_obs_distr(an_obs, a_time, an_observable.filter.time)
        predicted_obs = an_observable.predict(time=a_time, true_value=a_true_value)
        # Capture the state distribution before and after the observation is applied.
        a_prior_state_mean = an_observable.filter.state.state_distr.mean
        a_prior_state_cov = an_observable.filter.state.state_distr.cov
        obs_result = an_observable.observe(obs=an_obs, time=a_time, true_value=a_true_value, predicted_obs=predicted_obs)
        if obs_result.accepted:
            # Only accepted observations contribute to the cumulative log-likelihood.
            cumulative_log_likelihood += obs_result.log_likelihood
        a_posterior_state_mean = an_observable.filter.state.state_distr.mean
        a_posterior_state_cov = an_observable.filter.state.state_distr.cov
        if return_df:
            time.append(obs_result.obs.time)
            filter_name.append(an_observable.filter.name)
            filter_type.append(type(an_observable.filter))
            observable_name.append(an_observable.name)
            accepted.append(obs_result.accepted)
            obs_mean.append(npu.to_scalar(obs_result.obs.distr.mean, raise_value_error=False))
            obs_cov.append(npu.to_scalar(obs_result.obs.distr.cov, raise_value_error=False))
            predicted_obs_mean.append(npu.to_scalar(obs_result.predicted_obs.distr.mean, raise_value_error=False))
            predicted_obs_cov.append(npu.to_scalar(obs_result.predicted_obs.distr.cov, raise_value_error=False))
            cross_cov.append(npu.to_scalar(obs_result.predicted_obs.cross_cov, raise_value_error=False))
            innov_mean.append(npu.to_scalar(obs_result.innov_distr.mean, raise_value_error=False))
            innov_cov.append(npu.to_scalar(obs_result.innov_distr.cov, raise_value_error=False))
            prior_state_mean.append(npu.to_scalar(a_prior_state_mean, raise_value_error=False))
            prior_state_cov.append(npu.to_scalar(a_prior_state_cov, raise_value_error=False))
            posterior_state_mean.append(npu.to_scalar(a_posterior_state_mean, raise_value_error=False))
            posterior_state_cov.append(npu.to_scalar(a_posterior_state_cov, raise_value_error=False))
            true_value.append(npu.to_scalar(a_true_value, raise_value_error=False))
            log_likelihood.append(npu.to_scalar(obs_result.log_likelihood, raise_value_error=False))
            gain.append(obs_result.gain if hasattr(obs_result, 'gain') else None)
    df = None
    if return_df:
        # BUG FIX: the 'posterior_state_mean'/'posterior_state_cov' columns
        # previously mapped to the prior_state_* lists, so the collected
        # posterior diagnostics were silently discarded.
        df = pd.DataFrame(
            {
                'time': time,
                'filter_name': filter_name,
                'filter_type': filter_type,
                'observable_name': observable_name,
                'accepted': accepted,
                'obs_mean': obs_mean,
                'obs_cov': obs_cov,
                'predicted_obs_mean': predicted_obs_mean,
                'predicted_obs_cov': predicted_obs_cov,
                'cross_cov': cross_cov,
                'innov_mean': innov_mean,
                'innov_cov': innov_cov,
                'prior_state_mean': prior_state_mean,
                'prior_state_cov': prior_state_cov,
                'posterior_state_mean': posterior_state_mean,
                'posterior_state_cov': posterior_state_cov,
                'true_value': true_value,
                'log_likelihood': log_likelihood,
                'gain': gain
            },
            columns=('time', 'filter_name', 'filter_type', 'observable_name',
                     'accepted', 'obs_mean', 'obs_cov', 'predicted_obs_mean',
                     'predicted_obs_cov', 'cross_cov', 'innov_mean',
                     'innov_cov', 'prior_state_mean', 'prior_state_cov',
                     'posterior_state_mean', 'posterior_state_cov',
                     'true_value', 'log_likelihood', 'gain'))
    return FilterRunResult(obs_result, cumulative_log_likelihood, df)
def __init__(self, mean_of_log=None, cov_of_log=None, vol_of_log=None, dim=None, copy=True):
    """Initialize a (multivariate) log-normal distribution.

    The distribution is parameterized by the mean and covariance of the
    *logarithm* of the variable; the mean/cov/vol of the variable itself are
    derived below and passed to the superclass initializer.

    Parameters:
        mean_of_log: mean of the log; a scalar is broadcast to a dim-sized column when dim is given.
        cov_of_log: covariance matrix of the log.
        vol_of_log: volatility matrix of the log (presumably a matrix square root of cov_of_log -- consistency is not checked here).
        dim: dimensionality; inferred from the other arguments when omitted.
        copy: whether to store copies of the input arrays rather than the inputs themselves.
    """
    # A scalar mean_of_log together with an explicit dim is broadcast to a column vector.
    if mean_of_log is not None and dim is not None and np.size(mean_of_log) == 1:
        mean_of_log = npu.col_of(dim, npu.to_scalar(mean_of_log))
    # No distribution parameters at all: default to a standard normal log of the requested (or unit) dimension.
    if mean_of_log is None and vol_of_log is None and cov_of_log is None:
        self._dim = 1 if dim is None else dim
        mean_of_log = npu.col_of(self._dim, 0.)
        cov_of_log = np.eye(self._dim)
        vol_of_log = np.eye(self._dim)
    # Reset everything; each attribute is (re)derived below from whichever arguments are present.
    self._dim, self._mean_of_log, self._vol_of_log, self._cov_of_log = None, None, None, None
    # TODO We don't currently check whether cov_of_log and vol_of_log are consistent, i.e. that cov_of_log = np.dot(vol_of_log, vol_of_log.T) -- should we?
    if mean_of_log is not None:
        self._mean_of_log = npu.to_ndim_2(mean_of_log, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._mean_of_log)
    if cov_of_log is not None:
        self._cov_of_log = npu.to_ndim_2(cov_of_log, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._cov_of_log)
    if vol_of_log is not None:
        self._vol_of_log = npu.to_ndim_2(vol_of_log, ndim_1_to_col=True, copy=copy)
        self._dim = npu.nrow(self._vol_of_log)
    # Fill in whatever is still missing with defaults of the inferred dimension.
    if self._mean_of_log is None:
        self._mean_of_log = npu.col_of(self._dim, 0.)
    if self._cov_of_log is None and self._vol_of_log is None:
        self._cov_of_log = np.eye(self._dim)
        self._vol_of_log = np.eye(self._dim)
    # Shape validation. NOTE(review): cov_of_log is checked square but vol_of_log is not -- confirm intended.
    npc.check_col(self._mean_of_log)
    npc.check_nrow(self._mean_of_log, self._dim)
    if self._cov_of_log is not None:
        npc.check_nrow(self._cov_of_log, self._dim)
        npc.check_square(self._cov_of_log)
    if self._vol_of_log is not None:
        npc.check_nrow(self._vol_of_log, self._dim)
    # Unlike NormalDistr, derive the missing member of the cov/vol pair so both are always set.
    if self._cov_of_log is None:
        self._cov_of_log = stats.vol_to_cov(self._vol_of_log)
    if self._vol_of_log is None:
        self._vol_of_log = stats.cov_to_vol(self._cov_of_log)
    npu.make_immutable(self._mean_of_log)
    npu.make_immutable(self._cov_of_log)
    npu.make_immutable(self._vol_of_log)
    # Moments of the variable itself from the moments of its log:
    # E[X_i] = exp(mu_i + .5 * sigma_ii).
    mean = np.exp(
        self._mean_of_log +
        .5 * npu.col(*[self._cov_of_log[i, i] for i in range(self._dim)]))
    # Cov[X_i, X_j] = exp(mu_i + mu_j + .5 * (sigma_ii + sigma_jj)) * (exp(sigma_ij) - 1).
    cov = np.array([[
        np.exp(self._mean_of_log[i, 0] + self._mean_of_log[j, 0] +
               .5 * (self._cov_of_log[i, i] + self._cov_of_log[j, j])) *
        (np.exp(self._cov_of_log[i, j]) - 1.)
        for j in range(self._dim)
    ] for i in range(self._dim)])
    vol = stats.cov_to_vol(cov)
    # Lazily built string-representation caches.
    self._to_string_helper_LogNormalDistr = None
    self._str_LogNormalDistr = None
    super().__init__(mean, cov, vol, self._dim, copy)
def run(observable, obss=None, times=None, obs_covs=None, true_values=None, df=None, fun=None, return_df=False):
    """Feed a sequence of observations through a filter via its observable(s).

    Parameters:
        observable: the observable (or an iterable of observables, or a
            callable resolved per-observation by calling it with the
            observation); a single non-iterable value is repeated for every
            observation.
        obss: the observations (required, directly or via df); may be a
            string column name into df.
        times: observation times, or a string column name into df.
        obs_covs: observation covariances, or a string column name into df;
            must not be combined with observations that are themselves
            distributions.
        true_values: true state values, or a string column name into df.
        df: optional pandas DataFrame that string names above index into.
        fun: optional callable applied to each raw observation before it is
            observed.
        return_df: when True, return a per-step diagnostics DataFrame instead
            of the last observation result.

    Returns:
        The per-step diagnostics pandas DataFrame when return_df is True;
        otherwise the last observation result.
    """
    if df is not None:
        # Resolve string column names against the supplied DataFrame.
        if obss is not None and checks.is_string(obss):
            obss = df[obss].values
        if times is not None and checks.is_string(times):
            times = df[times].values
        if obs_covs is not None and checks.is_string(obs_covs):
            obs_covs = df[obs_covs].values
        if true_values is not None and checks.is_string(true_values):
            true_values = df[true_values].values
    checks.check_not_none(obss)
    # Normalize every argument to an iterable aligned element-wise with obss.
    if not checks.is_iterable_not_string(observable):
        observable = utils.xconst(observable)
    if not checks.is_iterable_not_string(obss):
        obss = [obss]
    if not checks.is_iterable_not_string(times):
        times = utils.xconst(times)
    if not checks.is_iterable_not_string(obs_covs):
        obs_covs = utils.xconst(obs_covs)
    if not checks.is_iterable_not_string(true_values):
        true_values = utils.xconst(true_values)
    obs_result = None
    if return_df:
        # One parallel list per diagnostics column.
        time = []
        accepted = []
        obs_mean = []
        obs_cov = []
        predicted_obs_mean = []
        predicted_obs_cov = []
        innov_mean = []
        innov_cov = []
        prior_state_mean = []
        prior_state_cov = []
        posterior_state_mean = []
        posterior_state_cov = []
        log_likelihood = []
    for an_observable, an_obs, a_time, an_obs_cov, a_true_value in zip(
            observable, obss, times, obs_covs, true_values):
        if checks.is_callable(an_observable):
            an_observable = an_observable(an_obs)
        if fun is not None:
            an_obs = fun(an_obs)
        if an_obs_cov is not None:
            if isinstance(an_obs, (Obs, distrs.Distr)):
                raise ValueError(
                    'An observation covariance is provided while the observation is given by a distribution --- conflicting arguments'
                )
            an_obs = distrs.NormalDistr(an_obs, an_obs_cov)
        if return_df and len(time) == 0:
            # Record the filter's initial state as a synthetic first row.
            an_initial_state_mean = an_observable.filter.state.state_distr.mean
            an_initial_state_cov = an_observable.filter.state.state_distr.cov
            time.append(an_observable.filter.time)
            accepted.append(None)
            obs_mean.append(None)
            obs_cov.append(None)
            predicted_obs_mean.append(None)
            predicted_obs_cov.append(None)
            innov_mean.append(None)
            innov_cov.append(None)
            prior_state_mean.append(npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            prior_state_cov.append(npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            posterior_state_mean.append(npu.to_scalar(an_initial_state_mean, raise_value_error=False))
            posterior_state_cov.append(npu.to_scalar(an_initial_state_cov, raise_value_error=False))
            log_likelihood.append(None)
        if isinstance(an_obs, Obs):
            a_time, _ = _time_and_obs_distr(an_obs, a_time, an_observable.filter.time)
        predicted_obs = an_observable.predict(time=a_time, true_value=a_true_value)
        # Capture the state distribution before and after the observation is applied.
        a_prior_state_mean = an_observable.filter.state.state_distr.mean
        a_prior_state_cov = an_observable.filter.state.state_distr.cov
        obs_result = an_observable.observe(obs=an_obs, time=a_time, true_value=a_true_value, predicted_obs=predicted_obs)
        a_posterior_state_mean = an_observable.filter.state.state_distr.mean
        a_posterior_state_cov = an_observable.filter.state.state_distr.cov
        if return_df:
            time.append(obs_result.obs.time)
            accepted.append(obs_result.accepted)
            obs_mean.append(npu.to_scalar(obs_result.obs.distr.mean, raise_value_error=False))
            obs_cov.append(npu.to_scalar(obs_result.obs.distr.cov, raise_value_error=False))
            predicted_obs_mean.append(npu.to_scalar(obs_result.predicted_obs.distr.mean, raise_value_error=False))
            predicted_obs_cov.append(npu.to_scalar(obs_result.predicted_obs.distr.cov, raise_value_error=False))
            innov_mean.append(npu.to_scalar(obs_result.innov_distr.mean, raise_value_error=False))
            innov_cov.append(npu.to_scalar(obs_result.innov_distr.cov, raise_value_error=False))
            prior_state_mean.append(npu.to_scalar(a_prior_state_mean, raise_value_error=False))
            prior_state_cov.append(npu.to_scalar(a_prior_state_cov, raise_value_error=False))
            posterior_state_mean.append(npu.to_scalar(a_posterior_state_mean, raise_value_error=False))
            posterior_state_cov.append(npu.to_scalar(a_posterior_state_cov, raise_value_error=False))
            log_likelihood.append(npu.to_scalar(obs_result.log_likelihood, raise_value_error=False))
    if return_df:
        # BUG FIX: the 'posterior_state_mean'/'posterior_state_cov' columns
        # previously mapped to the prior_state_* lists, so the collected
        # posterior diagnostics were silently discarded.
        return pd.DataFrame(
            {
                'time': time,
                'accepted': accepted,
                'obs_mean': obs_mean,
                'obs_cov': obs_cov,
                'predicted_obs_mean': predicted_obs_mean,
                'predicted_obs_cov': predicted_obs_cov,
                'innov_mean': innov_mean,
                'innov_cov': innov_cov,
                'prior_state_mean': prior_state_mean,
                'prior_state_cov': prior_state_cov,
                'posterior_state_mean': posterior_state_mean,
                'posterior_state_cov': posterior_state_cov,
                'log_likelihood': log_likelihood
            },
            columns=('time', 'accepted', 'obs_mean', 'obs_cov',
                     'predicted_obs_mean', 'predicted_obs_cov', 'innov_mean',
                     'innov_cov', 'prior_state_mean', 'prior_state_cov',
                     'posterior_state_mean', 'posterior_state_cov',
                     'log_likelihood'))
    return obs_result