def __init__(self, time, state_distr, process, approximate_distr=False, name=None, pype=None, pype_options=frozenset(filtering.FilterPypeOptions)):
    """Initialise the filter with an initial time, prior state distribution and process model(s).

    Args:
        time: Initial filter time.
        state_distr: Prior state distribution; must be an instance of N.
        process: A single MarkovProcess or an iterable of them.
        approximate_distr: Whether an approximate distribution may be used.
        name: Optional name forwarded to the superclass.
        pype: Optional pype to which filter states are sent.
        pype_options: FilterPypeOptions controlling what gets sent to the pype.
    """
    super().__init__(name)
    self._pype = pype
    # Pype options are only meaningful when both a pype and options were supplied.
    if pype is None or pype_options is None:
        self._pype_options = frozenset()
    else:
        self._pype_options = frozenset(pype_options)
    # Accept either a single process or an iterable of processes.
    if not checks.is_iterable(process):
        process = (process,)
    checks.check_instance(state_distr, N)
    validated_processes = checks.check_iterable_over_instances(process, proc.MarkovProcess)
    self._time = time
    self._state_distr = state_distr
    self._is_posterior = False
    self._processes = tuple(validated_processes)
    self._approximate_distr = approximate_distr
    # Lazily-built caches for string representations.
    self._to_string_helper_KalmanFilter = None
    self._str_KalmanFilter = None
    # Publish the initial (prior) state if the caller asked for it.
    if filtering.FilterPypeOptions.PRIOR_STATE in self._pype_options:
        self._pype.send(self.state)
def __init__(self, filter, name, obs_model, observed_processes, *args, **kwargs):
    """Initialise an observable bound to a subset of a particle filter's processes.

    Args:
        filter: The owning particle filter (note: parameter name shadows the
            builtin ``filter`` but is kept for interface compatibility).
        name: Name forwarded to the superclass.
        obs_model: Observation model; if None, an identity-matrix model covering
            all observed processes is created.
        observed_processes: A single MarkovProcess or an iterable of them; each
            must be one of the filter's processes (matched by identity).

    Raises:
        ValueError: If any observed process is not a process of the filter.
    """
    super().__init__(filter, name)
    # Accept either a single process or an iterable of processes.
    if not checks.is_iterable(observed_processes):
        observed_processes = [observed_processes]
    observed_processes = tuple(checks.check_iterable_over_instances(observed_processes, proc.MarkovProcess))
    if obs_model is None:
        # Default: observe every dimension of the observed processes directly.
        obs_model = ParticleFilterObsModel.create(
            np.eye(sum(p.process_dim for p in observed_processes)))
    self._obs_model = obs_model
    # Fix: the original assigned self._state_mean_rects = [] twice; once is enough.
    self._state_mean_rects = []
    self._state_cov_diag_rects = []
    # For each observed process, locate its row offset within the filter's
    # stacked state vector and record the matching mean/covariance slices.
    for op in observed_processes:
        matched = False
        row = 0
        for ap in self.filter._processes:
            process_dim = ap.process_dim
            if op is ap:
                matched = True
                self._state_mean_rects.append(np.s_[row:row+process_dim, 0:1])
                self._state_cov_diag_rects.append(np.s_[row:row+process_dim, row:row+process_dim])
            row += process_dim
        if not matched:
            raise ValueError('Each observed process must match a particle filter\'s process')
    # Build the full grid of cross-covariance rectangles (row block x column block).
    self._state_cov_rects = []
    for r in self._state_cov_diag_rects:
        startrow = r[0].start
        stoprow = r[0].stop
        rects = []
        for r1 in self._state_cov_diag_rects:
            startcol = r1[1].start
            stopcol = r1[1].stop
            rects.append(np.s_[startrow:stoprow, startcol:stopcol])
        self._state_cov_rects.append(rects)
def __init__(self, time, state_distr, process, weighting_func=None, particle_count=1000, observation_dim=1, random_state=None, predicted_observation_sampler=None, outlier_threshold=None, name=None, pype=None, pype_options=frozenset(filtering.FilterPypeOptions)):
    """Initialise the particle filter.

    Args:
        time: Initial filter time.
        state_distr: Prior state distribution used to seed the particles.
        process: A single SolvedItoProcess or an iterable of them.
        weighting_func: Particle weighting function; defaults to KDEWeightingFunction().
        particle_count: Number of particles to maintain.
        observation_dim: Dimensionality of observations.
        random_state: Optional random state; defaults to rnd.random_state().
        predicted_observation_sampler: Optional sampler of predicted observations.
        outlier_threshold: Outlier threshold; requires predicted_observation_sampler.
        name: Optional name forwarded to the superclass.
        pype: Optional pype to which filter states are sent.
        pype_options: FilterPypeOptions controlling what gets sent to the pype.

    Raises:
        ValueError: If outlier_threshold is given without a
            predicted_observation_sampler.
    """
    super().__init__(name)
    self._pype = pype
    # Pype options are only meaningful when both a pype and options were supplied.
    self._pype_options = frozenset() if (pype_options is None or pype is None) else frozenset(pype_options)
    # Accept either a single process or an iterable of processes.
    if not checks.is_iterable(process):
        process = (process,)
    process = checks.check_iterable_over_instances(process, proc.SolvedItoProcess)
    if weighting_func is None:
        weighting_func = KDEWeightingFunction()
    self._time = time
    self._observation_dim = observation_dim
    self._state_distr = state_distr
    self._processes = tuple(process)
    # Total state dimension is the sum over the component processes.
    self._state_dim = sum(p.process_dim for p in self._processes)
    self._weighting_func = weighting_func
    self._particle_count = particle_count
    self._current_particle_idx = None
    self._random_state = rnd.random_state() if random_state is None else random_state
    self._predicted_observation_sampler = predicted_observation_sampler
    # Pre-allocate particle and weight buffers.
    self._prior_particles = np.empty((self._particle_count, self._state_dim))
    self._resampled_particles = np.empty((self._particle_count, self._state_dim))
    self._unnormalised_weights = np.empty((self._particle_count,))
    self._weights = np.empty((self._particle_count,))
    self._resampled_particles_uptodate = False
    self._last_observation = None
    # Cached moments, invalidated on update/resample.
    self._cached_prior_mean = None
    self._cached_prior_var = None
    self._cached_posterior_mean = None
    self._cached_posterior_var = None
    self._cached_resampled_mean = None
    self._cached_resampled_var = None
    self.log_likelihood = 0.0
    # Fix: np.NaN was removed in NumPy 2.0; use np.nan.
    self.effective_sample_size = np.nan
    if self._predicted_observation_sampler is not None:
        self.predicted_observation_particles = None
        self.predicted_observation_kde = None
        self.predicted_observation = np.nan
        self.innovation = np.nan
        self.innovationvar = np.nan
    # Fix: was an assert (stripped under -O); validate explicitly instead.
    if outlier_threshold is not None and self._predicted_observation_sampler is None:
        raise ValueError('outlier_threshold requires a predicted_observation_sampler')
    self._outlier_threshold = outlier_threshold
    self._context = OrderedDict()
    self._initialise()