def reduce_mixture(
    self, immstate_mixture: MixtureParameters[MixtureParameters[MT]]
) -> MixtureParameters[MT]:
    """Approximate a mixture of immstates as a single immstate.

    The outer mixture is over association hypotheses a_k, the inner
    mixtures are over modes s_k. The conditioning order is flipped with
    Bayes rule so the final reduction is per mode, over associations.

    :param immstate_mixture: association-probability weighted mode
        mixtures, i.e. weights = Pr{a_k | Z_1:k} and components are the
        per-association immstates.
    :return: the reduced (single) immstate.
    """
    # P(a_k = j | Z_1:k): association probabilities, length m_k + 1.
    weights = immstate_mixture.weights

    # P(s_k = i | a_k = j, Z_1:k): per-association mode probabilities,
    # stacked to shape (#associations, #modes).
    component_conditioned_mode_prob = np.array(
        [c.weights.ravel() for c in immstate_mixture.components]
    )

    # Flip conditioning with Bayes and marginalize over a_k:
    #   prior P(a_k=j | Z_1:k) and conditional P(s_k=i | a_k=j, Z_1:k)
    # give
    #   marginal P(s_k=i | Z_1:k) and flipped P(a_k=j | s_k=i, Z_1:k).
    mode_prob, mode_conditioned_component_prob = discretebayes.discrete_bayes(
        weights, component_conditioned_mode_prob
    )

    mode_states = []
    # One reduction per mode filter; was zip(range(len(...)), ...) — use enumerate.
    for i, fs in enumerate(self.filters):
        # Densities conditioned on mode i across all association hypotheses j:
        # immstate_mixture.components[j].components[i]
        #   = P(x_k | a_k=j, s_k=i, Z_1:k)
        mode_conditional_density_components = [
            immstate_mixture.components[j].components[i]
            for j in range(len(weights))
        ]

        # Moment-match over associations with weights P(a_k=j | s_k=i, Z_1:k),
        # yielding P(x_k | s_k=i, Z_1:k).
        reduced_mode_conditional = fs.reduce_mixture(
            MixtureParameters(
                weights=mode_conditioned_component_prob[i],
                components=mode_conditional_density_components,
            )
        )
        mode_states.append(reduced_mode_conditional)

    # Final immstate: marginal mode probabilities + per-mode reduced densities.
    immstate_reduced = MixtureParameters(mode_prob, mode_states)
    return immstate_reduced
def setUpClass(cls) -> None:
    """Create shared fixtures: pre-generated data and a two-mode (CV/CT) IMM filter."""
    cls.pregen = PreGenData()

    # Noise settings for the two dynamic models and the sensor.
    sigma_z = 3
    sigma_a_CV = 0.2
    sigma_a_CT = 0.1
    sigma_omega = 0.002 * np.pi
    cls.ts = 0.1

    # Mode transition (Markov chain) matrix; each row is a distribution.
    PI = np.array([[0.95, 0.05], [0.05, 0.95]])
    assert np.allclose(PI.sum(axis=1), 1), "rows of PI must sum to 1"

    # One EKF per dynamic model, sharing the position measurement model.
    measurement_model = CartesianPosition(sigma_z, state_dim=5)
    dynamic_models = [
        WhitenoiseAccelleration(sigma_a_CV, n=5),
        ConstantTurnrate(sigma_a_CT, sigma_omega),
    ]
    ekf_filters = [EKF(dyn, measurement_model) for dyn in dynamic_models]
    cls.imm_filter = IMM(ekf_filters, PI)

    # IMM initial state: equal mode weights, identical Gaussian per mode.
    init_weights = np.array([0.5] * 2)
    init_mean = [0] * 5
    # NOTE: the per-mode covariances should really differ; a reasonable
    # guess is diag of squared distance to the true values.
    init_cov = np.diag([1] * 5)
    init_mode_states = [GaussParams(init_mean, init_cov)] * 2  # copy per mode
    cls.init_immstate = MixtureParameters(init_weights, init_mode_states)
def _(self, init: dict) -> MixtureParameters[MT]:
    """Initialize a MixtureParameters from a dict.

    Recognized keys: weights under any of {"weights", "probs",
    "probabilities", "mode_probs"}; components under {"components",
    "modes"}. Missing weights fall back to the filter's initial mode
    probabilities; missing components are initialized from the whole dict.

    :param init: dict possibly containing weights and/or components.
    :return: the initialized mixture.
    """
    got_weights = False
    got_components = False
    for key in init:
        if not got_weights and key in [
            "weights",
            "probs",
            "probabilities",
            "mode_probs",
        ]:
            # BUG FIX: was np.asfarray([key]), which tried to convert the
            # key *string* to a float array instead of reading the value.
            weights = np.asfarray(init[key])
            got_weights = True
        elif not got_components and key in ["components", "modes"]:
            components = self.init_components(init[key])
            got_components = True

    if not got_weights:
        weights = self.initial_mode_probabilities
    if not got_components:
        components = self.init_components(init)

    # BUG FIX: assert message was truncated ("must sum to 1 for").
    assert np.allclose(weights.sum(), 1), "Mode probabilities must sum to 1"

    return MixtureParameters(weights, components)
def predict(
    self,
    immstate: MixtureParameters[MT],
    # sampling time
    Ts: float,
) -> MixtureParameters[MT]:
    """Predict the immstate Ts time units ahead, approximating the mixture step.

    Steps: predict mode probabilities, condition states on the predicted
    mode, approximate each conditional as a single Gaussian (mixing),
    then run each mode filter's prediction.
    """
    # Mode probability prediction and the mixing weights in one call.
    predicted_mode_probability, mixing_probability = self.mix_probabilities(
        immstate, Ts
    )

    # Moment-match each mode's conditional mixture to a single density.
    mixed_mode_states: List[MT] = self.mix_states(immstate, mixing_probability)

    # Per-mode filter prediction on the mixed states.
    predicted_mode_states = self.mode_matched_prediction(
        mode_states=mixed_mode_states, Ts=Ts
    )

    return MixtureParameters(predicted_mode_probability, predicted_mode_states)
def _(self, init: tuple) -> MixtureParameters[MT]:
    """Initialize a MixtureParameters from a (weights, components) tuple.

    :param init: tuple whose first element is the mode weights (sized to
        match self.filters) and whose second element initializes the
        components.
    :return: the initialized mixture.
    """
    assert isinstance(init[0], Sized) and len(init[0]) == len(
        self.filters
    ), f"To initialize from tuple the first element must be of len(self.filters)={len(self.filters)}"

    weights = np.asfarray(init[0])
    # BUG FIX: was self.init_compontents (typo) -> AttributeError at runtime.
    components = self.init_components(init[1])

    return MixtureParameters(weights, components)
def update(
    self,
    z: np.ndarray,
    immstate: MixtureParameters[MT],
    sensor_state: Dict[str, Any] = None,
) -> MixtureParameters[MT]:
    """Update the immstate with z in sensor_state."""
    # New mode probabilities and per-mode posterior states from z.
    weights_upd = self.update_mode_probabilities(z, immstate, sensor_state)
    states_upd = self.mode_matched_update(z, immstate, sensor_state)

    return MixtureParameters(weights_upd, states_upd)
def update(
    self,
    # measurements of shape=(M, m)=(#measurements, dim)
    Z: np.ndarray,
    filter_state: ET,
    *,
    sensor_state: Optional[Dict[str, Any]] = None,
) -> ET:  # The filter_state updated by approximating the data association
    """Perform the PDA update cycle.

    Gate -> association probabilities -> conditional update -> reduce
    mixture. Assumes filter_state holds the *predicted* state.
    """
    # Keep only measurements that fall inside the validation gate.
    in_gate = self.gate(Z, filter_state, sensor_state=sensor_state)
    Z_gated = Z[in_gate]

    # Association probabilities (beta) for the gated measurements.
    beta = self.association_probabilities(
        Z_gated, filter_state, sensor_state=sensor_state
    )

    # One conditional update per association hypothesis.
    hypothesis_states = self.conditional_update(
        Z_gated, filter_state, sensor_state=sensor_state
    )

    # Combine into a mixture and moment-match back to a single state.
    hypothesis_mixture = MixtureParameters(beta, hypothesis_states)
    return self.reduce_mixture(hypothesis_mixture)
sigma_omega = 0.3 # markov chain PI11 = 0.9 PI22 = 0.9 p10 = 0.9 # initvalue for mode probabilities PI = np.array([[PI11, (1 - PI11)], [(1 - PI22), PI22]]) assert np.allclose(np.sum(PI, axis=1), 1), "rows of PI must sum to 1" mean_init = np.array([0, 0, 0, 0, 0]) cov_init = np.diag([1000, 1000, 30, 30, 0.1])**2 # THIS WILL NOT BE GOOD mode_probabilities_init = np.array([p10, (1 - p10)]) mode_states_init = GaussParams(mean_init, cov_init) init_imm_state = MixtureParameters(mode_probabilities_init, [mode_states_init] * 2) assert np.allclose(np.sum(mode_probabilities_init), 1), "initial mode probabilities must sum to 1" # make model measurement_model = measurementmodels.CartesianPosition(sigma_z, state_dim=5) dynamic_models: List[dynamicmodels.DynamicModel] = [] dynamic_models.append(dynamicmodels.WhitenoiseAccelleration(sigma_a_CV, n=5)) dynamic_models.append(dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega)) ekf_filters = [] ekf_filters.append(ekf.EKF(dynamic_models[0], measurement_model)) ekf_filters.append(ekf.EKF(dynamic_models[1], measurement_model)) imm_filter = imm.IMM(ekf_filters, PI) tracker = pda.PDA(imm_filter, clutter_intensity, PD, gate_size)
# Build a two-mode (CV/CT) IMM filter.
# sigma_z, sigma_a_CV, sigma_a_CT, sigma_omega, PI, Z and Ts are presumably
# defined earlier in this script — not visible in this chunk.
measurement_model = measurementmodels.CartesianPosition(sigma_z, state_dim=5)
CV = dynamicmodels.WhitenoiseAccelleration(sigma_a_CV, n=5)
CT = dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega)
ekf_filters = []
ekf_filters.append(ekf.EKF(CV, measurement_model))
ekf_filters.append(ekf.EKF(CT, measurement_model))
imm_filter = imm.IMM(ekf_filters, PI)

# IMM initial state: equal mode weights, identical Gaussian per mode.
init_weights = np.array([0.5] * 2)
init_mean = [0] * 5
init_cov = np.diag(
    [1] * 5
)  # HAVE TO BE DIFFERENT: use intuition, eg. diag guessed distance to true values squared.
init_mode_states = [GaussParams(init_mean, init_cov)
                    ] * 2  # copy of the two modes
init_immstate = MixtureParameters(init_weights, init_mode_states)

# Run the predict/update/estimate cycle over all measurements.
imm_preds = []
imm_upds = []
imm_ests = []
updated_immstate = init_immstate
for zk in Z:
    predicted_immstate = imm_filter.predict(updated_immstate, Ts)
    updated_immstate = imm_filter.update(zk, predicted_immstate)
    estimate = imm_filter.estimate(updated_immstate)

    imm_preds.append(predicted_immstate)
    imm_upds.append(updated_immstate)
    imm_ests.append(estimate)

# Stack the posterior means for plotting/analysis.
x_est = np.array([est.mean for est in imm_ests])
# Build the IMM-PDA tracker.
# CV, measurement_model, PI, modes, init_weights, init_mean, init_cov,
# clutter_intensity, PD, gate_size, K, Z, Xgt and Ts are presumably defined
# earlier in this script — not visible in this chunk.
CT = dynamicmodels.ConstantTurnrate(sigma_a_CT, sigma_omega)
ekf_filters = [ekf.EKF(CV, measurement_model), ekf.EKF(CT, measurement_model)]

# Transition matrix test
assert np.allclose(PI.sum(axis=1), 1), "rows of PI must sum to 1"
assert PI.shape[0] == modes, 'Dimension transition matrix not same as modes'

# IMM filter instantiation
imm_filter = imm.IMM(ekf_filters, PI)

# IMM init mode probabilities test
assert np.allclose(np.sum(init_weights), 1), \
    "initial mode probabilities must sum to 1"

# Both modes share the same initial Gaussian.
init_mode_states = [GaussParams(init_mean, init_cov)] * modes
init_immstate = MixtureParameters(init_weights,
                                  init_mode_states)  # Mixture of two mode states

tracker = pda.PDA(imm_filter, clutter_intensity, PD, gate_size)

# allocate consistency metrics (filled later in the loop)
NEES = np.zeros(K)
NEESpos = np.zeros(K)
NEESvel = np.zeros(K)

tracker_update = init_immstate
tracker_update_list = []
tracker_predict_list = []
tracker_estimate_list = []
# estimate: run predict/update per scan against the ground truth
for k, (Zk, x_true_k) in enumerate(zip(Z, Xgt)):
    tracker_predict = tracker.predict(tracker_update, Ts=Ts)
    tracker_update = tracker.update(Zk, tracker_predict)
def _(self, init: Sequence) -> MixtureParameters[MT]:
    """Initialize from a plain sequence of component specs.

    Uses the filter's initial mode probabilities as the weights.
    """
    return MixtureParameters(
        self.initial_mode_probabilities, self.init_components(init)
    )