Example #1
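A Gaussian-mixture PHD filter measurement (correction) update, excerpted from a larger class: np is numpy, while blas and kalmanfilter appear to be the project's own linear-algebra and Kalman-filter helpers. The method scales the predicted components by the missed-detection probability 1 - pd, then, for each observation, applies a Kalman update and reweights every component by its Gaussian likelihood against the clutter density.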
    def phdUpdate(self, observation_set):
        num_observations = observation_set.shape[0]
        if num_observations:
            z_dim = observation_set.shape[1]
        else:
            z_dim = 0

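        # With no existing components there is nothing to update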
        if not self.weights.shape[0]:
            self._states_ = self.states.copy()
            self._weights_ = self.weights.copy()
            return

        # Per-component probability of detection and per-observation clutter
        # density, evaluated through the user-supplied handles
        detection_probability = self.parameters.pd_fn.handle(self.states, self.parameters.pd_fn.parameters)
        clutter_pdf = self.parameters.clutter_fn.handle(observation_set, self.parameters.clutter_fn.parameters)
        # Account for missed detection
        self._states_ = self.states.copy()
        self._weights_ = [self.weights * (1 - detection_probability)]

        # Split x and P out from the combined state vector
        detected_indices = detection_probability > 0
        detected_states = self.states[detected_indices]
        x = detected_states.state
        P = detected_states.covariance
        # Scale the weights by detection probability
        weights = self.weights[detected_indices] * detection_probability[detected_indices]

        if x.shape[0]:
            # Part of the Kalman update is common to all observation-updates
            x, P, kalman_info = kalmanfilter.kf_update(
                x,
                P,
                np.array([self.parameters.obs_fn.parameters.H]),
                np.array([self.parameters.obs_fn.parameters.R]),
                None,
                INPLACE=True,
            )

            # We need to update the states and find the updated weights
            for obs_count, _observation_ in enumerate(observation_set):
                # Apply the Kalman update to get the new state - update in-place
                # and return the residuals
                new_x, residuals = kalmanfilter.kf_update_x(
                    x, kalman_info.pred_z, _observation_, kalman_info.kalman_gain, INPLACE=False
                )

                # Gaussian likelihood N(z; pred_z, S) of this observation for
                # every component: squared Mahalanobis distance computed via
                # the inverse square root of the innovation covariance S
                x_pdf = np.exp(-0.5 * np.power(blas.dgemv(kalman_info.inv_sqrt_S, residuals), 2).sum(axis=1)) / np.sqrt(
                    kalman_info.det_S * (2 * np.pi) ** z_dim
                )

                new_weight = weights * x_pdf
                # Normalise against the clutter density plus the total
                # detection weight for this observation
                normalisation_factor = clutter_pdf[obs_count] + new_weight.sum()
                new_weight /= normalisation_factor

                # Create a fresh state container for this observation so the
                # appended entries do not alias one shared object
                new_gmstate = self.states.__class__(0)
                new_gmstate.set(new_x, P)
                self._states_.append(new_gmstate)
                self._weights_ += [new_weight]

        self._weights_ = np.concatenate(self._weights_)
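The reweighting inside the loop is the standard GM-PHD corrector term w·pd·N(z; ẑ, S) / (κ(z) + Σ w·pd·N(z; ẑ, S)). Below is a minimal, self-contained sketch of that computation with toy shapes and values assumed, using plain numpy in place of blas.dgemv and the kalman_info fields (inv_sqrt_S, det_S):

import numpy as np

N, z_dim = 4, 2
rng = np.random.default_rng(0)

S = 0.5 * np.eye(z_dim)                       # innovation covariance S
inv_sqrt_S = np.linalg.inv(np.linalg.cholesky(S))
det_S = np.linalg.det(S)
residuals = rng.normal(size=(N, z_dim))       # z - predicted z, per component
weights = np.full(N, 0.25)                    # pd-scaled predicted weights
clutter = 1e-2                                # clutter density at this z

# Gaussian likelihood N(z; pred_z, S) for all components at once
maha_sq = np.power(residuals @ inv_sqrt_S.T, 2).sum(axis=1)
x_pdf = np.exp(-0.5 * maha_sq) / np.sqrt(det_S * (2 * np.pi) ** z_dim)

new_weight = weights * x_pdf
new_weight /= clutter + new_weight.sum()      # normalise against clutter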
Example #2
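The same GM-PHD update extended for PHD-SLAM (again an excerpt of a class method; kf_update, kf_update_x, blas, featuredetector and PARAMETERS come from the surrounding project). Alongside the state update it accumulates, in slam_info, the factors of the parent particle's measurement likelihood (SLAM steps 1 and 2, finalised before returning), and it gates components at a detection probability of 0.1 instead of 0.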
 def phdUpdate(self, observation_set):
     # Container for slam parent update
     slam_info = PARAMETERS()
     num_observations = observation_set.shape[0]
     if num_observations:
         z_dim = observation_set.shape[1]
     else:
         z_dim = 0
     
     # Per-observation clutter density, evaluated through the user-supplied
     # handle
     clutter_pdf = self.parameters.clutter_fn.handle(observation_set, 
                                     self.parameters.clutter_fn.parameters)
     
     if not self.weights.shape[0]:
         self._states_ = self.states.copy()
         self._weights_ = self.weights.copy()
         # With no components the parent particle's likelihood reduces to
         # the clutter-only terms, mirroring the empty-detection branch below
         slam_info.exp_sum__pd_predwt = 1.0
         slam_info.sum__clutter_with_pd_updwt = np.array(clutter_pdf)
         slam_info.likelihood = slam_info.sum__clutter_with_pd_updwt.prod()
         return slam_info
     
     # Per-component probability of detection
     detection_probability = self.parameters.pd_fn.handle(self.states, 
                                         self.parameters.pd_fn.parameters)
     # Account for missed detection
     self._states_ = self.states.copy()
     self._weights_ = [self.weights*(1-detection_probability)]
     
     # SLAM, step 1: exponential of the negative total predicted weight
     slam_info.exp_sum__pd_predwt = np.exp(-self.weights.sum())
     
     # Split x and P out from the combined state vector
     detected_indices = detection_probability > 0.1
     detected_states = self.states[detected_indices]
     x = detected_states.state
     P = detected_states.covariance
     # Scale the weights by detection probability 
     weights = self.weights[detected_indices]*detection_probability[detected_indices]
     
     # SLAM, prep for step 2: one normalisation factor per observation
     slam_info.sum__clutter_with_pd_updwt = np.zeros(num_observations)
     
     if x.shape[0]:
         # Part of the Kalman update is common to all observation-updates
         x, P, kalman_info = kf_update(x, P, 
                             np.array([self.parameters.obs_fn.parameters.H]), 
                             np.array([self.parameters.obs_fn.parameters.R]), 
                             None, INPLACE=True)
             
         # Predicted observation from the current states
         pred_z = featuredetector.tf.relative(self.parameters.obs_fn.parameters.parent_state_xyz, 
                                              self.parameters.obs_fn.parameters.parent_state_rpy, 
                                              x)
         #print "PREDICTED Z:"
         #print pred_z
         # We need to update the states and find the updated weights
         for obs_count, _observation_ in enumerate(observation_set):
             # Apply the Kalman update to get the new state - update in-place
             # and return the residuals
             new_x, residuals = kf_update_x(x, pred_z, 
                                         _observation_, kalman_info.kalman_gain,
                                         INPLACE=False)
             # Gaussian likelihood N(z; pred_z, S) of this observation for
             # every component, via the inverse square root of the
             # innovation covariance S
             x_pdf = np.exp(-0.5*np.power(
                 blas.dgemv(kalman_info.inv_sqrt_S, residuals), 2).sum(axis=1))/ \
                 np.sqrt(kalman_info.det_S*(2*np.pi)**z_dim)
             
             new_weight = weights*x_pdf
             # Normalise against the clutter density plus the total
             # detection weight for this observation
             normalisation_factor = clutter_pdf[obs_count] + new_weight.sum()
             new_weight /= normalisation_factor
             # SLAM, step 2:
             slam_info.sum__clutter_with_pd_updwt[obs_count] = \
                                                         normalisation_factor
             
             # Create a fresh state container for this observation so the
             # appended entries do not alias one shared object
             new_gmstate = self.states.__class__(0)
             new_gmstate.set(new_x, P)
             self._states_.append(new_gmstate)
             self._weights_ += [new_weight]
         
     else:
         slam_info.sum__clutter_with_pd_updwt = np.array(clutter_pdf)
         
     self._weights_ = np.concatenate(self._weights_)
     # SLAM, finalise:
     slam_info.likelihood = (slam_info.exp_sum__pd_predwt * 
                             slam_info.sum__clutter_with_pd_updwt.prod())
     return slam_info
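The returned likelihood is the PHD-SLAM parent-particle factor exp(-Σ w_pred) · Π_z (κ(z) + Σ pd·w·N(z; ẑ, S)), assembled from the pieces stored along the way. A toy sketch of the final combination, with the two stored factors assumed:

import numpy as np

exp_sum__pd_predwt = np.exp(-3.0)                  # SLAM step 1, for Σ w_pred = 3
sum__clutter_with_pd_updwt = np.array([0.8, 1.2])  # SLAM step 2, one per observation

likelihood = exp_sum__pd_predwt * sum__clutter_with_pd_updwt.prod()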