Example No. 1
 def _kf_update_(self, weights, states, covs, h_mat, r_mat, z):
     # predicted observations
     pred_z = np.array(np.dot(h_mat, states.T).T, order='C')
     # covariance is the same for all states, do the update for one matrix
     
     upd_weights = weights.copy()
     upd_cov0, kalman_info = np_kf_update_cov(covs[0], h_mat, r_mat, False)
     
     upd_covs = np.repeat(upd_cov0[np.newaxis], covs.shape[0], axis=0)
     # Update the states
     upd_states, residuals = kf_update_x(states, pred_z, z, 
         np.array([kalman_info.kalman_gain], order='C'))
     if not upd_states.flags.c_contiguous:
         upd_states = np.array(upd_states, order='C')
     # Evaluate the new weight
     x_pdf = np.exp(-0.5*np.power(
         np.dot(kalman_info.inv_sqrt_S, residuals.T).T, 2).sum(axis=1))/ \
         np.sqrt(kalman_info.det_S*(2*np.pi)**z.shape[0])
     upd_weights = weights * x_pdf
     upd_weights /= upd_weights.sum()
     return upd_weights, upd_states, upd_covs
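The trick in Example No. 1 is that every particle shares one covariance, so the gain and covariance update are computed once and broadcast. A minimal self-contained sketch in plain NumPy, with illustrative names standing in for the module's np_kf_update_cov / kf_update_x helpers:

import numpy as np

def shared_cov_kf_update(weights, states, cov, h_mat, r_mat, z):
    # Innovation covariance S = H P H' + R, one matrix for all particles
    s_mat = h_mat @ cov @ h_mat.T + r_mat
    s_inv = np.linalg.inv(s_mat)
    # Kalman gain K = P H' S^-1, also shared
    gain = cov @ h_mat.T @ s_inv
    # Updated covariance, identical for every particle
    upd_cov = (np.eye(cov.shape[0]) - gain @ h_mat) @ cov
    # Vectorised residuals and state update, one row per particle
    residuals = z - states @ h_mat.T
    upd_states = states + residuals @ gain.T
    # Reweight each particle by the observation likelihood N(z; Hx, S)
    maha = np.einsum('ni,ij,nj->n', residuals, s_inv, residuals)
    x_pdf = np.exp(-0.5 * maha) / np.sqrt(
        np.linalg.det(s_mat) * (2 * np.pi) ** z.shape[0])
    upd_weights = weights * x_pdf
    return upd_weights / upd_weights.sum(), upd_states, upd_cov

Broadcasting the single updated covariance with np.repeat, as the example does, is then just presentation; the covariance arithmetic happens once.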
Example No. 2
def g500_kf_update(weights, states, covs, obs_matrix, obs_noise, z):
    upd_weights = weights.copy()
    # Covariance is the same for all the particles
    upd_cov0, kalman_info = kf_update_cov(np.array([covs[0]]), obs_matrix, obs_noise, False)
    upd_covs = np.repeat(upd_cov0, covs.shape[0], axis=0)
    # Update the states
    pred_z = blas.dgemv(obs_matrix, states)
    upd_states, residuals = kf_update_x(states, pred_z, z, kalman_info.kalman_gain)
    # Evaluate the new weight
    x_pdf = np.exp(-0.5*np.power(
                blas.dgemv(kalman_info.inv_sqrt_S, residuals), 2).sum(axis=1))/ \
                np.sqrt(kalman_info.det_S*(2*np.pi)**z.shape[0])
    upd_weights = weights * x_pdf
    upd_weights /= upd_weights.sum()
    
    """
    for count in range(states.shape[0]):
        this_state = states[count]
        this_state.shape = (1,) + this_state.shape
        this_cov = covs[count]
        this_cov.shape = (1,) + this_cov.shape
        (upd_state, upd_covariance, kalman_info) = \
                kf_update(this_state, this_cov, obs_matrix, obs_noise, z, False)
        #x_pdf = misctools.mvnpdf(x, mu, sigma)
        x_pdf = np.exp(-0.5*np.power(
                blas.dgemv(kalman_info.inv_sqrt_S, kalman_info.residuals), 2).sum(axis=1))/ \
                np.sqrt(kalman_info.det_S*(2*np.pi)**z.shape[0])
        upd_weights[count] *= x_pdf
        upd_states[count] = upd_state
        upd_covs[count] = upd_covariance
    upd_weights /= upd_weights.sum()
    """
    return upd_weights, upd_states, upd_covs
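Both examples score each particle with the observation likelihood N(z; Hx, S), evaluated through inv_sqrt_S so that a plain squared norm gives the Mahalanobis distance. A hedged NumPy equivalent (assuming inv_sqrt_S is the inverse Cholesky factor of S, which matches how it is used here):

import numpy as np

def gaussian_obs_likelihood(residuals, s_mat):
    # With S = L L', inv_sqrt_S = L^-1 and ||L^-1 r||^2 = r' S^-1 r
    inv_sqrt_s = np.linalg.inv(np.linalg.cholesky(s_mat))
    maha = np.power(residuals @ inv_sqrt_s.T, 2).sum(axis=1)
    z_dim = s_mat.shape[0]
    return np.exp(-0.5 * maha) / np.sqrt(
        np.linalg.det(s_mat) * (2 * np.pi) ** z_dim)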
Example No. 3
 def update(self, observations, observation_noise):
     self.flags.ESTIMATE_IS_VALID = False
     # Container for slam parent update
     slam_info = STRUCT()
     slam_info.likelihood = 1
     # z_dim falls back to 3 when the observation set is empty
     num_observations, z_dim = (observations.shape + (3,))[0:2]
     
     if not self.weights.shape[0]:
         return slam_info
     
     detection_probability = self.camera_pd(self.parent_ned, 
                                            self.parent_rpy, self.states)
     
     clutter_pdf = self.camera_clutter(observations)
     clutter_intensity = self.vars.clutter_intensity*clutter_pdf
     
     self.flags.LOCK.acquire()
     try:
         # Account for missed detection
         prev_weights = self.weights.copy()
         prev_states = self.states.copy()
         prev_covs = self.covs.copy()
     
         updated = STRUCT()
         updated.weights = [self.weights*(1-detection_probability)]
         updated.states = [self.states]
         updated.covs = [self.covs]
         
         # Update every landmark: the pd < 0.1 gating is disabled here,
         # so detection_probability >= 0 selects all states
         detected_indices = detection_probability >= 0
         detected = STRUCT()
         detected.weights = ( prev_weights[detected_indices]*
                              detection_probability[detected_indices] )
         detected.states = prev_states[detected_indices]
         detected.covs = prev_covs[detected_indices]
         # SLAM, step 1:
         slam_info.exp_sum__pd_predwt = np.exp(-detected.weights.sum())
         
         # SLAM, prep for step 2:
         slam_info.sum__clutter_with_pd_updwt = np.zeros(num_observations)
         
         if detected.weights.shape[0]:
             # Covariance update part of the Kalman update is common to all 
             # observation-updates
             if observations.shape[0]:
                 h_mat = featuredetector.tf.relative_rot_mat(self.parent_rpy)
                 # Predicted observation from the current states
                 pred_z = featuredetector.tf.relative(self.parent_ned, 
                     self.parent_rpy, detected.states)
                 observation_noise = observation_noise[0]
                 detected.covs, kalman_info = kf_update_cov(detected.covs, 
                                                            h_mat, 
                                                            observation_noise, 
                                                            INPLACE=True)
             # We need to update the states and find the updated weights
             for (_observation_, obs_count) in zip(observations, 
                                                   range(num_observations)):
                 # Apply the Kalman update to get the new state - 
                 # update in-place and return the residuals
                 upd_states, residuals = kf_update_x(detected.states, pred_z, 
                                                     _observation_, 
                                                     kalman_info.kalman_gain,
                                                     INPLACE=False)
                 # Calculate the weight of the Gaussians for this observation
                 x_pdf = misctools.mvnpdf(_observation_, pred_z, kalman_info.S)
                 
                 upd_weights = detected.weights*x_pdf
                 
                 # Normalise the weights
                 normalisation_factor = ( clutter_intensity[obs_count] + 
                                          upd_weights.sum() )
                 upd_weights /= normalisation_factor
                 #print "Obs Index: ", str(obs_count+1)
                 #print upd_weights.sum()
                 # SLAM, step 2:
                 slam_info.sum__clutter_with_pd_updwt[obs_count] = \
                     normalisation_factor
                 
                 # Create new state with new_x and P to add to _states_
                 updated.weights += [upd_weights.copy()]
                 updated.states += [upd_states.copy()]
                 updated.covs += [detected.covs.copy()]
         else:
             slam_info.sum__clutter_with_pd_updwt = np.array(clutter_intensity)
         
         self.weights = np.concatenate(updated.weights)
         self.states = np.concatenate(updated.states)
         self.covs = np.concatenate(updated.covs)
         
         # SLAM, finalise:
         slam_info.likelihood = (slam_info.exp_sum__pd_predwt * 
                                 slam_info.sum__clutter_with_pd_updwt.prod())
         assert self.weights.shape[0] == self.states.shape[0] == self.covs.shape[0], "Lost states!!"
     finally:
         self.flags.LOCK.release()
     return slam_info
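The slam_info bookkeeping above reduces to two factors: the exponential of the negated sum of detection-scaled predicted weights (step 1), and the product of the per-observation normalisation factors (step 2). A toy numeric sketch, with made-up values standing in for the quantities computed inside the loop:

import numpy as np

# Hypothetical values: 2 landmarks, 2 observations
pd = np.array([0.9, 0.8])               # detection probabilities
prev_weights = np.array([1.0, 0.5])     # predicted mixture weights
clutter_intensity = np.array([0.01, 0.02])
sum_upd = np.array([0.7, 0.3])          # stand-ins for upd_weights.sum()

exp_sum__pd_predwt = np.exp(-(prev_weights * pd).sum())    # step 1
sum__clutter_with_pd_updwt = clutter_intensity + sum_upd   # step 2
likelihood = exp_sum__pd_predwt * sum__clutter_with_pd_updwt.prod()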
Example No. 4
    def phdUpdate(self, observation_set):
        num_observations = observation_set.shape[0]
        if num_observations:
            z_dim = observation_set.shape[1]
        else:
            z_dim = 0

        if not self.weights.shape[0]:
            self._states_ = self.states.copy()
            self._weights_ = self.weights.copy()
            return

        detection_probability = self.parameters.pd_fn.handle(self.states, self.parameters.pd_fn.parameters)
        clutter_pdf = self.parameters.clutter_fn.handle(observation_set, self.parameters.clutter_fn.parameters)
        # Account for missed detection
        self._states_ = self.states.copy()
        self._weights_ = [self.weights * (1 - detection_probability)]

        # Split x and P out from the combined state vector
        detected_indices = detection_probability > 0
        detected_states = self.states[detected_indices]
        x = detected_states.state
        P = detected_states.covariance
        # Scale the weights by detection probability
        weights = self.weights[detected_indices] * detection_probability[detected_indices]

        if x.shape[0]:
            # Part of the Kalman update is common to all observation-updates
            x, P, kalman_info = kalmanfilter.kf_update(
                x,
                P,
                np.array([self.parameters.obs_fn.parameters.H]),
                np.array([self.parameters.obs_fn.parameters.R]),
                None,
                INPLACE=True,
            )

            # Container for the updated states
            new_gmstate = self.states.__class__(0)
            # We need to update the states and find the updated weights
            for (_observation_, obs_count) in zip(observation_set, range(num_observations)):
                # Apply the Kalman update to get the new state - update in-place
                # and return the residuals
                new_x, residuals = kalmanfilter.kf_update_x(
                    x, kalman_info.pred_z, _observation_, kalman_info.kalman_gain, INPLACE=False
                )

                # Calculate the weight of the Gaussians for this observation
                # Calculate term in the exponent
                x_pdf = np.exp(-0.5 * np.power(blas.dgemv(kalman_info.inv_sqrt_S, residuals), 2).sum(axis=1)) / np.sqrt(
                    kalman_info.det_S * (2 * np.pi) ** z_dim
                )

                new_weight = weights * x_pdf
                # Normalise the weights
                normalisation_factor = clutter_pdf[obs_count] + new_weight.sum()
                new_weight /= normalisation_factor

                # Create new state with new_x and P to add to _states_
                new_gmstate.set(new_x, P)
                self._states_.append(new_gmstate)
                self._weights_ += [new_weight]

        self._weights_ = np.concatenate(self._weights_)
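Stripped of the Gaussian-mixture containers, the weight bookkeeping in phdUpdate is one (1 - pd) block for missed detections plus one clutter-normalised block per observation. A compact sketch under assumed shapes, with likelihoods holding one row of per-component values per observation:

import numpy as np

def phd_weight_update(weights, pd, likelihoods, clutter_pdf):
    # weights, pd: (n,); likelihoods: (m, n); clutter_pdf: (m,)
    blocks = [weights * (1.0 - pd)]      # missed-detection terms
    detected = weights * pd
    for lik, kappa in zip(likelihoods, clutter_pdf):
        w = detected * lik
        w /= kappa + w.sum()             # normalise against clutter
        blocks.append(w)
    return np.concatenate(blocks)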
Example No. 5
 def phdUpdate(self, observation_set):
     # Container for slam parent update
     slam_info = PARAMETERS()
     num_observations = observation_set.shape[0]
     if num_observations:
         z_dim = observation_set.shape[1]
     else:
         z_dim = 0
     
     if not self.weights.shape[0]:
         self._states_ = self.states.copy()
         self._weights_ = self.weights.copy()
          return slam_info
     
     detection_probability = self.parameters.pd_fn.handle(self.states, 
                                         self.parameters.pd_fn.parameters)
     clutter_pdf = self.parameters.clutter_fn.handle(observation_set, 
                                     self.parameters.clutter_fn.parameters)
     # Account for missed detection
     self._states_ = self.states.copy()
     self._weights_ = [self.weights*(1-detection_probability)]
     
     # SLAM,  step 1:
     slam_info.exp_sum__pd_predwt = np.exp(-self.weights.sum())
     
     # Split x and P out from the combined state vector
     detected_indices = detection_probability > 0.1
     detected_states = self.states[detected_indices]
     x = detected_states.state
     P = detected_states.covariance
     # Scale the weights by detection probability 
     weights = self.weights[detected_indices]*detection_probability[detected_indices]
     
     # SLAM, prep for step 2:
     slam_info.sum__clutter_with_pd_updwt = np.zeros(num_observations)
     
     if x.shape[0]:
         # Part of the Kalman update is common to all observation-updates
         x, P, kalman_info = kf_update(x, P, 
                             np.array([self.parameters.obs_fn.parameters.H]), 
                             np.array([self.parameters.obs_fn.parameters.R]), 
                              None, INPLACE=True)
             
         # Container for the updated states
         new_gmstate = self.states.__class__(0)
         # Predicted observation from the current states
         pred_z = featuredetector.tf.relative(self.parameters.obs_fn.parameters.parent_state_xyz, 
                                              self.parameters.obs_fn.parameters.parent_state_rpy, 
                                              x)
         # We need to update the states and find the updated weights
         for (_observation_, obs_count) in zip(observation_set, 
                                               range(num_observations)):
             # Apply the Kalman update to get the new state - update in-place
             # and return the residuals
             new_x, residuals = kf_update_x(x, pred_z, 
                                         _observation_, kalman_info.kalman_gain,
                                         INPLACE=False)
             # Calculate the weight of the Gaussians for this observation
             # Calculate term in the exponent
             x_pdf = np.exp(-0.5*np.power(
                 blas.dgemv(kalman_info.inv_sqrt_S, residuals), 2).sum(axis=1))/ \
                 np.sqrt(kalman_info.det_S*(2*np.pi)**z_dim) 
             
             new_weight = weights*x_pdf
             # Normalise the weights
             normalisation_factor = clutter_pdf[obs_count] + new_weight.sum()
             new_weight /= normalisation_factor
             # SLAM, step 2:
             slam_info.sum__clutter_with_pd_updwt[obs_count] = \
                                                         normalisation_factor
             
             # Create new state with new_x and P to add to _states_
             new_gmstate.set(new_x, P)
             self._states_.append(new_gmstate)
             self._weights_ += [new_weight]
         
     else:
         slam_info.sum__clutter_with_pd_updwt = np.array(clutter_pdf)
         
     self._weights_ = np.concatenate(self._weights_)
     # SLAM, finalise:
     slam_info.likelihood = (slam_info.exp_sum__pd_predwt * 
                             slam_info.sum__clutter_with_pd_updwt.prod())
     return slam_info
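All five examples lean on kf_update_x to fold one observation into a batch of states. Its implementation is not shown here; a plausible NumPy reconstruction, inferred only from the call sites above (batched gain, optional in-place update), would be:

import numpy as np

def kf_update_x(states, pred_z, z, kalman_gain, INPLACE=True):
    # Residuals of the actual observation against each predicted one
    residuals = z - pred_z
    upd_states = states if INPLACE else states.copy()
    if kalman_gain.shape[0] == 1:
        # Single shared gain, as in Examples No. 1 and 2
        upd_states += residuals @ kalman_gain[0].T
    else:
        # One gain per state
        upd_states += np.einsum('nij,nj->ni', kalman_gain, residuals)
    return upd_states, residuals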