def g500_kf_update(weights, states, covs, obs_matrix, obs_noise, z):
    """Kalman update for a bank of particles sharing a common covariance."""
    # The covariance is identical for all particles, so update it once and
    # tile the result
    upd_cov0, kalman_info = kf_update_cov(np.array([covs[0]]), obs_matrix,
                                          obs_noise, False)
    upd_covs = np.repeat(upd_cov0, covs.shape[0], axis=0)
    # Update the states
    pred_z = blas.dgemv(obs_matrix, states)
    upd_states, residuals = kf_update_x(states, pred_z, z,
                                        kalman_info.kalman_gain)
    # Evaluate the new weights from the Gaussian likelihood of the residuals
    x_pdf = np.exp(-0.5*np.power(
        blas.dgemv(kalman_info.inv_sqrt_S, residuals), 2).sum(axis=1)) / \
        np.sqrt(kalman_info.det_S*(2*np.pi)**z.shape[0])
    upd_weights = weights * x_pdf
    upd_weights /= upd_weights.sum()
    return upd_weights, upd_states, upd_covs
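# A minimal numpy-only sketch of the weight update performed above, assuming
# blas.dgemv broadcasts a single matrix over an array of vectors and that
# inv_sqrt_S is a square root of the inverse innovation covariance. The
# _demo_* names are illustrative only, not part of the module API.
def _demo_particle_weight_update(weights, residuals, inv_sqrt_S, det_S):
    # Gaussian likelihood of each particle's innovation:
    # N(r; 0, S) = exp(-0.5 r^T S^-1 r) / sqrt(det(S) (2 pi)^d)
    z_dim = residuals.shape[1]
    scaled = residuals.dot(inv_sqrt_S.T)
    x_pdf = (np.exp(-0.5*(scaled**2).sum(axis=1)) /
             np.sqrt(det_S*(2*np.pi)**z_dim))
    upd_weights = weights*x_pdf
    return upd_weights/upd_weights.sum()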
def kf_predict(state, covariance, F, Q, B=None, u=None):
    """Kalman prediction step: x' = F x (+ B u), P' = F P F^T + Q."""
    pred_state = blas.dgemv(F, state)
    if (B is not None) and (u is not None):
        # Add the control contribution in-place: pred_state += B u
        blas.dgemv(B, u, y=pred_state)
    # Repeat Q n times and accumulate F P F^T on top of it to obtain the
    # predicted covariance
    pred_cov = np.repeat(np.array([Q]), state.shape[0], 0)
    blas.dgemm(F, blas.dgemm(covariance, F, TRANSPOSE_B=True), C=pred_cov)
    return pred_state, pred_cov
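# Hedged numpy-only illustration of the textbook prediction equations that
# kf_predict applies to each state in the batch: x' = F x (+ B u) and
# P' = F P F^T + Q, here for a single state. _demo_* names are illustrative.
def _demo_kf_predict_single(x, P, F, Q, B=None, u=None):
    pred_x = F.dot(x)
    if B is not None and u is not None:
        pred_x = pred_x + B.dot(u)
    pred_P = F.dot(P).dot(F.T) + Q
    return pred_x, pred_P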
def kf_update_x(x, pred_z, z, kalman_gain, INPLACE=True):
    """Kalman state update given a precomputed gain and predicted observation."""
    assert len(z.shape) == 1, ("z must be a single observation, "
                               "not an array of observations")
    upd_state = x if INPLACE else x.copy()
    residuals = z - pred_z
    # Update the state in-place: upd_state += K * residuals
    blas.dgemv(kalman_gain, residuals, y=upd_state)
    return upd_state, residuals
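# Single-state sketch of the correction applied by kf_update_x, assuming the
# standard Kalman form x_upd = x + K (z - z_pred). Plain numpy; illustrative
# names only.
def _demo_kf_correct(x, pred_z, z, K):
    residual = z - pred_z
    return x + K.dot(residual), residual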
def relative(vehicle_NED, vehicle_RPY, features_NED):
    """Transform feature positions from the world (NED) frame to the
    vehicle-relative frame."""
    if not features_NED.shape[0]:
        return np.empty(0)
    relative_position = features_NED - vehicle_NED
    rot_matrix = np.array([rotation_matrix(-vehicle_RPY)])
    relative_position = blas.dgemv(rot_matrix, relative_position)
    return relative_position
def absolute(vehicle_NED, vehicle_RPY, features_NED):
    """Transform vehicle-relative feature positions back to the world (NED)
    frame."""
    if not features_NED.shape[0]:
        return np.empty(0)
    rot_matrix = np.array([rotation_matrix(vehicle_RPY)])
    absolute_position = blas.dgemv(rot_matrix, features_NED) + vehicle_NED
    return absolute_position
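# Sanity-check sketch for the two frame transforms above: mapping features
# into the vehicle frame with relative() and back with absolute() should
# recover the original NED positions. This assumes, as the paired use of
# rotation_matrix(-rpy) and rotation_matrix(rpy) implies, that the two
# rotations are inverses of each other. _demo_* names are illustrative.
def _demo_frame_round_trip(vehicle_NED, vehicle_RPY, features_NED):
    rel = relative(vehicle_NED, vehicle_RPY, features_NED)
    back = absolute(vehicle_NED, vehicle_RPY, rel)
    assert np.allclose(back, features_NED)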
def predict(self, ctrl_input, predict_to_time):
    USE_KF = True
    if self.last_odo_predict_time == 0:
        self.last_odo_predict_time = predict_to_time
        return
    delta_t = predict_to_time - self.last_odo_predict_time
    self.last_odo_predict_time = predict_to_time
    if delta_t < 0:
        # Negative delta_t: the message arrived out of order, ignore it
        return
    trans_mat, sc_process_noise = self.trans_matrices(ctrl_input, delta_t)
    pred_states = blas.dgemv(trans_mat, self.states)
    nparticles = self.parameters.state_parameters["nparticles"]
    ndims = self.parameters.state_parameters["ndims"]
    if not USE_KF:
        # Pure particle prediction: sample process noise for each particle
        pred_states += np.random.multivariate_normal(
            mean=np.zeros(ndims, dtype=float),
            cov=sc_process_noise, size=(nparticles))
    self.states = pred_states
    self.covariances = kf_predict_cov(self.covariances, trans_mat,
                                      sc_process_noise)
    states_xyz = np.array(pred_states[:, 0:3])
    # Calculate the rotation matrix to store for the map update
    rot_mat = featuredetector.tf.rotation_matrix(ctrl_input)
    # Copy the predicted states to the "parent state" attribute and
    # perform a prediction for the map
    for i in range(nparticles):
        obs_fn = self.maps[i].parameters.obs_fn
        obs_fn.parameters.parent_state_xyz = states_xyz[i]
        obs_fn.parameters.parent_state_rpy = ctrl_input
        obs_fn.H = rot_mat
        pd_fn = self.maps[i].parameters.pd_fn
        pd_fn.parameters.parent_state_xyz = states_xyz[i]
        pd_fn.parameters.parent_state_rpy = ctrl_input
        birth_fn = self.maps[i].parameters.birth_fn
        birth_fn.parameters.parent_state_xyz = states_xyz[i]
        birth_fn.parameters.parent_state_rpy = ctrl_input
def kf_update(state, covariance, H, R, z=None, INPLACE=True):
    """Full Kalman update: covariance update plus an optional state update."""
    assert z is None or len(z.shape) == 1, ("z must be a single observation, "
                                            "not an array of observations")
    upd_state = state if INPLACE else state.copy()
    # Update the covariance and generate the Kalman gain, innovation
    # covariance, etc.
    upd_covariance, kalman_info = kf_update_cov(covariance, H, R, INPLACE)
    # Predicted observation from the current state
    pred_z = blas.dgemv(H, state)
    if z is not None:
        upd_state, residuals = kf_update_x(upd_state, pred_z, z,
                                           kalman_info.kalman_gain,
                                           INPLACE=True)
    else:
        residuals = np.empty(0)
    kalman_info.pred_z = pred_z
    kalman_info.residuals = residuals
    return upd_state, upd_covariance, kalman_info
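# Hedged single-Gaussian illustration of the update kf_update performs,
# written with plain numpy rather than the batched blas wrappers. Assumes
# the usual innovation form S = H P H^T + R and gain K = P H^T S^-1.
# _demo_* names are illustrative only.
def _demo_kf_update_single(x, P, H, R, z):
    S = H.dot(P).dot(H.T) + R
    K = P.dot(H.T).dot(np.linalg.inv(S))
    pred_z = H.dot(x)
    upd_x = x + K.dot(z - pred_z)
    upd_P = P - K.dot(H).dot(P)
    return upd_x, upd_P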
def phdUpdate(self, observation_set):
    num_observations = observation_set.shape[0]
    z_dim = observation_set.shape[1] if num_observations else 0
    if not self.weights.shape[0]:
        self._states_ = self.states.copy()
        self._weights_ = self.weights.copy()
        return
    detection_probability = self.parameters.pd_fn.handle(
        self.states, self.parameters.pd_fn.parameters)
    clutter_pdf = self.parameters.clutter_fn.handle(
        observation_set, self.parameters.clutter_fn.parameters)
    # Account for missed detections
    self._states_ = self.states.copy()
    self._weights_ = [self.weights*(1 - detection_probability)]
    # Split x and P out from the combined state vector
    detected_indices = detection_probability > 0
    detected_states = self.states[detected_indices]
    x = detected_states.state
    P = detected_states.covariance
    # Scale the weights by detection probability
    weights = (self.weights[detected_indices] *
               detection_probability[detected_indices])
    if x.shape[0]:
        # The covariance part of the Kalman update is common to all
        # observations, so perform it once
        x, P, kalman_info = kalmanfilter.kf_update(
            x, P, np.array([self.parameters.obs_fn.parameters.H]),
            np.array([self.parameters.obs_fn.parameters.R]), None,
            INPLACE=True)
        # Container for the updated states
        new_gmstate = self.states.__class__(0)
        # Update the states and find the updated weights for each observation
        for obs_count, _observation_ in enumerate(observation_set):
            # Apply the Kalman update to get the new state - returns the
            # residuals as well
            new_x, residuals = kalmanfilter.kf_update_x(
                x, kalman_info.pred_z, _observation_,
                kalman_info.kalman_gain, INPLACE=False)
            # Gaussian likelihood of the residuals for this observation
            x_pdf = np.exp(-0.5*np.power(
                blas.dgemv(kalman_info.inv_sqrt_S, residuals),
                2).sum(axis=1)) / \
                np.sqrt(kalman_info.det_S*(2*np.pi)**z_dim)
            new_weight = weights*x_pdf
            # Normalise the weights
            normalisation_factor = clutter_pdf[obs_count] + new_weight.sum()
            new_weight /= normalisation_factor
            # Create a new state with new_x and P to add to _states_
            new_gmstate.set(new_x, P)
            self._states_.append(new_gmstate)
            self._weights_ += [new_weight]
    self._weights_ = np.concatenate(self._weights_)
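# Sketch of the per-observation weight update inside the loop above: each
# updated component weight is the prior weight times the observation
# likelihood, normalised by the clutter density at z plus the sum over
# components. Plain numpy; _demo_* names are illustrative only.
def _demo_phd_weight_update(weights, likelihoods, clutter_z):
    new_w = weights*likelihoods
    return new_w/(clutter_z + new_w.sum())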
def phdUpdate(self, observation_set):
    # Container for the SLAM parent update
    slam_info = PARAMETERS()
    num_observations = observation_set.shape[0]
    z_dim = observation_set.shape[1] if num_observations else 0
    if not self.weights.shape[0]:
        self._states_ = self.states.copy()
        self._weights_ = self.weights.copy()
        return
    detection_probability = self.parameters.pd_fn.handle(
        self.states, self.parameters.pd_fn.parameters)
    clutter_pdf = self.parameters.clutter_fn.handle(
        observation_set, self.parameters.clutter_fn.parameters)
    # Account for missed detections
    self._states_ = self.states.copy()
    self._weights_ = [self.weights*(1 - detection_probability)]
    # SLAM, step 1: exponential of the negated sum of predicted weights
    slam_info.exp_sum__pd_predwt = np.exp(-self.weights.sum())
    # Split x and P out from the combined state vector
    detected_indices = detection_probability > 0.1
    detected_states = self.states[detected_indices]
    x = detected_states.state
    P = detected_states.covariance
    # Scale the weights by detection probability
    weights = (self.weights[detected_indices] *
               detection_probability[detected_indices])
    # SLAM, prep for step 2:
    slam_info.sum__clutter_with_pd_updwt = np.zeros(num_observations)
    if x.shape[0]:
        # The covariance part of the Kalman update is common to all
        # observations, so perform it once
        x, P, kalman_info = kf_update(
            x, P, np.array([self.parameters.obs_fn.parameters.H]),
            np.array([self.parameters.obs_fn.parameters.R]), None,
            INPLACE=True)
        # Container for the updated states
        new_gmstate = self.states.__class__(0)
        # Predicted observation from the current states
        pred_z = featuredetector.tf.relative(
            self.parameters.obs_fn.parameters.parent_state_xyz,
            self.parameters.obs_fn.parameters.parent_state_rpy, x)
        # Update the states and find the updated weights for each observation
        for obs_count, _observation_ in enumerate(observation_set):
            # Apply the Kalman update to get the new state - returns the
            # residuals as well
            new_x, residuals = kf_update_x(x, pred_z, _observation_,
                                           kalman_info.kalman_gain,
                                           INPLACE=False)
            # Gaussian likelihood of the residuals for this observation
            x_pdf = np.exp(-0.5*np.power(
                blas.dgemv(kalman_info.inv_sqrt_S, residuals),
                2).sum(axis=1)) / \
                np.sqrt(kalman_info.det_S*(2*np.pi)**z_dim)
            new_weight = weights*x_pdf
            # Normalise the weights
            normalisation_factor = clutter_pdf[obs_count] + new_weight.sum()
            new_weight /= normalisation_factor
            # SLAM, step 2: store the per-observation normalisation factor
            slam_info.sum__clutter_with_pd_updwt[obs_count] = \
                normalisation_factor
            # Create a new state with new_x and P to add to _states_
            new_gmstate.set(new_x, P)
            self._states_.append(new_gmstate)
            self._weights_ += [new_weight]
    else:
        slam_info.sum__clutter_with_pd_updwt = np.array(clutter_pdf)
    self._weights_ = np.concatenate(self._weights_)
    # SLAM, finalise: overall measurement likelihood for this particle
    slam_info.likelihood = (slam_info.exp_sum__pd_predwt *
                            slam_info.sum__clutter_with_pd_updwt.prod())
    return slam_info
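# Numpy sketch of the particle likelihood assembled by the SLAM bookkeeping
# above: exp(-sum of predicted map weights) times the product of the
# per-observation normalisation factors (clutter density plus summed updated
# weights), as computed in slam_info.likelihood. Illustrative names only.
def _demo_slam_particle_likelihood(pred_weights, normalisation_factors):
    return np.exp(-pred_weights.sum())*np.prod(normalisation_factors)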