import numpy as np
from scipy.linalg import cho_solve


def log_gaussian_pdf(x, mu=None, Sigma=None, is_cholesky=False, compute_grad=False):
    """Log-density of a multivariate Gaussian N(mu, Sigma) evaluated at x.

    If is_cholesky is True, Sigma is taken to be the lower Cholesky factor L
    of the covariance. If compute_grad is True, the gradient of the
    log-density with respect to x is returned instead of the value.
    """
    if mu is None:
        mu = np.zeros(len(x))
    if Sigma is None:
        Sigma = np.eye(len(mu))

    assert len(x) == Sigma.shape[0]
    assert len(x) == Sigma.shape[1]
    assert len(x) == len(mu)

    if is_cholesky:
        L = Sigma
    else:
        L = np.linalg.cholesky(Sigma)

    # Solve y = Sigma^(-1)(x - mu) = L^(-T) L^(-1) (x - mu); equivalently via
    # two triangular solves: solve_triangular(L, x, lower=True), then
    # solve_triangular(L.T, ., lower=False).
    x = np.array(x - mu)
    y = cho_solve((L, True), x)

    if compute_grad:
        # Gradient of the log-density w.r.t. x: -Sigma^(-1)(x - mu)
        return -y

    # -0.5 * log|Sigma| = -sum(log(diag(L))), since |Sigma| = prod(diag(L))^2
    log_determinant_part = -np.sum(np.log(np.diag(L)))
    quadratic_part = -0.5 * x.dot(y)
    const_part = -0.5 * len(L) * np.log(2 * np.pi)
    return const_part + log_determinant_part + quadratic_part
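
# A minimal usage sketch (not part of the original source): cross-check the
# value against scipy.stats.multivariate_normal and the gradient against a
# direct linear solve. All names below are demo-only.
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
x_demo = rng.normal(size=3)
M = rng.normal(size=(3, 3))
Sigma_demo = M @ M.T + 3 * np.eye(3)  # well-conditioned SPD covariance

logp = log_gaussian_pdf(x_demo, np.zeros(3), Sigma_demo)
assert np.isclose(logp, multivariate_normal(mean=np.zeros(3), cov=Sigma_demo).logpdf(x_demo))

# Passing the Cholesky factor directly skips the refactorization.
L_demo = np.linalg.cholesky(Sigma_demo)
assert np.isclose(logp, log_gaussian_pdf(x_demo, np.zeros(3), L_demo, is_cholesky=True))

grad = log_gaussian_pdf(x_demo, np.zeros(3), Sigma_demo, compute_grad=True)
assert np.allclose(grad, -np.linalg.solve(Sigma_demo, x_demo))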
import numpy as np
from scipy.linalg import cho_factor, cho_solve


def non_negative_qpsolver(A, b, x_init, x_tol, check_tol=-1, epsilon_low=-1, epsilon_high=-1):
    """
    Minimizes (1/2) x.T A x - b.T x subject to x >= 0 by repeatedly solving
    the unconstrained problem on the set of not-yet-pruned indices and
    pruning the entries that come out negative.

    :param x_init: Initial value for the solution x
    :param x_tol: Smallest allowed non-zero value for x_opt; values below x_tol are set to zero
    :param check_tol: Tolerance for the stopping criterion; if negative, x_tol is used
    :param epsilon_high: Maximum value of x during optimization
    :param epsilon_low: Minimum value of x during optimization
    :return: x_opt, error
    """
    if epsilon_low < 0:
        epsilon_low = x_tol  # alternatively np.finfo(float).eps
    if epsilon_high < 0:
        epsilon_high = x_tol
    if check_tol < 0:
        check_tol = x_tol

    n = A.shape[0]
    # Optional regularization: A = A + 1e-6 * np.eye(n)
    max_iter = 50 * n
    itr = 0

    x_opt = np.reshape(x_init, (n, 1))
    N = x_opt > (1 - epsilon_high)  # similarity too close to 1 (nodes collapse)
    if np.any(N):
        x_opt = x_opt * N.astype(float)
        return x_opt[:, 0], 0

    # Tracks elements pruned based on negativity
    non_pruned_elements = np.ones((n, 1), dtype=bool)
    check = 1
    while (check > check_tol) and (itr < max_iter):
        non_pruned_elements = np.logical_and(x_opt > epsilon_low, non_pruned_elements)
        x_opt = np.zeros((n, 1))
        # Solve the unconstrained subproblem restricted to the surviving indices
        x_opt[non_pruned_elements] = cho_solve(
            cho_factor(A[non_pruned_elements[:, 0], :][:, non_pruned_elements[:, 0]]),
            b[non_pruned_elements[:, 0]])
        itr = itr + 1
        N = x_opt < epsilon_low
        if np.any(N):
            check = np.max(np.abs(x_opt[N]))
        else:
            check = 0

    x_opt[x_opt < x_tol] = 0
    return x_opt[:, 0], check
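
# A minimal usage sketch (not part of the original source). The
# "similarity close to 1" early exit suggests A is a kernel/similarity
# matrix, so this demo builds one from random points; that setting is an
# assumption, not something the original states.
rng = np.random.default_rng(1)
pts = rng.normal(size=(5, 2))
query = rng.normal(size=2)
A_demo = np.exp(-0.5 * np.sum((pts[:, None] - pts[None]) ** 2, axis=-1))  # Gaussian kernel, SPD
b_demo = np.exp(-0.5 * np.sum((pts - query) ** 2, axis=-1))

x_opt, err = non_negative_qpsolver(A_demo, b_demo, x_init=0.5 * np.ones(5), x_tol=1e-8)
assert np.all(x_opt >= 0)  # the returned solution respects the constraint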
# Method of the EKFSLAM class; assumes module-level imports
# `from typing import Tuple`, `import numpy as np`,
# `from scipy import linalg as la`, and a `utils` module providing wrapToPi.
def update(
        self, eta: np.ndarray, P: np.ndarray, z: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray]:
    """Update eta and P with z, associating landmarks and adding new ones.

    Parameters
    ----------
    eta : np.ndarray
        Predicted state mean: pose followed by 2D landmark positions.
    P : np.ndarray
        Predicted state covariance.
    z : np.ndarray, shape=(#detections, 2)
        Measurements, one (range, bearing) pair per detection.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray, float, np.ndarray]
        Updated state mean, updated covariance, the NIS, and the
        association vector a (-1 marks a new landmark).
    """
    numLmk = (eta.shape[0] - 3) // 2
    assert (eta.shape[0] - 3) % 2 == 0, "EKFSLAM.update: landmark length not even"

    if numLmk > 0:
        # Prediction and innovation covariance
        zpred = self.h(eta)
        H = self.H(eta)

        # Here you can use simply np.kron (a bit slow) to form the big
        # (very big in VP after a while) R, or be smart with indexing and
        # broadcasting (3d indexing into a 2d mat), realizing you are adding
        # the same R on all diagonal blocks.
        S = H @ P @ H.T
        idxs = np.arange(numLmk * 2).reshape(numLmk, 2)
        S[idxs[..., None], idxs[:, None]] += self.R[None]
        # zpred.shape * 2 == (2 * numLmk, 2 * numLmk)
        assert S.shape == zpred.shape * 2, "EKFSLAM.update: wrong shape on either S or zpred"
        z = z.ravel()  # 2D -> flat

        # Perform data association
        za, zpred, Ha, Sa, a = self.associate(z, zpred, H, S)

        # No association could be made, so skip update
        if za.shape[0] == 0:
            etaupd = eta
            Pupd = P
            NIS = 1  # beware this one when analysing consistency.
        else:
            # Create the associated innovation
            v = za.ravel() - zpred  # za: 2D -> flat
            v[1::2] = utils.wrapToPi(v[1::2])

            # Factor Sa once and reuse the factors for both the gain and
            # the NIS, see scipy.linalg.cho_factor and scipy.linalg.cho_solve.
            S_cho_factors = la.cho_factor(Sa)

            # Kalman gain W = P Ha^T Sa^(-1) via the Cholesky factors
            W = la.cho_solve(S_cho_factors, Ha @ P).T
            etaupd = eta + W @ v  # Kalman mean update

            # Kalman cov update: use Joseph form for stability
            jo = -W @ Ha
            jo[np.diag_indices(jo.shape[0])] += 1  # same as adding the identity matrix
            Pupd = jo @ P @ jo.T + W @ np.kron(
                np.eye(za.size // 2), self.R
            ) @ W.T  # this is the main workload on VP after speedups

            # Calculate NIS, reusing S_cho_factors
            NIS = v.T @ la.cho_solve(S_cho_factors, v)

            # When tested, remove for speed
            assert np.allclose(
                Pupd, Pupd.T), "EKFSLAM.update: Pupd not symmetric"
            assert np.all(np.linalg.eigvals(Pupd) > 0
                          ), "EKFSLAM.update: Pupd not positive definite"
    else:
        # All measurements are new landmarks
        a = np.full(z.shape[0], -1)
        z = z.flatten()
        NIS = 0  # beware this one; you can change the value to, for instance, 1
        etaupd = eta
        Pupd = P

    # Create new landmarks if any are available
    if self.do_asso:
        is_new_lmk = a == -1
        if np.any(is_new_lmk):
            z_new_inds = np.empty_like(z, dtype=bool)
            z_new_inds[::2] = is_new_lmk
            z_new_inds[1::2] = is_new_lmk
            z_new = z[z_new_inds]

            # Add new landmarks
            etaupd, Pupd = self.add_landmarks(etaupd, Pupd, z_new)

    assert np.allclose(Pupd, Pupd.T), "EKFSLAM.update: Pupd must be symmetric"
    assert np.all(
        np.linalg.eigvals(Pupd) >= 0), "EKFSLAM.update: Pupd must be PSD"

    return etaupd, Pupd, NIS, a
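
# A standalone sketch (not part of the original source) of the Joseph-form
# covariance update used above: Pupd = (I - W H) P (I - W H)^T + W R W^T.
# With the optimal gain it equals the short form (I - W H) P algebraically,
# but it stays symmetric (and PSD) under round-off, which is why update()
# prefers it. All matrices below are illustrative.
import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(2)
n_x, n_z = 5, 2
M = rng.normal(size=(n_x, n_x))
P = M @ M.T + np.eye(n_x)          # predicted covariance (SPD)
H = rng.normal(size=(n_z, n_x))    # measurement Jacobian
R = 0.1 * np.eye(n_z)              # measurement noise covariance

S = H @ P @ H.T + R                          # innovation covariance
W = la.cho_solve(la.cho_factor(S), H @ P).T  # Kalman gain P H^T S^(-1)

jo = np.eye(n_x) - W @ H
P_joseph = jo @ P @ jo.T + W @ R @ W.T
P_short = jo @ P                              # algebraically equal, less stable

assert np.allclose(P_joseph, P_joseph.T)      # symmetric by construction
assert np.allclose(P_joseph, P_short)         # matches the short form here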