def __init__(self, camera, first_pose=SE3.identity()):
    self.camera = camera
    """Camera model"""
    self.first_pose = first_pose
    """First pose"""
    self.keyframes = []
    """List of keyframes"""
    self.T_c_w = [first_pose]
    """List of camera poses"""

    self.motion_options = Options()
    """Optimizer parameters for motion estimation"""
    # Default optimizer parameters for motion estimation
    self.motion_options.allow_nondecreasing_steps = True
    self.motion_options.max_nondecreasing_steps = 5
    self.motion_options.min_cost_decrease = 0.99
    self.motion_options.max_iters = 30
    self.motion_options.num_threads = 1
    self.motion_options.linesearch_max_iters = 0

    self.pyrlevels = 4
    """Number of image pyramid levels for coarse-to-fine optimization"""
    self.pyrlevel_sequence = list(range(self.pyrlevels))
    self.pyrlevel_sequence.reverse()

    self.keyframe_trans_thresh = 3.0  # meters
    """Translational distance threshold to drop new keyframes"""
    self.keyframe_rot_thresh = 0.3  # rad
    """Rotational distance threshold to drop new keyframes"""

    self.intensity_stiffness = 1. / 0.01
    """Photometric measurement stiffness"""
    self.depth_stiffness = 1. / 0.01
    """Depth or disparity measurement stiffness"""
    self.min_grad = 0.1
    """Minimum image gradient magnitude to use a given pixel"""

    self.depth_map_type = 'depth'
    """Is the depth map depth, inverse depth, disparity? ['depth','disparity'] supported"""
    self.mode = 'map'
    """Create new keyframes or localize against existing ones? ['map'|'track']"""

    self.use_motion_model_guess = True
    """Use constant motion model for initial guess."""

    # self.loss = L2Loss()
    self.loss = HuberLoss(10.0)
    # self.loss = TukeyLoss(5.0)
    # self.loss = CauchyLoss(5.0)
    # self.loss = TDistributionLoss(5.0)  # Kerl et al. ICRA 2013
    # self.loss = TDistributionLoss(3.0)
    """Loss function"""

    self._make_pyramid_cameras()
def __init__(self, camera, first_pose=SE3.identity()):
    self.camera = camera
    """Camera model"""
    self.first_pose = first_pose
    """First pose"""
    self.keyframes = []
    """List of keyframes"""
    self.T_c_w = [first_pose]
    """List of camera poses"""

    self.motion_options = Options()
    """Optimizer parameters for motion estimation"""
    # Default optimizer parameters for motion estimation
    self.motion_options.allow_nondecreasing_steps = True
    self.motion_options.max_nondecreasing_steps = 5
    self.motion_options.min_cost_decrease = 0.99
    self.motion_options.max_iters = 30
    self.motion_options.num_threads = 1
    self.motion_options.linesearch_max_iters = 0

    self.keyframe_trans_thresh = 3.0  # meters
    """Translational distance threshold to drop new keyframes"""
    self.keyframe_rot_thresh = 0.3  # rad
    """Rotational distance threshold to drop new keyframes"""

    self.matcher_params = viso2.Matcher_parameters()
    """Parameters for libviso2 matcher"""
    self.matcher = viso2.Matcher(self.matcher_params)
    """libviso2 matcher"""
    self.matcher_mode = 0
    """libviso2 matching mode 0=flow 1=stereo 2=quad"""

    self.ransac = FrameToFrameRANSAC(self.camera)
    """RANSAC outlier rejection"""
    self.reprojection_stiffness = np.diag([1., 1., 1.])
    """Reprojection error stiffness matrix"""

    self.mode = 'map'
    """Create new keyframes or localize against existing ones? ['map'|'track']"""

    self.loss = L2Loss()
    """Loss function"""
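# A minimal, self-contained sketch (not part of either pipeline above) of how the
# keyframing thresholds set in __init__ are typically consumed: compare the relative
# motion since the last keyframe against keyframe_trans_thresh / keyframe_rot_thresh.
# needs_new_keyframe is a hypothetical helper written for illustration only.
import numpy as np
from liegroups import SE3

def needs_new_keyframe(T_c_w, T_kf_w, trans_thresh=3.0, rot_thresh=0.3):
    """True if the motion since keyframe pose T_kf_w exceeds either threshold."""
    xi = T_c_w.dot(T_kf_w.inv()).log()   # 6-vector: translational and rotational parts
    return (np.linalg.norm(xi[0:3]) > trans_thresh or
            np.linalg.norm(xi[3:6]) > rot_thresh)

# Example: 4 m of pure translation exceeds the default 3 m threshold.
print(needs_new_keyframe(SE3.exp(np.array([4., 0., 0., 0., 0., 0.])),
                         SE3.identity()))   # True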
T_cam0_imu.normalize()
T_0_w = T_cam0_imu.dot(SE3.from_matrix(dataset.oxts[0].T_w_imu).inv())
T_0_w.normalize()
T_1_w = T_cam0_imu.dot(SE3.from_matrix(dataset.oxts[1].T_w_imu).inv())
T_1_w.normalize()
T_1_0_true = T_1_w.dot(T_0_w.inv())

# params_init = {'T_1_0': T_1_0_true}
params_init = {'T_1_0': SE3.identity()}

# Scaling parameters
pyrlevels = [3, 2, 1]

params = params_init

options = Options()
options.allow_nondecreasing_steps = True
options.max_nondecreasing_steps = 3
options.min_cost_decrease = 0.99
# options.max_iters = 100
# options.print_iter_summary = True

for pyrlevel in pyrlevels:
    pyrfactor = 1. / 2**pyrlevel

    # Disparity computation parameters
    window_size = 5
    min_disp = 1
    max_disp = np.max([16, int(64 * pyrfactor)]) + min_disp

    # Use semi-global block matching
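    # A sketch, not the original matcher setup: the parameters above could feed
    # OpenCV's semi-global block matcher. cv2.StereoSGBM_create and its keyword
    # arguments are standard OpenCV API; im_left_pyr / im_right_pyr are hypothetical
    # pyramid images not defined here.
    import cv2
    stereo = cv2.StereoSGBM_create(
        minDisparity=min_disp,
        numDisparities=max_disp - min_disp,   # must be divisible by 16
        blockSize=window_size,
        P1=8 * window_size**2,
        P2=32 * window_size**2)
    # disparity = stereo.compute(im_left_pyr, im_right_pyr).astype(np.float32) / 16.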
residual2 = PoseToPoseResidual(T_3_2_obs, odom_stiffness)
residual2_params = ['T_2_0', 'T_3_0']

residual3 = PoseToPoseResidual(T_4_3_obs, odom_stiffness)
residual3_params = ['T_3_0', 'T_4_0']

residual4 = PoseToPoseResidual(T_5_4_obs, odom_stiffness)
residual4_params = ['T_4_0', 'T_5_0']

residual5 = PoseToPoseResidual(T_6_5_obs, odom_stiffness)
residual5_params = ['T_5_0', 'T_6_0']

residual6 = PoseToPoseResidual(T_6_2_obs, loop_stiffness)
residual6_params = ['T_2_0', 'T_6_0']

options = Options()
options.allow_nondecreasing_steps = True
options.max_nondecreasing_steps = 3

problem = Problem(options)
problem.add_residual_block(residual0, residual0_params)
problem.add_residual_block(residual1, residual1_params)
problem.add_residual_block(residual2, residual2_params)
problem.add_residual_block(residual3, residual3_params)
problem.add_residual_block(residual4, residual4_params)
problem.add_residual_block(residual5, residual5_params)
problem.add_residual_block(residual6, residual6_params)
# problem.set_parameters_constant(residual0_params)
# problem.set_parameters_constant(residual1_params)
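# A minimal sketch of finishing the setup and solving the relaxation. It assumes
# pyslam's Problem exposes initialize_params() and solve() (verify against the
# library version in use). The identity initial guesses are placeholders; the dict
# must cover every parameter registered above, including those of residual0 and
# residual1, whose definitions are not shown here.
params_init = {key: SE3.identity()
               for key in ['T_2_0', 'T_3_0', 'T_4_0', 'T_5_0', 'T_6_0']}
problem.initialize_params(params_init)
params_final = problem.solve()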
def options(self):
    options = Options()
    options.allow_nondecreasing_steps = True
    options.max_nondecreasing_steps = 3
    return options
import numpy as np
import sympy as sp
from scipy.linalg import block_diag
from liegroups import SE3


def error_ij():
    # define later in case of measurement-based model
    pass


def Q_i(Q_C, t_i, t_im1):
    # Per-interval covariance: a 2x2 block matrix built from Q_C.
    delta_t_i = t_i - t_im1
    return np.block([[(1 / 3) * delta_t_i**3 * Q_C, (1 / 2) * delta_t_i**2 * Q_C],
                     [(1 / 2) * delta_t_i**2 * Q_C, delta_t_i * Q_C]])


# TODO
"""
DONE:
    setup xi and t data containers
    defined the cost function structure
    come up with matrix Q_C
TODO:
    do some research on the gauss-newton algorithm
    define dataset xi above
    define w_bar dataset above (lin and rotational velocity)
"""


def Q_C(dim):
    return np.eye(dim)


def J(xi, w_bar, N, Q_C, t):
    # Fill up the error vector with all training data.
    e = []
    for i in range(N):
        e.append(error_i(xi[i], xi[i + 1], w_bar[i], w_bar[i + 1]))
    e = np.concatenate(e).reshape(-1, 1)

    # Create the block-diagonal Q matrix from the per-interval Q_i blocks and invert it.
    Q_blocks = []
    for i in range(N):
        Q_blocks.append(Q_i(Q_C(6), t[i + 1], t[i]))
    Q = block_diag(*Q_blocks)
    Q_inv = np.linalg.inv(Q)

    return (1 / 2) * np.matmul(np.matmul(np.transpose(e), Q_inv), e)


# system_to_solve
# Implementation of E (work in progress): F_k collects the Jacobian blocks of the
# error at timestep k. v_kp1, omega_kp1, t_kp1 and t_k still need to come from the
# dataset defined above.
def E():
    k = 0
    dim = 3
    T_kp1 = SE3.exp(xi[k + 1])
    T_k = SE3.exp(xi[k])
    tau_bar = (T_kp1.dot(T_k)).adjoint()
    J_rel = SE3.inv_left_jacobian(SE3.log(T_kp1.dot(T_k)))

    # 2x4 block layout; stored as a nested list because the blocks do not all share
    # the same dimensions yet.
    F_k = [[None] * 4 for _ in range(2)]
    F_k[0][0] = J_rel.dot(tau_bar)
    F_k[1][0] = (1 / 2) * w_bar(v_kp1, omega_kp1).dot(J_rel).dot(tau_bar)
    F_k[0][1] = (t_kp1 - t_k) * np.eye(dim)
    F_k[1][1] = np.eye(dim)
    F_k[0][2] = -J_rel
    F_k[1][2] = (-1 / 2) * SE3.vee(w_bar(v_kp1, omega_kp1)).dot(J_rel)
    F_k[0][3] = np.zeros((dim, dim))
    F_k[1][3] = -J_rel
    P = np.empty([2, 1])
    return F_k, P


"""
perturbation: epsilon*
initial pose: identity transform
per iteration we update: x_op <- x_op + delta_x_op
T_i =
"""
xi_init = np.array([0., 0., 0., 0., 0., 0.])
T_init = T_i = SE3.exp(xi_init)
v_init = np.array([0.5, 0.5, 0.5])
omega_init = np.array([0.0, 0.0, 0.0])
w_bar_init = w_bar(v_init, omega_init)
e_op_init = error_i(xi_init, xi[0], w_bar_init, w_bar[0])


def error_i(xi_i, xi_ip1, w_bar_i, w_bar_ip1):
    # TODO: define the per-interval error term
    pass


def delta_i(theta_i, psi_i):
    return np.array([theta_i, psi_i])


def epsilon():
    delta = []
    for i in range(N):
        delta.append(delta_i(theta[i], psi[i]))
    return delta


from pyslam.problem import Problem, Options

options = Options()
options.print_summary = True
problem = Problem(options)


def residuals(self, b):
    """Evaluate the residuals f(x, b) - y with the given parameters.

    Parameters
    ----------
    b : tuple, list or ndarray
        Values for the model parameters.

    Return
    ------
    out : ndarray
        Residual vector for the given model parameters.
    """
    x, y = self.xvals, self.yvals
    return self._numexpr(x, *b) - y


def jacobian(self, b):
    """Evaluate the model's Jacobian matrix with the given parameters.

    Parameters
    ----------
    b : tuple, list or ndarray
        Values for the model parameters.

    Return
    ------
    out : ndarray
        Evaluation of the model's Jacobian matrix in column-major order
        wrt the model parameters.
    """
    # Substitute parameters in partial derivatives
    subs = [pd.subs(zip(self._b, b)) for pd in self._pderivs]
    # Evaluate substituted partial derivatives for all x-values
    vals = [sp.lambdify(self._x, sub, "numpy")(self.xvals) for sub in subs]
    # Arrange values in column-major order
    return np.column_stack(vals)


def gauss_newton(sys, x0, tol=1e-10, maxits=256):
    """Gauss-Newton algorithm for solving nonlinear least squares problems.

    Parameters
    ----------
    sys : Dataset
        Class providing residuals() and jacobian() functions. The former
        should evaluate the residuals of a nonlinear system for a given
        set of parameters.
        The latter should evaluate the Jacobian matrix of said system
        for the same parameters.
    x0 : tuple, list or ndarray
        Initial guesses or starting estimates for the system.
    tol : float
        Tolerance threshold. The problem is considered solved when every
        component of the correction vector falls below this value.
        Defaults to 1e-10.
    maxits : int
        Maximum number of iterations of the algorithm to perform.
        Defaults to 256.

    Return
    ------
    sol : ndarray
        Resultant values.
    its : int
        Number of iterations performed.

    Note
    ----
    Uses numpy.linalg.pinv() in place of similar functions from scipy, both
    because it was found to be faster and to eliminate the extra dependency.
    """
    dx = np.ones(len(x0))   # Correction vector
    xn = np.array(x0)       # Approximation of solution

    i = 0
    while (i < maxits) and (dx[dx > tol].size > 0):
        # correction = pinv(jacobian) . residual vector
        dx = np.dot(np.linalg.pinv(sys.jacobian(xn)), -sys.residuals(xn))
        xn += dx  # x_{n + 1} = x_n + dx_n
        i += 1

    return xn, i
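# A small, self-contained usage example for gauss_newton() (illustrative only):
# ExpModel is a hand-rolled stand-in for the sympy-based class providing
# residuals() and jacobian() above, fitting y = b0 * exp(b1 * x) to sampled data.
import numpy as np

class ExpModel:
    def __init__(self, xvals, yvals):
        self.xvals, self.yvals = xvals, yvals

    def residuals(self, b):
        # f(x, b) - y for the model f = b0 * exp(b1 * x)
        return b[0] * np.exp(b[1] * self.xvals) - self.yvals

    def jacobian(self, b):
        # Columns are d(residual)/d(b0) and d(residual)/d(b1)
        return np.column_stack((np.exp(b[1] * self.xvals),
                                b[0] * self.xvals * np.exp(b[1] * self.xvals)))

xdata = np.linspace(0., 1., 20)
ydata = 2.5 * np.exp(1.3 * xdata)
sol, its = gauss_newton(ExpModel(xdata, ydata), [2., 1.])
print(sol, its)   # converges to approximately [2.5, 1.3]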