def h_imu(self, u):
    """
    Transforms the IMU measurements (gyro, acc) into a pre-integrated measurement.

    :param u: imu measurements, shape [k, 6]
    :return: pre-integrated measurement
    """
    delta_R_prev = torch.eye(3)
    delta_v_prev = torch.zeros(3)
    delta_p_prev = torch.zeros(3)
    self.J = torch.zeros(u.shape[0], 9, 8)
    for k in range(u.shape[0]):
        # Jacobian blocks: rows 0:3 rotation, 3:6 velocity, 6:9 position
        self.J[k, :3, :3] = delta_R_prev * self.delta_t
        self.J[k, 3:6, :3] = -delta_R_prev.mm(
            self.skew(u[k, 3:])) * self.delta_t
        self.J[k, 3:6, 3:6] = delta_R_prev * self.delta_t
        self.J[k, 6:9, :3] = -1 / 2 * delta_R_prev.mm(
            self.skew(u[k, 3:])) * (self.delta_t**2)
        self.J[k, 6:9, 3:6] = 1 / 2 * delta_R_prev * (self.delta_t**2)

        delta_R = delta_R_prev.mm(
            SO3.exp(u[k, :3] * self.delta_t).as_matrix())
        delta_v = delta_v_prev + delta_R.mv(u[k, 3:] * self.delta_t)
        delta_p = delta_p_prev + delta_v * self.delta_t + delta_R.mv(
            u[k, 3:] * self.delta_t) * (self.delta_t**2) / 2

        delta_R_prev = SO3.from_matrix(delta_R, normalize=True).as_matrix()
        delta_v_prev = delta_v
        delta_p_prev = delta_p
    return torch.cat((SO3.from_matrix(delta_R).log(), delta_v, delta_p), 0)

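# Hedged standalone sketch of the textbook IMU pre-integration recursion that
# h_imu implements, run on synthetic data. dt, gyro and acc below are made-up
# illustrative values, not taken from the class above; only torch and
# liegroups.torch are assumed to be available.
import torch
from liegroups.torch import SO3

dt = 0.01
gyro = 0.1 * torch.randn(100, 3)                                  # angular rates [rad/s]
acc = torch.tensor([0., 0., 9.81]) + 0.01 * torch.randn(100, 3)   # accelerations [m/s^2]

delta_R = torch.eye(3)
delta_v = torch.zeros(3)
delta_p = torch.zeros(3)
for k in range(gyro.shape[0]):
    delta_R = delta_R.mm(SO3.exp(gyro[k] * dt).as_matrix())
    delta_v = delta_v + delta_R.mv(acc[k]) * dt
    delta_p = delta_p + delta_v * dt + delta_R.mv(acc[k]) * dt ** 2 / 2
# stack into the same 9D layout (rotation log, velocity, position) as h_imu
z = torch.cat((SO3.from_matrix(delta_R, normalize=True).log(), delta_v, delta_p), 0)
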
def h_hat(self, u_odo):
    """Predicted pre-integrated increments (rotation, velocity, position)
    between the previous state and the current state, built from odometry."""
    def odo2speed(u):
        v = 1 / 2 * (u[0] + u[1])
        return torch.Tensor([
            v * torch.cos(self.x_prev[5]),
            v * torch.sin(self.x_prev[5]), 0
        ])

    # initial speed
    v0 = odo2speed(u_odo[0])
    # end speed
    v_end = odo2speed(u_odo[1])

    R0 = SO3.from_rpy(self.x_prev[3:6]).as_matrix()
    Rend = SO3.from_rpy(self.x[3:6]).as_matrix()
    p0 = self.x_prev[:3]
    p_end = self.x[:3]

    delta_R = SO3.from_matrix(R0.t().mm(Rend)).log()
    delta_v = R0.t().mv(v_end - v0 - self.g * self.Delta_t)
    delta_p = R0.t().mv(p_end - p0 - v0 * self.Delta_t -
                        1 / 2 * self.g * (self.Delta_t**2))
    return torch.cat((delta_R, delta_v, delta_p), 0)

def se3_to_SE3(self, f2f_x, f2f_r):
    batch_size, seq_size, _ = f2f_x.shape
    f2g_q = torch.zeros((batch_size, seq_size, 4),
                        dtype=f2f_x.dtype, device=f2f_x.device)
    f2g_x = torch.zeros((batch_size, seq_size, 3),
                        dtype=f2f_x.dtype, device=f2f_x.device)
    for b in range(batch_size):
        R_prev = torch.zeros((3, 3), dtype=f2f_x.dtype, device=f2f_x.device)
        R_prev[:] = torch.eye(3, dtype=f2f_x.dtype, device=f2f_x.device)
        t_prev = torch.zeros((3), dtype=f2f_x.dtype, device=f2f_x.device)
        for s in range(0, seq_size):
            t_cur = f2f_x[b, s]
            # q_cur = spatial.euler_to_rotation_matrix(f2f_r[b, s])
            w_cur = f2f_r[b, s]
            R_cur = SO3.exp(w_cur).as_matrix()
            # spatial.quaternion_to_rotation_matrix(q_cur)
            if not torch.isclose(torch.det(R_cur),
                                 torch.FloatTensor([1.]).to(self.device)).all():
                raise ValueError("Det error:\nR\n{}\nq:\n{}".format(R_cur, w_cur))
            t_prev = torch.matmul(R_prev, t_cur) + t_prev
            R_prev = torch.matmul(R_prev, R_cur)
            if not torch.isclose(torch.det(R_prev),
                                 torch.FloatTensor([1.]).to(self.device)).all():
                raise ValueError("Det error:\nR\n{}".format(R_prev))
            f2g_q[b, s] = SO3.from_matrix(R_prev, normalize=True).to_quaternion()
            f2g_x[b, s] = t_prev
    return f2g_x, f2g_q

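# Hedged usage sketch of the same accumulation for a single sequence: chain
# per-frame rotation vectors and translations into a global pose. The tensors
# f2f_r_seq / f2f_x_seq are illustrative placeholders, not outputs of the class.
import torch
from liegroups.torch import SO3

seq_size = 10
f2f_r_seq = 0.05 * torch.randn(seq_size, 3)   # frame-to-frame rotation vectors
f2f_x_seq = torch.randn(seq_size, 3)          # frame-to-frame translations

R_glob = torch.eye(3)
t_glob = torch.zeros(3)
for s in range(seq_size):
    t_glob = R_glob.mv(f2f_x_seq[s]) + t_glob
    R_glob = R_glob.mm(SO3.exp(f2f_r_seq[s]).as_matrix())
q_glob = SO3.from_matrix(R_glob, normalize=True).to_quaternion()
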
def test_perturb():
    C = SO3.exp(0.25 * np.pi * torch.ones(3))
    C_copy = copy.deepcopy(C)
    phi = torch.Tensor([0.1, 0.2, 0.3])
    C.perturb(phi)
    assert utils.allclose(C.as_matrix(),
                          (SO3.exp(phi).dot(C_copy)).as_matrix())

def test_left_jacobians_batch():
    phis = torch.Tensor([[0., 0., 0.],
                         [np.pi / 2, np.pi / 3, np.pi / 4]])
    left_jacobian = SO3.left_jacobian(phis)
    inv_left_jacobian = SO3.inv_left_jacobian(phis)
    assert utils.allclose(torch.bmm(left_jacobian, inv_left_jacobian),
                          torch.eye(3).unsqueeze_(dim=0).expand(2, 3, 3))

def correct(self, x, u_odo, u_fog, compute_G=False, full_cov=False):
    u_odo_fog = torch.cat((u_odo, u_fog), 1).unsqueeze(0)
    u_odo_fog.requires_grad = True
    Xnew = self.normalize(u_odo_fog)

    # take mean to speed up correction
    y_cor_nor, _ = self.gp_f.forward(Xnew, full_cov)

    # # sample corrections and take mean
    # N = 100
    # mean, cov = self.gp_f.forward(Xnew, full_cov=True)
    # y_cor_nor = torch.zeros(6)
    # dist = torch.distributions.MultivariateNormal(loc=mean, covariance_matrix=cov)
    # for i in range(N):
    #     y_cor_nor += 1 / N * dist.sample()

    y_cor = self.unnormalize(y_cor_nor.t(), var="y_odo_fog").squeeze()
    G_cor = self.correct_cov(u_odo_fog, y_cor, compute_G)
    u_odo_fog.requires_grad = False
    y_cor = y_cor.detach()
    y_cor[[3, 4]] = 0  # pitch and roll corrections are set to 0
    G_cor[[3, 4], :] = 0

    Rot = SO3.from_rpy(x[3:6]).as_matrix()
    # correct state
    dRot_cor = SO3.exp(y_cor[3:]).as_matrix()
    x[:3] = x[:3] + Rot.mv(SE3.exp(y_cor).as_matrix()[:3, 3])
    x[3:6] = SO3.from_matrix(Rot.mm(dRot_cor)).to_rpy()
    return x, G_cor

def test_identity_batch():
    C = SO3.identity(5)
    assert isinstance(C, SO3) \
        and C.mat.dim() == 3 \
        and C.mat.shape == (5, 3, 3)

    C_copy = SO3.identity(5, copy=True)
    assert isinstance(C_copy, SO3) \
        and C_copy.mat.dim() == 3 \
        and C_copy.mat.shape == (5, 3, 3)

def test_chordal_squared_loss_equality():
    print('Equality of quaternion and rotation matrix chordal loss...')
    C1 = SO3.exp(torch.randn(1000, 3, dtype=torch.double)).as_matrix()
    C2 = SO3.exp(torch.randn(1000, 3, dtype=torch.double)).as_matrix()
    q1 = rotmat_to_quat(C1)
    q2 = rotmat_to_quat(C2)
    assert allclose(rotmat_frob_squared_norm_loss(C1, C2),
                    quat_chordal_squared_loss(q1, q2))
    print('All passed.')

def test_normalize_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3],
                              [4, 5, 6],
                              [0, 0, 0]]))
    assert (SO3.is_valid_matrix(C.mat) == torch.ByteTensor([1, 1, 1])).all()

    C.mat.add_(0.1)
    assert (SO3.is_valid_matrix(C.mat) == torch.ByteTensor([0, 0, 0])).all()

    C.normalize(inds=[0, 2])
    assert (SO3.is_valid_matrix(C.mat) == torch.ByteTensor([1, 0, 1])).all()

    C.normalize()
    assert SO3.is_valid_matrix(C.mat).all()

def test_from_matrix():
    C_good = SO3.from_matrix(torch.eye(3))
    assert isinstance(C_good, SO3) \
        and C_good.mat.dim() == 2 \
        and C_good.mat.shape == (3, 3) \
        and SO3.is_valid_matrix(C_good.mat).all()

    C_bad = SO3.from_matrix(torch.eye(3).add_(1e-3), normalize=True)
    assert isinstance(C_bad, SO3) \
        and C_bad.mat.dim() == 2 \
        and C_bad.mat.shape == (3, 3) \
        and SO3.is_valid_matrix(C_bad.mat).all()

def test_left_jacobians():
    phi_small = torch.Tensor([0., 0., 0.])
    phi_big = torch.Tensor([np.pi / 2, np.pi / 3, np.pi / 4])

    left_jacobian_small = SO3.left_jacobian(phi_small)
    inv_left_jacobian_small = SO3.inv_left_jacobian(phi_small)
    assert utils.allclose(
        torch.mm(left_jacobian_small, inv_left_jacobian_small), torch.eye(3))

    left_jacobian_big = SO3.left_jacobian(phi_big)
    inv_left_jacobian_big = SO3.inv_left_jacobian(phi_big)
    assert utils.allclose(
        torch.mm(left_jacobian_big, inv_left_jacobian_big), torch.eye(3))

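# Hedged numerical sketch of what the left Jacobian encodes: for a small
# perturbation dphi, exp(phi + dphi) ~= exp(J_l(phi) dphi) * exp(phi).
# The test vectors and tolerance below are illustrative choices.
import numpy as np
import torch
from liegroups.torch import SO3

phi = torch.Tensor([np.pi / 2, np.pi / 3, np.pi / 4])
dphi = 1e-4 * torch.Tensor([1., -2., 0.5])

lhs = SO3.exp(phi + dphi).as_matrix()
rhs = SO3.exp(SO3.left_jacobian(phi).mv(dphi)).as_matrix().mm(
    SO3.exp(phi).as_matrix())
assert torch.allclose(lhs, rhs, atol=1e-6)
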
def test_dot_batch():
    C1 = SO3(torch.Tensor([[0, -1, 0],
                           [1, 0, 0],
                           [0, 0, 1]]).expand(5, 3, 3))
    C3 = SO3(torch.Tensor([[0, -1, 0],
                           [1, 0, 0],
                           [0, 0, 1]]))
    pt1 = torch.Tensor([1, 2, 3])
    pt2 = torch.Tensor([4, 5, 6])
    pt3 = torch.Tensor([7, 8, 9])
    pts = torch.cat([pt1.unsqueeze(dim=0),
                     pt2.unsqueeze(dim=0),
                     pt3.unsqueeze(dim=0)], dim=0)  # 3x3
    ptsbatch = pts.unsqueeze(dim=0).expand(5, 3, 3)

    C1C1 = torch.bmm(C1.mat, C1.mat)
    C1C1_SO3 = C1.dot(C1).mat
    assert C1C1_SO3.shape == C1.mat.shape and utils.allclose(C1C1_SO3, C1C1)

    C1C3 = torch.matmul(C1.mat, C3.mat)
    C1C3_SO3 = C1.dot(C3).mat
    assert C1C3_SO3.shape == C1.mat.shape and utils.allclose(C1C3_SO3, C1C3)

    C1pt1 = torch.matmul(C1.mat, pt1)
    C1pt1_SO3 = C1.dot(pt1)
    assert C1pt1_SO3.shape == (C1.mat.shape[0], pt1.shape[0]) \
        and utils.allclose(C1pt1_SO3, C1pt1)

    C1pt3 = torch.matmul(C1.mat, pt3)
    C1pt3_SO3 = C1.dot(pt3)
    assert C1pt3_SO3.shape == (C1.mat.shape[0], pt3.shape[0]) \
        and utils.allclose(C1pt3_SO3, C1pt3)

    C1pts = torch.matmul(C1.mat, pts.transpose(1, 0)).transpose(2, 1)
    C1pts_SO3 = C1.dot(pts)
    assert C1pts_SO3.shape == (C1.mat.shape[0], pts.shape[0], pts.shape[1]) \
        and utils.allclose(C1pts_SO3, C1pts) \
        and utils.allclose(C1pt1, C1pts[:, 0, :]) \
        and utils.allclose(C1pt3, C1pts[:, 2, :])

    C1ptsbatch = torch.bmm(C1.mat, ptsbatch.transpose(2, 1)).transpose(2, 1)
    C1ptsbatch_SO3 = C1.dot(ptsbatch)
    assert C1ptsbatch_SO3.shape == ptsbatch.shape \
        and utils.allclose(C1ptsbatch_SO3, C1ptsbatch) \
        and utils.allclose(C1pt1, C1ptsbatch[:, 0, :]) \
        and utils.allclose(C1pt3, C1ptsbatch[:, 2, :])

    C3ptsbatch = torch.matmul(C3.mat, ptsbatch.transpose(2, 1)).transpose(2, 1)
    C3ptsbatch_SO3 = C3.dot(ptsbatch)
    assert C3ptsbatch_SO3.shape == ptsbatch.shape \
        and utils.allclose(C3ptsbatch_SO3, C3ptsbatch) \
        and utils.allclose(C3.dot(pt1), C3ptsbatch[:, 0, :]) \
        and utils.allclose(C3.dot(pt3), C3ptsbatch[:, 2, :])

def test_perturb_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3],
                              [4, 5, 6]]))
    C_copy1 = copy.deepcopy(C)
    C_copy2 = copy.deepcopy(C)

    phi = torch.Tensor([0.1, 0.2, 0.3])
    C_copy1.perturb(phi)
    assert utils.allclose(C_copy1.as_matrix(),
                          (SO3.exp(phi).dot(C)).as_matrix())

    phis = torch.Tensor([[0.1, 0.2, 0.3],
                         [0.4, 0.5, 0.6]])
    C_copy2.perturb(phis)
    assert utils.allclose(C_copy2.as_matrix(),
                          (SO3.exp(phis).dot(C)).as_matrix())

def test_from_matrix_batch():
    C_good = SO3.from_matrix(torch.eye(3).repeat(5, 1, 1))
    assert isinstance(C_good, SO3) \
        and C_good.mat.dim() == 3 \
        and C_good.mat.shape == (5, 3, 3) \
        and SO3.is_valid_matrix(C_good.mat).all()

    C_bad = copy.deepcopy(C_good.mat)
    C_bad[3].add_(0.1)
    C_bad = SO3.from_matrix(C_bad, normalize=True)
    assert isinstance(C_bad, SO3) \
        and C_bad.mat.dim() == 3 \
        and C_bad.mat.shape == (5, 3, 3) \
        and SO3.is_valid_matrix(C_bad.mat).all()

def test_rot_angles():
    print('Rotation angles...')
    C1 = SO3.exp(torch.randn(100, 3, dtype=torch.double))
    C2 = SO3.exp(torch.randn(100, 3, dtype=torch.double))
    angles_1 = (C1.dot(C2.inv())).log().norm(dim=1) * (180. / np.pi)
    angles_2 = quat_angle_diff(rotmat_to_quat(C1.as_matrix()),
                               rotmat_to_quat(C2.as_matrix()),
                               units='deg', reduce=False)
    angles_3 = rotmat_angle_diff(C1.as_matrix(), C2.as_matrix(), reduce=False)
    assert allclose(angles_1, angles_2)
    assert allclose(angles_1, angles_3)
    print('All passed.')

def h_hat(self, u):
    """Batched IMU pre-integration of the measurements u, shape [batch, k, 6]."""
    delta_R_prev = torch.eye(3).repeat(u.shape[0], 1, 1)
    delta_v_prev = torch.zeros(3).repeat(u.shape[0], 1)
    delta_p_prev = torch.zeros(3).repeat(u.shape[0], 1)
    for k in range(u.shape[1]):
        delta_R = delta_R_prev.matmul(
            SO3.exp(u[:, k, :3] * self.delta_t).as_matrix())
        delta_v = delta_v_prev + bmv(delta_R, u[:, k, 3:]) * self.delta_t
        delta_p = delta_p_prev + delta_v * self.delta_t + bmv(
            delta_R, u[:, k, 3:] * self.delta_t) * (self.delta_t**2) / 2
        delta_R_prev = SO3.from_matrix(delta_R, normalize=True).as_matrix()
        delta_v_prev = delta_v
        delta_p_prev = delta_p
    return torch.cat((SO3.from_matrix(delta_R).log(), delta_v, delta_p), 1)

def test_180_quat():
    a = torch.randn(25, 3).to(torch.float64)
    a = a / a.norm(dim=1, keepdim=True)
    angle = 150 * (np.pi / 180.)
    aa = a * angle
    C = SO3.exp(aa).as_matrix()
    print(rotmat_to_quat(C))

def set_project_mat_from_KRT(self, K, R, T):
    """Set the projection matrix from K, R, T matrices.

    Args:
        K: (B, 3, 3)
        R: (B, 3, 3) as matrices, or (B, 4) as quaternions
        T: (B, 3)
    """
    if self.camera_mode not in ['projection']:
        raise ValueError('Only projection mode requires project mat (P)')
    bs = K.shape[0]
    if R.ndimension() == 3:
        Rot = R
    elif R.ndimension() == 2:
        if R.shape[1] == 4:  # quaternion
            try:
                from liegroups.torch import SO3
            except ImportError:
                raise ImportError(
                    "failed to 'from liegroups.torch import SO3'")
            import torch.nn.functional as F
            R = F.normalize(R, eps=1e-6)
            Rot = SO3.from_quaternion(R).mat
            if Rot.ndimension() == 2:
                # liegroups tends to squeeze the results
                Rot = Rot[None, :, :]
        else:
            raise RuntimeError(f"invalid R.shape = {R.shape}")
    else:
        raise RuntimeError(f"invalid R.ndimension() = {R.ndimension()}")
    P = torch.bmm(K, torch.cat((Rot, T.view(-1, 3, 1)), dim=2))
    self.transformer.P = P

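# Hedged sketch of the projection-matrix construction used above: P = K [R | T],
# with R recovered from a normalized quaternion via liegroups (assuming its
# default 'wxyz' ordering). K, quat, T and the test point X are made-up values.
import torch
import torch.nn.functional as F
from liegroups.torch import SO3

K = torch.eye(3).unsqueeze(0)                                     # (1, 3, 3) intrinsics
quat = F.normalize(torch.tensor([[1., 0.1, 0., 0.]]), eps=1e-6)   # (1, 4), wxyz
T = torch.tensor([[0., 0., 5.]])                                  # (1, 3)

Rot = SO3.from_quaternion(quat).mat
if Rot.ndimension() == 2:          # liegroups may squeeze the batch dimension
    Rot = Rot[None, :, :]
P = torch.bmm(K, torch.cat((Rot, T.view(-1, 3, 1)), dim=2))       # (1, 3, 4)

X = torch.tensor([[0.2, -0.1, 1.0, 1.0]])                         # homogeneous 3D point
x = torch.bmm(P, X.view(1, 4, 1)).squeeze(-1)
x = x[:, :2] / x[:, 2:3]                                          # pixel coordinates
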
def gen_sim_data_beachball(N_rotations, N_matches_per_rotation, sigma,
                           factors, dtype=torch.double):
    ## Simulation
    # Create a random rotation
    C = SO3_torch.exp(torch.randn(N_rotations, 3, dtype=dtype)).as_matrix()
    # Create two sets of vectors (normalized to unit l2 norm)
    x_1 = torch.randn(3, N_rotations * N_matches_per_rotation, dtype=dtype)
    x_1 = x_1 / x_1.norm(dim=0, keepdim=True)
    region_masks = [(x_1[0] < 0.) & (x_1[1] < 0.),
                    (x_1[0] >= 0.) & (x_1[1] < 0.),
                    (x_1[0] < 0.) & (x_1[1] >= 0.),
                    (x_1[0] >= 0.) & (x_1[1] >= 0.)]
    noise = torch.zeros_like(x_1)
    for r_i, region in enumerate(region_masks):
        noise[:, region] = factors[r_i] * sigma * torch.randn_like(
            noise[:, region])
    x_1 = x_1.view(3, N_rotations, N_matches_per_rotation).transpose(0, 1)
    noise = noise.view(3, N_rotations, N_matches_per_rotation).transpose(0, 1)
    # Rotate and add noise
    x_2 = C.bmm(x_1) + noise
    return C, x_1, x_2

def gen_sim_data_fast(N_rotations, N_matches_per_rotation, sigma,
                      max_rotation_angle=None, dtype=torch.double):
    ## Simulation
    # Create a random rotation
    axis = torch.randn(N_rotations, 3, dtype=dtype)
    axis = axis / axis.norm(dim=1, keepdim=True)
    if max_rotation_angle:
        max_angle = max_rotation_angle * np.pi / 180.
    else:
        max_angle = np.pi
    angle = max_angle * torch.rand(N_rotations, 1, dtype=dtype)
    C = SO3_torch.exp(angle * axis).as_matrix()
    if N_rotations == 1:
        C = C.unsqueeze(dim=0)
    # Create two sets of vectors (normalized to unit l2 norm)
    x_1 = torch.randn(N_rotations, 3, N_matches_per_rotation, dtype=dtype)
    x_1 = x_1 / x_1.norm(dim=1, keepdim=True)
    # Rotate and add noise
    noise = sigma * torch.randn_like(x_1)
    x_2 = C.bmm(x_1) + noise
    return C, x_1, x_2

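# Hedged usage sketch: recover each simulated rotation from the generated
# correspondences with an SVD-based (Kabsch/Wahba) fit and compare against the
# ground-truth C. This assumes only the generator above, torch, and numpy.
import numpy as np
import torch
from liegroups.torch import SO3 as SO3_torch

C, x_1, x_2 = gen_sim_data_fast(N_rotations=4, N_matches_per_rotation=50,
                                sigma=0.01, max_rotation_angle=90.)
W = torch.bmm(x_2, x_1.transpose(1, 2))                    # (N, 3, 3) cross-covariances
U, _, V = torch.svd(W)
S = torch.eye(3, dtype=W.dtype).repeat(W.shape[0], 1, 1)
S[:, 2, 2] = torch.det(torch.bmm(U, V.transpose(1, 2)))    # enforce det(C_est) = +1
C_est = torch.bmm(U, torch.bmm(S, V.transpose(1, 2)))
angular_error_deg = SO3_torch.from_matrix(
    torch.bmm(C_est.transpose(1, 2), C),
    normalize=True).log().norm(dim=1) * 180. / np.pi
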
def test_rotz_batch():
    C_got = SO3.rotz(torch.Tensor([np.pi / 2, np.pi]))
    C_expected = torch.cat([
        torch.Tensor([[0, -1, 0],
                      [1, 0, 0],
                      [0, 0, 1]]).unsqueeze_(dim=0),
        torch.Tensor([[-1, 0, 0],
                      [0, -1, 0],
                      [0, 0, 1]]).unsqueeze_(dim=0)
    ], dim=0)
    assert utils.allclose(C_got.mat, C_expected)

def backward(self, grad_output):
    # Analytic gradient of the SO(3) exponential map w.r.t. the rotation vector phi.
    phi, R = self.saved_tensors
    grad = grad_output.new_empty((3, 3, 3))
    e_0 = grad_output.new_tensor([1, 0, 0]).view(3, 1)
    e_1 = grad_output.new_tensor([0, 1, 0]).view(3, 1)
    e_2 = grad_output.new_tensor([0, 0, 1]).view(3, 1)
    I = grad_output.new_empty((3, 3))
    torch.nn.init.eye_(I)
    if phi.norm() < 1e-8:
        # first-order approximation near the identity
        grad[0, :, :] = SO3.wedge(e_0)
        grad[1, :, :] = SO3.wedge(e_1)
        grad[2, :, :] = SO3.wedge(e_2)
    else:
        fact = 1. / (phi.norm()**2)
        phi_wedge = SO3.wedge(phi)
        ImR = I - R
        grad[0, :, :] = fact * (phi[0] * phi_wedge +
                                SO3.wedge(phi_wedge.mm(ImR.mm(e_0)))).mm(R)
        grad[1, :, :] = fact * (phi[1] * phi_wedge +
                                SO3.wedge(phi_wedge.mm(ImR.mm(e_1)))).mm(R)
        grad[2, :, :] = fact * (phi[2] * phi_wedge +
                                SO3.wedge(phi_wedge.mm(ImR.mm(e_2)))).mm(R)
    out = (grad_output * grad).sum((1, 2)).view(3, 1)
    return out

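# Hedged finite-difference check of the gradient formula implemented above
# (the compact derivative of a 3-D rotation in exponential coordinates):
# dR/dphi_i = (phi_i [phi]x + [[phi]x (I - R) e_i]x) / |phi|^2 * R.
# The test vector phi, step size and tolerance are illustrative choices.
import torch
from liegroups.torch import SO3

phi = torch.tensor([0.3, -0.2, 0.5], dtype=torch.double)
R = SO3.exp(phi).as_matrix()
I = torch.eye(3, dtype=torch.double)
eps = 1e-5
for i in range(3):
    e_i = I[:, i]
    v = SO3.wedge(phi).mv((I - R).mv(e_i))
    analytic = ((phi[i] * SO3.wedge(phi) + SO3.wedge(v)) / phi.norm() ** 2).mm(R)
    d = torch.zeros(3, dtype=torch.double)
    d[i] = eps
    numeric = (SO3.exp(phi + d).as_matrix() - SO3.exp(phi - d).as_matrix()) / (2 * eps)
    assert torch.allclose(analytic, numeric, atol=1e-8)
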
def test_dot():
    C = SO3(torch.Tensor([[0, -1, 0],
                          [1, 0, 0],
                          [0, 0, 1]]))
    pt = torch.Tensor([1, 2, 3])

    CC = C.mat.mm(C.mat)
    assert utils.allclose(C.dot(C).mat, CC)

    Cpt = C.mat.matmul(pt)
    assert utils.allclose(C.dot(pt), Cpt)

def process_ground_turth(self, gts):
    T_global = []
    v_global = []
    for gt in gts:
        t = gt[0:3]
        R = gt[3:12].reshape(3, 3)
        T = torch.eye(4)
        T[:3, 3] = t
        T[:3, :3] = R
        T_global.append(T)
        v = gt[12:]
        v_global.append(v)

    # frame-to-frame increments
    state_f2f = []
    for combi in self.combinations:
        T_i = T_global[combi[0]]
        T_i_inv = inv_SE3(T_i)
        T_ip1 = T_global[combi[1]]
        T_i_ip1 = torch.matmul(T_i_inv, T_ip1)
        dx = T_i_ip1[:3, 3].contiguous()
        dq = SO3.from_matrix(T_i_ip1[:3, :3], normalize=False).log()
        # rotation_matrix_exp_to_log(T_i_ip1[:3, :3].unsqueeze(0).contiguous()).squeeze()
        if torch.isnan(dq).any() or torch.isinf(dq).any():
            raise ValueError("gt-f2f:\n{}".format(dq))
        # dq = quaternion_exp_to_log(dq).squeeze()
        state_f2f.append(torch.cat([dx, dq]))

    # frame-to-global increments
    T_0 = T_global[0]
    T_0_inv = inv_SE3(T_0)
    state_f2g = []
    for combi in self.combinations:
        T_ip1 = T_global[combi[1]]
        T_i_ip1 = torch.matmul(T_0_inv, T_ip1)
        dx = T_i_ip1[:3, 3].contiguous()
        dq = SO3.from_matrix(T_i_ip1[:3, :3]).to_quaternion()
        state_f2g.append(torch.cat([dx, dq]))

    gt_f2f = torch.stack(state_f2f).to(self.device, non_blocking=True)
    gt_f2g = torch.stack(state_f2g).to(self.device, non_blocking=True)
    return gt_f2f, gt_f2g

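# Hedged cross-check sketch of the relative-pose computation above, using
# liegroups SE3 in place of the inv_SE3 helper; T_a and T_b are made-up poses.
import torch
from liegroups.torch import SE3, SO3

T_a = SE3.exp(torch.Tensor([0.1, 0.2, 0.3, 0.01, 0.02, 0.03])).as_matrix()
T_b = SE3.exp(torch.Tensor([0.2, 0.1, 0.0, 0.02, 0.00, 0.01])).as_matrix()

T_ab = SE3.from_matrix(T_a).inv().dot(SE3.from_matrix(T_b)).as_matrix()
dx = T_ab[:3, 3]                                              # relative translation
dq_log = SO3.from_matrix(T_ab[:3, :3], normalize=True).log()  # rotation vector
dq_quat = SO3.from_matrix(T_ab[:3, :3], normalize=True).to_quaternion()
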
def forward(self, phi):
    # Rodrigues' formula: R = cos(a) I + (1 - cos(a)) n n^T + sin(a) [n]x,
    # with n = phi / |phi| and a = |phi|.
    angle = phi.norm()
    I = phi.new_empty((3, 3))
    torch.nn.init.eye_(I)
    if angle < 1e-8:
        # first-order approximation near the identity
        R = I + SO3.wedge(phi)
        self.save_for_backward(phi, R)
        return R
    axis = phi / angle
    s = torch.sin(angle)
    c = torch.cos(angle)
    outer_prod_axis = axis.view(3, 1).mm(axis.view(1, 3))
    R = c * I + (1. - c) * outer_prod_axis + s * SO3.wedge(axis)
    self.save_for_backward(phi, R)
    return R

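# Hedged sanity-check sketch: the closed-form Rodrigues construction above
# should match liegroups' own exponential map for a generic rotation vector phi.
import torch
from liegroups.torch import SO3

phi = torch.Tensor([0.3, -0.2, 0.5])
angle = phi.norm()
axis = phi / angle
R_rodrigues = (torch.cos(angle) * torch.eye(3)
               + (1. - torch.cos(angle)) * axis.view(3, 1).mm(axis.view(1, 3))
               + torch.sin(angle) * SO3.wedge(axis))
assert torch.allclose(R_rodrigues, SO3.exp(phi).as_matrix(), atol=1e-6)
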
def test_rotmat_quat_large_conversions():
    print('Large (angle=pi) rotation matrix to quaternion conversions...')
    axis = torch.randn(100, 3, dtype=torch.double)
    axis = axis / axis.norm(dim=1, keepdim=True)
    angle = np.pi
    C1 = SO3.exp(angle * axis).as_matrix()
    C2_new = quat_to_rotmat(rotmat_to_quat(C1))
    assert allclose(C1, C2_new)
    print('All passed.')

def integrate_odo_fog(self, u_odo, u_fog, delta_t):
    if self.nclt:
        v = 1 / 2 * (u_odo[:, 0] + u_odo[:, 1])
    else:
        v, _ = self.encoder2speed(u_odo, delta_t)
    xi = u_odo.new_zeros(u_odo.shape[0], 6)
    xi[:, 0] = v * delta_t
    xi[:, 5] = u_fog.squeeze()
    Rot = SO3.from_rpy(xi[:, 3:]).as_matrix()
    p = xi[:, :3]
    return Rot, p

def propagate(self, u_odo, u_fog, delta_t, compare):
    self.x_prev = self.x
    for i in range(u_odo.shape[0]):
        self.integrate_odo_fog(u_odo[i], u_fog[i], delta_t)
        if self.gp_odo_fog:
            self.x, G_cor = self.gp_odo_fog.correct(
                self.x, u_odo, u_fog, compute_G=(compare == 'filter'))
        else:
            G_cor = torch.zeros(u_odo.shape[0], 15, 9)
        if compare == 'filter':
            self.propagate_cov(u_odo[i], u_fog[i], delta_t, G_cor)
        else:
            self.x[3:6] = SO3.from_rpy(self.x[3:6]).to_rpy()

def compute_mate(self, t, x, chi, dataset_name):
    """Mean absolute trajectory error (translation and rotation parts)."""
    chi_est = torch.zeros(x.shape[0], 4, 4)
    chi_est[:, :3, :3] = SO3.from_rpy(x[:, 3:6]).as_matrix()
    chi_est[:, :3, 3] = x[:, :3]
    chi_est[:, 3, 3] = 1
    chi_est = SE3.from_matrix(chi_est)
    chi = SE3.from_matrix(chi)
    error = (chi.inv().dot(chi_est)).log()
    mate_translation = error[:, :3].abs().mean()
    mate_rotation = error[:, 3:].abs().mean()
    return mate_translation, mate_rotation

def compute_jac_update(self, u_odo):
    J = self.J
    H = self.H
    Rot_prev = SO3.from_rpy(self.x_prev[3:6]).as_matrix()
    Rot_new = SO3.from_rpy(self.x[3:6]).as_matrix()

    # speed is not in state
    J[0, 3:6, 6:] = -Rot_prev.t()[:3, :2]
    J[-1, 3:6, 6:] = -J[0, 3:6, 6:]
    J[0, 6:9, 6:] = J[0, 6:9, 6:] * self.Delta_t

    v = torch.Tensor([1 / 2 * (u_odo[0][0] + u_odo[0][1]), 0, 0])
    H[:3, 3:6] = -Rot_prev.t().mm(Rot_new)
    H[:3, 9:12] = -Rot_prev.t()
    H[3:6, 9:12] = -Rot_prev.t().mm(
        self.skew(self.x[:3] - self.x_prev[:3] - v * self.Delta_t -
                  1 / 2 * self.g * self.Delta_t**2))
    H[6:9, :3] = Rot_prev.t()
    H[6:9, 12:15] = -Rot_prev.t()
    H[6:9, 9:12] = self.skew(self.x[:3] - self.x_prev[:3] - v * self.Delta_t -
                             1 / 2 * self.g * self.Delta_t**2)
    return H, J
