def test_perturb():
    C = SO3.exp(0.25 * np.pi * torch.ones(3))
    C_copy = copy.deepcopy(C)
    phi = torch.Tensor([0.1, 0.2, 0.3])
    C.perturb(phi)
    assert utils.allclose(C.as_matrix(),
                          (SO3.exp(phi).dot(C_copy)).as_matrix())

def test_chordal_squared_loss_equality():
    print('Equality of quaternion and rotation matrix chordal loss...')
    C1 = SO3.exp(torch.randn(1000, 3, dtype=torch.double)).as_matrix()
    C2 = SO3.exp(torch.randn(1000, 3, dtype=torch.double)).as_matrix()
    q1 = rotmat_to_quat(C1)
    q2 = rotmat_to_quat(C2)
    assert allclose(rotmat_frob_squared_norm_loss(C1, C2),
                    quat_chordal_squared_loss(q1, q2))
    print('All passed.')

def test_rot_angles():
    print('Rotation angles...')
    C1 = SO3.exp(torch.randn(100, 3, dtype=torch.double))
    C2 = SO3.exp(torch.randn(100, 3, dtype=torch.double))
    angles_1 = (C1.dot(C2.inv())).log().norm(dim=1) * (180. / np.pi)
    angles_2 = quat_angle_diff(rotmat_to_quat(C1.as_matrix()),
                               rotmat_to_quat(C2.as_matrix()),
                               units='deg', reduce=False)
    angles_3 = rotmat_angle_diff(C1.as_matrix(), C2.as_matrix(), reduce=False)
    assert allclose(angles_1, angles_2)
    assert allclose(angles_1, angles_3)
    print('All passed.')

def test_perturb_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3], [4, 5, 6]]))
    C_copy1 = copy.deepcopy(C)
    C_copy2 = copy.deepcopy(C)

    phi = torch.Tensor([0.1, 0.2, 0.3])
    C_copy1.perturb(phi)
    assert utils.allclose(C_copy1.as_matrix(),
                          (SO3.exp(phi).dot(C)).as_matrix())

    phis = torch.Tensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
    C_copy2.perturb(phis)
    assert utils.allclose(C_copy2.as_matrix(),
                          (SO3.exp(phis).dot(C)).as_matrix())

def se3_to_SE3(self, f2f_x, f2f_r):
    batch_size, seq_size, _ = f2f_x.shape
    f2g_q = torch.zeros((batch_size, seq_size, 4),
                        dtype=f2f_x.dtype, device=f2f_x.device)
    f2g_x = torch.zeros((batch_size, seq_size, 3),
                        dtype=f2f_x.dtype, device=f2f_x.device)
    for b in range(batch_size):
        # start each sequence from the identity pose
        R_prev = torch.eye(3, dtype=f2f_x.dtype, device=f2f_x.device)
        t_prev = torch.zeros(3, dtype=f2f_x.dtype, device=f2f_x.device)
        for s in range(seq_size):
            t_cur = f2f_x[b, s]
            # q_cur = spatial.euler_to_rotation_matrix(f2f_r[b, s])
            w_cur = f2f_r[b, s]
            R_cur = SO3.exp(w_cur).as_matrix()  # spatial.quaternion_to_rotation_matrix(q_cur)
            if not torch.isclose(torch.det(R_cur),
                                 torch.FloatTensor([1.]).to(self.device)).all():
                raise ValueError("Det error:\nR\n{}\nq:\n{}".format(R_cur, w_cur))

            # accumulate the frame-to-frame increment into the frame-to-global pose
            t_prev = torch.matmul(R_prev, t_cur) + t_prev
            R_prev = torch.matmul(R_prev, R_cur)
            if not torch.isclose(torch.det(R_prev),
                                 torch.FloatTensor([1.]).to(self.device)).all():
                raise ValueError("Det error:\nR\n{}".format(R_prev))

            f2g_q[b, s] = SO3.from_matrix(R_prev, normalize=True).to_quaternion()
            f2g_x[b, s] = t_prev
    return f2g_x, f2g_q

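# Illustrative sketch (not part of the class above): the same frame-to-frame
# composition rule applied to a single, unbatched sequence. The import path for
# SO3 is an assumption; the code above only shows that SO3.exp() and
# .as_matrix() are available in scope.
import torch
from liegroups.torch import SO3  # assumed source of the SO3 used above


def compose_f2f_single(f2f_t, f2f_w):
    """f2f_t, f2f_w: [S, 3] frame-to-frame translations and rotation vectors."""
    R_g = torch.eye(3, dtype=f2f_t.dtype)
    t_g = torch.zeros(3, dtype=f2f_t.dtype)
    global_poses = []
    for s in range(f2f_t.shape[0]):
        # T_g <- T_g * [R(w_s) | t_s]
        t_g = R_g @ f2f_t[s] + t_g
        R_g = R_g @ SO3.exp(f2f_w[s]).as_matrix()
        global_poses.append((R_g, t_g))
    return global_poses
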
def gen_sim_data_beachball(N_rotations, N_matches_per_rotation, sigma, factors,
                           dtype=torch.double):
    ## Simulation
    # Create random rotations
    C = SO3_torch.exp(torch.randn(N_rotations, 3, dtype=dtype)).as_matrix()

    # Create two sets of vectors (normalized to unit l2 norm)
    x_1 = torch.randn(3, N_rotations * N_matches_per_rotation, dtype=dtype)
    x_1 = x_1 / x_1.norm(dim=0, keepdim=True)

    # Partition the unit sphere into four 'beachball' regions, each with its own noise scale
    region_masks = [
        (x_1[0] < 0.) & (x_1[1] < 0.),
        (x_1[0] >= 0.) & (x_1[1] < 0.),
        (x_1[0] < 0.) & (x_1[1] >= 0.),
        (x_1[0] >= 0.) & (x_1[1] >= 0.)
    ]
    noise = torch.zeros_like(x_1)
    for r_i, region in enumerate(region_masks):
        noise[:, region] = factors[r_i] * sigma * torch.randn_like(noise[:, region])

    x_1 = x_1.view(3, N_rotations, N_matches_per_rotation).transpose(0, 1)
    noise = noise.view(3, N_rotations, N_matches_per_rotation).transpose(0, 1)

    # Rotate and add noise
    x_2 = C.bmm(x_1) + noise

    return C, x_1, x_2

def gen_sim_data_fast(N_rotations, N_matches_per_rotation, sigma,
                      max_rotation_angle=None, dtype=torch.double):
    ## Simulation
    # Create a random rotation
    axis = torch.randn(N_rotations, 3, dtype=dtype)
    axis = axis / axis.norm(dim=1, keepdim=True)
    if max_rotation_angle:
        max_angle = max_rotation_angle * np.pi / 180.
    else:
        max_angle = np.pi
    angle = max_angle * torch.rand(N_rotations, 1)
    C = SO3_torch.exp(angle * axis).as_matrix()
    if N_rotations == 1:
        C = C.unsqueeze(dim=0)

    # Create two sets of vectors (normalized to unit l2 norm)
    x_1 = torch.randn(N_rotations, 3, N_matches_per_rotation, dtype=dtype)
    x_1 = x_1 / x_1.norm(dim=1, keepdim=True)

    # Rotate and add noise
    noise = sigma * torch.randn_like(x_1)
    x_2 = C.bmm(x_1) + noise

    return C, x_1, x_2

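# Usage sketch for gen_sim_data_fast (assumes the function above and torch are
# in scope): generate a batch of noisy correspondence sets and check that x_2
# agrees with C @ x_1 up to the injected noise level.
C_demo, x1_demo, x2_demo = gen_sim_data_fast(N_rotations=32,
                                             N_matches_per_rotation=100,
                                             sigma=0.01,
                                             max_rotation_angle=120.)
# C_demo: [32, 3, 3], x1_demo / x2_demo: [32, 3, 100]
residual = (x2_demo - C_demo.bmm(x1_demo)).norm(dim=1)
print(C_demo.shape, x1_demo.shape, x2_demo.shape, residual.mean().item())
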
def test_180_quat():
    a = torch.randn(25, 3).to(torch.float64)
    a = a / a.norm(dim=1, keepdim=True)
    angle = 150. * (np.pi / 180.)
    aa = a * angle
    C = SO3.exp(aa).as_matrix()
    print(rotmat_to_quat(C))

def h_imu(self, u):
    """
    Transforms the imu measurements (gyro, acc) into a pre-integrated measurement
    :param u: imu measurements, shape [k, 6]
    :return: pre-integrated measurement
    """
    delta_R_prev = torch.eye(3)
    delta_v_prev = torch.zeros(3)
    delta_p_prev = torch.zeros(3)
    self.J = torch.zeros(u.shape[0], 9, 8)
    for k in range(u.shape[0]):
        # Jacobian blocks: rows 0:3 rotation, 3:6 velocity, 6:9 position
        self.J[k, :3, :3] = delta_R_prev * self.delta_t
        self.J[k, 3:6, :3] = -delta_R_prev.mm(self.skew(u[k, 3:])) * self.delta_t
        self.J[k, 3:6, 3:6] = delta_R_prev * self.delta_t
        # NOTE: the original code assigned the block below to J[k, 3:6, :3] a
        # second time, overwriting the velocity block above; the position block
        # J[k, 6:9, :3] appears to be what was intended (it matches the
        # delta_t**2 / 2 factor of the position rows).
        self.J[k, 6:9, :3] = -1 / 2 * delta_R_prev.mm(self.skew(u[k, 3:])) * (self.delta_t**2)
        self.J[k, 6:9, 3:6] = 1 / 2 * delta_R_prev * (self.delta_t**2)

        delta_R = delta_R_prev.mm(SO3.exp(u[k, :3] * self.delta_t).as_matrix())
        delta_v = delta_v_prev + delta_R.mv(u[k, 3:] * self.delta_t)
        delta_p = (delta_p_prev + delta_v * self.delta_t
                   + delta_R.mv(u[k, 3:] * self.delta_t) * (self.delta_t**2) / 2)

        delta_R_prev = SO3.from_matrix(delta_R, normalize=True).as_matrix()
        delta_v_prev = delta_v
        delta_p_prev = delta_p
    return torch.cat((SO3.from_matrix(delta_R).log(), delta_v, delta_p), 0)

def correct(self, x, u_odo, u_fog, compute_G=False, full_cov=False):
    u_odo_fog = torch.cat((u_odo, u_fog), 1).unsqueeze(0)
    u_odo_fog.requires_grad = True
    Xnew = self.normalize(u_odo_fog)

    # take mean to speed up correction
    y_cor_nor, _ = self.gp_f.forward(Xnew, full_cov)

    # # sample corrections and take mean
    # N = 100
    # mean, cov = self.gp_f.forward(Xnew, full_cov=True)
    # y_cor_nor = torch.zeros(6)
    # dist = torch.distributions.MultivariateNormal(loc=mean, covariance_matrix=cov)
    # for i in range(N):
    #     y_cor_nor += 1/N * dist.sample()

    y_cor = self.unnormalize(y_cor_nor.t(), var="y_odo_fog").squeeze()
    G_cor = self.correct_cov(u_odo_fog, y_cor, compute_G)
    u_odo_fog.requires_grad = False
    y_cor = y_cor.detach()

    # pitch and roll corrections are set to 0
    y_cor[[3, 4]] = 0
    G_cor[[3, 4], :] = 0

    Rot = SO3.from_rpy(x[3:6]).as_matrix()

    # correct state
    dRot_cor = SO3.exp(y_cor[3:]).as_matrix()
    x[:3] = x[:3] + Rot.mv(SE3.exp(y_cor).as_matrix()[:3, 3])
    x[3:6] = SO3.from_matrix(Rot.mm(dRot_cor)).to_rpy()
    return x, G_cor

def test_rotmat_quat_large_conversions():
    print('Large (angle=pi) rotation matrix to quaternion conversions...')
    axis = torch.randn(100, 3, dtype=torch.double)
    axis = axis / axis.norm(dim=1, keepdim=True)
    angle = np.pi
    C1 = SO3.exp(angle * axis).as_matrix()
    C2_new = quat_to_rotmat(rotmat_to_quat(C1))
    assert allclose(C1, C2_new)
    print('All passed.')

def test_normalize_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3], [4, 5, 6], [0, 0, 0]]))
    assert (SO3.is_valid_matrix(C.mat) == torch.ByteTensor([1, 1, 1])).all()

    C.mat.add_(0.1)
    assert (SO3.is_valid_matrix(C.mat) == torch.ByteTensor([0, 0, 0])).all()

    C.normalize(inds=[0, 2])
    assert (SO3.is_valid_matrix(C.mat) == torch.ByteTensor([1, 0, 1])).all()

    C.normalize()
    assert SO3.is_valid_matrix(C.mat).all()

def h_hat(self, u):
    """Pre-integrates batched imu measurements u of shape [batch, k, 6]."""
    delta_R_prev = torch.eye(3).repeat(u.shape[0], 1, 1)
    delta_v_prev = torch.zeros(3).repeat(u.shape[0], 1)
    delta_p_prev = torch.zeros(3).repeat(u.shape[0], 1)
    for k in range(u.shape[1]):
        delta_R = delta_R_prev.matmul(SO3.exp(u[:, k, :3] * self.delta_t).as_matrix())
        delta_v = delta_v_prev + bmv(delta_R, u[:, k, 3:]) * self.delta_t
        delta_p = (delta_p_prev + delta_v * self.delta_t
                   + bmv(delta_R, u[:, k, 3:] * self.delta_t) * (self.delta_t**2) / 2)
        delta_R_prev = SO3.from_matrix(delta_R, normalize=True).as_matrix()
        delta_v_prev = delta_v
        delta_p_prev = delta_p
    return torch.cat((SO3.from_matrix(delta_R).log(), delta_v, delta_p), 1)

def gen_sim_data(N_rotations, N_matches_per_rotation, sigma,
                 angle_limits=[0., 180.], dtype=torch.double):
    ## Simulation
    # Create a random rotation
    axis = torch.randn(N_rotations, 3, dtype=dtype)
    axis = axis / axis.norm(dim=1, keepdim=True)
    fac = np.pi / 180.
    angle = fac * (angle_limits[1] - angle_limits[0]) * torch.rand(N_rotations, 1) \
            + fac * angle_limits[0]
    C = SO3_torch.exp(angle * axis).as_matrix()
    if N_rotations == 1:
        C = C.unsqueeze(dim=0)

    # Create two sets of vectors (normalized to unit l2 norm)
    x_1 = torch.randn(N_rotations, 3, N_matches_per_rotation, dtype=dtype)
    x_1 = x_1 / x_1.norm(dim=1, keepdim=True)

    # Rotate and add noise
    noise = sigma * torch.randn_like(x_1)
    x_2 = C.bmm(x_1) + noise

    return C, x_1, x_2

def vec2Cov(cls, p):
    """
    Args:
        p [n x covParamNumber]: the first three entries are log-standard-deviations
            (xx, yy, zz); the next three are an SO(3) rotation vector.
    Returns:
        cov [n x 3 x 3]: full covariance R * diag(exp(2*p[:, :3])) * R^T
    """
    assert p.shape[1] == cls.covParamNumber
    N = p.shape[0]

    # Not sure whether this yields R or R^T w.r.t. the Sophus convention.
    R = SO3.exp(p[:, 3:6]).mat

    covf = torch.zeros((N, 3, 3))
    # on-diagonal terms (variances)
    covf[:, 0, 0] = torch.exp(2 * p[:, 0])
    covf[:, 1, 1] = torch.exp(2 * p[:, 1])
    covf[:, 2, 2] = torch.exp(2 * p[:, 2])

    # rotate the diagonal covariance: R * covf * R^T
    output = torch.einsum("kip,kpl,kjl->kij", R, covf, R)
    return output

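# Hedged verification sketch of the parameterization used in vec2Cov above:
# Sigma = R diag(exp(2*p0), exp(2*p1), exp(2*p2)) R^T, so its eigenvalues are
# the exponentiated log-variances regardless of the rotation part. The SO3
# import path below is an assumption; the function above only shows that
# SO3.exp(...).mat is available in scope.
import torch
from liegroups.torch import SO3  # assumed import path

p = torch.tensor([[0.0, -1.0, 0.5, 0.1, 0.2, 0.3],
                  [0.3,  0.3, 0.3, 0.0, 0.0, 0.0]])
R = SO3.exp(p[:, 3:6]).mat                        # [2, 3, 3] rotation matrices
covf = torch.diag_embed(torch.exp(2 * p[:, :3]))  # [2, 3, 3] diagonal covariances
Sigma = torch.einsum("kip,kpl,kjl->kij", R, covf, R)
# eigenvalues of Sigma[0] ~ [exp(-2), exp(0), exp(1)], unchanged by the rotation
print(torch.linalg.eigvalsh(Sigma[0]))
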
def __getitem__(self, idx):
    # Select a random point cloud (or the idx-th one in test mode)
    if self.test_mode:
        pointcloud_id = idx
    else:
        pointcloud_id = torch.randint(len(self.file_list), (1,)).item()

    if self.data is None:
        pc1 = torch.from_numpy(np.array(self._load_file(self.file_list[pointcloud_id])))
    else:
        pc1 = self.data[pointcloud_id]

    # Matches the original code
    point_num = int(pc1.shape[0] / 2)

    # Sub-sample
    pc1 = pc1[:point_num]
    batch_num = self.rotations_per_batch
    pc1 = pc1.view(1, point_num, 3).expand(batch_num, point_num, 3).transpose(1, 2)  # batch*3*p_num

    C = SO3.exp(torch.randn(batch_num, 3, dtype=torch.double)).as_matrix()
    pc2 = torch.bmm(C, pc1)  # (batch*point_num)*3*1

    x = torch.empty(batch_num, 2, point_num, 3)
    x[:, 0, :, :] = pc1.transpose(1, 2)
    x[:, 1, :, :] = pc2.transpose(1, 2)

    if self.rotmat_targets:
        targets = C
    else:
        targets = rotmat_to_quat(C, ordering='xyzw')

    targets = targets.to(self.dtype)
    x = x.to(self.dtype)
    return (x, targets)

def test_exp_log():
    C_big = SO3.exp(0.25 * np.pi * torch.ones(3))
    assert utils.allclose(SO3.exp(SO3.log(C_big)).mat, C_big.mat)

    C_small = SO3.exp(torch.zeros(3))
    assert utils.allclose(SO3.exp(SO3.log(C_small)).mat, C_small.mat)

def test(self):
    writer = self.tensor_writer
    model = self.model
    batch_time = AverageMeter('Time', ':6.3f')
    inference_time = AverageMeter('Inf-Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(self.logger, len(self.test_dataloader),
                             [batch_time, inference_time, losses],
                             prefix='Test: ')
    seq_names = []
    last_seq = None
    curr_seq = None

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for idx, data in enumerate(self.test_dataloader):
            # check whether we have been asked to stop
            if not self.is_running:
                return 0

            # prepare data
            self.data_permuter(data)
            imgs = self.data_permuter.res_imgs
            normals = self.data_permuter.res_normals
            imus = self.data_permuter.res_imu
            gts_f2f = self.data_permuter.res_gt_f2f
            gts_f2g = self.data_permuter.res_gt_f2g
            gts_global = self.data_permuter.res_gt_global

            if torch.isnan(gts_f2f).any() or torch.isinf(gts_f2f).any():
                raise ValueError("gt-f2f:\n{}".format(gts_f2f))
            if torch.isnan(gts_f2g).any() or torch.isinf(gts_f2g).any():
                raise ValueError("gt-f2g:\n{}".format(gts_f2g))

            # prepare ground truth translational and rotational parts
            gt_f2f_t = gts_f2f[:, :, 0:3]
            gt_f2f_w = gts_f2f[:, :, 3:]
            gt_f2g_p = gts_f2g[:, :, 0:3]
            gt_f2g_q = gts_f2g[:, :, 3:7]

            # compute model predictions and loss
            start_inference = time.time()
            pred_f2f_t, pred_f2f_w = self.model([[imgs, normals], imus])
            inference_time.update(time.time() - start_inference)
            pred_f2g_p, pred_f2g_q = self.se3_to_SE3(pred_f2f_t, pred_f2f_w)
            loss = self.criterion(pred_f2f_t, pred_f2f_w, pred_f2g_p, pred_f2g_q,
                                  gt_f2f_t, gt_f2f_w, gt_f2g_p, gt_f2g_q)

            # measure accuracy and record loss
            losses.update(loss.detach().item(), len(pred_f2f_t))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            batch_size = len(data['metas'])
            # get meta information for saving the odom. results
            for b in range(batch_size):
                meta = data['metas'][b]
                date, drive = meta['date'][0], meta['drive'][0]
                velo_ts = meta['velo-timestamps']
                gt_global = data['gts'][b].cpu().numpy()  # gts_global[0].cpu().numpy()

                seq_name = "{}_{}".format(date, drive)
                if seq_name not in seq_names:
                    if last_seq is not None:
                        last_seq.write_to_file()
                    curr_seq = OdomSeqRes(date, drive, output_dir=self.out_dir)
                    T_glob = np.identity(4)
                    T_glob[:3, 3] = gt_global[0, 0:3]  # t
                    T_glob[:3, :3] = gt_global[0, 3:12].reshape(3, 3)  # R
                    curr_seq.add_local_prediction(velo_ts[0], 0., T_glob, T_glob)
                    # add the file name and file-pointer to the list
                    seq_names.append(seq_name)
                    losses.reset()

                # global ground truth pose
                T_glob = np.identity(4)
                T_glob[:3, 3] = gt_global[1, 0:3]  # t
                T_glob[:3, :3] = gt_global[1, 3:12].reshape(3, 3)  # R

                gt_t = gt_f2f_t[b].detach().cpu().squeeze()
                gt_w = gt_f2f_w[b].detach().cpu().squeeze()
                pred_f2f_t_b = pred_f2f_t[b].detach().cpu().squeeze()
                pred_f2f_w_b = pred_f2f_w[b].detach().cpu().squeeze()
                # if self.has_imu and not np.all(data['valids']):
                #     pred_f2f_t_b = gt_t
                #     pred_f2f_w_b = gt_w

                T_local = np.identity(4)
                if self.args.param == 'xq':
                    T_local[:3, 3] = pred_f2f_t_b.numpy()
                    T_local[:3, :3] = SO3.exp(pred_f2f_w_b).as_matrix().numpy()
                    # spatial.quaternion_to_rotation_matrix(pred_f2f_r).numpy()
                elif self.args.param == 'x':
                    T_local[:3, 3] = pred_f2f_t_b.numpy()
                    T_local[:3, :3] = SO3.exp(gt_w).as_matrix().numpy()
                    # spatial.quaternion_to_rotation_matrix(gt_q).numpy()
                elif self.args.param == 'q':
                    T_local[:3, 3] = gt_t.numpy()
                    T_local[:3, :3] = SO3.exp(pred_f2f_w_b).as_matrix().numpy()
                else:
                    T_local[:3, 3] = gt_t.numpy()
                    T_local[:3, :3] = spatial.quaternion_to_rotation_matrix(gt_w).numpy()

                curr_seq.add_local_prediction(velo_ts[1], losses.avg, T_local, T_glob)

            last_seq = curr_seq

            if idx % self.args.print_freq == 0:
                progress.display(idx)
                # update tensorboard
                step_val = idx
                self.tensor_writer.add_scalar("Loss test", losses.avg, step_val)
                self.tensor_writer.flush()

        if curr_seq is not None:
            curr_seq.write_to_file()

def test_normalize():
    C = SO3.exp(0.25 * np.pi * torch.ones(3))
    C.mat.add_(0.1)
    C.normalize()
    assert SO3.is_valid_matrix(C.mat).all()

def test_inv_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3], [4, 5, 6]]))
    assert utils.allclose(C.dot(C.inv()).mat,
                          SO3.identity(C.mat.shape[0]).mat)

def test_inv():
    C = SO3.exp(0.25 * np.pi * torch.ones(3))
    assert utils.allclose(C.dot(C.inv()).mat, SO3.identity().mat)

def test_adjoint():
    C = SO3.exp(0.25 * np.pi * torch.ones(3))
    assert (C.adjoint() == C.mat).all()

def test_rotmat_quat_conversions():
    print('Rotation matrix to quaternion conversions...')
    C1 = SO3.exp(torch.randn(100, 3, dtype=torch.double)).as_matrix()
    C2 = quat_to_rotmat(rotmat_to_quat(C1))
    assert allclose(C1, C2)
    print('All passed.')

def test_adjoint_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3], [4, 5, 6]]))
    assert (C.adjoint() == C.mat).all()

def test_exp_log_batch():
    C = SO3.exp(torch.Tensor([[1, 2, 3], [0, 0, 0]]))
    assert utils.allclose(SO3.exp(SO3.log(C)).mat, C.mat)

def update_state(self, K_prefix, S, dy):
    # dx = K * dy with K = K_prefix * S^{-1}; torch.gesv(dy, S) solves S * z = dy
    dx = K_prefix.mm(torch.gesv(dy, S)[0]).squeeze(1)
    self.x[:3] += dx[:3]
    self.x[3:6] += (SO3.exp(dx[3:6]).dot(SO3.from_rpy(self.x[3:6]))).to_rpy()
    self.x[6:9] += dx[6:9]

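# Hedged note: torch.gesv (used in update_state above) has been removed from
# recent PyTorch releases. On newer versions the same K_prefix * S^{-1} * dy
# product can be computed with torch.linalg.solve. A minimal sketch with
# illustrative shapes only (K_prefix, S, dy below are stand-ins):
import torch

K_prefix = torch.randn(9, 3)              # illustrative Kalman-gain prefix
S = torch.eye(3) + 0.1 * torch.randn(3, 3)  # illustrative innovation covariance
dy = torch.randn(3, 1)                    # illustrative innovation
# equivalent to K_prefix.mm(torch.gesv(dy, S)[0]).squeeze(1) on older PyTorch
dx = K_prefix.mm(torch.linalg.solve(S, dy)).squeeze(1)
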