Example #1
    def test_steam(self, flip_y=False, ex_rotation_sv=[1, 0, 0, 0, 1, 0, 0, 0, 1]):
        with open('config/test2.json', 'r') as f:
            config = json.load(f)
        config['flip_y'] = flip_y
        config['steam']['ex_rotation_sv'] = ex_rotation_sv

        # initialize solver
        solver = SteamSolver(config)

        # create test data
        N = 100
        src = torch.randn((2, N), dtype=torch.float32)
        theta = np.pi / 8
        # if flip_y:
        #    R_gt = torch.tensor([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]], dtype=torch.float32)
        # else:
        R_gt = torch.tensor([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]], dtype=torch.float32)
        t_gt = torch.tensor([[1], [2]], dtype=torch.float32)
        out = R_gt @ src + t_gt

        zeropad = torch.nn.ZeroPad2d((0, 1, 0, 0))
        points1 = zeropad(src.T).unsqueeze(0)  # pseudo
        points2 = zeropad(out.T).unsqueeze(0)  # keypoint

        if config['flip_y']:
            points1[:, :, 1] *= -1.0
            points2[:, :, 1] *= -1.0

        t0 = 0
        t1 = 250000
        t2 = 500000
        time_src = get_times(t0).reshape(1, 1, 400)
        time_tgt = get_times(t1).reshape(1, 1, 400)
        t_ref_src = torch.tensor([0, t1]).reshape(1, 1, 2)
        t_ref_tgt = torch.tensor([t1, t2]).reshape(1, 1, 2)

        keypoint_ints = torch.ones((1, 1, N))
        match_weights = torch.ones((1, 1, N))

        R_out, t_out = solver.optimize(points2, points1, match_weights, keypoint_ints,
                                       time_tgt, time_src, t_ref_tgt, t_ref_src)

        T = get_T_ba(R_out, t_out, 0, 1)
        if flip_y:
            T_prime = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
            T = T_prime @ T @ T_prime
        R = T[:3, :3]
        t = T[:3, 3:]
        T_gt = np.identity(4, dtype=np.float32)
        T_gt[:2, :2] = R_gt.numpy()
        T_gt[:2, 3:] = t_gt.numpy()

        r_err = rotationError(get_inverse_tf(T) @ T_gt)
        t_err = translationError(get_inverse_tf(T) @ T_gt)
        self.assertTrue(r_err < 1e-4, "Rotation error: {}, \n{}\n !=\n {}".format(r_err, R, R_gt))
        self.assertTrue(t_err < 1e-4, "Translation error: {}, \n{}\n !=\n {}".format(t_err, t, t_gt))
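The test accepts the solution when the rotation and translation errors of get_inverse_tf(T) @ T_gt fall below 1e-4. The error metrics themselves are not shown in these examples; a minimal sketch, assuming rotationError returns the angle of the residual rotation and translationError the norm of the residual translation:

import numpy as np

def rotation_error_sketch(T_err):
    # Angle of the residual rotation: arccos((trace(R) - 1) / 2), clipped for numerical safety
    d = 0.5 * (np.trace(T_err[0:3, 0:3]) - 1.0)
    return abs(np.arccos(np.clip(d, -1.0, 1.0)))

def translation_error_sketch(T_err):
    # Euclidean norm of the residual translation
    return np.linalg.norm(T_err[0:3, 3])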
Example #2
    def test_solver_class(self):
        N = 100
        src = torch.randn(2, N)
        theta = np.pi / 8
        R_gt = torch.eye(2)
        R_gt[:2, :2] = torch.tensor([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
        t_gt = torch.tensor([[1], [2]])
        out = R_gt @ src + t_gt
        zeros_vec = np.zeros((N, 1), dtype=np.float32)

        # points must be list of N x 3
        points2 = out.T.detach().cpu().numpy()
        points1 = src.T.detach().cpu().numpy()

        # weights must be list of N x 3 x 3
        identity_weights = np.tile(np.expand_dims(np.eye(3, dtype=np.float32), 0), (N, 1, 1))

        # poses are window_size x 4 x 4
        window_size = 2
        poses = np.tile(
            np.expand_dims(np.eye(4, dtype=np.float32), 0),
            (window_size, 1, 1))

        # get timestamps
        t1 = 0
        t2 = 250000
        times1 = get_times(t1)
        times2 = get_times(t2)
        t_ref = [t1, t2]
        timestamps1 = getApproxTimeStamps([points1], [times1], flip_y=False)
        timestamps2 = getApproxTimeStamps([points2], [times2], flip_y=False)

        # run steam
        dt = 0.25
        solver = SteamCpp.SteamSolver(dt, window_size)
        solver.setMeas([np.concatenate((points2, zeros_vec), 1)],
                       [np.concatenate((points1, zeros_vec), 1)], [identity_weights],
                       timestamps2, timestamps1, t_ref)
        solver.optimize()

        # get pose output
        solver.getPoses(poses)

        # 2nd pose will be T_21
        R = torch.from_numpy(poses[1, :2, :2])
        t = torch.from_numpy(poses[1, :2, 3:])

        T = poses[1].reshape(4, 4)
        T_gt = np.identity(4, dtype=np.float32)
        T_gt[:2, :2] = R_gt.numpy()
        T_gt[:2, 3:] = t_gt.numpy()

        r_err = rotationError(get_inverse_tf(T) @ T_gt)
        t_err = translationError(get_inverse_tf(T) @ T_gt)
        self.assertTrue(r_err < 1e-4, "Rotation: {} != {}".format(R, R_gt))
        self.assertTrue(t_err < 1e-4, "Translation: {} != {}".format(t, t_gt))
Example #3
def get_T_ba(R_pred, t_pred, a, b):
    T_b0 = np.eye(4)
    T_b0[:3, :3] = R_pred[0, b].numpy()
    T_b0[:3, 3:4] = t_pred[0, b].numpy()
    T_a0 = np.eye(4)
    T_a0[:3, :3] = R_pred[0, a].numpy()
    T_a0[:3, 3:4] = t_pred[0, a].numpy()
    return np.matmul(T_b0, get_inverse_tf(T_a0))
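get_T_ba composes the relative transform T_ba = T_b0 @ inv(T_a0) from the two estimated poses. get_inverse_tf itself is not shown in these examples; a minimal sketch, assuming it uses the standard closed form for a rigid transform rather than a general matrix inverse:

import numpy as np

def inverse_tf_sketch(T):
    # inv([R t; 0 1]) = [R^T  -R^T t; 0 1] for a rotation R and translation t
    R = T[0:3, 0:3]
    t = T[0:3, 3:4]
    T_inv = np.eye(4)
    T_inv[0:3, 0:3] = R.T
    T_inv[0:3, 3:4] = -R.T @ t
    return T_inv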
Example #4
    def test_basic(self):
        # Create a test image
        cart_width = 100
        cart_res = 0.25
        img = np.zeros((cart_width, cart_width), dtype=np.float32)
        mask = np.zeros((cart_width, cart_width), dtype=np.float32)
        src_coords = [[24, 74], [24, 24], [74, 24], [74, 74]]
        for u, v in src_coords:
            img[u, v] = 1
        torch_img = torch.from_numpy(img)
        torch_img = torch_img.expand(2, 1, cart_width, cart_width)
        torch_mask = torch.from_numpy(mask)
        torch_mask = torch_mask.expand(2, 1, cart_width, cart_width)

        config = {
            'augmentation': {
                'rot_max': np.pi / 4
            },
            'batch_size': 1,
            'window_size': 2
        }
        T = np.identity(4, dtype=np.float32).reshape(1, 4, 4)
        T2 = np.identity(4, dtype=np.float32).reshape(1, 4, 4)
        T3 = np.concatenate((T, T2), axis=0)
        T = torch.from_numpy(T3)
        batch = {'data': torch_img, 'T_21': T, 'mask': torch_mask}

        np.random.seed(1234)
        batch = augmentBatch(batch, config)
        out = batch['data'][1].numpy().squeeze()
        T_out = batch['T_21'][1].numpy()
        print(T_out)
        print(T[0])
        coords = out.nonzero()
        out_coords = []
        for u, v in zip(coords[0], coords[1]):
            out_coords.append([u, v])

        src_metric = convert_to_metric(src_coords, cart_res, cart_width)
        out_metric = convert_to_metric(out_coords, cart_res, cart_width)

        outliers = 0
        for x, y in out_metric:
            outlier = 1
            xbar = np.array([x, y, 0, 1]).reshape(4, 1)
            xbar = np.matmul(get_inverse_tf(T_out), xbar)
            xn = xbar[0, 0]
            yn = xbar[1, 0]
            for xs, ys in src_metric:
                if np.sqrt((xn - xs)**2 + (yn - ys)**2) <= cart_res * 3:
                    outlier = 0
                    break
            outliers += outlier
        self.assertTrue(outliers == 0, 'outliers: {}'.format(outliers))
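The outlier check depends on convert_to_metric mapping pixel coordinates of the Cartesian radar image to metric coordinates. Its implementation is not shown; a rough sketch under the assumption that the image is centred on the sensor with resolution cart_res metres per pixel (the exact axis and sign convention is a guess here):

def convert_to_metric_sketch(coords, cart_res, cart_width):
    # (u, v) pixel indices -> (x, y) metres, with the image centre as the origin (assumed convention)
    metric = []
    for u, v in coords:
        x = (cart_width // 2 - u) * cart_res
        y = (v - cart_width // 2) * cart_res
        metric.append([x, y])
    return metric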
Example #5
    def get_groundruth_ins(self, time1, time2, gt_path):
        """Extracts ground truth transform T_2_1 from INS data, from current time1 to time2
        Args:
            time1 (int): UNIX INT64 timestamp of the current frame
            time2 (int): UNIX INT64 timestamp of the next frame
            gt_path (AnyStr): path to the ground truth csv file
        Returns:
            T_2_1 (np.ndarray): 4x4 transformation matrix from current time to next
        """
        T = np.array(interpolate_ins_poses(gt_path, [time1], time2)[0])
        return self.T_radar_imu @ T @ get_inverse_tf(self.T_radar_imu)
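The returned motion is conjugated by the radar-IMU extrinsic, T_radar_imu @ T @ inv(T_radar_imu), which re-expresses the IMU-frame motion in the radar frame. A toy check of that pattern, where the extrinsic and pose values below are made up purely for illustration:

import numpy as np

def planar_tf(x, y, yaw):
    c, s = np.cos(yaw), np.sin(yaw)
    T = np.eye(4)
    T[0, 0], T[0, 1], T[1, 0], T[1, 1] = c, -s, s, c
    T[0, 3], T[1, 3] = x, y
    return T

T_radar_imu = planar_tf(0.5, 0.0, np.pi)     # hypothetical extrinsic calibration
T_imu_2_1 = planar_tf(1.0, 0.2, 0.05)        # IMU-frame motion between two frames
T_radar_2_1 = T_radar_imu @ T_imu_2_1 @ np.linalg.inv(T_radar_imu)  # same motion, radar frame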
Example #6
    def test_basic(self):
        N = 100
        src = torch.randn(3, N)
        theta = np.pi / 8
        R_gt = torch.eye(3)
        R_gt[:2, :2] = torch.tensor([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
        t_gt = torch.tensor([[1], [2], [1]])
        out = R_gt @ src + t_gt

        # points must be list of N x 3
        p2_list = [out.T.detach().cpu().numpy()]
        p1_list = [src.T.detach().cpu().numpy()]

        # weights must be list of N x 3 x 3
        w_list = [torch.eye(3).repeat(N, 1, 1).detach().cpu().numpy()]

        # poses are window_size x (num sigmapoints + 1) x 4 x 4
        # vels are window_size x 6
        # num sigmapoints is 12
        window_size = 2
        poses = torch.eye(4).unsqueeze(0).repeat(window_size, 1, 1, 1).detach().cpu().numpy()
        vels = torch.zeros(window_size, 6).detach().cpu().numpy()

        # run steam
        dt = 1.0    # timestep for motion prior
        sigmapoints = False
        steampy.run_steam(p2_list, p1_list, w_list, poses, vels, sigmapoints, dt)

        # 2nd pose will be T_21
        R = torch.from_numpy(poses[1, 0, :3, :3])
        t = torch.from_numpy(poses[1, 0, :3, 3:])

        T = poses[1, 0].reshape(4, 4)
        T_gt = np.identity(4, dtype=np.float32)
        T_gt[:3, :3] = R_gt.numpy()
        T_gt[:3, 3:] = t_gt.numpy()

        r_err = rotationError(get_inverse_tf(T) @ T_gt)
        t_err = translationError(get_inverse_tf(T) @ T_gt)
        self.assertTrue(r_err < 1e-4, "Rotation: {} != {}".format(R, R_gt))
        self.assertTrue(t_err < 1e-4, "Translation: {} != {}".format(t, t_gt))
Example #7
    def get_groundtruth_odometry(self, radar_time, gt_path):
        """Retrieves the groundtruth 4x4 transform from current time to next
        Args:
            radar_time (int): UNIX INT64 timestamp that we want groundtruth for (also the filename for radar)
            gt_path (AnyStr): path to the ground truth csv file
        Returns:
            T_2_1 (np.ndarray): 4x4 transformation matrix from current time to next
            time1 (int): UNIX INT64 timestamp of the current frame
            time2 (int): UNIX INT64 timestamp of the next frame
        """
        with open(gt_path, 'r') as f:
            f.readline()  # skip the csv header
            lines = f.readlines()
            for line in lines:
                line = line.split(',')
                if int(line[9]) == radar_time:
                    T = get_transform_oxford(float(line[2]), float(line[3]), float(line[7]))  # from next time to current
                    return get_inverse_tf(T), int(line[1]), int(line[0])  # T_2_1 from current time step to the next
        assert 0, 'ground truth transform for {} not found in {}'.format(radar_time, gt_path)
Example #8
    def get_groundtruth_odometry(self, radar_time, gt_path):
        """Retrieves the groundtruth 4x4 transform from current time to next
        Args:
            radar_time (int): UNIX INT64 time stamp that we want groundtruth for (also the filename for radar)
            gt_path (AnyStr): path to the ground truth csv file
        Returns:
            np.ndarray: 4x4 transformation matrix from current time to next (T_2_1)
        """
        def parse(gps_line):
            out = [float(x) for x in gps_line.split(',')]
            out[0] = int(gps_line.split(',')[0])
            return out

        gtfound = False
        min_delta = 0.1
        T_2_1 = np.identity(4, dtype=np.float32)
        with open(gt_path, 'r') as f:
            f.readline()
            lines = f.readlines()
            for i in range(len(lines) - 1):
                gt1 = parse(lines[i])
                delta = abs(float(gt1[0] - radar_time) / 1.0e9)
                if delta < min_delta:
                    gt2 = parse(lines[i + 1])
                    T_enu_r1 = np.matmul(get_transform_boreas(gt1), T_prime)
                    T_enu_r2 = np.matmul(get_transform_boreas(gt2), T_prime)
                    T_r2_r1 = np.matmul(get_inverse_tf(T_enu_r2), T_enu_r1)  # 4x4 SE(3)
                    heading, _, _ = rotToYawPitchRoll(T_r2_r1[0:3, 0:3])
                    T_2_1 = get_transform(T_r2_r1[0, 3], T_r2_r1[1, 3], heading)  # 4x4 SE(2)
                    min_delta = delta
                    gtfound = True
        assert gtfound, 'ground truth transform for {} not found in {}'.format(radar_time, gt_path)
        return T_2_1
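The Boreas variant above flattens the interpolated SE(3) motion to SE(2) by keeping only x, y, and the heading. rotToYawPitchRoll and get_transform are not shown in these examples; a sketch of that projection, assuming yaw is extracted as atan2(R[1,0], R[0,0]) and get_transform builds a planar 4x4 transform from (x, y, yaw):

import numpy as np

def yaw_from_rot_sketch(R):
    # Heading about the z-axis (assumed ZYX Euler convention)
    return np.arctan2(R[1, 0], R[0, 0])

def planar_transform_sketch(x, y, yaw):
    c, s = np.cos(yaw), np.sin(yaw)
    T = np.eye(4, dtype=np.float32)
    T[0, 0], T[0, 1], T[1, 0], T[1, 1] = c, -s, s, c
    T[0, 3], T[1, 3] = x, y
    return T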
Example #9

    def test0(self):
        v = 20.0
        omega = 90.0 * np.pi / 180.0
        print(v, omega)
        square = np.array([[25, -25, -25, 25, 25], [25, 25, -25, -25, 25]]).transpose()
        plt.figure(figsize=(10, 10), tight_layout=True)
        plt.axes().set_aspect('equal')
        plt.plot(square[:, 0], square[:, 1], "k")

        lines = get_lines(square)

        x1 = []
        y1 = []
        x2 = []
        y2 = []
        a1 = []
        a2 = []
        t1 = []
        t2 = []

        delta_t = 0.25 / 400.0
        time = 0.0

        desc1 = np.zeros((400, 2))
        desc2 = np.zeros((400, 2))

        x_pos_vec = []
        y_pos_vec = []
        theta_pos_vec = []

        for scan in range(2):
            for i in range(400):
                # Get sensor position
                theta_pos = wrapto2pi(omega * time)
                if omega == 0:
                    x_pos = v * time
                    y_pos = 0
                else:
                    x_pos = (v / omega) * np.sin(theta_pos)
                    y_pos = (v / omega) * (1 - np.cos(theta_pos))
                x_pos_vec.append(x_pos)
                y_pos_vec.append(y_pos)
                theta_pos_vec.append(theta_pos)

                theta_rad = i * 0.9 * np.pi / 180
                theta = theta_pos + theta_rad
                theta = wrapto2pi(theta)

                if scan == 0:
                    a1.append(theta_rad)
                    t1.append(time * 1e6)
                else:
                    a2.append(theta_rad)
                    t2.append(time * 1e6)

                if (0 <= theta and theta < 0.25 * PI) or (0.75 * PI <= theta and theta < 1.25 * PI) or \
                        (1.75 * PI <= theta and theta < 2 * PI):
                    m = np.tan(theta)
                    b = y_pos - m * x_pos
                    flag = False
                else:
                    m = np.cos(theta) / np.sin(theta)
                    b = x_pos - m * y_pos
                    flag = True

                dmin = 1.0e6
                x_true = 0
                y_true = 0
                eps = 1.0e-8
                for j in range(lines.shape[1]):
                    m2 = lines[1, j]
                    b2 = lines[2, j]
                    lflag = lines[0, j]
                    if flag is False and lflag == 0:
                        x_int = (b2 - b) / (m - m2)
                        y_int = m * x_int + b
                    elif flag is False and lflag == 1:
                        y_int = (m * b2 + b) / (1 - m * m2)
                        x_int = m2 * y_int + b2
                    elif flag is True and lflag == 0:
                        y_int = (m2 * b + b2) / (1 - m * m2)
                        x_int = m * y_int + b
                    else:
                        y_int = (b2 - b) / (m - m2 + eps)
                        x_int = m * y_int + b

                    if (0 <= theta and theta < PI and (y_int - y_pos) < 0) or \
                            (PI <= theta and theta < 2 * PI and (y_int - y_pos) > 0):
                        continue
                    if (((0 <= theta and theta < 0.5 * PI) or (1.5 * PI <= theta and theta < 2 * PI)) and
                            (x_int - x_pos) < 0) or \
                            (0.5 * PI <= theta and theta < 1.5 * PI and (x_int - x_pos) > 0):
                        continue
                    x_range = [lines[3, j], lines[5, j]]
                    y_range = [lines[4, j], lines[6, j]]
                    x_range.sort()
                    y_range.sort()
                    # if x_int < x_range[0] or x_int > x_range[1] or y_int < y_range[0] or y_int > y_range[1]:
                    #    continue

                    d = (x_pos - x_int)**2 + (y_pos - y_int)**2
                    if d < dmin:
                        dmin = d
                        x_true = x_int
                        y_true = y_int

                r = np.sqrt((x_pos - x_true)**2 + (y_pos - y_true)**2)
                if scan == 0:
                    desc1[i, 0] = x_true
                    desc1[i, 1] = y_true
                    x1.append(r * np.cos(theta_rad))
                    y1.append(r * np.sin(theta_rad))
                else:
                    desc2[i, 0] = x_true
                    desc2[i, 1] = y_true
                    x2.append(r * np.cos(theta_rad))
                    y2.append(r * np.sin(theta_rad))

                time += delta_t

        plt.scatter(x1, y1, 25.0, "r")
        plt.scatter(x_pos_vec, y_pos_vec, 25.0, 'k')
        plt.scatter(x2, y2, 25.0, "b")
        plt.savefig('output.pdf', bbox_inches='tight', pad_inches=0.0)

        # Perform NN matching using the descriptors from each cloud
        kdt = KDTree(desc2, leaf_size=1, metric='euclidean')
        nnresults = kdt.query(desc1, k=1, return_distance=False)

        matches = []
        N = desc1.shape[0]
        for i in range(N):
            if nnresults[i] in matches:
                matches.append(-1)
            else:
                matches.append(nnresults[i])

        p1 = np.zeros((N, 3))
        p2 = np.zeros((N, 3))
        t1prime = np.zeros((N, 1))
        t2prime = np.zeros((N, 1))
        j = 0
        for i in range(N):
            if matches[i] == -1:
                continue
            p1[j, 0] = x1[i]
            p1[j, 1] = y1[i]
            p2[j, 0] = x2[int(matches[i])]
            p2[j, 1] = y2[int(matches[i])]
            t1prime[j, 0] = t1[i]
            t2prime[j, 0] = t2[int(matches[i])]
            j += 1
        p1.resize((j, 3))
        p2.resize((j, 3))
        p1 = np.expand_dims(p1, axis=0)
        p2 = np.expand_dims(p2, axis=0)
        p1 = torch.from_numpy(p1)
        p2 = torch.from_numpy(p2)
        # t1prime = np.expand_dims(t1prime.resize((j, 1)), axis=0)
        # t2prime = np.expand_dims(t2prime.resize((j, 1)), axis=0)

        keypoint_ints = torch.ones(1, 1, p1.shape[1])
        match_weights = torch.ones(1, 1, p1.shape[1])
        t1 = np.array(t1, dtype=np.int64).reshape((1, 400, 1))
        t2 = np.array(t2, dtype=np.int64).reshape((1, 400, 1))
        t1 = torch.from_numpy(t1)
        t2 = torch.from_numpy(t2)
        t1_ = 250000
        t2_ = 500000
        t_ref_1 = torch.tensor([0, t1_]).reshape(1, 1, 2)
        t_ref_2 = torch.tensor([t1_, t2_]).reshape(1, 1, 2)

        with open('config/steam.json') as f:
            config = json.load(f)
        config['window_size'] = 2
        config['gpuid'] = 'cpu'
        config['qc_diag'] = [1.0, 1.0, 1.0, 1.0, 1.0, 1]
        config['steam']['use_ransac'] = False
        config['steam']['ransac_version'] = 0
        config['steam']['use_ctsteam'] = True

        solver = SteamSolver(config)
        R_tgt_src_pred, t_tgt_src_pred = solver.optimize(
            p2, p1, match_weights, keypoint_ints, t2, t1, t_ref_2, t_ref_1)
        T_pred = np.identity(4)
        T_pred[0:3, 0:3] = R_tgt_src_pred[0, 1].cpu().numpy()
        T_pred[0:3, 3:] = t_tgt_src_pred[0, 1].cpu().numpy()
        print('T_pred:\n{}'.format(T_pred))

        T_01 = np.identity(4)
        theta_pos = omega * 0.25
        if omega == 0:
            x_pos = v * 0.25  # displacement over one 0.25 s scan interval
            y_pos = 0
        else:
            x_pos = (v / omega) * np.sin(theta_pos)
            y_pos = (v / omega) * (1 - np.cos(theta_pos))
        T_01[0:3, 0:3] = yaw(-theta_pos)
        T_01[0, 3] = x_pos
        T_01[1, 3] = y_pos
        T_10 = get_inverse_tf(T_01)
        print('T_true:\n{}'.format(T_10))

        Terr = T_01 @ T_pred

        t_err = translationError(Terr)
        r_err = rotationError(Terr) * 180 / np.pi

        print('t_err: {} m r_err: {} deg'.format(t_err, r_err))
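The simulated sensor above follows a constant-velocity, constant-yaw-rate (unicycle) trajectory, which is also what the ground-truth pose at the end is built from: x = (v/omega) sin(omega t), y = (v/omega)(1 - cos(omega t)), theta = omega t. A compact sketch of that motion model:

import numpy as np

def unicycle_pose_sketch(v, omega, t):
    # Pose (x, y, theta) after driving for t seconds at speed v and yaw rate omega
    theta = omega * t
    if omega == 0.0:
        return v * t, 0.0, 0.0                   # straight-line motion
    x = (v / omega) * np.sin(theta)              # arc of radius v / omega
    y = (v / omega) * (1.0 - np.cos(theta))
    return x, y, theta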
Example #10
def plot_sequences(T_gt, T_pred, seq_lens, returnTensor=True, T_icra=None, savePDF=False, fnames=None, flip=True):
    """Creates a top-down plot of the predicted odometry results vs. ground truth."""
    seq_indices = []
    idx = 0
    for s in seq_lens:
        seq_indices.append(list(range(idx, idx + s - 1)))
        idx += (s - 1)

    matplotlib.rcParams.update({'font.size': 16, 'xtick.labelsize': 16, 'ytick.labelsize': 16,
                                'axes.linewidth': 1.5, 'font.family': 'serif', 'pdf.fonttype': 42})
    T_flip = np.identity(4)
    T_flip[1, 1] = -1
    T_flip[2, 2] = -1
    imgs = []
    for seq_i, indices in enumerate(seq_indices):
        T_gt_ = np.identity(4)
        T_pred_ = np.identity(4)
        T_icra_ = np.identity(4)
        if flip:
            T_gt_ = np.matmul(T_flip, T_gt_)
            T_pred_ = np.matmul(T_flip, T_pred_)
        x_gt = []
        y_gt = []
        x_pred = []
        y_pred = []
        x_icra = []
        y_icra = []
        for i in indices:
            T_gt_ = np.matmul(T_gt[i], T_gt_)
            T_pred_ = np.matmul(T_pred[i], T_pred_)
            enforce_orthog(T_gt_)
            enforce_orthog(T_pred_)
            T_gt_temp = get_inverse_tf(T_gt_)
            T_pred_temp = get_inverse_tf(T_pred_)
            x_gt.append(T_gt_temp[0, 3])
            y_gt.append(T_gt_temp[1, 3])
            x_pred.append(T_pred_temp[0, 3])
            y_pred.append(T_pred_temp[1, 3])
            if T_icra is not None:
                T_icra_ = np.matmul(T_icra[i], T_icra_)
                enforce_orthog(T_icra_)
                T_icra_temp = get_inverse_tf(T_icra_)
                x_icra.append(T_icra_temp[0, 3])
                y_icra.append(T_icra_temp[1, 3])

        plt.figure(figsize=(10, 10), tight_layout=True)
        plt.grid(color='k', which='both', linestyle='--', alpha=0.75, dashes=(8.5, 8.5))
        plt.axes().set_aspect('equal')
        plt.plot(x_gt, y_gt, 'k', linewidth=2.5, label='GT')
        if x_icra and y_icra:
            plt.plot(x_icra, y_icra, 'r', linewidth=2.5, label='MC-RANSAC')
        plt.plot(x_pred, y_pred, 'b', linewidth=2.5, label='HERO')
        plt.xlabel('x (m)', fontsize=16)
        plt.ylabel('y (m)', fontsize=16)
        plt.legend(loc="upper left", edgecolor='k', fancybox=False)
        if savePDF and fnames is not None:
            plt.savefig(fnames[seq_i], bbox_inches='tight', pad_inches=0.0)
        if returnTensor:
            imgs.append(convert_plt_to_tensor())
        else:
            imgs.append(convert_plt_to_img())
    return imgs
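A hedged usage sketch for plot_sequences, assuming T_gt and T_pred are flat lists of per-frame relative 4x4 transforms and seq_lens gives the number of frames in each sequence; the pose values and file name below are made up:

import numpy as np

T_rel = np.eye(4)
T_rel[0, 3] = 1.0                      # one metre of forward motion per frame
seq_lens = [11]                        # a single 11-frame sequence -> 10 relative poses
T_gt = [T_rel.copy() for _ in range(10)]
T_pred = [T_rel.copy() for _ in range(10)]
imgs = plot_sequences(T_gt, T_pred, seq_lens, returnTensor=False,
                      savePDF=True, fnames=['trajectory.pdf'])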
Example #11
    gt_path = config['data_dir']
    seqs = [f for f in os.listdir(gt_path) if '2019' in f]
    seqs.sort()
    for seq in seqs:
        print('Extracting INS GT for: {}'.format(seq))
        with open(gt_path + seq + '/gt/radar_odometry.csv', 'r') as f:
            f.readline()
            odom_gt = f.readlines()
        f = open(gt_path + seq + '/gt/radar_odometry_ins.csv', 'w')
        f.write(header)

        gt_times = []
        for i in range(1, len(odom_gt)):
            gt_times.append(int(odom_gt[i].split(',')[1]))
        orig_time = int(odom_gt[0].split(',')[1])
        gt_times.append(int(odom_gt[-1].split(',')[0]))
        ins_path = gt_path + seq + '/gps/ins.csv'
        abs_poses = interpolate_ins_poses(ins_path, gt_times, orig_time)
        abs_poses.insert(0, np.identity(4, dtype=np.float32))
        for i in range(len(gt_times) - 1):
            T_0k = np.array(abs_poses[i])
            T_0kp1 = np.array(abs_poses[i + 1])
            T = get_inverse_tf(T_0k) @ T_0kp1  # T_k_kp1 (next time to current)
            T = T_radar_imu @ T @ get_inverse_tf(T_radar_imu)
            rpy = so3_to_euler(T[0:3, 0:3])
            phi = rpy[0, 2]
            odom = parse(odom_gt[i])
            f.write('{},{},{},{},{},{},{},{},{},{}\n'.format(
                odom[0], odom[1], T[0, 3], T[1, 3], 0, 0, 0, phi, odom[8],
                odom[9]))