Example #1
    def forward_with_rotation_matrices_mask(self, xs, hat_xs):
        """Forward errors with rotation matrices"""
        N = xs.shape[0]
        masks = xs[:, :, 3].unsqueeze(1)
        masks = torch.nn.functional.conv1d(
            masks, self.weight, bias=None,
            stride=self.min_train_freq).double().transpose(1, 2)
        masks[masks < 1] = 0
        Xs = SO3.exp(xs[:, ::self.min_train_freq, :3].reshape(-1, 3).double())
        hat_xs = self.dt * hat_xs.reshape(-1, 3).double()
        Omegas = SO3.exp(hat_xs[:, :3])
        # compute increment at min_train_freq by decimation
        for k in range(self.min_N):
            Omegas = Omegas[::2].bmm(Omegas[1::2])
        rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
        loss = self.f_huber(rs)
        # compute increment from min_train_freq to max_train_freq
        for k in range(self.min_N, self.max_N):
            Omegas = Omegas[::2].bmm(Omegas[1::2])
            Xs = Xs[::2].bmm(Xs[1::2])
            masks = masks[:, ::2] * masks[:, 1::2]
            rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
            rs = rs[masks[:, self.N0:].squeeze(2) == 1]
            loss = loss + self.f_huber(rs[:, 2]) / (2**(k - self.min_N + 1))
        return loss
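
A minimal standalone sketch (not from the repository) of what the decimation loop does: each pass composes consecutive rotation increments pairwise, so after k passes every entry holds the product of 2**k consecutive increments, i.e. the rotation accumulated over a 2**k-sample window. Plain torch matrices stand in for the project's SO3 class.

import torch

def skew(w):
    # hat operator: batch of 3-vectors -> batch of 3x3 skew-symmetric matrices
    W = torch.zeros(w.shape[0], 3, 3)
    W[:, 0, 1], W[:, 0, 2] = -w[:, 2], w[:, 1]
    W[:, 1, 0], W[:, 1, 2] = w[:, 2], -w[:, 0]
    W[:, 2, 0], W[:, 2, 1] = -w[:, 1], w[:, 0]
    return W

Omegas = torch.matrix_exp(skew(0.01 * torch.randn(8, 3)))  # 8 small increments
dec = Omegas.clone()
for _ in range(3):                      # 8 -> 4 -> 2 -> 1 increments
    dec = dec[::2].bmm(dec[1::2])
full = Omegas[0]
for R in Omegas[1:]:                    # ordered product of all 8 increments
    full = full.mm(R)
print(torch.allclose(dec[0], full, atol=1e-6))  # True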
Example #2
    def display_test(self, dataset, mode):
        self.roes = {
            'Rots': [],
            'yaws': [],
        }
        self.to_open_vins(dataset)
        for i, seq in enumerate(dataset.sequences):
            print('\n', 'Results for sequence ' + seq)
            self.seq = seq
            # get ground truth
            self.gt = dataset.load_gt(i)
            Rots = SO3.from_quaternion(self.gt['qs'].cuda())
            self.gt['Rots'] = Rots.cpu()
            self.gt['rpys'] = SO3.to_rpy(Rots).cpu()
            # get data and estimate
            self.net_us = pload(self.address, seq, 'results.p')['hat_xs']
            self.raw_us, _ = dataset[i]
            N = self.net_us.shape[0]
            self.gyro_corrections = (self.raw_us[:, :3] - self.net_us[:N, :3])
            self.ts = torch.linspace(0, N*self.dt, N)

            self.convert()
            self.plot_gyro()
            self.plot_gyro_correction()
            plt.show()
Example #3
    def plot_gyro(self):
        N = self.raw_us.shape[0]
        raw_us = self.raw_us[:, :3]
        net_us = self.net_us[:, :3]

        net_qs, imu_Rots, net_Rots = self.integrate_with_quaternions_superfast(
            N, raw_us, net_us)
        imu_rpys = 180/np.pi*SO3.to_rpy(imu_Rots).cpu()
        net_rpys = 180/np.pi*SO3.to_rpy(net_Rots).cpu()
        self.plot_orientation(imu_rpys, net_rpys, N)
        self.plot_orientation_error(imu_Rots, net_Rots, N)
Example #4
    def align_traj(self):
        """yaw only and position alignment at initial time"""
        self.gt['rpys'] = SO3.to_rpy(self.gt['Rots'].cuda()).cpu()
        self.iekf['rpys'] = SO3.to_rpy(self.iekf['Rots'].cuda()).cpu()

        self.gt['ps'] -= self.gt['ps'][0].clone()
        self.iekf['ps'] -= self.iekf['ps'][0].clone()
        rpys = self.gt['rpys'][:2] - self.iekf['rpys'][:2]
        Rot = SO3.from_rpy(rpys[:, 0], rpys[:, 1], rpys[:, 2])
        Rot = Rot[0].repeat(self.iekf['ps'].shape[0], 1, 1)

        self.iekf['Rots'] = Rot.bmm(self.iekf['Rots'])
        self.iekf['vs'] = bmv(Rot, self.iekf['vs'])
        self.iekf['ps'] = bmv(Rot, self.iekf['ps'])
        self.iekf['rpys'] = SO3.to_rpy(self.iekf['Rots'].cuda()).cpu()
Example #5
    def plot_orientation_error(self, imu_Rots, net_Rots, N):
        gt = self.gt['Rots'][:N].cuda()
        raw_err = 180/np.pi*SO3.log(bmtm(imu_Rots, gt)).cpu()
        net_err = 180/np.pi*SO3.log(bmtm(net_Rots, gt)).cpu()
        title = "$SO(3)$ orientation error"
        fig, axs = plt.subplots(3, 1, sharex=True, figsize=self.figsize)
        axs[0].set(ylabel='roll (deg)', title=title)
        axs[1].set(ylabel='pitch (deg)')
        axs[2].set(xlabel='$t$ (min)', ylabel='yaw (deg)')

        for i in range(3):
            axs[i].plot(self.ts, raw_err[:, i], color='red', label=r'raw IMU')
            axs[i].plot(self.ts, net_err[:, i], color='blue', label=r'net IMU')
            axs[i].set_ylim(-10, 10)
            axs[i].set_xlim(self.ts[0], self.ts[-1])
        self.savefig(axs, fig, 'orientation_error')
Example #6
    def plot_velocity_in_body_frame(self):
        title = "Body velocity as function of time " + self.end_title
        true = bmv(self.gt['Rots'].transpose(1, 2), self.gt['vs'])
        mean = bmv(self.iekf['Rots'].transpose(1, 2), self.iekf['vs'])
        # get 3 sigma uncertainty
        P = torch.diag_embed(self.iekf['Ps'][:, :6],
                             offset=0,
                             dim1=-2,
                             dim2=-1)
        J = P.new_zeros(P.shape[0], 3, 6)
        J[:, :, :3] = SO3.wedge(mean)
        J[:, :, 3:6] = self.iekf['Rots'].transpose(1, 2)
        std = J.bmm(P).bmm(J.transpose(1, 2)).diagonal(dim1=1, dim2=2).sqrt()

        fig, axs = plt.subplots(3, 1, sharex=True, figsize=self.figsize)
        axs[0].set(ylabel=r'$(\mathbf{R}_n^T\mathbf{v}_n)^x$ (km/h)',
                   title=title)
        axs[1].set(ylabel=r'$(\mathbf{R}_n^T\mathbf{v}_n)^y$ (km/h)')
        axs[2].set(xlabel='$t$ (min)',
                   ylabel=r'$(\mathbf{R}_n^T\mathbf{v}_n)^z$ (km/h)')

        for i in range(3):
            axs[i].plot(self.ts, true[:, i], color="black")
            axs[i].plot(self.ts, mean[:, i], color="green")
            axs[i].plot(self.ts, (mean + std)[:, i], color='green', alpha=0.5)
            axs[i].plot(self.ts, (mean - std)[:, i], color='green', alpha=0.5)
            axs[i].set_xlim(self.ts[0], self.ts[-1])
        fig.legend([r'ground truth', r'IEKF', r'$3\sigma$'], ncol=3)
        self.savefig(axs, fig, 'body_velocity')
Example #7
    @staticmethod
    def interpolate(x, t, t_int):
        """Interpolate ground truth at the sensor timestamps"""

        # vector interpolation
        x_int = np.zeros((t_int.shape[0], x.shape[1]))
        for i in range(x.shape[1]):
            if i in [4, 5, 6, 7]:
                # quaternion columns (4:8) are interpolated separately below
                continue
            x_int[:, i] = np.interp(t_int, t, x[:, i])
        # quaternion interpolation
        t_int = torch.Tensor(t_int - t[0])
        t = torch.Tensor(t - t[0])
        qs = SO3.qnorm(torch.Tensor(x[:, 4:8]))
        x_int[:, 4:8] = SO3.qinterp(qs, t, t_int).numpy()
        return x_int
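
A self-contained NumPy sketch of the same idea, with an assumption made explicit: SO3.qinterp is replaced here by a per-component lerp plus renormalization over the quaternion columns, which only approximates proper quaternion interpolation and assumes neighbouring ground-truth quaternions are close and consistently signed. The helper name interp_pose is hypothetical.

import numpy as np

def interp_pose(x, t, t_int):
    x_int = np.zeros((t_int.shape[0], x.shape[1]))
    for i in range(x.shape[1]):
        if i in [4, 5, 6, 7]:       # quaternion columns handled below
            continue
        x_int[:, i] = np.interp(t_int, t, x[:, i])
    # normalized lerp over the quaternion block (columns 4:8)
    q = np.stack([np.interp(t_int, t, x[:, j]) for j in range(4, 8)], axis=1)
    x_int[:, 4:8] = q / np.linalg.norm(q, axis=1, keepdims=True)
    return x_int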
Example #8
    def forward_with_rotation_matrices(self, xs, hat_xs):
        """Forward errors with rotation matrices"""
        N = xs.shape[0]
        Xs = SO3.exp(xs[:, ::self.min_train_freq].reshape(-1, 3).double())
        hat_xs = self.dt * hat_xs.reshape(-1, 3).double()
        Omegas = SO3.exp(hat_xs[:, :3])
        # compute increment at min_train_freq by decimation
        for k in range(self.min_N):
            Omegas = Omegas[::2].bmm(Omegas[1::2])
        rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
        loss = self.f_huber(rs)
        # compute increment from min_train_freq to max_train_freq
        for k in range(self.min_N, self.max_N):
            Omegas = Omegas[::2].bmm(Omegas[1::2])
            Xs = Xs[::2].bmm(Xs[1::2])
            rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
            loss = loss + self.f_huber(rs) / (2**(k - self.min_N + 1))
        return loss
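
The residual above is r = log(Omega^T X), which vanishes when the integrated network increment Omega matches the ground-truth increment X. The sketch below assumes bmtm is the usual batched transpose-then-multiply helper; that is an assumption about the project's utils, not code taken from it.

import torch

def bmtm(a, b):
    # assumed equivalent of the project's bmtm: a_i^T @ b_i for every batch item
    return torch.einsum('bij, bik -> bjk', a, b)

a, b = torch.randn(5, 3, 3), torch.randn(5, 3, 3)
print(torch.allclose(bmtm(a, b), a.transpose(1, 2).bmm(b)))  # True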
Example #9
File: iekf.py  Project: yxw027/RINS-W
    def dump(self, address, seq, zupts, covs):
        # turn cov
        J = torch.eye(9).repeat(self.Ps.shape[0], 1, 1)
        J[:, 3:6, :3] = SO3.wedge(self.vs)
        J[:, 6:9, :3] = SO3.wedge(self.ps)
        # self.Ps = axat(J, self.Ps[:, :9, :9])
        path = os.path.join(address, seq, 'iekf.p')
        mondict = {
            'Rots': self.Rots,
            'vs': self.vs,
            'ps': self.ps,
            'b_omegas': self.b_omegas,
            'b_accs': self.b_accs,
            'rs': self.rs,
            'Ps': self.Ps.diagonal(dim1=1, dim2=2),
            'zupts': zupts,
            'covs': covs,
        }
        for k, v in mondict.items():
            mondict[k] = v.float().detach().cpu()
        pdump(mondict, path)
Example #10
    def plot_orientation_err(self):
        title = "Position error as function of time " + self.end_title
        err = SO3.log(bmtm(self.gt['Rots'].cuda(),
                           self.iekf['Rots'].cuda())).cpu()
        fig, axs = plt.subplots(3, 1, sharex=True, figsize=self.figsize)
        axs[0].set(ylabel='roll (deg)', title=title)
        axs[1].set(ylabel='pitch (deg)')
        axs[2].set(xlabel='$t$ (min)', ylabel='yaw (deg)')

        for i in range(3):
            axs[i].plot(self.ts, err[:, i], color="blue")
            axs[i].set_xlim(self.ts[0], self.ts[-1])
        self.savefig(axs, fig, 'orientation_error')
Example #11
    def display_test(self, dataset_class, dataset_params, mode):
        dataset = dataset_class(**dataset_params, mode=mode)
        for i, seq in enumerate(dataset.sequences):
            print('\n',
                  '---------  Result for sequence ' + seq + '  ---------')
            self.seq = seq
            # get ground truth pose
            self.gt = dataset.load_gt(i)
            self.gt['Rots'] = SO3.from_quaternion(self.gt['qs'].cuda()).cpu()

            # get data and estimate
            self.us, self.zupt = dataset[i]
            self.iekf = self.get_iekf_results(seq)
            self.N = self.iekf['ps'].shape[0]
            N0 = self.us.shape[0] - self.N
            self.us = self.us[N0:]
            self.zupt = self.zupt[N0:]
            for key, val in self.gt.items():
                self.gt[key] = val[N0:]
            self.ts = torch.linspace(0, self.N * self.dt, self.N)

            self.align_traj()
            self.convert()
            self.plot_orientation()
            self.plot_velocity()
            self.plot_velocity_in_body_frame()
            self.plot_position()
            self.plot_horizontal_position()
            self.plot_bias_gyro()
            self.plot_bias_acc()
            self.plot_gyro()
            self.plot_acc()
            self.plot_zupt()
            self.plot_orientation_err()
            self.plot_velocity_err()
            self.plot_body_velocity_err()
            self.plot_position_err()
Example #12
    def forward_with_quaternions(self, xs, hat_xs):
        """Forward errors with quaternions"""
        N = xs.shape[0]
        Xs = SO3.qexp(xs[:, ::self.min_train_freq].reshape(-1, 3).double())
        hat_xs = self.dt * hat_xs.reshape(-1, 3).double()
        Omegas = SO3.qexp(hat_xs[:, :3])
        # compute increment at min_train_freq by decimation
        for k in range(self.min_N):
            Omegas = SO3.qmul(Omegas[::2], Omegas[1::2])
        rs = SO3.qlog(SO3.qmul(SO3.qinv(Omegas), Xs))
        rs = rs.reshape(N, -1, 3)[:, self.N0:]
        loss = self.f_huber(rs)
        # compute increment from min_train_freq to max_train_freq
        for k in range(self.min_N, self.max_N):
            Omegas = SO3.qmul(Omegas[::2], Omegas[1::2])
            Xs = SO3.qmul(Xs[::2], Xs[1::2])
            rs = SO3.qlog(SO3.qmul(SO3.qinv(Omegas), Xs))
            rs = rs.view(N, -1, 3)[:, self.N0:]
            loss = loss + self.f_huber(rs) / (2**(k - self.min_N + 1))
        return loss
Example #13
    def integrate_with_quaternions_superfast(self, N, raw_us, net_us):
        imu_qs = SO3.qnorm(SO3.qexp(raw_us[:, :3].cuda().double()*self.dt))
        net_qs = SO3.qnorm(SO3.qexp(net_us[:, :3].cuda().double()*self.dt))
        Rot0 = SO3.qnorm(self.gt['qs'][:2].cuda().double())
        imu_qs[0] = Rot0[0]
        net_qs[0] = Rot0[0]

        # cumulative orientation via a parallel prefix (doubling) product
        N = np.log2(imu_qs.shape[0])
        for i in range(int(N)):
            k = 2**i
            imu_qs[k:] = SO3.qnorm(SO3.qmul(imu_qs[:-k], imu_qs[k:]))
            net_qs[k:] = SO3.qnorm(SO3.qmul(net_qs[:-k], net_qs[k:]))

        if int(N) < N:
            # extra pass when the sequence length is not a power of two
            k = 2**int(N)
            k2 = imu_qs[k:].shape[0]
            imu_qs[k:] = SO3.qnorm(SO3.qmul(imu_qs[:k2], imu_qs[k:]))
            net_qs[k:] = SO3.qnorm(SO3.qmul(net_qs[:k2], net_qs[k:]))

        imu_Rots = SO3.from_quaternion(imu_qs).float()
        net_Rots = SO3.from_quaternion(net_qs).float()
        return net_qs.cpu(), imu_Rots, net_Rots
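
The doubling loop above is a parallel prefix ("scan") product: pass i combines each element with the partial product 2**i steps earlier, so about log2(N) passes yield every cumulative orientation. A toy sketch with scalars in place of quaternions (ignoring that quaternion multiplication is non-commutative; in the method above the earlier prefix is multiplied on the left) reproduces numpy's cumprod:

import numpy as np

x = np.random.rand(10) + 0.5
a = x.copy()
n = np.log2(a.shape[0])
for i in range(int(n)):
    k = 2**i
    a[k:] = a[:-k] * a[k:]          # combine with the prefix 2**i steps back
if int(n) < n:                      # extra pass when N is not a power of two
    k = 2**int(n)
    a[k:] = a[:a[k:].shape[0]] * a[k:]
print(np.allclose(a, np.cumprod(x)))  # True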
Example #14
    def read_data(self, data_dir):
        r"""Read the data from the dataset"""

        # threshold for ZUPT ground truth
        sm_velocity_max_threshold = 0.004  # m/s

        f = os.path.join(self.predata_dir, 'urban06.p')
        if os.path.exists(f):
            # data already preprocessed
            return

        print("Start read_data, be patient please")

        def set_path(seq):
            path_imu = os.path.join(data_dir, seq, "sensor_data",
                                    "xsens_imu.csv")
            path_gt = os.path.join(data_dir, seq, "global_pose.csv")
            # path_odo = os.path.join(data_dir, seq, "encoder.csv")
            return path_imu, path_gt

        time_factor = 1e9  # ns -> s

        def interpolate(x, t, t_int, angle=False):
            """
            Interpolate ground truth with sensors
            """
            x_int = np.zeros((t_int.shape[0], x.shape[1]))
            for i in range(x.shape[1]):
                if angle:
                    x[:, i] = np.unwrap(x[:, i])
                x_int[:, i] = np.interp(t_int, t, x[:, i])
            return x_int

        sequences = os.listdir(data_dir)
        # read each sequence
        for sequence in sequences:
            print("\nSequence name: " + sequence)
            path_imu, path_gt = set_path(sequence)
            imu = np.genfromtxt(path_imu, delimiter=",")

            # Urban00-05 and campus00 have only quaternion and Euler data
            if imu.shape[1] <= 10:
                cprint("No IMU data for dataset " + sequence, 'yellow')
                continue
            gt = np.genfromtxt(path_gt, delimiter=",")

            # time synchronization between IMU and ground truth
            t0 = np.max([gt[0, 0], imu[0, 0]])
            t_end = np.min([gt[-1, 0], imu[-1, 0]])

            # start index
            idx0_imu = np.searchsorted(imu[:, 0], t0)
            idx0_gt = np.searchsorted(gt[:, 0], t0)

            # end index
            idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
            idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

            # subsample
            imu = imu[idx0_imu:idx_end_imu]
            gt = gt[idx0_gt:idx_end_gt]
            t = imu[:, 0]

            # take ground truth position
            p_gt = gt[:, [4, 8, 12]]
            p_gt = p_gt - p_gt[0]

            # take ground-truth rotation matrices
            Rot_gt = torch.Tensor(gt.shape[0], 3, 3)
            for j in range(3):
                Rot_gt[:, j] = torch.Tensor(gt[:, 1 + 4 * j:1 + 4 * j + 3])
            q_gt = SO3.to_quaternion(Rot_gt)
            # convert to roll, pitch, yaw angles
            rpys = SO3.to_rpy(Rot_gt)
            t_gt = gt[:, 0]
            # interpolate ground-truth
            p_gt = interpolate(p_gt, t_gt, t)
            rpys = interpolate(rpys.numpy(), t_gt, t, angle=True)

            # convert from numpy
            ts = (t - t0) / time_factor
            p_gt = torch.Tensor(p_gt)
            rpys = torch.Tensor(rpys).float()
            q_gt = SO3.to_quaternion(
                SO3.from_rpy(rpys[:, 0], rpys[:, 1], rpys[:, 2]))
            imu = torch.Tensor(imu).float()

            # take IMU gyro, accelerometer and magnetometer measurements
            imu = imu[:, 8:17]

            dt = ts[1:] - ts[:-1]
            # compute speed ground truth (apply smoothing)
            v_gt = torch.zeros(p_gt.shape[0], 3)
            for j in range(3):
                p_gt_smooth = savgol_filter(p_gt[:, j], 11, 1)
                v_j = (p_gt_smooth[1:] - p_gt_smooth[:-1]) / dt
                v_j_smooth = savgol_filter(v_j, 11, 0)
                v_gt[1:, j] = torch.Tensor(v_j_smooth)

            # binary ground-truth zero-velocity (ZUPT) measurement
            zupts = v_gt.norm(dim=1, keepdim=True) < sm_velocity_max_threshold
            zupts = zupts.float()
            # set ground truth consistent with ZUPT
            v_gt[zupts.squeeze() == 1] = 0

            # save for all training
            mondict = {
                'xs': zupts.float(),
                'us': imu.float(),
            }
            pdump(mondict, self.predata_dir, sequence + ".p")
            # save ground truth
            mondict = {
                'ts': ts,
                'qs': q_gt.float(),
                'vs': v_gt.float(),
                'ps': p_gt.float(),
            }
            pdump(mondict, self.predata_dir, sequence + "_gt.p")
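
The ground-truth indexing above (gt[:, [4, 8, 12]] for position and gt[:, 1 + 4*j:1 + 4*j + 3] for rotation row j) is consistent with each global_pose.csv row holding a timestamp followed by a row-major 3x4 [R | p] matrix; this layout is an assumption inferred from the indexing, made explicit with a toy row:

import numpy as np

row = np.arange(13, dtype=float)   # stand-in row: [t, T00, T01, T02, T03, T10, ...]
T = row[1:13].reshape(3, 4)        # row-major 3x4 [R | p]
R, p = T[:, :3], T[:, 3]
print(np.array_equal(p, row[[4, 8, 12]]))              # True
print(np.array_equal(R[1], row[1 + 4*1:1 + 4*1 + 3]))  # True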
Example #15
    def read_data(self, data_dir):
        r"""Read the data from the dataset"""

        f = os.path.join(self.predata_dir, 'dataset-room1_512_16_gt.p')
        if os.path.exists(f):
            # data already preprocessed
            return

        print("Start read_data, be patient please")

        def set_path(seq):
            path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
            path_gt = os.path.join(data_dir, seq, "mav0", "mocap0", "data.csv")
            return path_imu, path_gt

        sequences = os.listdir(data_dir)

        # read each sequence
        for sequence in sequences:
            print("\nSequence name: " + sequence)
            if 'room' not in sequence:
                continue

            path_imu, path_gt = set_path(sequence)
            imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
            gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)

            # time synchronization between IMU and ground truth
            t0 = np.max([gt[0, 0], imu[0, 0]])
            t_end = np.min([gt[-1, 0], imu[-1, 0]])

            # start index
            idx0_imu = np.searchsorted(imu[:, 0], t0)
            idx0_gt = np.searchsorted(gt[:, 0], t0)

            # end index
            idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
            idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

            # subsample
            imu = imu[idx0_imu:idx_end_imu]
            gt = gt[idx0_gt:idx_end_gt]
            ts = imu[:, 0] / 1e9

            # interpolate
            t_gt = gt[:, 0] / 1e9
            gt = self.interpolate(gt, t_gt, ts)

            # take ground truth position
            p_gt = gt[:, 1:4]
            p_gt = p_gt - p_gt[0]

            # take ground-truth quaternion orientation
            q_gt = SO3.qnorm(torch.Tensor(gt[:, 4:8]).double())
            Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()

            # convert from numpy
            p_gt = torch.Tensor(p_gt).double()
            v_gt = torch.zeros_like(p_gt).double()
            v_gt[1:] = (p_gt[1:] - p_gt[:-1]) / self.dt
            imu = torch.Tensor(imu[:, 1:]).double()

            # compute pre-integration factors for all training
            mtf = self.min_train_freq
            dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
            dRot_ij = SO3.dnormalize(dRot_ij.cuda())
            dxi_ij = SO3.log(dRot_ij).cpu()

            # masks with 1 when ground truth is available, 0 otherwise
            masks = dxi_ij.new_ones(dxi_ij.shape[0])
            tmp = np.searchsorted(t_gt, ts[:-mtf])
            diff_t = ts[:-mtf] - t_gt[tmp]
            masks[np.abs(diff_t) > 0.01] = 0

            # save all the sequence
            mondict = {
                'xs': torch.cat((dxi_ij, masks.unsqueeze(1)), 1).float(),
                'us': imu.float(),
            }
            pdump(mondict, self.predata_dir, sequence + ".p")

            # save ground truth
            mondict = {
                'ts': ts,
                'qs': q_gt.float(),
                'vs': v_gt.float(),
                'ps': p_gt.float(),
            }
            pdump(mondict, self.predata_dir, sequence + "_gt.p")
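
A toy-number sketch of the mask construction above: an IMU sample is masked out when the ground-truth stamp found by searchsorted is more than 10 ms away, which flags mocap dropouts so the corresponding pre-integration factors can be excluded from the loss (see the mask handling in Example #1).

import numpy as np

t_gt = np.array([0.000, 0.005, 0.010, 0.300])       # mocap stamps with a dropout
ts = np.array([0.000, 0.006, 0.012, 0.100, 0.300])  # IMU stamps
tmp = np.searchsorted(t_gt, ts)                     # first stamp at or after each sample
diff_t = ts - t_gt[tmp]
masks = np.ones(ts.shape[0])
masks[np.abs(diff_t) > 0.01] = 0
print(masks)  # [1. 1. 0. 0. 1.] -> samples inside the dropout are excluded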
Example #16
    def read_data(self, data_dir):
        r"""Read the data from the dataset"""

        f = os.path.join(self.predata_dir, 'MH_01_easy.p')
        if os.path.exists(f):
            # data already preprocessed
            return

        print("Start read_data, be patient please")

        def set_path(seq):
            path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
            path_gt = os.path.join(data_dir, seq, "mav0",
                                   "state_groundtruth_estimate0", "data.csv")
            return path_imu, path_gt

        sequences = os.listdir(data_dir)
        # read each sequence
        for sequence in sequences:
            print("\nSequence name: " + sequence)
            path_imu, path_gt = set_path(sequence)
            imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
            gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)

            # time synchronization between IMU and ground truth
            t0 = np.max([gt[0, 0], imu[0, 0]])
            t_end = np.min([gt[-1, 0], imu[-1, 0]])

            # start index
            idx0_imu = np.searchsorted(imu[:, 0], t0)
            idx0_gt = np.searchsorted(gt[:, 0], t0)

            # end index
            idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
            idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

            # subsample
            imu = imu[idx0_imu:idx_end_imu]
            gt = gt[idx0_gt:idx_end_gt]
            ts = imu[:, 0] / 1e9

            # interpolate
            gt = self.interpolate(gt, gt[:, 0] / 1e9, ts)

            # take ground truth position
            p_gt = gt[:, 1:4]
            p_gt = p_gt - p_gt[0]

            # take ground-truth quaternion orientation
            q_gt = torch.Tensor(gt[:, 4:8]).double()
            q_gt = q_gt / q_gt.norm(dim=1, keepdim=True)
            Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()

            # convert from numpy
            p_gt = torch.Tensor(p_gt).double()
            v_gt = torch.tensor(gt[:, 8:11]).double()
            imu = torch.Tensor(imu[:, 1:]).double()

            # compute pre-integration factors for all training
            mtf = self.min_train_freq
            dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
            dRot_ij = SO3.dnormalize(dRot_ij.cuda())
            dxi_ij = SO3.log(dRot_ij).cpu()

            # save for all training
            mondict = {
                'xs': dxi_ij.float(),
                'us': imu.float(),
            }
            pdump(mondict, self.predata_dir, sequence + ".p")
            # save ground truth
            mondict = {
                'ts': ts,
                'qs': q_gt.float(),
                'vs': v_gt.float(),
                'ps': p_gt.float(),
            }
            pdump(mondict, self.predata_dir, sequence + "_gt.p")
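
A toy check (not from the repository, using plain torch in place of bmtm and SO3.log) that the pre-integration factor R_i^T R_{i+mtf} captures the orientation increment over mtf samples; with yaw-only motion the angle extracted from each factor is simply the yaw difference.

import torch

def rot_z(yaw):
    # batch of rotations about the z axis
    c, s = torch.cos(yaw), torch.sin(yaw)
    R = torch.zeros(yaw.shape[0], 3, 3)
    R[:, 0, 0], R[:, 0, 1] = c, -s
    R[:, 1, 0], R[:, 1, 1] = s, c
    R[:, 2, 2] = 1.0
    return R

yaw = torch.linspace(0, 1.0, 33)      # ground-truth yaw at each IMU sample
Rot_gt = rot_z(yaw)
mtf = 16                              # min_train_freq
dRot = Rot_gt[:-mtf].transpose(1, 2).bmm(Rot_gt[mtf:])  # R_i^T R_{i+mtf}
dyaw = torch.atan2(dRot[:, 1, 0], dRot[:, 0, 0])        # angle of each factor
print(torch.allclose(dyaw, yaw[mtf:] - yaw[:-mtf], atol=1e-6))  # True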