def forward_with_rotation_matrices_mask(self, xs, hat_xs):
    """Forward errors with rotation matrices and ground-truth availability masks"""
    N = xs.shape[0]
    # per-sample mask (last channel of xs): 1 where ground truth is available
    masks = xs[:, :, 3].unsqueeze(1)
    masks = torch.nn.functional.conv1d(masks, self.weight, bias=None,
        stride=self.min_train_freq).double().transpose(1, 2)
    # keep an increment only if every sample in its window has ground truth
    masks[masks < 1] = 0
    # ground-truth and estimated rotation increments at min_train_freq
    Xs = SO3.exp(xs[:, ::self.min_train_freq, :3].reshape(-1, 3).double())
    hat_xs = self.dt * hat_xs.reshape(-1, 3).double()
    Omegas = SO3.exp(hat_xs[:, :3])
    # compute increment at min_train_freq by decimation
    for k in range(self.min_N):
        Omegas = Omegas[::2].bmm(Omegas[1::2])
    rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
    loss = self.f_huber(rs)
    # compute increment from min_train_freq to max_train_freq
    for k in range(self.min_N, self.max_N):
        Omegas = Omegas[::2].bmm(Omegas[1::2])
        Xs = Xs[::2].bmm(Xs[1::2])
        masks = masks[:, ::2] * masks[:, 1::2]
        rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
        # keep only residuals fully covered by ground truth
        rs = rs[masks[:, self.N0:].squeeze(2) == 1]
        loss = loss + self.f_huber(rs[:, 2]) / (2**(k - self.min_N + 1))
    return loss
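# Illustrative sketch (not part of the original pipeline): how the strided
# conv1d above collapses a per-sample ground-truth availability mask into a
# per-increment mask. It assumes self.weight plays the role of an averaging
# kernel of length min_train_freq (ones / min_train_freq), so the convolution
# output equals 1 only when every sample in the window is available.
def _demo_mask_decimation():
    import torch
    min_train_freq = 4                              # assumed window length
    weight = torch.ones(1, 1, min_train_freq) / min_train_freq
    # one sequence of 12 samples with a gap of missing ground truth
    masks = torch.tensor([[1., 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1]]).unsqueeze(1)
    masks = torch.nn.functional.conv1d(masks, weight, bias=None,
        stride=min_train_freq).transpose(1, 2)
    masks[masks < 1] = 0                            # drop partially covered windows
    print(masks.squeeze())                          # tensor([1., 0., 1.])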
def plot_orientation_error(self, imu_Rots, net_Rots, N):
    """Plot orientation errors of the raw and denoised (net) IMU against ground truth"""
    gt = self.gt['Rots'][:N].cuda()
    # orientation errors w.r.t. ground truth, converted to degrees
    raw_err = 180/np.pi * SO3.log(bmtm(imu_Rots, gt)).cpu()
    net_err = 180/np.pi * SO3.log(bmtm(net_Rots, gt)).cpu()
    title = "$SO(3)$ orientation error"
    fig, axs = plt.subplots(3, 1, sharex=True, figsize=self.figsize)
    axs[0].set(ylabel='roll (deg)', title=title)
    axs[1].set(ylabel='pitch (deg)')
    axs[2].set(xlabel='$t$ (min)', ylabel='yaw (deg)')
    for i in range(3):
        axs[i].plot(self.ts, raw_err[:, i], color='red', label=r'raw IMU')
        axs[i].plot(self.ts, net_err[:, i], color='blue', label=r'net IMU')
        axs[i].set_ylim(-10, 10)
        axs[i].set_xlim(self.ts[0], self.ts[-1])
    self.savefig(axs, fig, 'orientation_error')
def forward_with_rotation_matrices(self, xs, hat_xs):
    """Forward errors with rotation matrices"""
    N = xs.shape[0]
    Xs = SO3.exp(xs[:, ::self.min_train_freq].reshape(-1, 3).double())
    hat_xs = self.dt * hat_xs.reshape(-1, 3).double()
    Omegas = SO3.exp(hat_xs[:, :3])
    # compute increment at min_train_freq by decimation
    for k in range(self.min_N):
        Omegas = Omegas[::2].bmm(Omegas[1::2])
    rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
    loss = self.f_huber(rs)
    # compute increment from min_train_freq to max_train_freq
    for k in range(self.min_N, self.max_N):
        Omegas = Omegas[::2].bmm(Omegas[1::2])
        Xs = Xs[::2].bmm(Xs[1::2])
        rs = SO3.log(bmtm(Omegas, Xs)).reshape(N, -1, 3)[:, self.N0:]
        loss = loss + self.f_huber(rs) / (2**(k - self.min_N + 1))
    return loss
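# Illustrative sketch (not part of the original pipeline): the decimation trick
# used in both losses above. Each pass of R[::2] @ R[1::2] halves the number of
# rotation increments and doubles their time span, so after k passes entry i is
# the product of the 2**k consecutive original increments it covers.
def _demo_increment_decimation():
    import torch
    torch.manual_seed(0)
    # 8 small random rotation increments built with the matrix exponential
    # (stand-in for SO3.exp applied to gyro increments)
    w = 0.05 * torch.randn(8, 3)
    W = torch.zeros(8, 3, 3)
    W[:, 0, 1], W[:, 0, 2] = -w[:, 2], w[:, 1]
    W[:, 1, 0], W[:, 1, 2] = w[:, 2], -w[:, 0]
    W[:, 2, 0], W[:, 2, 1] = -w[:, 1], w[:, 0]
    Omegas = torch.matrix_exp(W)
    # two decimation passes: 8 increments -> 4 -> 2
    coarse = Omegas
    for _ in range(2):
        coarse = coarse[::2].bmm(coarse[1::2])
    # the first coarse increment equals the chained product of the first four
    direct = Omegas[0] @ Omegas[1] @ Omegas[2] @ Omegas[3]
    print(torch.allclose(coarse[0], direct, atol=1e-6))  # True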
def plot_orientation_err(self):
    title = "Orientation error as function of time " + self.end_title
    # orientation error w.r.t. ground truth, converted to degrees
    err = 180/np.pi * SO3.log(bmtm(self.gt['Rots'].cuda(),
        self.iekf['Rots'].cuda())).cpu()
    fig, axs = plt.subplots(3, 1, sharex=True, figsize=self.figsize)
    axs[0].set(ylabel='roll (deg)', title=title)
    axs[1].set(ylabel='pitch (deg)')
    axs[2].set(xlabel='$t$ (min)', ylabel='yaw (deg)')
    for i in range(3):
        axs[i].plot(self.ts, err[:, i], color="blue")
        axs[i].set_xlim(self.ts[0], self.ts[-1])
    self.savefig(axs, fig, 'orientation_error')
def read_data(self, data_dir):
    r"""Read the data from the dataset"""
    f = os.path.join(self.predata_dir, 'dataset-room1_512_16_gt.p')
    if os.path.exists(f):
        return
    print("Start read_data, be patient please")

    def set_path(seq):
        path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
        path_gt = os.path.join(data_dir, seq, "mav0", "mocap0", "data.csv")
        return path_imu, path_gt

    sequences = os.listdir(data_dir)
    # read each sequence
    for sequence in sequences:
        print("\nSequence name: " + sequence)
        if 'room' not in sequence:
            continue

        path_imu, path_gt = set_path(sequence)
        imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
        gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)

        # time synchronization between IMU and ground truth
        t0 = np.max([gt[0, 0], imu[0, 0]])
        t_end = np.min([gt[-1, 0], imu[-1, 0]])

        # start index
        idx0_imu = np.searchsorted(imu[:, 0], t0)
        idx0_gt = np.searchsorted(gt[:, 0], t0)

        # end index
        idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
        idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

        # subsample
        imu = imu[idx0_imu:idx_end_imu]
        gt = gt[idx0_gt:idx_end_gt]
        ts = imu[:, 0] / 1e9

        # interpolate the ground truth at the IMU timestamps
        t_gt = gt[:, 0] / 1e9
        gt = self.interpolate(gt, t_gt, ts)

        # take ground-truth position
        p_gt = gt[:, 1:4]
        p_gt = p_gt - p_gt[0]

        # take ground-truth quaternion orientation
        q_gt = SO3.qnorm(torch.Tensor(gt[:, 4:8]).double())
        Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()

        # convert from numpy
        p_gt = torch.Tensor(p_gt).double()
        # velocity obtained by finite differences of the position
        v_gt = torch.zeros_like(p_gt).double()
        v_gt[1:] = (p_gt[1:] - p_gt[:-1]) / self.dt
        imu = torch.Tensor(imu[:, 1:]).double()

        # compute pre-integration factors for all training
        mtf = self.min_train_freq
        dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
        dRot_ij = SO3.dnormalize(dRot_ij.cuda())
        dxi_ij = SO3.log(dRot_ij).cpu()

        # masks with 1 when ground truth is available, 0 otherwise
        masks = dxi_ij.new_ones(dxi_ij.shape[0])
        tmp = np.searchsorted(t_gt, ts[:-mtf])
        diff_t = ts[:-mtf] - t_gt[tmp]
        masks[np.abs(diff_t) > 0.01] = 0

        # save all the sequence
        mondict = {
            'xs': torch.cat((dxi_ij, masks.unsqueeze(1)), 1).float(),
            'us': imu.float(),
        }
        pdump(mondict, self.predata_dir, sequence + ".p")
        # save ground truth
        mondict = {
            'ts': ts,
            'qs': q_gt.float(),
            'vs': v_gt.float(),
            'ps': p_gt.float(),
        }
        pdump(mondict, self.predata_dir, sequence + "_gt.p")
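# Illustrative sketch (not part of the original pipeline): how the
# ground-truth availability mask above is built. Each IMU timestamp is matched
# against the ground-truth timestamp returned by searchsorted, and samples
# whose match is more than 10 ms away are zeroed. Timestamps are made up.
def _demo_gt_availability_mask():
    import numpy as np
    ts = np.array([0.000, 0.005, 0.010, 0.015, 0.020])   # IMU timestamps (s)
    t_gt = np.array([0.000, 0.004, 0.008, 0.040])        # mocap timestamps (s)
    masks = np.ones(len(ts))
    tmp = np.searchsorted(t_gt, ts)
    diff_t = ts - t_gt[tmp]
    masks[np.abs(diff_t) > 0.01] = 0
    print(masks)  # [1. 1. 0. 0. 0.]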
def read_data(self, data_dir):
    r"""Read the data from the dataset"""
    f = os.path.join(self.predata_dir, 'MH_01_easy.p')
    if os.path.exists(f):
        return
    print("Start read_data, be patient please")

    def set_path(seq):
        path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
        path_gt = os.path.join(data_dir, seq, "mav0",
            "state_groundtruth_estimate0", "data.csv")
        return path_imu, path_gt

    sequences = os.listdir(data_dir)
    # read each sequence
    for sequence in sequences:
        print("\nSequence name: " + sequence)
        path_imu, path_gt = set_path(sequence)
        imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
        gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)

        # time synchronization between IMU and ground truth
        t0 = np.max([gt[0, 0], imu[0, 0]])
        t_end = np.min([gt[-1, 0], imu[-1, 0]])

        # start index
        idx0_imu = np.searchsorted(imu[:, 0], t0)
        idx0_gt = np.searchsorted(gt[:, 0], t0)

        # end index
        idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
        idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

        # subsample
        imu = imu[idx0_imu:idx_end_imu]
        gt = gt[idx0_gt:idx_end_gt]
        ts = imu[:, 0] / 1e9

        # interpolate the ground truth at the IMU timestamps
        gt = self.interpolate(gt, gt[:, 0] / 1e9, ts)

        # take ground-truth position
        p_gt = gt[:, 1:4]
        p_gt = p_gt - p_gt[0]

        # take ground-truth quaternion orientation
        q_gt = torch.Tensor(gt[:, 4:8]).double()
        q_gt = q_gt / q_gt.norm(dim=1, keepdim=True)
        Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()

        # convert from numpy
        p_gt = torch.Tensor(p_gt).double()
        v_gt = torch.tensor(gt[:, 8:11]).double()
        imu = torch.Tensor(imu[:, 1:]).double()

        # compute pre-integration factors for all training
        mtf = self.min_train_freq
        dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
        dRot_ij = SO3.dnormalize(dRot_ij.cuda())
        dxi_ij = SO3.log(dRot_ij).cpu()

        # save for all training
        mondict = {
            'xs': dxi_ij.float(),
            'us': imu.float(),
        }
        pdump(mondict, self.predata_dir, sequence + ".p")
        # save ground truth
        mondict = {
            'ts': ts,
            'qs': q_gt.float(),
            'vs': v_gt.float(),
            'ps': p_gt.float(),
        }
        pdump(mondict, self.predata_dir, sequence + "_gt.p")
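# Illustrative sketch (not part of the original pipeline): the time-window
# synchronization used by read_data above. Both streams are cropped to the
# interval covered by IMU *and* ground truth using np.searchsorted.
# Timestamps are made up for the example.
def _demo_time_sync():
    import numpy as np
    t_imu = np.array([0., 1., 2., 3., 4., 5., 6.])        # first column of imu
    t_gt = np.array([2.5, 3.5, 4.5, 5.5, 6.5])            # first column of gt
    t0 = max(t_gt[0], t_imu[0])                            # 2.5
    t_end = min(t_gt[-1], t_imu[-1])                       # 6.0
    idx0_imu = np.searchsorted(t_imu, t0)                  # 3
    idx0_gt = np.searchsorted(t_gt, t0)                    # 0
    idx_end_imu = np.searchsorted(t_imu, t_end, 'right')   # 7
    idx_end_gt = np.searchsorted(t_gt, t_end, 'right')     # 4
    print(t_imu[idx0_imu:idx_end_imu])  # [3. 4. 5. 6.]
    print(t_gt[idx0_gt:idx_end_gt])     # [2.5 3.5 4.5 5.5]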