Example #1
    def init_normalize_factors(self, train_seqs):
        if os.path.exists(self.path_normalize_factors):
            mondict = pload(self.path_normalize_factors)
            return mondict['mean_u'], mondict['std_u']

        path = os.path.join(self.predata_dir, train_seqs[0] + '.p')
        if not os.path.exists(path):
            print("init_normalize_factors not computed")
            return 0, 0

        print('Start computing normalizing factors ...')
        cprint("Do it only on training sequences, it is vital!", 'yellow')
        # first compute mean
        num_data = 0

        for i, sequence in enumerate(train_seqs):
            pickle_dict = pload(self.predata_dir, sequence + '.p')
            us = pickle_dict['us']
            sms = pickle_dict['xs']
            if i == 0:
                mean_u = us.sum(dim=0)
                num_positive = sms.sum(dim=0)
                num_negative = sms.shape[0] - sms.sum(dim=0)
            else:
                mean_u += us.sum(dim=0)
                num_positive += sms.sum(dim=0)
                num_negative += sms.shape[0] - sms.sum(dim=0)
            num_data += us.shape[0]
        mean_u = mean_u / num_data
        pos_weight = num_negative / num_positive

        # second compute standard deviation
        for i, sequence in enumerate(train_seqs):
            pickle_dict = pload(self.predata_dir, sequence + '.p')
            us = pickle_dict['us']
            if i == 0:
                std_u = ((us - mean_u)**2).sum(dim=0)
            else:
                std_u += ((us - mean_u)**2).sum(dim=0)
        std_u = (std_u / num_data).sqrt()
        normalize_factors = {
            'mean_u': mean_u,
            'std_u': std_u,
        }
        print('... ended computing normalizing factors')
        print('pos_weight:', pos_weight)
        print('These values must be training parameters!')
        print('mean_u    :', mean_u)
        print('std_u     :', std_u)
        print('num_data  :', num_data)
        pdump(normalize_factors, self.path_normalize_factors)
        return mean_u, std_u
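All of these examples lean on two small pickle helpers, pload and pdump, whose definitions are not shown. A minimal sketch consistent with how they are called above (path fragments joined with os.path.join; the bodies are assumptions, not the project's actual code):

    import os
    import pickle

    def pload(*f_names):
        """Load a pickle file; path fragments are joined (assumed behavior)."""
        with open(os.path.join(*f_names), 'rb') as f:
            return pickle.load(f)

    def pdump(pickle_dict, *f_names):
        """Dump a dict to a pickle file; path fragments are joined (assumed)."""
        with open(os.path.join(*f_names), 'wb') as f:
            pickle.dump(pickle_dict, f)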
Example #2
    def loop_test(self, dataset, criterion):
        """Forward loop over test data"""
        self.net.eval()
        for i in range(len(dataset)):
            seq = dataset.sequences[i]
            us, xs = dataset[i]
            with torch.no_grad():
                hat_xs = self.net(us.cuda().unsqueeze(0))
            loss = criterion(xs.cuda().unsqueeze(0), hat_xs)
            mkdir(self.address, seq)
            mondict = {
                'hat_xs': hat_xs[0].cpu(),
                'loss': loss.cpu().item(),
            }
            pdump(mondict, self.address, seq, 'results.p')
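The mkdir call above is another unshown helper. A plausible sketch, assuming the same os.path.join convention as pload/pdump:

    import os

    def mkdir(*paths):
        """Create the directory if it does not already exist (assumed helper)."""
        os.makedirs(os.path.join(*paths), exist_ok=True)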
Example #3
    def __init__(self, res_dir, tb_dir, net_class, net_params, address, dt):
        self.res_dir = res_dir
        self.tb_dir = tb_dir
        self.net_class = net_class
        self.net_params = net_params
        self._ready = False
        self.train_params = {}
        self.figsize = (20, 12)
        self.dt = dt  # (s)
        self.address, self.tb_address = self.find_address(address)
        if address is None:  # create new address
            pdump(self.net_params, self.address, 'net_params.p')
            ydump(self.net_params, self.address, 'net_params.yaml')
        else:  # load the saved network and training parameters
            self.net_params = pload(self.address, 'net_params.p')
            self.train_params = pload(self.address, 'train_params.p')
            self._ready = True
        self.path_weights = os.path.join(self.address, 'weights.pt')
        self.net = self.net_class(**self.net_params)
        if self._ready:  # fill network parameters
            self.load_weights()
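The constructor delegates folder selection to self.find_address(address), which is not shown. A hedged sketch of one plausible behavior, given that address is None for new runs and an existing results path otherwise (the timestamped naming is an assumption):

    import os
    from datetime import datetime

    def find_address(self, address):
        """Return (results_address, tb_address); a sketch, not the real method."""
        if address is None:  # new run: create a timestamped folder
            now = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
            address = os.path.join(self.res_dir, now)
            os.makedirs(address, exist_ok=True)
            tb_address = os.path.join(self.tb_dir, now)
        else:  # resume from an existing run folder
            tb_address = os.path.join(self.tb_dir, os.path.basename(address))
        return address, tb_address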
Example #4
File: iekf.py  Project: yxw027/RINS-W
    def dump(self, address, seq, zupts, covs):
        # Jacobian for rotating the covariance (J P J^T); transform currently disabled
        J = torch.eye(9).repeat(self.Ps.shape[0], 1, 1)
        J[:, 3:6, :3] = SO3.wedge(self.vs)
        J[:, 6:9, :3] = SO3.wedge(self.ps)
        # self.Ps = axat(J, self.Ps[:, :9, :9])
        path = os.path.join(address, seq, 'iekf.p')
        mondict = {
            'Rots': self.Rots,
            'vs': self.vs,
            'ps': self.ps,
            'b_omegas': self.b_omegas,
            'b_accs': self.b_accs,
            'rs': self.rs,
            'Ps': self.Ps.diagonal(dim1=1, dim2=2),
            'zupts': zupts,
            'covs': covs,
        }
        for k, v in mondict.items():
            mondict[k] = v.float().detach().cpu()
        pdump(mondict, path)
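The commented-out line refers to an axat helper that would conjugate each covariance block by the Jacobian, i.e. compute J P J^T batch-wise. A one-liner matching that reading (only the name comes from the snippet; the body is an assumption):

    import torch

    def axat(A, X):
        """Batched A @ X @ A^T, e.g. to rotate covariances (assumed semantics)."""
        return A.bmm(X).bmm(A.transpose(1, 2))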
Example #5
    def train(self, dataset_class, dataset_params, train_params):
        """train the neural network. GPU is assumed"""
        self.train_params = train_params
        pdump(self.train_params, self.address, 'train_params.p')
        ydump(self.train_params, self.address, 'train_params.yaml')

        hparams = self.get_hparams(dataset_class, dataset_params, train_params)
        ydump(hparams, self.address, 'hparams.yaml')

        # define datasets
        dataset_train = dataset_class(**dataset_params, mode='train')
        dataset_train.init_train()
        dataset_val = dataset_class(**dataset_params, mode='val')
        dataset_val.init_val()

        # get class
        Optimizer = train_params['optimizer_class']
        Scheduler = train_params['scheduler_class']
        Loss = train_params['loss_class']

        # get parameters
        dataloader_params = train_params['dataloader']
        optimizer_params = train_params['optimizer']
        scheduler_params = train_params['scheduler']
        loss_params = train_params['loss']

        # define optimizer, scheduler and loss
        dataloader = DataLoader(dataset_train, **dataloader_params)
        optimizer = Optimizer(self.net.parameters(), **optimizer_params)
        scheduler = Scheduler(optimizer, **scheduler_params)
        criterion = Loss(**loss_params)

        # remaining training parameters
        freq_val = train_params['freq_val']
        n_epochs = train_params['n_epochs']

        # init net w.r.t. the dataset
        self.net = self.net.cuda()
        mean_u, std_u = dataset_train.mean_u, dataset_train.std_u
        self.net.set_normalized_factors(mean_u, std_u)

        # start tensorboard writer
        writer = SummaryWriter(self.tb_address)
        start_time = time.time()
        best_loss = torch.Tensor([float('Inf')])

        # define helper functions for monitoring training
        def write(epoch, loss_epoch):
            writer.add_scalar('loss/train', loss_epoch.item(), epoch)
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
            print('Train Epoch: {:2d} \tLoss: {:.4f}'.format(
                epoch, loss_epoch.item()))

        def write_time(epoch, start_time):
            delta_t = time.time() - start_time
            print("Amount of time spent for epochs " +
                "{}-{}: {:.1f}s\n".format(epoch - freq_val, epoch, delta_t))
            writer.add_scalar('time_spent', delta_t, epoch)

        def write_val(loss, best_loss):
            if 0.5*loss <= best_loss:
                msg = 'validation loss decreases! :) '
                msg += '(curr/prev loss {:.4f}/{:.4f})'.format(loss.item(),
                    best_loss.item())
                cprint(msg, 'green')
                best_loss = loss
                self.save_net()
            else:
                msg = 'validation loss increases! :( '
                msg += '(curr/prev loss {:.4f}/{:.4f})'.format(loss.item(),
                    best_loss.item())
                cprint(msg, 'yellow')
            writer.add_scalar('loss/val', loss.item(), epoch)
            return best_loss

        # training loop !
        for epoch in range(1, n_epochs + 1):
            loss_epoch = self.loop_train(dataloader, optimizer, criterion)
            write(epoch, loss_epoch)
            scheduler.step(epoch)
            if epoch % freq_val == 0:
                loss = self.loop_val(dataset_val, criterion)
                write_time(epoch, start_time)
                best_loss = write_val(loss, best_loss)
                start_time = time.time()
        # training is over !

        # test on new data
        dataset_test = dataset_class(**dataset_params, mode='test')
        self.load_weights()
        test_loss = self.loop_val(dataset_test, criterion)
        dict_loss = {
            'final_loss/val': best_loss.item(),
            'final_loss/test': test_loss.item()
            }
        writer.add_hparams(hparams, dict_loss)
        ydump(dict_loss, self.address, 'final_loss.yaml')
        writer.close()
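train reads a fixed set of keys from train_params. A minimal illustrative dictionary; only the key names are dictated by the code above, and every value here is a placeholder chosen for the sketch:

    import torch

    train_params = {
        'optimizer_class': torch.optim.Adam,
        'scheduler_class': torch.optim.lr_scheduler.StepLR,
        'loss_class': torch.nn.MSELoss,  # placeholder loss class
        'dataloader': {'batch_size': 1, 'shuffle': True},
        'optimizer': {'lr': 1e-3},
        'scheduler': {'step_size': 100, 'gamma': 0.5},
        'loss': {},
        'freq_val': 10,
        'n_epochs': 400,
    }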
Example #6
    def read_data(self, data_dir):
        r"""Read the data from the dataset"""

        # threshold for ZUPT ground truth
        sm_velocity_max_threshold = 0.004  # m/s

        f = os.path.join(self.predata_dir, 'urban06.p')
        if os.path.exists(f):
            return

        print("Start read_data, be patient please")

        def set_path(seq):
            path_imu = os.path.join(data_dir, seq, "sensor_data",
                                    "xsens_imu.csv")
            path_gt = os.path.join(data_dir, seq, "global_pose.csv")
            # path_odo = os.path.join(data_dir, seq, "encoder.csv")
            return path_imu, path_gt

        time_factor = 1e9  # ns -> s

        def interpolate(x, t, t_int, angle=False):
            """
            Interpolate ground truth at the sensor timestamps.
            """
            x_int = np.zeros((t_int.shape[0], x.shape[1]))
            for i in range(x.shape[1]):
                if angle:
                    x[:, i] = np.unwrap(x[:, i])
                x_int[:, i] = np.interp(t_int, t, x[:, i])
            return x_int

        sequences = os.listdir(data_dir)
        # read each sequence
        for sequence in sequences:
            print("\nSequence name: " + sequence)
            path_imu, path_gt = set_path(sequence)
            imu = np.genfromtxt(path_imu, delimiter=",")

            # Urban00-05 and campus00 have only quaternion and Euler data
            if imu.shape[1] <= 10:
                cprint("No IMU data for dataset " + sequence, 'yellow')
                continue
            gt = np.genfromtxt(path_gt, delimiter=",")

            # time synchronization between IMU and ground truth
            t0 = np.max([gt[0, 0], imu[0, 0]])
            t_end = np.min([gt[-1, 0], imu[-1, 0]])

            # start index
            idx0_imu = np.searchsorted(imu[:, 0], t0)
            idx0_gt = np.searchsorted(gt[:, 0], t0)

            # end index
            idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
            idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

            # subsample
            imu = imu[idx0_imu:idx_end_imu]
            gt = gt[idx0_gt:idx_end_gt]
            t = imu[:, 0]

            # take ground truth position
            p_gt = gt[:, [4, 8, 12]]
            p_gt = p_gt - p_gt[0]

            # take ground-truth rotation matrices
            Rot_gt = torch.Tensor(gt.shape[0], 3, 3)
            for j in range(3):
                Rot_gt[:, j] = torch.Tensor(gt[:, 1 + 4 * j:1 + 4 * j + 3])
            # convert to angle orientation
            rpys = SO3.to_rpy(Rot_gt)
            t_gt = gt[:, 0]
            # interpolate ground-truth
            p_gt = interpolate(p_gt, t_gt, t)
            rpys = interpolate(rpys.numpy(), t_gt, t, angle=True)

            # convert from numpy
            ts = (t - t0) / time_factor
            p_gt = torch.Tensor(p_gt)
            rpys = torch.Tensor(rpys).float()
            q_gt = SO3.to_quaternion(
                SO3.from_rpy(rpys[:, 0], rpys[:, 1], rpys[:, 2]))
            imu = torch.Tensor(imu).float()

            # keep IMU gyro, accelerometer and magnetometer columns
            imu = imu[:, 8:17]

            dt = ts[1:] - ts[:-1]
            # compute speed ground truth (apply smoothing)
            v_gt = torch.zeros(p_gt.shape[0], 3)
            for j in range(3):
                p_gt_smooth = savgol_filter(p_gt[:, j], 11, 1)
                v_j = (p_gt_smooth[1:] - p_gt_smooth[:-1]) / dt
                v_j_smooth = savgol_filter(v_j, 11, 0)
                v_gt[1:, j] = torch.Tensor(v_j_smooth)

            # ground-truth zero-velocity (ZUPT) indicator (binary)
            zupts = v_gt.norm(dim=1, keepdim=True) < sm_velocity_max_threshold
            zupts = zupts.float()
            # set ground truth consistent with ZUPT
            v_gt[zupts.squeeze() == 1] = 0

            # save for all training
            mondict = {
                'xs': zupts.float(),
                'us': imu.float(),
            }
            pdump(mondict, self.predata_dir, sequence + ".p")
            # save ground truth
            mondict = {
                'ts': ts,
                'qs': q_gt.float(),
                'vs': v_gt.float(),
                'ps': p_gt.float(),
            }
            pdump(mondict, self.predata_dir, sequence + "_gt.p")
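The 'us'/'xs' pickles dumped here are what init_normalize_factors in Example #1 reads back. As a standalone sanity check of the ZUPT labeling rule with the same 0.004 m/s threshold (toy velocities, made up for the illustration):

    import torch

    v_gt = torch.tensor([[0.000, 0.0, 0.0],
                         [0.003, 0.0, 0.0],
                         [0.500, 0.1, 0.0]])
    zupts = (v_gt.norm(dim=1, keepdim=True) < 0.004).float()
    print(zupts.squeeze())  # tensor([1., 1., 0.]): first two samples are stationary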
Example #7
    def read_data(self, data_dir):
        r"""Read the data from the dataset"""

        f = os.path.join(self.predata_dir, 'dataset-room1_512_16_gt.p')
        if os.path.exists(f):
            return

        print("Start read_data, be patient please")

        def set_path(seq):
            path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
            path_gt = os.path.join(data_dir, seq, "mav0", "mocap0", "data.csv")
            return path_imu, path_gt

        sequences = os.listdir(data_dir)

        # read each sequence
        for sequence in sequences:
            print("\nSequence name: " + sequence)
            if 'room' not in sequence:
                continue

            path_imu, path_gt = set_path(sequence)
            imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
            gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)

            # time synchronization between IMU and ground truth
            t0 = np.max([gt[0, 0], imu[0, 0]])
            t_end = np.min([gt[-1, 0], imu[-1, 0]])

            # start index
            idx0_imu = np.searchsorted(imu[:, 0], t0)
            idx0_gt = np.searchsorted(gt[:, 0], t0)

            # end index
            idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
            idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

            # subsample
            imu = imu[idx0_imu:idx_end_imu]
            gt = gt[idx0_gt:idx_end_gt]
            ts = imu[:, 0] / 1e9

            # interpolate
            t_gt = gt[:, 0] / 1e9
            gt = self.interpolate(gt, t_gt, ts)

            # take ground truth position
            p_gt = gt[:, 1:4]
            p_gt = p_gt - p_gt[0]

            # take ground-truth quaternion pose
            q_gt = SO3.qnorm(torch.Tensor(gt[:, 4:8]).double())
            Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()

            # convert from numpy
            p_gt = torch.Tensor(p_gt).double()
            v_gt = torch.zeros_like(p_gt).double()
            v_gt[1:] = (p_gt[1:] - p_gt[:-1]) / self.dt
            imu = torch.Tensor(imu[:, 1:]).double()

            # compute pre-integration factors for all training
            mtf = self.min_train_freq
            dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
            dRot_ij = SO3.dnormalize(dRot_ij.cuda())
            dxi_ij = SO3.log(dRot_ij).cpu()

            # masks with 1 when ground truth is available, 0 otherwise
            masks = dxi_ij.new_ones(dxi_ij.shape[0])
            tmp = np.searchsorted(t_gt, ts[:-mtf])
            diff_t = ts[:-mtf] - t_gt[tmp]
            masks[np.abs(diff_t) > 0.01] = 0

            # save all the sequence
            mondict = {
                'xs': torch.cat((dxi_ij, masks.unsqueeze(1)), 1).float(),
                'us': imu.float(),
            }
            pdump(mondict, self.predata_dir, sequence + ".p")

            # save ground truth
            mondict = {
                'ts': ts,
                'qs': q_gt.float(),
                'vs': v_gt.float(),
                'ps': p_gt.float(),
            }
            pdump(mondict, self.predata_dir, sequence + "_gt.p")
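Both this example and Example #8 call self.interpolate(gt, t_gt, ts), which is not shown. Since the quaternion columns are renormalized with SO3.qnorm right after the call, a plain column-wise linear interpolation is a consistent sketch (the real method may treat quaternions specially, e.g. with slerp; that detail is an assumption):

    import numpy as np

    def interpolate(x, t, t_int):
        """Column-wise linear interpolation from timestamps t to t_int (sketch)."""
        x_int = np.zeros((t_int.shape[0], x.shape[1]))
        for i in range(x.shape[1]):
            x_int[:, i] = np.interp(t_int, t, x[:, i])
        return x_int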
Example #8
    def read_data(self, data_dir):
        r"""Read the data from the dataset"""

        f = os.path.join(self.predata_dir, 'MH_01_easy.p')
        if os.path.exists(f):
            return

        print("Start read_data, be patient please")

        def set_path(seq):
            path_imu = os.path.join(data_dir, seq, "mav0", "imu0", "data.csv")
            path_gt = os.path.join(data_dir, seq, "mav0",
                                   "state_groundtruth_estimate0", "data.csv")
            return path_imu, path_gt

        sequences = os.listdir(data_dir)
        # read each sequence
        for sequence in sequences:
            print("\nSequence name: " + sequence)
            path_imu, path_gt = set_path(sequence)
            imu = np.genfromtxt(path_imu, delimiter=",", skip_header=1)
            gt = np.genfromtxt(path_gt, delimiter=",", skip_header=1)

            # time synchronization between IMU and ground truth
            t0 = np.max([gt[0, 0], imu[0, 0]])
            t_end = np.min([gt[-1, 0], imu[-1, 0]])

            # start index
            idx0_imu = np.searchsorted(imu[:, 0], t0)
            idx0_gt = np.searchsorted(gt[:, 0], t0)

            # end index
            idx_end_imu = np.searchsorted(imu[:, 0], t_end, 'right')
            idx_end_gt = np.searchsorted(gt[:, 0], t_end, 'right')

            # subsample
            imu = imu[idx0_imu:idx_end_imu]
            gt = gt[idx0_gt:idx_end_gt]
            ts = imu[:, 0] / 1e9

            # interpolate
            gt = self.interpolate(gt, gt[:, 0] / 1e9, ts)

            # take ground truth position
            p_gt = gt[:, 1:4]
            p_gt = p_gt - p_gt[0]

            # take ground-truth quaternion pose
            q_gt = torch.Tensor(gt[:, 4:8]).double()
            q_gt = q_gt / q_gt.norm(dim=1, keepdim=True)
            Rot_gt = SO3.from_quaternion(q_gt.cuda(), ordering='wxyz').cpu()

            # convert from numpy
            p_gt = torch.Tensor(p_gt).double()
            v_gt = torch.tensor(gt[:, 8:11]).double()
            imu = torch.Tensor(imu[:, 1:]).double()

            # compute pre-integration factors for all training
            mtf = self.min_train_freq
            dRot_ij = bmtm(Rot_gt[:-mtf], Rot_gt[mtf:])
            dRot_ij = SO3.dnormalize(dRot_ij.cuda())
            dxi_ij = SO3.log(dRot_ij).cpu()

            # save for all training
            mondict = {
                'xs': dxi_ij.float(),
                'us': imu.float(),
            }
            pdump(mondict, self.predata_dir, sequence + ".p")
            # save ground truth
            mondict = {
                'ts': ts,
                'qs': q_gt.float(),
                'vs': v_gt.float(),
                'ps': p_gt.float(),
            }
            pdump(mondict, self.predata_dir, sequence + "_gt.p")
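bmtm is presumably a batched matrix-transpose-matrix product, so that dRot_ij[k] = Rot_gt[k]^T Rot_gt[k+mtf] is the relative rotation over mtf steps. A sketch consistent with that reading (the body is an assumption):

    import torch

    def bmtm(a, b):
        """Batched a[k].T @ b[k] for (N, 3, 3) tensors (assumed semantics)."""
        return torch.einsum('bij,bik->bjk', a, b)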
Example #9
    parser.add_argument('--type',
                        choices=['conll03', 'ontonotes', 'ptb'],
                        help='multi task data type')
    parser.add_argument('--out',
                        type=str,
                        default='data',
                        help='processed data output dir')
    # fmt: on
    args = parser.parse_args()
    assert args.pos is not None
    return args


if __name__ == "__main__":
    args = get_args()
    set_seed(1)
    parse_table = {
        "conll03": prepare_conll03,
        "ontonotes": prepare_ontonotes,
        "ptb": prepare_ptb,
    }
    logger.info(args)
    assert args.type in parse_table
    task_lst, vocabs = parse_table[args.type](args)
    os.makedirs(args.out, exist_ok=True)
    data_summary(task_lst, vocabs)
    path = os.path.join(args.out, args.type + ".pkl")
    logger.info("saving data to " + path)
    pdump({"task_lst": task_lst, "vocabs": vocabs}, path)