Example #1
def main(configs):

    # train and validation dataloaders
    train_loader = make_data_loader(configs,
                                    "train",
                                    configs.train_batch_size,
                                    num_threads=configs.train_num_thread,
                                    shuffle=True)

    val_loader = make_data_loader(configs,
                                  "valid",
                                  1,
                                  num_threads=configs.val_num_thread,
                                  shuffle=False)

    trainer = BlindPnPTrainer(configs, train_loader, val_loader)

    trainer.train()
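
All six examples call the same factory, make_data_loader(config, phase, batch_size, ...), whose definition is not shown on this page. Below is a minimal, hypothetical stand-in that matches the call signatures used in these examples (the dummy TensorDataset, the defaults, and the handling of repeat are all assumptions), so the snippets can be exercised in isolation.

import torch
from torch.utils.data import DataLoader, TensorDataset

def make_data_loader(config, phase, batch_size,
                     num_threads=0, num_workers=None,
                     shuffle=False, repeat=False):
    """Hypothetical stand-in for the factory used in these examples."""
    # The real project builds a phase-specific dataset from `config`;
    # a dummy TensorDataset keeps this sketch runnable. `repeat`
    # (an infinite-sampling flag in Example #3) is accepted but ignored.
    dataset = TensorDataset(torch.randn(8, 3))
    workers = num_workers if num_workers is not None else num_threads
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      num_workers=workers)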
Example #2
def main(config, resume=False):
    train_loader = make_data_loader(
        config,
        config.train_phase,
        config.batch_size,
        num_threads=config.train_num_thread)
    # default: False
    if config.test_valid:
        val_loader = make_data_loader(
            config,
            config.val_phase,
            config.val_batch_size,
            num_threads=config.val_num_thread)
    else:
        val_loader = None

    Trainer = get_trainer(config.trainer)
    trainer = Trainer(
        config=config,
        data_loader=train_loader,
        val_data_loader=val_loader,
    )

    trainer.train()
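
Examples #2 and #3 resolve the trainer class by name with get_trainer(config.trainer). One common way to implement such a lookup is a string-to-class registry; the sketch below (the TRAINERS dict, the decorator, and ExampleTrainer) is an assumption, not the project's actual code.

TRAINERS = {}

def register_trainer(cls):
    """Register a trainer class under its own class name."""
    TRAINERS[cls.__name__] = cls
    return cls

def get_trainer(name):
    try:
        return TRAINERS[name]
    except KeyError:
        raise ValueError(f"Unknown trainer: {name}") from None

@register_trainer
class ExampleTrainer:
    def __init__(self, config, data_loader, val_data_loader=None):
        self.config = config
        self.data_loader = data_loader
        self.val_data_loader = val_data_loader

    def train(self):
        pass  # training loop omitted in this sketch

With this registry, config.trainer = "ExampleTrainer" makes get_trainer return the class itself, which the caller then instantiates.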
Example #3
def main(config, resume=False):
    train_loader = make_data_loader(config,
                                    config.train_phase,
                                    config.batch_size,
                                    shuffle=True,
                                    repeat=True,
                                    num_workers=config.train_num_workers)
    if config.test_valid:
        val_loader = make_data_loader(config,
                                      config.val_phase,
                                      config.val_batch_size,
                                      shuffle=True,
                                      repeat=True,
                                      num_workers=config.val_num_workers)
    else:
        val_loader = None

    Trainer = get_trainer(config.trainer)
    trainer = Trainer(
        config=config,
        data_loader=train_loader,
        val_data_loader=val_loader,
    )

    trainer.train()

    if config.final_test:
        test_loader = make_data_loader(config,
                                       "test",
                                       config.val_batch_size,
                                       num_workers=config.val_num_workers)
        trainer.val_data_loader = test_loader
        test_dict = trainer._valid_epoch()
        test_loss = test_dict['loss']
        trainer.writer.add_scalar('test/loss', test_loss, config.max_epoch)
        logging.info(f" Test loss: {test_loss}")
Example #4
def main(config):
    test_loader = make_data_loader(
        config, config.test_phase, 1, num_threads=config.test_num_workers, shuffle=True)

    num_feats = 1

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    Model = load_model(config.model)
    model = Model(
        num_feats,
        config.model_n_out,
        bn_momentum=config.bn_momentum,
        conv1_kernel_size=config.conv1_kernel_size,
        normalize_feature=config.normalize_feature)
    checkpoint = torch.load(config.save_dir + '/checkpoint.pth')
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    model.eval()

    success_meter, rte_meter, rre_meter = AverageMeter(), AverageMeter(), AverageMeter()
    data_timer, feat_timer, reg_timer = Timer(), Timer(), Timer()

    test_iter = iter(test_loader)
    N = len(test_loader)
    n_gpu_failures = 0

    # downsample_voxel_size = 2 * config.voxel_size

    for i in range(N):
        data_timer.tic()
        try:
            data_dict = next(test_iter)
        except ValueError:
            n_gpu_failures += 1
            logging.info(f"# Erroneous GPU Pair {n_gpu_failures}")
            continue
        data_timer.toc()
        xyz0, xyz1 = data_dict['pcd0'], data_dict['pcd1']
        T_gth = data_dict['T_gt']
        xyz0np, xyz1np = xyz0.numpy(), xyz1.numpy()

        pcd0 = make_open3d_point_cloud(xyz0np)
        pcd1 = make_open3d_point_cloud(xyz1np)

        with torch.no_grad():
            feat_timer.tic()
            sinput0 = ME.SparseTensor(
                data_dict['sinput0_F'].to(device), coordinates=data_dict['sinput0_C'].to(device))
            F0 = model(sinput0).F.detach()
            sinput1 = ME.SparseTensor(
                data_dict['sinput1_F'].to(device), coordinates=data_dict['sinput1_C'].to(device))
            F1 = model(sinput1).F.detach()
            feat_timer.toc()

        feat0 = make_open3d_feature(F0, 32, F0.shape[0])
        feat1 = make_open3d_feature(F1, 32, F1.shape[0])

        reg_timer.tic()
        distance_threshold = config.voxel_size * 1.0
        # Open3D <= 0.9 API; newer Open3D moved this to o3d.pipelines.registration
        ransac_result = o3d.registration.registration_ransac_based_on_feature_matching(
            pcd0, pcd1, feat0, feat1, distance_threshold,
            o3d.registration.TransformationEstimationPointToPoint(False), 4, [
                o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3d.registration.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold)
            ], o3d.registration.RANSACConvergenceCriteria(4000000, 10000))
        T_ransac = torch.from_numpy(
            ransac_result.transformation.astype(np.float32))
        reg_timer.toc()

        # Translation error (RTE) and rotation error (RRE)
        rte = np.linalg.norm(T_ransac[:3, 3] - T_gth[:3, 3])
        rre = np.arccos((np.trace(T_ransac[:3, :3].t() @ T_gth[:3, :3]) - 1) / 2)

        # Check if RANSAC was successful: success means RTE < 2 m and RRE < 5 degrees
        # http://openaccess.thecvf.com/content_ECCV_2018/papers/Zi_Jian_Yew_3DFeat-Net_Weakly_Supervised_ECCV_2018_paper.pdf
        if rte < 2:
            rte_meter.update(rte)

        if not np.isnan(rre) and rre < np.pi / 180 * 5:
            rre_meter.update(rre)

        if rte < 2 and not np.isnan(rre) and rre < np.pi / 180 * 5:
            success_meter.update(1)
        else:
            success_meter.update(0)
            logging.info(f"Failed with RTE: {rte}, RRE: {rre}")

        if i % 10 == 0:
            logging.info(
                f"{i} / {N}: Data time: {data_timer.avg}, Feat time: {feat_timer.avg}," +
                f" Reg time: {reg_timer.avg}, RTE: {rte_meter.avg}," +
                f" RRE: {rre_meter.avg}, Success: {success_meter.sum} / {success_meter.count}"
                + f" ({success_meter.avg * 100} %)")
            data_timer.reset()
            feat_timer.reset()
            reg_timer.reset()

    logging.info(
        f"RTE: {rte_meter.avg}, var: {rte_meter.var}," +
        f" RRE: {rre_meter.avg}, var: {rre_meter.var}, Success: {success_meter.sum} " +
        f"/ {success_meter.count} ({success_meter.avg * 100} %)")
Example #5
def main(config):
    test_loader = make_data_loader(config,
                                   config.test_phase,
                                   1,
                                   num_threads=config.test_num_thread,
                                   shuffle=False)

    num_feats = 1

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    Model = load_model(config.model)
    model = Model(num_feats,
                  config.model_n_out,
                  bn_momentum=config.bn_momentum,
                  conv1_kernel_size=config.conv1_kernel_size,
                  normalize_feature=config.normalize_feature)
    checkpoint = torch.load(config.save_dir + '/checkpoint.pth')
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    model.eval()

    success_meter, rte_meter, rre_meter = AverageMeter(), AverageMeter(), AverageMeter()
    data_timer, feat_timer, reg_timer = Timer(), Timer(), Timer()

    test_iter = iter(test_loader)
    N = len(test_loader)
    n_gpu_failures = 0

    # downsample_voxel_size = 2 * config.voxel_size
    list_results_to_save = []
    for i in range(N):
        data_timer.tic()
        try:
            data_dict = next(test_iter)
        except ValueError:
            n_gpu_failures += 1
            logging.info(f"# Erroneous GPU Pair {n_gpu_failures}")
            continue
        data_timer.toc()
        xyz0, xyz1 = data_dict['pcd0'], data_dict['pcd1']
        T_gth = data_dict['T_gt']
        xyz0np, xyz1np = xyz0.numpy(), xyz1.numpy()
        pcd0 = make_open3d_point_cloud(xyz0np)
        pcd1 = make_open3d_point_cloud(xyz1np)

        with torch.no_grad():
            feat_timer.tic()
            sinput0 = ME.SparseTensor(
                data_dict['sinput0_F'].to(device),
                coordinates=data_dict['sinput0_C'].to(device))
            F0 = model(sinput0).F.detach()
            sinput1 = ME.SparseTensor(
                data_dict['sinput1_F'].to(device),
                coordinates=data_dict['sinput1_C'].to(device))
            F1 = model(sinput1).F.detach()
            feat_timer.toc()

        # collect the extracted features for saving to a pickle file
        logging.info(f"Extracted features for pair {i} / {N}")
        dict_sample = {
            "pts_source": xyz0np,
            "feat_source": F0.cpu().detach().numpy(),
            "pts_target": xyz1np,
            "feat_target": F1.cpu().detach().numpy()
        }

        list_results_to_save.append(dict_sample)

    import pickle
    path_results_to_save = "fcgf.results.pkl"
    logging.info(f"Saving results to {path_results_to_save}")
    with open(path_results_to_save, 'wb') as f:
        pickle.dump(list_results_to_save, f)
    logging.info("Saved!")
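
The pickle written by Example #5 is a list of dicts keyed pts_source, feat_source, pts_target, feat_target. A minimal read-back, assuming the file from the example exists and that points are (N, 3) and features (N, D) arrays:

import pickle

with open("fcgf.results.pkl", "rb") as f:
    results = pickle.load(f)

for sample in results:
    # each entry pairs raw points with their learned descriptors
    print(sample["pts_source"].shape, sample["feat_source"].shape)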
Example #6
def main(config):
    # loading the validation data
    val_data_loader = make_data_loader(config, "valid", 1, num_threads=config.val_num_thread, shuffle=False)

    # no gradients
    with torch.no_grad():
        # Model initialization
        Model = load_model(config.model)
        model = Model(config)

        # limited GPU
        if config.gpu_inds > -1:
            torch.cuda.set_device(config.gpu_inds)
            device = torch.device('cuda', config.gpu_inds)
        else:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        model.to(device)

        # load the weights
        if config.weights:
            checkpoint = torch.load(config.weights)
            model.load_state_dict(checkpoint['state_dict'])

        logging.info(model)

        # evaluation mode
        model.eval()

        num_data = 0
        data_timer, matching_timer = Timer(), Timer()
        tot_num_data = len(val_data_loader.dataset)

        data_loader_iter = iter(val_data_loader)

        # collect rotation errors, translation errors, and inlier ratios
        measure_list = ["err_q", "err_t", "inlier_ratio"]
        nb_outlier_ratios = config.outlier_ratios.shape[0]
        eval_res = {}
        for measure in measure_list:
            eval_res[measure] = np.zeros((nb_outlier_ratios, tot_num_data))

        # for gpu memory consideration
        max_nb_points = 20000

        for batch_idx in range(tot_num_data):

            data_timer.tic()
            matches, numInliers, input_3d_xyz, input_2d_xy, R_gt, t_gt, _ = next(data_loader_iter)
            data_timer.toc()

            p3d_cur = input_3d_xyz
            p2d_cur = input_2d_xy
            matches_cur = matches

            # for each outlier_ratio level
            for oind in range(nb_outlier_ratios):
                outlier_ratio = config.outlier_ratios[oind]
                if outlier_ratio > 0:
                    # adding outliers
                    nb_added_outlier_2d = min(int(round(numInliers.item() * outlier_ratio)), max_nb_points - numInliers.item())
                    nb_added_outlier_3d = min(int(round(numInliers.item() * outlier_ratio)), max_nb_points - numInliers.item())

                    if config.outlier_type == "synthetic":
                        # add synthetic outliers
                        p2d_outlier, p3d_outlier = generate_outliers_synthetic(input_2d_xy, input_3d_xyz, nb_added_outlier_2d, nb_added_outlier_3d)
                    elif config.outlier_type == "real":
                        # add real outliers, you can load real-outlier from the non-matchable-2d/3d.pkl
                        raise NotImplementedError('Please modify the dataloader')
                        # p2d_outlier, p3d_outlier = generate_outliers_real(p2d_outlier_all, p3d_outlier_all, nb_added_outlier_2d, nb_added_outlier_3d)
                    else:
                        raise ValueError('Invalid outlier type')

                    # padding the outliers
                    p2d_cur = torch.cat((input_2d_xy, p2d_outlier), -2)
                    p3d_cur = torch.cat((input_3d_xyz, p3d_outlier), -2)

                    # Fill ground-truth matching indexes with outliers
                    b = p2d_cur.size(0)
                    m = p3d_cur.size(-2)
                    n = p2d_cur.size(-2)
                    matches_cur = matches.new_full((b, m, n), 0.0)
                    matches_cur[:, :numInliers.item(), :numInliers.item()] = matches

                p2d_cur, p3d_cur, R_gt, t_gt, matches_cur = p2d_cur.to(device), p3d_cur.to(device), R_gt.to(
                    device), t_gt.to(device), matches_cur.to(device)

                # Compute output
                matching_timer.tic()
                prob_matrix = model(p3d_cur, p2d_cur)
                matching_timer.toc()

                # compute the topK correspondences
                # note cv2.solvePnPRansac is not stable, sometimes wrong!
                # please use https://github.com/k88joshi/posest, and cite the original paper
                k = min(2000, round(p3d_cur.size(1) * p2d_cur.size(1)))  # Choose at most 2000 points in the testing stage
                _, P_topk_i = torch.topk(prob_matrix.flatten(start_dim=-2), k=k, dim=-1, largest=True, sorted=True)
                p3d_indices = P_topk_i // prob_matrix.size(-1)  # bxk (integer division)
                p2d_indices = P_topk_i % prob_matrix.size(-1)  # bxk
                # check the inlier ratio within the top-k matches:
                # retrieve the inlier/outlier 1/0 logit
                inlier_inds = matches_cur[:, p3d_indices, p2d_indices].cpu().numpy()
                # inlier ratio
                inlier_ratio = np.sum(inlier_inds) / k * 100.0

                # default errors in case the pose cannot be estimated
                err_q = np.pi
                err_t = np.inf
                # more than 5 2D-3D matches
                if k > 5:
                    # compute the rotation and translation error
                    K = np.float32(np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]))
                    dist_coeff = np.float32(np.array([0.0, 0.0, 0.0, 0.0]))
                    #  RANSAC p3p
                    p2d_np = p2d_cur[0, p2d_indices[0, :k], :].cpu().numpy()
                    p3d_np = p3d_cur[0, p3d_indices[0, :k], :].cpu().numpy()
                    retval, rvec, tvec, inliers = cv2.solvePnPRansac(
                        p3d_np, p2d_np, K, dist_coeff,
                        iterationsCount=1000,
                        reprojectionError=0.01,
                        flags=cv2.SOLVEPNP_P3P)
                    if rvec is not None and tvec is not None:
                        R_est, _ = cv2.Rodrigues(rvec)
                        t_est = tvec
                        err_q, err_t = evaluate_R_t(R_est, t_est, R_gt[0, :, :].cpu().numpy(), t_gt.cpu().numpy())

                torch.cuda.empty_cache()

                eval_res["err_q"][oind, batch_idx] = err_q
                eval_res["err_t"][oind, batch_idx] = err_t
                eval_res["inlier_ratio"][oind, batch_idx] = inlier_ratio

                logging.info(' '.join([
                    f"Validation iter {num_data} / {tot_num_data} : Data Loading Time: {data_timer.avg:.3f},",
                    f"outlier ratio: {outlier_ratio:.3f},",
                    f"Matching Time: {matching_timer.avg:.3f},",
                    f"err_rot: {err_q:.3f}, err_t: {err_t:.3f}, inlier_ratio: {inlier_ratio:.3f},",
                ]))
                data_timer.reset()

            num_data += 1

        # after checking all the validation samples, let's calculate statistics

        # for each outlier_ratio level
        for oind in range(nb_outlier_ratios):
            outlier_ratio = config.outlier_ratios[oind]
            eval_res_cur = {}
            for measure in measure_list:
                eval_res_cur[measure] = eval_res[measure][oind,:]

            recall = recalls(eval_res_cur)

            logging.info(' '.join([
                f" outlier_ratio: {outlier_ratio:.3f}, recall_rot: {recall[0]:.3f}, med. rot. : {recall[1]:.3f}, med. trans. : {recall[2]:.3f}, avg. inlier ratio: {recall[3]:.3f},",
            ]))
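
The top-k correspondence selection in Example #6 flattens the b x m x n probability matrix, keeps the k largest scores, and decodes each flat index back into a (3D point, 2D point) pair with integer division and modulo. Below is a self-contained check of that decoding; the shapes here are arbitrary.

import torch

b, m, n, k = 1, 4, 5, 3
prob = torch.rand(b, m, n)

# Flatten the last two dims, take the k highest scores, then decode the
# flat index back into (row, col) = (3D index, 2D index).
topk_vals, flat_idx = torch.topk(prob.flatten(start_dim=-2), k=k, dim=-1)
p3d_idx = flat_idx // n  # row: which of the m 3D points
p2d_idx = flat_idx % n   # col: which of the n 2D points

# Gathering with the decoded indices recovers the same top-k values.
assert torch.equal(prob[0, p3d_idx[0], p2d_idx[0]], topk_vals[0])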