Beispiel #1
0
def eval_KITTI_scene(method, dloader, args):
    """
    Evaluate baseline methods (SM / RANSAC / GCRANSAC) on the KITTI testset.

    Args:
        method: baseline name, one of 'SM', 'RANSAC', 'GCRANSAC'.
            Any other value aborts the process.
        dloader: DataLoader yielding (corr, src_keypts, tgt_keypts,
            gt_trans, gt_labels) tuples of CUDA-movable tensors.
        args: options namespace forwarded to the baseline solvers.

    Returns:
        stats: np.ndarray [num_pair, 12] with per-pair results:
            0 success, 1 RE (deg), 2 TE (cm), 3 input inlier number,
            4 input inlier ratio, 5 output inlier number,
            6 output inlier precision, 7 output inlier recall,
            8 output inlier F1, 9 model time, 10 data time,
            11 scene index (always -1 for KITTI).
    """
    num_pair = len(dloader.dataset)
    stats = np.zeros([num_pair, 12])
    dloader_iter = iter(dloader)
    class_loss = ClassificationLoss()
    # Success thresholds used for KITTI: RE < 5 deg, TE < 60 cm.
    evaluation_metric = TransformationLoss(re_thre=5, te_thre=60)
    data_timer, model_timer = Timer(), Timer()
    with torch.no_grad():
        for i in tqdm(range(num_pair)):
            #################################
            # load data
            #################################
            data_timer.tic()
            # BUG FIX: iterator.next() is the removed Python-2 / old-PyTorch
            # spelling; the builtin next() is the portable way to advance
            # DataLoader iterators.
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = next(dloader_iter)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
            # NOTE(review): `data` is unused by the baseline solvers below;
            # kept only for parity with the learned-model evaluation path.
            data = {
                'corr_pos': corr,
                'src_keypts': src_keypts,
                'tgt_keypts': tgt_keypts,
                'testing': True,
            }
            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            model_timer.tic()

            if method == 'SM':
                pred_trans, pred_labels = SM(corr,
                                             src_keypts,
                                             tgt_keypts,
                                             args,
                                             top_ratio=0.05)

            elif method == 'RANSAC':
                pred_trans, pred_labels = RANSAC(corr, src_keypts, tgt_keypts,
                                                 args)

            elif method == 'GCRANSAC':
                pred_trans, pred_labels = GCRANSAC(corr, src_keypts,
                                                   tgt_keypts, args)

            else:
                exit(-1)

            model_time = model_timer.toc()
            loss, recall, Re, Te, RMSE = evaluation_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)
            class_stats = class_loss(pred_labels, gt_labels)

            #################################
            # record the evaluation results.
            #################################
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = -1  # no per-scene index for KITTI

            # Log detailed diagnostics for failed pairs (recall == 0).
            if recall == 0:
                from evaluation.benchmark_utils import rot_to_euler
                R_gt, t_gt = gt_trans[0][:3, :3], gt_trans[0][:3, -1]
                euler = rot_to_euler(R_gt.detach().cpu().numpy())

                input_ir = float(torch.mean(gt_labels.float()))
                input_i = int(torch.sum(gt_labels))
                output_i = int(torch.sum(gt_labels[pred_labels > 0]))
                logging.info(
                    f"Pair {i}, GT Rot: {euler[0]:.2f}, {euler[1]:.2f}, {euler[2]:.2f}, Trans: {t_gt[0]:.2f}, {t_gt[1]:.2f}, {t_gt[2]:.2f}, RE: {float(Re):.2f}, TE: {float(Te):.2f}"
                )
                logging.info((
                    f"\tInput Inlier Ratio :{input_ir*100:.2f}%(#={input_i}), Output: IP={float(class_stats['precision'])*100:.2f}%(#={output_i}) IR={float(class_stats['recall'])*100:.2f}%"
                ))

    return stats
Beispiel #2
0
def train_parallel(rank, world_size, seed, config):
    """
    Per-process distributed training entry point (one process per GPU).

    Sets up torch.distributed (NCCL), seeds RNGs, builds the PointDSC model,
    optimizer, scheduler, data loaders, feature extractors and loss metrics
    on `config`, then runs the Trainer.

    Args:
        rank: this process's rank, also used as the CUDA device index.
        world_size: total number of processes/GPUs.
        seed: RNG seed applied identically in every process.
        config: edict-style config; mutated in place with the created objects.

    Raises:
        ValueError: if config.optimizer is neither 'SGD' nor 'ADAM'.
    """
    # Rendezvous address for torch.distributed; all workers must agree.
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '8882'
    dist.init_process_group(backend='nccl', world_size=world_size, rank=rank)
    # Same seed everywhere so model init is identical across processes.
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.set_device(rank)
    device = 'cuda:%d' % torch.cuda.current_device()
    print("process %d, GPU: %s" % (rank, device))

    # create model
    config.model = PointDSC(
        in_dim=config.in_dim,
        num_layers=config.num_layers,
        num_channels=config.num_channels,
        num_iterations=config.num_iterations,
        inlier_threshold=config.inlier_threshold,
        sigma_d=config.sigma_d,
        ratio=config.ratio,
        k=config.k,
    )

    # create optimizer (config.optimizer is replaced by the optimizer object)
    if config.optimizer == 'SGD':
        config.optimizer = optim.SGD(
            config.model.parameters(),
            lr=config.lr,
            momentum=config.momentum,
            weight_decay=config.weight_decay,
        )
    elif config.optimizer == 'ADAM':
        config.optimizer = optim.Adam(
            config.model.parameters(),
            lr=config.lr,
            betas=(0.9, 0.999),
            weight_decay=config.weight_decay,
        )
    else:
        # BUG FIX: previously an unknown optimizer name fell through silently
        # and crashed later inside ExponentialLR with a confusing error.
        raise ValueError(f"unsupported optimizer: {config.optimizer!r}")
    config.scheduler = optim.lr_scheduler.ExponentialLR(
        config.optimizer,
        gamma=config.scheduler_gamma,
    )

    # create dataset and dataloader
    DL_config = edict({
        'voxel_size': 0.3,
        'positive_pair_search_voxel_size_multiplier': 4,
        'use_random_rotation': False,
        'use_random_scale': False
    })
    config.train_loader = make_data_loader(config.dataset,
                                           DL_config,
                                           'train',
                                           config.batch_size,
                                           rank,
                                           world_size,
                                           seed,
                                           config.num_workers,
                                           shuffle=True)
    config.val_loader = make_data_loader(config.dataset,
                                         DL_config,
                                         'val',
                                         config.batch_size,
                                         rank,
                                         world_size,
                                         seed,
                                         config.num_workers,
                                         shuffle=False)

    # Train-time extractor applies the configured augmentations ...
    config.train_feature_extractor = LidarFeatureExtractor(
        split='train',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=config.num_node,
        use_mutual=config.use_mutual,
        augment_axis=config.augment_axis,
        augment_rotation=config.augment_rotation,
        augment_translation=config.augment_translation,
        fcgf_weights_file=config.fcgf_weights_file)

    # ... while the val-time extractor disables all augmentation.
    config.val_feature_extractor = LidarFeatureExtractor(
        split='val',
        in_dim=config.in_dim,
        inlier_threshold=config.inlier_threshold,
        num_node=config.num_node,
        use_mutual=config.use_mutual,
        augment_axis=0,
        augment_rotation=0.0,
        augment_translation=0.0,
        fcgf_weights_file=config.fcgf_weights_file)

    # create evaluation metrics and their loss weights
    config.evaluate_metric = {
        "ClassificationLoss":
        ClassificationLoss(balanced=config.balanced),
        "SpectralMatchingLoss":
        SpectralMatchingLoss(balanced=config.balanced),
        "TransformationLoss":
        TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre),
    }
    config.metric_weight = {
        "ClassificationLoss": config.weight_classification,
        "SpectralMatchingLoss": config.weight_spectralmatching,
        "TransformationLoss": config.weight_transformation,
    }

    trainer = Trainer(config, rank)
    trainer.train()
Beispiel #3
0
def eval_KITTI_per_pair(model, dloader, feature_extractor, config, use_icp,
                        args, rank):
    """
    Evaluate our model on the KITTI testset, one pair at a time.

    Args:
        model: trained network; called as model(data) and expected to return
            'final_trans' and 'final_labels'.
        dloader: DataLoader over KITTI test pairs.
        feature_extractor: converts a raw batch into correspondences via
            process_batch().
        config: evaluation config (re_thre, te_thre, inlier_threshold).
        use_icp: if True, refine the predicted transform with ICP.
        args: runtime options; args.solver selects 'SVD' (use model output
            directly) or 'RANSAC' (use model labels as a pre-filter).
        rank: process rank; only rank 0 prints progress / failure logs.

    Returns:
        stats: np.ndarray [num_pair, 12]:
            0 success, 1 RE (deg), 2 TE (cm), 3 input inlier number,
            4 input inlier ratio, 5 output inlier number, 6 precision,
            7 recall, 8 F1, 9 model time, 10 data time, 11 icp time.
    """
    num_pair = len(dloader)
    stats = np.zeros([num_pair, 12])
    dloader_iter = iter(dloader)
    class_loss = ClassificationLoss()
    evaluate_metric = TransformationLoss(re_thre=config.re_thre,
                                         te_thre=config.te_thre)
    data_timer, model_timer = Timer(), Timer()
    icp_timer = Timer()
    with torch.no_grad():
        for i in range(num_pair):
            #################################
            # load data
            #################################
            data_timer.tic()
            # BUG FIX: use builtin next(); DataLoader iterators no longer
            # provide a .next() method.
            input_dict = next(dloader_iter)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels, _, _ = feature_extractor.process_batch(
                input_dict)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                    corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
            data = {
                'corr_pos': corr,
                'src_keypts': src_keypts,
                'tgt_keypts': tgt_keypts,
                'testing': True,
            }
            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            model_timer.tic()
            res = model(data)
            pred_trans, pred_labels = res['final_trans'], res['final_labels']

            if args.solver == 'SVD':
                # model output already holds the SVD-based transform
                pass

            elif args.solver == 'RANSAC':
                # our method can be used with RANSAC as a outlier pre-filtering step.
                src_pcd = make_point_cloud(
                    src_keypts[0].detach().cpu().numpy())
                tgt_pcd = make_point_cloud(
                    tgt_keypts[0].detach().cpu().numpy())
                # keypoints are already matched 1:1, so correspondence i -> i
                corr = np.array([
                    np.arange(src_keypts.shape[1]),
                    np.arange(src_keypts.shape[1])
                ])
                pred_inliers = np.where(
                    pred_labels.detach().cpu().numpy() > 0)[1]
                corr = o3d.utility.Vector2iVector(corr[:, pred_inliers].T)
                reg_result = o3d.registration.registration_ransac_based_on_correspondence(
                    src_pcd,
                    tgt_pcd,
                    corr,
                    max_correspondence_distance=config.inlier_threshold,
                    estimation_method=o3d.registration.
                    TransformationEstimationPointToPoint(False),
                    ransac_n=3,
                    criteria=o3d.registration.RANSACConvergenceCriteria(
                        max_iteration=5000, max_validation=5000))
                inliers = np.array(reg_result.correspondence_set)
                pred_labels = torch.zeros_like(gt_labels)
                pred_labels[0, inliers[:, 0]] = 1
                pred_trans = torch.eye(4)[None].to(src_keypts.device)
                pred_trans[:, :4, :4] = torch.from_numpy(
                    reg_result.transformation)

            model_time = model_timer.toc()
            icp_timer.tic()
            if use_icp:
                pred_trans = icp_refine(src_keypts, tgt_keypts, pred_trans)

            icp_time = icp_timer.toc()

            class_stats = class_loss(pred_labels, gt_labels)
            loss, recall, Re, Te, rmse = evaluate_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)
            pred_trans = pred_trans[0]

            # save statistics
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = icp_time

            if rank == 0:
                print(
                    f"{time.strftime('%m/%d %H:%M:%S')} Finished pair:{i}/{num_pair}",
                    flush=True)
                # detailed diagnostics for failed pairs
                if recall == 0:
                    from evaluation.benchmark_utils import rot_to_euler
                    R_gt, t_gt = gt_trans[0][:3, :3], gt_trans[0][:3, -1]
                    euler = rot_to_euler(R_gt.detach().cpu().numpy())

                    input_ir = float(torch.mean(gt_labels.float()))
                    input_i = int(torch.sum(gt_labels))
                    output_i = int(torch.sum(gt_labels[pred_labels > 0]))

                    logging.info(
                        f"Pair {i}, GT Rot: {euler[0]:.2f}, {euler[1]:.2f}, {euler[2]:.2f}, Trans: {t_gt[0]:.2f}, {t_gt[1]:.2f}, {t_gt[2]:.2f}, RE: {float(Re):.2f}, TE: {float(Te):.2f}"
                    )
                    logging.info((
                        f"\tInput Inlier Ratio :{input_ir*100:.2f}%(#={input_i}), Output: IP={float(class_stats['precision'])*100:.2f}%(#={output_i}) IR={float(class_stats['recall'])*100:.2f}%"
                    ))

    return stats
Beispiel #4
0
                            num_node=config.num_node,
                            use_mutual=config.use_mutual,
                            downsample=config.downsample,
                            augment_axis=config.augment_axis,
                            augment_rotation=config.augment_rotation,
                            augment_translation=config.augment_translation,
                            )
    config.train_loader = get_dataloader(dataset=train_set, 
                                        batch_size=config.batch_size,
                                        num_workers=config.num_workers,
                                        )
    config.val_loader = get_dataloader(dataset=val_set,
                                        batch_size=config.batch_size,
                                        num_workers=config.num_workers,
                                        )
    
    # create evaluation
    config.evaluate_metric = {
        "ClassificationLoss": ClassificationLoss(balanced=config.balanced),
        "SpectralMatchingLoss": SpectralMatchingLoss(balanced=config.balanced),
        "TransformationLoss": TransformationLoss(re_thre=config.re_thre, te_thre=config.te_thre),
    }
    config.metric_weight = {
        "ClassificationLoss": config.weight_classification,
        "SpectralMatchingLoss": config.weight_spectralmatching,
        "TransformationLoss": config.weight_transformation,
    }


    trainer = Trainer(config)
    trainer.train()
Beispiel #5
0
def eval_3DMatch_scene(method, scene, scene_ind, dloader, args):
    """
    Evaluate baseline methods on 3DMatch testset [scene].

    Args:
        method: one of 'SM', 'PMC', 'RANSAC', 'GCRANSAC', 'LS'.
            'MAGSAC' is recognized but unsupported (no rigid transform).
            Any other value aborts the process.
        scene: scene name (unused here; kept for the caller's interface).
        scene_ind: scene index recorded in column 11 of `stats`.
        dloader: DataLoader yielding (corr, src_keypts, tgt_keypts,
            gt_trans, gt_labels) tuples.
        args: options namespace forwarded to the baseline solvers.

    Returns:
        stats: np.ndarray [num_pair, 12]:
            0 success, 1 RE (deg), 2 TE (cm), 3 input inlier number,
            4 input inlier ratio, 5 output inlier number, 6 precision,
            7 recall, 8 F1, 9 model time, 10 data time, 11 scene index.
    """
    num_pair = len(dloader.dataset)
    stats = np.zeros([num_pair, 12])
    dloader_iter = iter(dloader)
    class_loss = ClassificationLoss()
    # Success thresholds used for 3DMatch: RE < 15 deg, TE < 30 cm.
    evaluation_metric = TransformationLoss(re_thre=15, te_thre=30)
    data_timer, model_timer = Timer(), Timer()
    with torch.no_grad():
        for i in tqdm(range(num_pair)):
            #################################
            # load data
            #################################
            data_timer.tic()
            # BUG FIX: use builtin next(); DataLoader iterators no longer
            # provide a .next() method.
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = next(dloader_iter)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
            # NOTE(review): `data` is unused by the baselines below; kept for
            # parity with the learned-model evaluation path.
            data = {
                'corr_pos': corr,
                'src_keypts': src_keypts,
                'tgt_keypts': tgt_keypts,
                'testing': True,
            }
            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            model_timer.tic()

            if method == 'SM':
                pred_trans, pred_labels = SM(corr, src_keypts, tgt_keypts,
                                             args)

            elif method == 'PMC':
                pred_trans, pred_labels = PMC(corr, src_keypts, tgt_keypts,
                                              args)

            elif method == 'RANSAC':
                pred_trans, pred_labels = RANSAC(corr, src_keypts, tgt_keypts,
                                                 args)

            elif method == 'MAGSAC':
                import pymagsac
                # BUG FIX: this branch used to fall through without setting
                # pred_trans/pred_labels, which either raised NameError or
                # silently reused the previous pair's predictions. Fail loudly.
                raise NotImplementedError(
                    "MAGSAC: no rigid transformation support now.")

            elif method == 'GCRANSAC':
                pred_trans, pred_labels = GCRANSAC(corr, src_keypts,
                                                   tgt_keypts, args)

            elif method == 'LS':
                # compute the transformation matrix by solving the least-square system
                # oracle setting: restrict to ground-truth inliers first
                corr = corr[:, gt_labels[0].bool(), ]
                src_keypts = src_keypts[:, gt_labels[0].bool(), ]
                tgt_keypts = tgt_keypts[:, gt_labels[0].bool(), ]
                # homogeneous coordinates: append a column of ones
                A = torch.cat(
                    [src_keypts,
                     torch.ones_like(src_keypts[:, :, 0:1])],
                    dim=-1)
                B = torch.cat(
                    [tgt_keypts,
                     torch.ones_like(tgt_keypts[:, :, 0:1])],
                    dim=-1)

                # Solve T @ A^T ~= B^T in the least-squares sense via the
                # Moore-Penrose pseudo-inverse.
                BB = B.permute(0, 2, 1)  # [bs,4,n]
                AA = A.permute(0, 2, 1)  # [bs,4,n]
                pred_trans = torch.bmm(BB, torch.pinverse(AA))
                pred_labels = gt_labels

            else:
                exit(-1)

            model_time = model_timer.toc()
            loss, recall, Re, Te, RMSE = evaluation_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)
            class_stats = class_loss(pred_labels, gt_labels)

            #################################
            # record the evaluation results.
            #################################
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = scene_ind

    return stats
Beispiel #6
0
def eval_KITTI_per_pair(model, dloader, feature_extractor, config, args, rank):
    """
    Evaluate a registration pipeline on the KITTI testset, one pair at a
    time, with an additional ICP refinement pass on every prediction.

    Args:
        model: trained PointDSC network (used when args.algo == 'PointDSC').
        dloader: DataLoader over KITTI test pairs.
        feature_extractor: converts a raw batch into correspondences and
            per-cloud features via process_batch().
        config: evaluation config (re_thre, te_thre, inlier_threshold).
        args: runtime options; args.algo selects 'PointDSC', 'RANSAC',
            'GC' or 'TEASER'; args.solver and args.max_samples also apply.
        rank: process rank; only rank 0 prints progress / failure logs.

    Returns:
        stats: np.ndarray [num_pair, 22]; column layout in the comment below.
    """
    num_pair = len(dloader)
    if args.max_samples is not None:
        num_pair = min(num_pair, args.max_samples)
    # 0.success, 1.RE, 2.TE, 3.input inlier number, 4.input inlier ratio,  5. output inlier number
    # 6. output inlier precision, 7. output inlier recall, 8. output inlier F1 score 9. model_time, 10. data_time 11. icp_time
    # 12. recall_icp 13. RE_icp 14. TE_icp 15.num_pairs_init 16.inlier_ratio_init 17.num_pairs_filtered 18.inlier_ratio_filtered 19. drive 20.t0 21.t1
    stats = np.zeros([num_pair, 22])
    dloader_iter = iter(dloader)
    class_loss = ClassificationLoss()
    evaluate_metric = TransformationLoss(re_thre=config.re_thre,
                                         te_thre=config.te_thre)
    data_timer, model_timer = Timer(), Timer()
    icp_timer = Timer()
    with torch.no_grad():
        for i in range(num_pair):
            #################################
            # load data
            #################################
            data_timer.tic()
            # BUG FIX: use builtin next(); DataLoader iterators no longer
            # provide a .next() method.
            input_dict = next(dloader_iter)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels, src_features, tgt_features = feature_extractor.process_batch(
                input_dict)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                    corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
            data = {
                'corr_pos': corr,
                'src_keypts': src_keypts,
                'tgt_keypts': tgt_keypts,
                'testing': True,
            }
            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            # pair/inlier-ratio diagnostics are only produced by the FR path
            num_pairs_init, inlier_ratio_init, num_pairs_filtered, inlier_ratio_filtered = 0, 0, 0, 0
            if args.algo == 'PointDSC':
                model_timer.tic()
                res = model(data)
                pred_trans, pred_labels = res['final_trans'], res[
                    'final_labels']

                if args.solver == 'SVD':
                    pass

                elif args.solver == 'RANSAC':
                    # our method can be used with RANSAC as a outlier pre-filtering step.
                    src_pcd = make_point_cloud(
                        src_keypts[0].detach().cpu().numpy())
                    tgt_pcd = make_point_cloud(
                        tgt_keypts[0].detach().cpu().numpy())
                    # keypoints are matched 1:1, so correspondence i -> i
                    corr = np.array([
                        np.arange(src_keypts.shape[1]),
                        np.arange(src_keypts.shape[1])
                    ])
                    pred_inliers = np.where(
                        pred_labels.detach().cpu().numpy() > 0)[1]
                    corr = o3d.utility.Vector2iVector(corr[:, pred_inliers].T)
                    reg_result = o3d.registration.registration_ransac_based_on_correspondence(
                        src_pcd,
                        tgt_pcd,
                        corr,
                        max_correspondence_distance=config.inlier_threshold,
                        estimation_method=o3d.registration.
                        TransformationEstimationPointToPoint(False),
                        ransac_n=3,
                        criteria=o3d.registration.RANSACConvergenceCriteria(
                            max_iteration=5000, max_validation=5000))
                    inliers = np.array(reg_result.correspondence_set)
                    pred_labels = torch.zeros_like(gt_labels)
                    pred_labels[0, inliers[:, 0]] = 1
                    pred_trans = torch.eye(4)[None].to(src_keypts.device)
                    pred_trans[:, :4, :4] = torch.from_numpy(
                        reg_result.transformation)

                model_time = model_timer.toc()

                src_pcd = make_point_cloud(
                    src_keypts.detach().cpu().numpy()[0])
                tgt_pcd = make_point_cloud(
                    tgt_keypts.detach().cpu().numpy()[0])
                initial_trans = pred_trans[0].detach().cpu().numpy()

            elif args.algo in ['RANSAC', 'GC']:
                # feature-based registration baseline; also reports pair /
                # inlier-ratio statistics before and after filtering
                initial_trans, model_time, src_pcd, tgt_pcd, num_pairs_init, \
                     inlier_ratio_init, num_pairs_filtered, inlier_ratio_filtered = \
                         FR(input_dict['pcd0'][0], input_dict['pcd1'][0], src_features, tgt_features, args, gt_trans[0,...].detach().cpu().numpy())
                pred_trans = torch.eye(4)[None].to(src_keypts.device)
                pred_trans[:, :4, :4] = torch.from_numpy(initial_trans)
                # no per-correspondence labels from this baseline
                pred_labels = torch.zeros_like(gt_labels) + np.nan

            elif args.algo == 'TEASER':
                src_pcd = make_point_cloud(
                    input_dict['pcd0'][0].detach().cpu().numpy())
                tgt_pcd = make_point_cloud(
                    input_dict['pcd1'][0].detach().cpu().numpy())
                initial_trans, model_time = TEASER(src_pcd, tgt_pcd,
                                                   src_features, tgt_features,
                                                   input_dict['pcd0'][0], args)
                pred_trans = torch.eye(4)[None].to(src_keypts.device)
                pred_trans[:, :4, :4] = torch.from_numpy(initial_trans)
                # no per-correspondence labels from this baseline
                pred_labels = torch.zeros_like(gt_labels) + np.nan

            else:
                assert False, "unkown value for args.algo: " + args.algo

            icp_timer.tic()
            # change the convension of transforamtion because open3d use left multi.
            refined_T = o3d.pipelines.registration.registration_icp(
                src_pcd, tgt_pcd, 0.6, initial_trans,
                o3d.pipelines.registration.
                TransformationEstimationPointToPoint()).transformation
            icp_time = icp_timer.toc()
            pred_trans_icp = torch.from_numpy(refined_T[None, :, :]).to(
                pred_trans.device).float()

            class_stats = class_loss(pred_labels, gt_labels)
            # evaluate both the raw and the ICP-refined transforms
            loss, recall, Re, Te, rmse = evaluate_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)
            loss, recall_icp, Re_icp, Te_icp, rmse = evaluate_metric(
                pred_trans_icp, gt_trans, src_keypts, tgt_keypts, pred_labels)
            pred_trans = pred_trans[0]

            # save statistics
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = icp_time
            stats[i, 12] = float(recall_icp / 100.0)  # success
            stats[i, 13] = float(Re_icp)  # Re (deg)
            stats[i, 14] = float(Te_icp)  # Te (cm)
            stats[i, 15] = num_pairs_init
            stats[i, 16] = inlier_ratio_init
            stats[i, 17] = num_pairs_filtered
            stats[i, 18] = inlier_ratio_filtered
            stats[i, 19] = input_dict['extra_packages'][0]['drive']
            stats[i, 20] = input_dict['extra_packages'][0]['t0']
            stats[i, 21] = input_dict['extra_packages'][0]['t1']

            if rank == 0:
                print(
                    f"{time.strftime('%m/%d %H:%M:%S')} Finished pair:{i}/{num_pair}",
                    flush=True)
                # detailed diagnostics for failed pairs
                if recall == 0:
                    from evaluation.benchmark_utils import rot_to_euler
                    R_gt, t_gt = gt_trans[0][:3, :3], gt_trans[0][:3, -1]
                    euler = rot_to_euler(R_gt.detach().cpu().numpy())

                    input_ir = float(torch.mean(gt_labels.float()))
                    input_i = int(torch.sum(gt_labels))
                    output_i = int(torch.sum(gt_labels[pred_labels > 0]))

                    logging.info(
                        f"Pair {i}, GT Rot: {euler[0]:.2f}, {euler[1]:.2f}, {euler[2]:.2f}, Trans: {t_gt[0]:.2f}, {t_gt[1]:.2f}, {t_gt[2]:.2f}, RE: {float(Re):.2f}, TE: {float(Te):.2f}"
                    )
                    logging.info((
                        f"\tInput Inlier Ratio :{input_ir*100:.2f}%(#={input_i}), Output: IP={float(class_stats['precision'])*100:.2f}%(#={output_i}) IR={float(class_stats['recall'])*100:.2f}%"
                    ))

    return stats
Beispiel #7
0
def eval_redwood_scene(model, scene, scene_ind, dloader, config, use_icp):
    """
    Evaluate our model on a Redwood testset scene.

    Args:
        model: trained network; called as model(data) and expected to return
            'final_trans' and 'final_labels'.
        scene: scene name (unused here; kept for the caller's interface).
        scene_ind: scene index recorded in column 11 of `stats`.
        dloader: DataLoader yielding (corr, src_keypts, tgt_keypts,
            gt_trans, gt_labels) tuples.
        config: evaluation config providing re_thre / te_thre.
        use_icp: if True, refine the predicted transform with ICP
            (refinement time is included in model_time).

    Returns:
        stats: np.ndarray [num_pair, 12]:
            0 success, 1 RE (deg), 2 TE (cm), 3 input inlier number,
            4 input inlier ratio, 5 output inlier number, 6 precision,
            7 recall, 8 F1, 9 model time, 10 data time, 11 scene index.
    """
    num_pair = len(dloader.dataset)
    stats = np.zeros([num_pair, 12])
    dloader_iter = iter(dloader)
    class_loss = ClassificationLoss()
    evaluate_metric = TransformationLoss(re_thre=config.re_thre,
                                         te_thre=config.te_thre)
    data_timer, model_timer = Timer(), Timer()
    with torch.no_grad():
        for i in tqdm(range(num_pair)):
            data_timer.tic()
            #################################
            # load data
            #################################
            # BUG FIX: use builtin next(); DataLoader iterators no longer
            # provide a .next() method.
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = next(dloader_iter)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
            data = {
                'corr_pos': corr,
                'src_keypts': src_keypts,
                'tgt_keypts': tgt_keypts,
                'testing': True,
            }
            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            model_timer.tic()
            res = model(data)
            pred_trans, pred_labels = res['final_trans'], res['final_labels']

            if use_icp:
                pred_trans = icp_refine(src_keypts, tgt_keypts, pred_trans)

            model_time = model_timer.toc()
            class_stats = class_loss(pred_labels, gt_labels)
            loss, recall, Re, Te, rmse = evaluate_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)
            pred_trans = pred_trans[0]

            #################################
            # record the evaluation results.
            #################################
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = scene_ind

    return stats
Beispiel #8
0
def eval_3DMatch_scene(model, scene, scene_ind, dloader, config, use_icp, args=None):
    """
    Evaluate our model on one 3DMatch testset scene.

    Args:
        model: network mapping a data dict to {'final_trans', 'final_labels'}.
        scene: scene name (unused in the body; kept for interface compatibility).
        scene_ind: integer scene index, recorded in column 11 of the stats.
        dloader: loader yielding (corr, src_keypts, tgt_keypts, gt_trans, gt_labels).
        config: config object with re_thre / te_thre / inlier_threshold.
        use_icp: if True, refine the predicted transform with ICP.
        args: optional namespace with a `solver` attribute ('SVD' or 'RANSAC').
            When None (default) the model's own transform is used unchanged,
            matching the original 'SVD' branch.
            NOTE(review): the original body read a global `args` that was never
            defined, which raised NameError; the parameter default keeps old
            call sites working.

    Returns:
        stats: [num_pair, 12] ndarray with per-pair columns:
            0 success, 1 RE (deg), 2 TE (cm), 3 input inlier number,
            4 input inlier ratio, 5 output inlier number, 6 output precision,
            7 output recall, 8 output F1, 9 model time, 10 data time,
            11 scene index.
    """
    num_pair = dloader.dataset.__len__()
    # 0.success, 1.RE, 2.TE, 3.input inlier number, 4.input inlier ratio,  5. output inlier number
    # 6. output inlier precision, 7. output inlier recall, 8. output inlier F1 score 9. model_time, 10. data_time 11. scene_ind
    stats = np.zeros([num_pair, 12])
    dloader_iter = dloader.__iter__()
    class_loss = ClassificationLoss()
    evaluate_metric = TransformationLoss(re_thre=config.re_thre,
                                         te_thre=config.te_thre)
    data_timer, model_timer = Timer(), Timer()
    # Default to the plain (model / SVD) path when no solver is requested.
    solver = getattr(args, 'solver', 'SVD') if args is not None else 'SVD'
    with torch.no_grad():
        for i in tqdm(range(num_pair)):
            #################################
            # load data
            #################################
            data_timer.tic()
            # `.next()` was removed from Python-3 iterators; use builtin next().
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = next(dloader_iter)
            corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
            data = {
                'corr_pos': corr,
                'src_keypts': src_keypts,
                'tgt_keypts': tgt_keypts,
                'testing': True,
            }
            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            model_timer.tic()
            res = model(data)
            pred_trans, pred_labels = res['final_trans'], res['final_labels']

            if solver == 'RANSAC':
                # our method can be used with RANSAC as a outlier pre-filtering step.
                src_pcd = make_point_cloud(
                    src_keypts[0].detach().cpu().numpy())
                tgt_pcd = make_point_cloud(
                    tgt_keypts[0].detach().cpu().numpy())
                # correspondences are identity (row i of src matches row i of tgt),
                # restricted to the model's predicted inliers
                corr = np.array([
                    np.arange(src_keypts.shape[1]),
                    np.arange(src_keypts.shape[1])
                ])
                pred_inliers = np.where(
                    pred_labels.detach().cpu().numpy() > 0)[1]
                corr = o3d.utility.Vector2iVector(corr[:, pred_inliers].T)
                reg_result = o3d.registration.registration_ransac_based_on_correspondence(
                    src_pcd,
                    tgt_pcd,
                    corr,
                    max_correspondence_distance=config.inlier_threshold,
                    estimation_method=o3d.registration.
                    TransformationEstimationPointToPoint(False),
                    ransac_n=3,
                    criteria=o3d.registration.RANSACConvergenceCriteria(
                        max_iteration=5000, max_validation=5000))
                # re-derive labels/transform from the RANSAC result
                inliers = np.array(reg_result.correspondence_set)
                pred_labels = torch.zeros_like(gt_labels)
                pred_labels[0, inliers[:, 0]] = 1
                pred_trans = torch.eye(4)[None].to(src_keypts.device)
                pred_trans[:, :4, :4] = torch.from_numpy(
                    reg_result.transformation)
            # solver == 'SVD' (or args is None): keep the model's transform.

            if use_icp:
                pred_trans = icp_refine(src_keypts, tgt_keypts, pred_trans)

            model_time = model_timer.toc()
            class_stats = class_loss(pred_labels, gt_labels)
            loss, recall, Re, Te, rmse = evaluate_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)

            #################################
            # record the evaluation results.
            #################################
            # save statistics
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = scene_ind

    return stats
Beispiel #9
0
def eval_3DMatch_scene(model, scene_ind, dloader, config, args):
    """
    Evaluate our model on one 3DMatch testset scene, returning both the
    per-pair statistics and the final estimated poses.

    Args:
        model: network mapping a data dict to {'final_trans', 'final_labels'}.
        scene_ind: integer scene index, recorded in column 11 of the stats.
        dloader: loader yielding (corr, src_keypts, tgt_keypts, gt_trans, gt_labels);
            only consumed for the 'fcgf' descriptor.
        config: config object with re_thre / te_thre attributes.
        args: namespace with `descriptor` ('fcgf' or 'predator') and, for
            'predator', `num_points`.

    Returns:
        stats: [num_pair, 12] ndarray with per-pair columns:
            0 success, 1 RE (deg), 2 TE (cm), 3 input inlier number,
            4 input inlier ratio, 5 output inlier number, 6 output precision,
            7 output recall, 8 output F1, 9 model time, 10 data time,
            11 scene index.
        final_poses: [num_pair, 4, 4] ndarray of estimated transformations.

    Raises:
        ValueError: if args.descriptor is not 'fcgf' or 'predator'.
    """
    num_pair = dloader.dataset.__len__()
    # 0.success, 1.RE, 2.TE, 3.input inlier number, 4.input inlier ratio,  5. output inlier number
    # 6. output inlier precision, 7. output inlier recall, 8. output inlier F1 score 9. model_time, 10. data_time 11. scene_ind
    stats = np.zeros([num_pair, 12])
    final_poses = np.zeros([num_pair, 4, 4])
    dloader_iter = dloader.__iter__()
    class_loss = ClassificationLoss()
    evaluate_metric = TransformationLoss(re_thre=config.re_thre,
                                         te_thre=config.te_thre)
    data_timer, model_timer = Timer(), Timer()
    with torch.no_grad():
        for i in tqdm(range(num_pair)):
            #################################
            # load data
            #################################
            data_timer.tic()
            if args.descriptor == 'fcgf':
                # using FCGF 5cm to build the initial correspondence
                # `.next()` was removed from Python-3 iterators; use builtin next().
                corr, src_keypts, tgt_keypts, gt_trans, gt_labels = next(dloader_iter)
                corr, src_keypts, tgt_keypts, gt_trans, gt_labels = \
                    corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda()
                data = {
                    'corr_pos': corr,
                    'src_keypts': src_keypts,
                    'tgt_keypts': tgt_keypts,
                    'testing': True,
                }
            elif args.descriptor == 'predator':
                # use Predator to build the inital correspondence
                data, gt_trans, gt_labels = get_predator_data(
                    i, args.num_points)
                src_keypts, tgt_keypts = data['src_keypts'], data['tgt_keypts']
            else:
                # fail fast instead of an UnboundLocalError on `data` below
                raise ValueError(
                    f"Unknown descriptor: {args.descriptor!r} (expected 'fcgf' or 'predator')")

            data_time = data_timer.toc()

            #################################
            # forward pass
            #################################
            model_timer.tic()
            res = model(data)
            pred_trans, pred_labels = res['final_trans'], res['final_labels']

            model_time = model_timer.toc()
            class_stats = class_loss(pred_labels, gt_labels)
            loss, recall, Re, Te, rmse = evaluate_metric(
                pred_trans, gt_trans, src_keypts, tgt_keypts, pred_labels)

            #################################
            # record the evaluation results.
            #################################
            # save statistics
            stats[i, 0] = float(recall / 100.0)  # success
            stats[i, 1] = float(Re)  # Re (deg)
            stats[i, 2] = float(Te)  # Te (cm)
            stats[i, 3] = int(torch.sum(gt_labels))  # input inlier number
            stats[i, 4] = float(torch.mean(
                gt_labels.float()))  # input inlier ratio
            stats[i, 5] = int(torch.sum(
                gt_labels[pred_labels > 0]))  # output inlier number
            stats[i, 6] = float(
                class_stats['precision'])  # output inlier precision
            stats[i, 7] = float(class_stats['recall'])  # output inlier recall
            stats[i, 8] = float(class_stats['f1'])  # output inlier f1 score
            stats[i, 9] = model_time
            stats[i, 10] = data_time
            stats[i, 11] = scene_ind

            final_poses[i] = pred_trans[0].detach().cpu().numpy()

    return stats, final_poses