Example #1
    def test(self):
        print('Start to evaluate on test datasets...')
        _logger, _log_path = prepare_logger(self.config,
                                            log_path=os.path.join(
                                                self.snapshot_dir, 'results'))

        pred_transforms = []
        total_rotation = []
        all_inlier_ratios = []

        num_iter = len(
            self.loader['test'].dataset) // self.loader['test'].batch_size
        c_loader_iter = iter(self.loader['test'])

        self.model.eval()
        with torch.no_grad():
            for idx in tqdm(range(num_iter)):  # loop through this epoch
                inputs = next(c_loader_iter)

                ##################################
                # load inputs to device.
                for k, v in inputs.items():
                    if isinstance(v, list):
                        inputs[k] = [item.to(self.device) for item in v]
                    elif isinstance(v, dict):
                        pass  # nested dicts (e.g. inputs['sample']) stay on the CPU
                    else:
                        inputs[k] = v.to(self.device)

                # Ground-truth rotation magnitude via the trace identity
                # tr(R) = 1 + 2*cos(theta), i.e. theta = arccos((tr(R) - 1) / 2).
                transform_gt = inputs['sample']['transform_gt']
                rot_trace = (transform_gt[:, 0, 0] + transform_gt[:, 1, 1] +
                             transform_gt[:, 2, 2])
                rotdeg = torch.acos(
                    torch.clamp(0.5 * (rot_trace - 1), min=-1.0,
                                max=1.0)) * 180.0 / np.pi
                total_rotation.append(np.abs(to_array(rotdeg)))

                ###################################
                # forward pass
                feats, scores_overlap, scores_saliency = self.model(
                    inputs)  # feats [N1 + N2, C]; per-point overlap/saliency scores
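                # Detach the scores and move them to the CPU so the numpy-based
                # sampling below can consume them.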
                scores_overlap = scores_overlap.detach().cpu()
                scores_saliency = scores_saliency.detach().cpu()

                len_src = inputs['stack_lengths'][0][0]
                src_feats, tgt_feats = feats[:len_src], feats[len_src:]
                src_pcd, tgt_pcd = inputs['src_pcd_raw'], inputs['tgt_pcd_raw']
                src_overlap, tgt_overlap = (scores_overlap[:len_src],
                                            scores_overlap[len_src:])
                src_saliency, tgt_saliency = (scores_saliency[:len_src],
                                              scores_saliency[len_src:])

                ########################################
                # run probabilistic sampling
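                # Interest scores combine predicted overlap and saliency; points
                # are then drawn without replacement with probability
                # proportional to these scores.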
                n_points = 450
                src_scores = src_overlap * src_saliency
                tgt_scores = tgt_overlap * tgt_saliency

                if src_pcd.size(0) > n_points:
                    sel = np.arange(src_pcd.size(0))
                    probs = (src_scores / src_scores.sum()).numpy().flatten()
                    sel = np.random.choice(sel,
                                           size=n_points,
                                           replace=False,
                                           p=probs)
                    src_pcd, src_feats = src_pcd[sel], src_feats[sel]
                if tgt_pcd.size(0) > n_points:
                    sel = np.arange(tgt_pcd.size(0))
                    probs = (tgt_scores / tgt_scores.sum()).numpy().flatten()
                    sel = np.random.choice(sel,
                                           size=n_points,
                                           replace=False,
                                           p=probs)
                    tgt_pcd, tgt_feats = tgt_pcd[sel], tgt_feats[sel]

                ########################################
                # run ransac
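                # Estimate a rigid transform from the sampled correspondences;
                # a hedged Open3D-based sketch of ransac_pose_estimation follows
                # this example.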
                distance_threshold = 0.02
                ts_est = ransac_pose_estimation(
                    src_pcd,
                    tgt_pcd,
                    src_feats,
                    tgt_feats,
                    mutual=False,
                    distance_threshold=distance_threshold,
                    ransac_n=3)
                pred_transforms.append(ts_est)

        total_rotation = np.concatenate(total_rotation, axis=0)
        _logger.info('Rotation range in data: {} deg (avg), {} deg (max)'.format(
            np.mean(total_rotation), np.max(total_rotation)))

        # Stack predictions into shape [num_samples, n_iter, 4, 4] (n_iter == 1 here).
        pred_transforms = torch.from_numpy(
            np.array(pred_transforms)).float()[:, None, :, :]

        c_loader_iter = iter(self.loader['test'])
        num_processed, num_total = 0, len(pred_transforms)
        metrics_for_iter = [
            defaultdict(list) for _ in range(pred_transforms.shape[1])
        ]
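        # One metrics accumulator per refinement iteration (a single iteration here).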

        with torch.no_grad():
            for idx in tqdm(range(num_iter)):  # loop through this epoch
                inputs = next(c_loader_iter)

                batch_size = 1  # evaluation assumes a test batch size of 1
                for i_iter in range(pred_transforms.shape[1]):
                    cur_pred_transforms = pred_transforms[
                        num_processed:num_processed + batch_size, i_iter, :, :]
                    metrics = compute_metrics(inputs['sample'],
                                              cur_pred_transforms)
                    for k in metrics:
                        metrics_for_iter[i_iter][k].append(metrics[k])
                num_processed += batch_size

        for i_iter in range(len(metrics_for_iter)):
            metrics_for_iter[i_iter] = {
                k: np.concatenate(metrics_for_iter[i_iter][k], axis=0)
                for k in metrics_for_iter[i_iter]
            }
            summary_metrics = summarize_metrics(metrics_for_iter[i_iter])
            print_metrics(_logger,
                          summary_metrics,
                          title='Evaluation result (iter {})'.format(i_iter))
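
The ransac_pose_estimation helper used above is external to this snippet. Below is a minimal sketch of how such a helper is commonly built on Open3D's feature-matching RANSAC; it assumes Open3D >= 0.12 and reuses the to_array tensor-to-numpy converter seen above, and is an illustrative reconstruction rather than the repository's verbatim code.

import open3d as o3d


def ransac_pose_estimation(src_pcd, tgt_pcd, src_feat, tgt_feat, mutual=False,
                           distance_threshold=0.05, ransac_n=3):
    # Wrap the point tensors into Open3D geometries.
    src = o3d.geometry.PointCloud()
    src.points = o3d.utility.Vector3dVector(to_array(src_pcd))
    tgt = o3d.geometry.PointCloud()
    tgt.points = o3d.utility.Vector3dVector(to_array(tgt_pcd))

    # Open3D stores features as [feature_dim, num_points].
    src_f = o3d.pipelines.registration.Feature()
    src_f.data = to_array(src_feat).T
    tgt_f = o3d.pipelines.registration.Feature()
    tgt_f.data = to_array(tgt_feat).T

    result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching(
        src, tgt, src_f, tgt_f, mutual, distance_threshold,
        o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
        ransac_n,
        [o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance(
             distance_threshold)],
        o3d.pipelines.registration.RANSACConvergenceCriteria(50000, 0.999))
    return result.transformation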
Example #2
    else:
        raise NotImplementedError
    return model


def main():
    # Load data_loader
    test_dataset = get_test_datasets(_args)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=_args.val_batch_size, shuffle=False)

    model = get_model()
    model.eval()
    evaluate(test_loader, model)
    _logger.info('Finished')


if __name__ == '__main__':
    # Arguments and logging
    parser = rpmnet_eval_arguments()
    _args = parser.parse_args()
    _logger, _log_path = prepare_logger(_args, log_path=_args.eval_save_path)
    if _args.gpu >= 0 and _args.method in ('rpm', 'rpmnet'):
        os.environ['CUDA_VISIBLE_DEVICES'] = str(_args.gpu)
        _device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    else:
        _device = torch.device('cpu')

    main()
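
The evaluate function called from main() is not shown in this excerpt. Below is a minimal sketch of what it might look like, reusing the compute_metrics, summarize_metrics, and print_metrics helpers imported in Example #3; the model call signature, the num_reg_iter argument, and the numpy/defaultdict usage are assumptions for illustration only.

def evaluate(data_loader, model):
    all_metrics = defaultdict(list)
    with torch.no_grad():
        for batch in tqdm(data_loader):
            dict_all_to_device(batch, _device)
            # Hypothetical call: assumes the model returns one transform per
            # refinement iteration and that we score the final one.
            pred_transforms = model(batch, _args.num_reg_iter)
            metrics = compute_metrics(batch, pred_transforms[-1])
            for k in metrics:
                all_metrics[k].append(metrics[k])
    all_metrics = {k: np.concatenate(all_metrics[k], axis=0) for k in all_metrics}
    print_metrics(_logger, summarize_metrics(all_metrics),
                  title='Evaluation result')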
Example #3
import os
from typing import Dict, List

import torch
import torch.utils.data
from tqdm import tqdm

from arguments import rpmnet_train_arguments
from common.colors import BLUE, ORANGE
from common.misc import prepare_logger
from common.torch import dict_all_to_device, CheckPointManager, TorchDebugger
from common.math_torch import se3
from data_loader.datasets import get_train_datasets
from eval import compute_metrics, summarize_metrics, print_metrics
from models.rpmnet import get_model

# Set up arguments and logging
parser = rpmnet_train_arguments()
_args = parser.parse_args()
_logger, _log_path = prepare_logger(_args)
if _args.gpu >= 0:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(_args.gpu)
    _device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
else:
    _device = torch.device('cpu')


def main():
    train_set, val_set = get_train_datasets(_args)
    run(train_set, val_set)


def compute_losses(data: Dict, pred_transforms: List, endpoints: Dict,
                   loss_type: str = 'mae', reduction: str = 'mean') -> Dict:
    """Compute losses