Example #1
def evaluate_depth(self, batch):
    """Evaluate batch to produce depth metrics."""
    # Get predicted depth
    inv_depths = self.model(batch)['inv_depths']
    depth = inv2depth(inv_depths[0])
    # Post-process predicted depth
    batch['rgb'] = flip_lr(batch['rgb'])
    inv_depths_flipped = self.model(batch)['inv_depths']
    inv_depth_pp = post_process_inv_depth(
        inv_depths[0], inv_depths_flipped[0], method='mean')
    depth_pp = inv2depth(inv_depth_pp)
    batch['rgb'] = flip_lr(batch['rgb'])
    # Calculate predicted metrics
    metrics = OrderedDict()
    if 'depth' in batch:
        for mode in self.metrics_modes:
            if self.config.model.loss.mask_ego:
                metrics[self.metrics_name + mode] = compute_ego_depth_metrics(
                    self.config.model.params,
                    gt=batch['depth'],
                    pred=depth_pp if 'pp' in mode else depth,
                    path_to_ego_masks=batch['path_to_ego_mask'],
                    use_gt_scale='gt' in mode)
            else:
                metrics[self.metrics_name + mode] = compute_depth_metrics(
                    self.config.model.params,
                    gt=batch['depth'],
                    pred=depth_pp if 'pp' in mode else depth,
                    use_gt_scale='gt' in mode)
    # Return metrics and extra information
    return {'metrics': metrics, 'inv_depth': inv_depth_pp}
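Note on the post-processing step above: `post_process_inv_depth(..., method='mean')` fuses the prediction on the original image with the prediction made on a horizontally flipped copy. The sketch below shows only the basic idea in plain PyTorch; the function name and the plain 50/50 average are assumptions for illustration, and the library routine typically also treats the image borders specially.

import torch

def naive_flip_fusion(inv_depth, inv_depth_from_flipped_input):
    """Illustrative mean fusion of two inverse-depth hypotheses.

    `inv_depth` is predicted on the original image; the second argument is
    predicted on the horizontally flipped image, so it is flipped back
    before averaging. Both are [B, 1, H, W] tensors.
    """
    # Undo the horizontal flip on the second prediction (W is dim 3)
    unflipped = torch.flip(inv_depth_from_flipped_input, [3])
    # Simple 50/50 average of the two hypotheses
    return 0.5 * (inv_depth + unflipped)

# Dummy usage with random [1, 1, H, W] inverse depth maps
a = torch.rand(1, 1, 192, 640)
b = torch.rand(1, 1, 192, 640)
fused = naive_flip_fusion(a, b)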
Example #2
def evaluate_depth(self, batch):
    """Evaluate batch to produce depth metrics."""
    # Get predicted depth
    inv_depths = self.model(batch)["inv_depths"]
    save_image(inv_depths[0], "inv_depths.png")  # new
    depth = inv2depth(inv_depths[0])
    # Post-process predicted depth
    batch["rgb"] = flip_lr(batch["rgb"])
    inv_depths_flipped = self.model(batch)["inv_depths"]
    inv_depth_pp = post_process_inv_depth(
        inv_depths[0], inv_depths_flipped[0], method="mean")
    depth_pp = inv2depth(inv_depth_pp)
    batch["rgb"] = flip_lr(batch["rgb"])
    # Calculate predicted metrics
    metrics = OrderedDict()
    if "depth" in batch:
        for mode in self.metrics_modes:
            metrics[self.metrics_name + mode] = compute_depth_metrics(
                self.config.model.params,
                gt=batch["depth"],
                pred=depth_pp if "pp" in mode else depth,
                use_gt_scale="gt" in mode,
            )
    # Return metrics and extra information
    return {"metrics": metrics, "inv_depth": inv_depth_pp}
Example #3
def main(args):
    # Get and sort ground-truth and predicted files
    exts = ('npz', 'png')
    gt_files, pred_files = [], []
    for ext in exts:
        gt_files.extend(glob(os.path.join(args.gt_folder, '*.{}'.format(ext))))
        pred_files.extend(
            glob(os.path.join(args.pred_folder, '*.{}'.format(ext))))
    # Sort ground-truth and prediction
    gt_files.sort()
    pred_files.sort()
    # Loop over all files
    metrics = []
    progress_bar = tqdm(zip(gt_files, pred_files), total=len(gt_files))
    for gt, pred in progress_bar:
        # Get and prepare ground-truth and predictions
        gt = torch.tensor(load_depth(gt)).unsqueeze(0).unsqueeze(0)
        pred = torch.tensor(load_depth(pred)).unsqueeze(0).unsqueeze(0)
        # Calculate metrics
        metrics.append(
            compute_depth_metrics(args,
                                  gt,
                                  pred,
                                  use_gt_scale=args.use_gt_scale))
    # Get and print average value
    metrics = (sum(metrics) / len(metrics)).detach().cpu().numpy()
    names = ['abs_rel', 'sqr_rel', 'rmse', 'rmse_log', 'a1', 'a2', 'a3']
    for name, metric in zip(names, metrics):
        print('{} = {}'.format(name, metric))
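The `args` namespace consumed by `main` is not defined in this snippet. A plausible argparse front end is sketched below; every flag name and default is an assumption (in particular the depth-range and crop settings that `compute_depth_metrics` reads from its first argument), so align them with your evaluation config.

import argparse

def parse_args():
    # Hypothetical parser; flag names and defaults are assumptions
    parser = argparse.ArgumentParser(description='Evaluate saved depth maps')
    parser.add_argument('--gt_folder', type=str, required=True,
                        help='Folder with ground-truth depth maps (.npz or .png)')
    parser.add_argument('--pred_folder', type=str, required=True,
                        help='Folder with predicted depth maps (.npz or .png)')
    parser.add_argument('--use_gt_scale', action='store_true',
                        help='Apply ground-truth median scaling')
    parser.add_argument('--min_depth', type=float, default=0.0,
                        help='Minimum depth considered during evaluation')
    parser.add_argument('--max_depth', type=float, default=80.0,
                        help='Maximum depth considered during evaluation')
    parser.add_argument('--crop', type=str, default='', choices=['', 'garg'],
                        help='Optional evaluation crop')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())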
Example #4
def evaluate_depth_maps(pred_folder, gt_folder, use_gt_scale, **kwargs):
    """
    Calculates depth metrics from folders of predicted and ground-truth depth files

    Parameters
    ----------
    pred_folder : str
        Folder containing predicted depth maps (.npz with key 'depth')
    gt_folder : str
        Folder containing ground-truth depth maps (.npz with key 'depth')
    use_gt_scale : bool
        Whether to apply ground-truth median scaling
    kwargs : dict
        Extra parameters for depth evaluation
    """
    # Get and sort ground-truth files
    gt_files = glob(os.path.join(gt_folder, '*.npz'))
    gt_files.sort()
    # Get and sort predicted files
    pred_files = glob(os.path.join(pred_folder, '*.npz'))
    pred_files.sort()
    # Prepare configuration
    config = Namespace(**kwargs)
    # Loop over all files
    metrics = []
    for gt, pred in zip(gt_files, pred_files):
        # Get and prepare ground-truth
        gt = np.load(gt)['depth']
        gt = torch.tensor(gt).unsqueeze(0).unsqueeze(0)
        # Get and prepare predictions
        pred = np.load(pred)['depth']
        pred = torch.tensor(pred).unsqueeze(0).unsqueeze(0)
        # Calculate metrics
        metrics.append(
            compute_depth_metrics(config, gt, pred, use_gt_scale=use_gt_scale))
    # Get and print average value
    metrics = (sum(metrics) / len(metrics)).detach().cpu().numpy()
    names = ['abs_rel', 'sqr_rel', 'rmse', 'rmse_log', 'a1', 'a2', 'a3']
    for name, metric in zip(names, metrics):
        print('{} = {}'.format(name, metric))
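For completeness, a call to `evaluate_depth_maps` might look like the following; the folder paths are placeholders and the extra keyword values (depth range, crop) are assumptions that should match whatever `compute_depth_metrics` expects on its config namespace.

# Hypothetical invocation; paths and keyword values are placeholders
evaluate_depth_maps(
    pred_folder='outputs/pred_depth',
    gt_folder='data/gt_depth',
    use_gt_scale=True,
    # Extra kwargs become attributes of the Namespace handed to
    # compute_depth_metrics (e.g. config.min_depth, config.max_depth)
    min_depth=0.0,
    max_depth=80.0,
    crop='garg',
)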