# Example 1
def setup(config):
    """Build a wrapped MDE model from the experiment config.

    The per-backend I/O settings (image key, input/output tensor type and
    channel order) are read from ``config`` rather than being hard-coded
    per MDE backend, so the old per-model branches are no longer needed.
    """
    mde = ex.get_and_configure(config['mde'])
    return MDE(mde,
               key=config['img_key'],
               in_type=config['in_type'],
               in_order=config['in_order'],
               out_type=config['out_type'],
               out_order=config['out_order'])
def setup(config):
    """Create a CapturedEvaluation for a single preprocessed scene."""
    scene_data = np.load(IN_DATA_DIR/f'{config["scene"]}.npy', allow_pickle=True)[()]
    model = ex.get_and_configure(config['method'])
    transform = Compose([ex.transforms[t] for t in config['transform']])
    use_gpu = config['gpu'] is not None and torch.cuda.is_available()
    if use_gpu:
        # NOTE(review): CUDA_VISIBLE_DEVICES is assigned after torch.cuda
        # has already been queried; it may be too late to take effect —
        # confirm against the launch environment.
        os.environ['CUDA_VISIBLE_DEVICES'] = config['gpu']
        device = torch.device('cuda')
        print(f"Using gpu {config['gpu']} (CUDA_VISIBLE_DEVICES = {os.environ['CUDA_VISIBLE_DEVICES']}).")
    else:
        device = torch.device('cpu')
        print("Using cpu.")
    return CapturedEvaluation(model=model, scene_data=scene_data, transform=transform, device=device)
def setup(config):
    """Create an NYUv2Evaluation: model, dataset (with transform), device."""
    model = ex.get_and_configure(config['method'])
    # Methods that don't consume transient data run on the plain NYUv2 split.
    if config['method'] in ['mde', 'median', 'gt_hist']:
        dataset = NYUDepthv2(split=config['split'])
    else:
        dataset = NYUDepthv2Transient(split=config['split'], sbr=config['sbr'])
    dataset.transform = Compose([ex.transforms[t] for t in config['transform']])
    use_gpu = config['gpu'] is not None and torch.cuda.is_available()
    if use_gpu:
        # NOTE(review): CUDA_VISIBLE_DEVICES is assigned after torch.cuda
        # has already been queried; confirm it still takes effect.
        os.environ['CUDA_VISIBLE_DEVICES'] = config['gpu']
        device = torch.device('cuda')
        print(f"Using gpu {config['gpu']} (CUDA_VISIBLE_DEVICES = {os.environ['CUDA_VISIBLE_DEVICES']}).")
    else:
        device = torch.device('cpu')
        print("Using cpu.")
    return NYUv2Evaluation(model=model,
                           dataset=dataset,
                           device=device,
                           pre_cropped=config['pre_cropped'])
# Example 4
def setup(config):
    """Assemble an MDETransient model from the experiment config.

    Raises:
        ValueError: if ``config['refl_est']`` is not a known estimator name
            (previously an unknown name left ``refl_est`` unbound and
            crashed later with a NameError).
    """
    mde_model = ex.get_and_configure('mde')
    preproc = TransientPreprocessor(config['source_n_sid_bins'],
                                    config['n_ambient_bins'],
                                    config['edge_coeff'], config['n_std'])
    # Fail loudly on an unknown estimator instead of a delayed NameError.
    if config['refl_est'] == 'gray':
        refl_est = refl_est_gray
    elif config['refl_est'] == 'red':
        refl_est = refl_est_red
    else:
        raise ValueError(f"Unknown refl_est: {config['refl_est']!r}")
    source_disc = SI(config['source_n_sid_bins'], config['source_alpha'],
                     config['source_beta'], config['source_offset'])
    # An all-zero crop means "no crop".
    crop = config['crop'] if config['crop'] != [0, 0, 0, 0] else None
    return MDETransient(mde_model=mde_model,
                        preproc=preproc,
                        refl_est=refl_est,
                        source_disc=source_disc,
                        min_depth=config['min_depth'],
                        max_depth=config['max_depth'],
                        crop=crop,
                        radial=config['radial'],
                        fc=config['fc'],
                        image_key=config['image_key'],
                        transient_key=config['transient_key'])
        self.device = device

    def evaluate(self):
        """Run the model on the preprocessed scene and attach metrics.

        Returns a dict with 'depth' (the model prediction) and 'metrics'.
        """
        # Transform the raw scene data and collate it into a batch of one.
        batch = default_collate([self.transform(self.scene_data)])
        # Move every tensor in the batch onto the evaluation device.
        batch = {name: tensor.to(self.device) for name, tensor in batch.items()}
        prediction = {'depth': self.model(batch)}
        prediction['metrics'] = self.compute_metrics(batch, prediction)
        return prediction

    def compute_metrics(self, data, pred):
        """Compare predicted vs. ground-truth depth under the valid-pixel mask."""
        pred_depth = pred['depth'].cpu().squeeze()
        gt_depth = data['depth'].cpu().squeeze()
        valid_mask = data['mask'].cpu().squeeze()
        return get_depth_metrics(pred_depth, gt_depth, valid_mask)


if __name__ == '__main__':
    # Run a single-scene captured-data evaluation and save the prediction
    # (depth + metrics) to <output_dir>/<scene>/<mde>/<method>.npy.
    evaluator = ex.get_and_configure('CapturedEvaluation')
    pred = evaluator.evaluate()
    # Convert the depth tensor to numpy so the whole dict can be np.save'd.
    pred['depth'] = pred['depth'].numpy()
    scene = ex.config['scene']
    mde_name = ex.config['mde']
    output_dir = Path(ex.config['output_dir'])/f'{scene}'/f'{mde_name}'
    output_dir.mkdir(parents=True, exist_ok=True)
    np.save(output_dir/f'{ex.config["method"]}.npy', pred)
def setup(config):
    """Build an MDEMedian wrapper around the configured base MDE."""
    base_mde = ex.get_and_configure('mde')
    return MDEMedian(base_mde, gt_key=config['median_gt_key'])
@ex.setup('median')
def setup(config):
    """Register the 'median' method: median-rescaled base MDE."""
    base = ex.get_and_configure('mde')
    return MDEMedian(base, gt_key=config['median_gt_key'])


@ex.entity
class MDEMedian:
    """Rescale an MDE prediction so its median matches the ground truth's."""

    def __init__(self, mde_model, gt_key, crop=NYUV2_CROP):
        self.mde_model = mde_model
        # Key under which the ground-truth depth lives in the data dict.
        self.gt_key = gt_key
        self.crop = crop

    def __call__(self, data):
        raw = self.mde_model(data)
        if self.crop is None:
            raw_median = torch.median(raw)
        else:
            # Take the median over the evaluation crop only
            # (makes a big difference).
            top, bottom, left, right = self.crop
            raw_median = torch.median(raw[..., top:bottom, left:right])
        gt = data[self.gt_key]
        raw_median = raw_median.to(gt.device)
        raw = raw.to(gt.device)
        return raw * (torch.median(gt) / raw_median)


if __name__ == '__main__':
    # Smoke-test entry point: instantiate the configured MDEMedian model.
    from pdb import set_trace
    model = ex.get_and_configure('MDEMedian')
# Example 8
        self.in_order = in_order  # Input channel location required by mde
        self.out_type = out_type  # Output type of mde
        self.out_order = out_order  # Output channel location of mde

    def __call__(self, data):
        """
        Wraps the mde so that it takes torch input and produces numpy output
        image_tensor is a torch float tensor in [0, 1] in NCHW format from the torch dataloader
        out is a torch tensor in NCHW format
        """
        batch = data[self.key]
        # Re-order / convert the input into what the wrapped mde expects.
        if self.in_order == 'nhwc':
            batch = batch.permute(0, 2, 3, 1)
        if self.in_type == 'numpy':
            batch = batch.cpu().numpy()
        prediction = self.mde(batch)
        # Normalize the output back into a torch tensor in NCHW order.
        if self.out_type == 'numpy':
            prediction = torch.from_numpy(prediction)
        if self.out_order == 'nhwc':
            prediction = prediction.permute(0, 3, 1, 2)
        return prediction


if __name__ == '__main__':
    # Smoke test: run the wrapped MDE on a random NCHW image tensor and
    # report the output's shape and type.
    from pdb import set_trace
    mde_model = ex.get_and_configure('MDE')
    test = {'image': torch.randn(1, 3, 480, 640)}
    output = mde_model(test)
    print(output.shape)
    print(type(output))
def setup(config):
    """Build an MDEGTHist wrapper around the configured base MDE."""
    base = ex.get_and_configure('mde')
    return MDEGTHist(base, config['gt_hist_gt_key'])
def summarize(all_metric_dicts):
    """Average a list of {<metric>: value} dicts into one summary dict."""
    if not all_metric_dicts:
        return {}
    # The first dict defines which metrics get summarized.
    return {name: np.mean([metrics[name] for metrics in all_metric_dicts])
            for name in all_metric_dicts[0]}

if __name__ == '__main__':
    # parser = configargparse.get_arg_parser()
    # ex.config = vars(parser.parse_args())
    # set_trace()
    # Run the configured NYUv2 evaluation and persist the summary metrics
    # plus the cropped per-image depth predictions.
    evaluator = ex.get_and_configure('NYUv2Evaluation')
    # set_trace()
    print(f"Evaluating {ex.config['mde']} in " + \
          f"{ex.config['method']} mode.")
    preds = evaluator.evaluate()
    summary = summarize([p['metrics'] for p in preds])
    # Stack per-image cropped depth maps into a single (N, H, W) array.
    depth_preds_cropped = np.stack([p['depth_cropped'].cpu().squeeze() for p in preds], axis=0)
    # NOTE(review): `cfg` is not defined in this file view and `config` is
    # never used below — confirm whether this call is still needed.
    config = cfg()
    method_name = ex.config['method']
    mde_name = ex.config['mde']
    output_dir = Path(ex.config['output_dir'])/f'{method_name}'/f'{mde_name}'
    # Transient results are additionally keyed by signal-to-background ratio.
    if ex.config['method'] == 'transient':
        output_dir = output_dir/f'sbr_{ex.config["sbr"]}'
    output_dir.mkdir(parents=True, exist_ok=True)
    np.save(output_dir/'summary', summary)
    np.save(output_dir/'preds_cropped', depth_preds_cropped)