def main(model_config,
         dataset_type,
         save_outputs,
         output_dir,
         data_config,
         seed,
         small_run,
         entry,
         device):
    # Load the model
    model = make_model(**model_config)
    # model.sid_obj.to(device)
    # print(model)
    model.to(device)

    # Load the data
    _, val, test = load_data()
    dataset = test if dataset_type == "test" else val

    init_randomness(seed)
    if entry is None:
        print("Evaluating the model on {} ({})".format(data_config["data_name"],
                                                       dataset_type))
        evaluate_model_on_dataset(model, dataset, small_run, device, save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(model, dataset, entry, device)
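Every example in this listing calls init_randomness(seed) before evaluating, but the helper itself never appears here. A minimal sketch of what such a helper plausibly does (an assumed reconstruction, not the repository's actual code), seeding Python, NumPy, and PyTorch together:

import random

import numpy as np
import torch


def init_randomness(seed):
    # Assumed implementation: make every RNG the evaluation touches deterministic.
    random.seed(seed)                     # Python's built-in RNG
    np.random.seed(seed)                  # NumPy (metrics arrays, shuffling)
    torch.manual_seed(seed)               # PyTorch CPU RNG
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)  # and every CUDA device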
Example No. 2
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         dataset_type, entry, device):
    # Load the model
    model = make_model(**model_config)
    model.sinkhorn_opt.to(device)

    from tensorboardX import SummaryWriter
    from datetime import datetime

    # Load the data
    train, test = load_data(dorn_mode=False)
    dataset = train if dataset_type == "train" else test
    eval_fn = lambda input_, device: model.evaluate(
        input_["rgb"], input_["crop"][0, :], input_["depth_cropped"],
        torch.ones_like(input_["depth_cropped"]), device)

    init_randomness(seed)

    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        model.sinkhorn_opt.writer = SummaryWriter(
            log_dir=os.path.join(
                "runs",
                datetime.now().strftime('%b%d'),
                datetime.now().strftime('%H-%M-%S_') + "densedepth_hist_match_wass"))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
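Several examples build a timestamped TensorBoard log directory inline, as above. The same idiom isolated, with an illustrative scalar write; SummaryWriter, add_scalar, and close are the standard tensorboardX calls, while the tag and value here are made up:

import os
from datetime import datetime

from tensorboardX import SummaryWriter

# Yields e.g. runs/Jan05/13-42-07_densedepth_hist_match_wass
log_dir = os.path.join("runs",
                       datetime.now().strftime('%b%d'),
                       datetime.now().strftime('%H-%M-%S_') + "densedepth_hist_match_wass")
writer = SummaryWriter(log_dir=log_dir)
writer.add_scalar("eval/loss", 0.0, global_step=0)  # illustrative tag and value
writer.close()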
def main(model_config, dataset_type, save_outputs, output_dir, data_config,
         seed, small_run, device):
    # Load the model
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    model.sid_obj.to(device)

    # Load the data
    train, test = load_data(dorn_mode=True)
    dataset = test if dataset_type == "test" else train

    print(
        list((name, entry.shape) for name, entry in dataset[0].items()
             if isinstance(entry, torch.Tensor)))
    init_randomness(seed)

    eval_fn = lambda input_, device: model.evaluate(
        input_["bgr"].to(device),
        input_["bgr_orig"].to(device),
        input_["crop"][0, :],
        input_["depth_cropped"].to(device),
        input_["depth"].to(device),
        torch.ones_like(input_["depth_cropped"]).to(device),
        device)

    print("Evaluating the model on {} ({})".format(data_config["data_name"],
                                                   dataset_type))
    evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                              save_outputs, output_dir)
Example No. 4
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         entry, device):
    # Load the model
    model = make_model(**model_config)
    # model.sid_obj.to(device)
    # print(model)
    model.to(device)

    from tensorboardX import SummaryWriter
    from datetime import datetime
    # model.writer = SummaryWriter(log_dir=os.path.join("runs",
    #                                                   datetime.now().strftime('%b%d'),
    #                                                   datetime.now().strftime('%H-%M-%S_') + \
    #                                                   "dorn_sinkhorn_opt"))

    # Load the data
    dataset = load_data(dorn_mode=True)
    eval_fn = lambda input_, device: model.evaluate(
        input_["rgb_cropped"].to(device),
        input_["rgb_cropped_orig"].to(device),
        input_["spad"].to(device),
        input_["mask_orig"].to(device),
        input_["depth_cropped_orig"].to(device),
        device)
    init_randomness(seed)
    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         entry, device):
    # Load the model
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    model.sid_obj.to(device)

    # Load the data
    dataset = load_data(dorn_mode=True)
    eval_fn = lambda input_, device: model.evaluate(
        input_["rgb_cropped"].to(device),
        input_["rgb_cropped_orig"].to(device),
        input_["depth_cropped_orig"].to(device),
        input_["mask_orig"].to(device),
        device)

    init_randomness(seed)

    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
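Each variant above wraps its model's evaluate call in an eval_fn closure so that evaluate_model_on_dataset only ever sees a uniform (input_, device) interface. That helper is not part of the listing; a hypothetical sketch of the loop it presumably runs (return shape and bookkeeping are assumptions):

def evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                              save_outputs, output_dir):
    # Hypothetical sketch; output saving and metric aggregation are omitted.
    results = []
    for i, input_ in enumerate(dataset):
        if small_run and i == small_run:  # same early-exit idiom as Example No. 11
            break
        # The closure hides each model's argument list behind one call shape.
        results.append(eval_fn(input_, device))
    return results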
Example No. 6
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         dataset_type, entry):
    # Load the model
    model = make_model(**model_config)
    # model.sid_obj.to(device)

    from tensorboardX import SummaryWriter
    from datetime import datetime

    # Load the data
    train, test = load_data(dorn_mode=False)
    dataset = train if dataset_type == "train" else test
    eval_fn = lambda input_, device: model.evaluate(
        input_["rgb"],
        input_["crop"][0, :],
        input_["depth_cropped"],
        input_["rawdepth_cropped"],
        input_["mask_cropped"],
        torch.ones_like(input_["depth_cropped"]))

    init_randomness(seed)

    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, None,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, None,
                                     save_outputs, output_dir)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         device):
    # Load the model
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    # model.sid_obj.to(device)

    # Load the data
    dataset = load_data()

    init_randomness(seed)

    print("Evaluating the model on {}".format(data_config["data_name"]))
    evaluate_model_on_dataset(model, dataset, small_run, device, save_outputs,
                              output_dir)
def main(model_config, eval_config, data_config, seed, device):
    # Load the model
    model = make_model(**model_config)
    model.to(device)
    print(model)

    # Load the data
    _, val, test = load_data()
    dataset = test if eval_config["dataset"] == "test" else val

    init_randomness(seed)

    # Make dataloader
    dataloader = DataLoader(dataset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=2,
                            pin_memory=True,
                            worker_init_fn=worker_init_randomness)
    if eval_config["mode"] == "save_outputs":
        # Run the model on everything and save everything to disk.
        model.eval()
        safe_makedir(eval_config["output_dir"])
        with torch.no_grad():
            for i, data in enumerate(dataloader):
                print("Evaluating {}".format(i))
                model.write_eval(
                    data,
                    os.path.join(eval_config["output_dir"],
                                 "{}_out.npy".format(i)), device)

    elif eval_config["mode"] == "evaluate_metrics":
        # Load things and call the model's evaluate function on them.
        metrics = model.evaluate_dir(eval_config["output_dir"], device)
        with open(os.path.join(eval_config["output_dir"], "metrics.json"),
                  "w") as f:
            json.dump(metrics, f)
    else:
        print("Unrecognized mode: {}".format(eval_config["mode"]))
Example No. 9
def main(model_config, dataset_type, save_outputs, output_dir, data_config,
         seed, small_run, entry, device):
    # Load the model
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    model.sid_obj.to(device)
    model.sinkhorn_opt.to(device)

    # Load the data
    train, test = load_data(dorn_mode=True)
    dataset = test if dataset_type == "test" else train

    from tensorboardX import SummaryWriter
    from datetime import datetime

    init_randomness(seed)

    eval_fn = lambda input_, device: model.evaluate(
        input_["bgr"].to(device),
        input_["bgr_orig"].to(device),
        input_["crop"][0, :],
        input_["depth_cropped"].to(device),
        torch.ones_like(input_["depth_cropped"]).to(device),
        device)

    if entry is None:
        print("Evaluating the model on {} ({}).".format(
            data_config["data_name"], dataset_type))
        ex.observers.append(
            FileStorageObserver.create(os.path.join(output_dir, "runs")))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        model.sinkhorn_opt.writer = SummaryWriter(
            log_dir=os.path.join(
                "runs",
                datetime.now().strftime('%b%d'),
                datetime.now().strftime('%H-%M-%S_') + "dorn_hist_match_wass"))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
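Example No. 9 appends a FileStorageObserver to an experiment object ex that the snippet never defines, which suggests these main functions are Sacred entry points whose arguments are injected from a config. A minimal sketch of the surrounding boilerplate under that assumption (the experiment name and config values are illustrative):

import os

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment("eval_depth")  # illustrative name
ex.observers.append(FileStorageObserver.create(os.path.join("results", "runs")))


@ex.config
def cfg():
    seed = 95290421         # Sacred injects these into main() by parameter name
    output_dir = "results"


@ex.automain
def main(seed, output_dir):
    print(seed, output_dir)  # config keys arrive as arguments, as in the snippets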
def main(model_config,
         save_outputs,
         output_dir,
         data_config,
         seed,
         small_run,
         entry,
         device):
    # Load the model
    model = make_model(**model_config)
    # model.sid_obj.to(device)
    # print(model)
    model.to(device)

    from tensorboardX import SummaryWriter
    from datetime import datetime
    model.writer = SummaryWriter(
        log_dir=os.path.join(
            "runs",
            datetime.now().strftime('%b%d'),
            datetime.now().strftime('%H-%M-%S_') + "densedepth_sinkhorn_opt"))

    # Load the data
    dataset = load_data(dorn_mode=False)
    eval_fn = lambda input_, device: model.evaluate(input_["rgb"], # RGB input
                                                    input_["rgb_cropped"], # rgb cropped for intensity scaling
                                                    input_["crop"], # 4-tuple of crop parameters
                                                    input_["spad"],  # simulated SPAD
                                                    input_["mask"], # Cropped mask
                                                    input_["depth_cropped"], # Ground truth depth
                                                    device)
    init_randomness(seed)
    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
Example No. 11
def main(dataset_type, entry, save_outputs, output_dir, seed, small_run,
         device):

    # Load the data
    dataset = load_data(channels_first=False)

    # Load the model
    model = DenseDepth()

    init_randomness(seed)

    if entry is None:
        dataloader = DataLoader(
            dataset,
            batch_size=1,
            shuffle=False,
            num_workers=0,  # needs to be 0 to not crash autograd profiler.
            pin_memory=True)
        # if eval_config["save_outputs"]:

        with torch.no_grad():
            metric_list = [
                "delta1", "delta2", "delta3", "rel_abs_diff", "rmse", "mse",
                "log10", "weight"
            ]
            metrics = np.zeros((len(dataset) if not small_run else small_run,
                                len(metric_list)))
            entry_list = []
            outputs = []
            for i, data in enumerate(dataloader):
                # TESTING
                if small_run and i == small_run:
                    break
                entry = data["entry"][0]
                entry = entry if isinstance(entry, str) else entry.item()
                entry_list.append(entry)
                print("Evaluating {}".format(data["entry"][0]))
                # pred, pred_metrics = model.evaluate(data, device)
                pred, pred_metrics, pred_weight = model.evaluate(
                    data["rgb"].to(device), data["depth_cropped"].to(device),
                    torch.ones_like(data["depth_cropped"]).to(device))
                for j, metric_name in enumerate(metric_list[:-1]):
                    metrics[i, j] = pred_metrics[metric_name]

                metrics[i, -1] = pred_weight
                # Option to save outputs:
                if save_outputs:
                    outputs.append(pred.cpu().numpy())

            if save_outputs:
                np.save(
                    os.path.join(
                        output_dir,
                        "densedepth_{}_outputs.npy".format(dataset_type)),
                    np.concatenate(outputs, axis=0))

            # Save metrics using pandas
            metrics_df = pd.DataFrame(data=metrics,
                                      index=entry_list,
                                      columns=metric_list)
            metrics_df.to_pickle(path=os.path.join(
                output_dir, "densedepth_{}_metrics.pkl".format(dataset_type)))
            # Compute weighted averages:
            average_metrics = np.average(metrics_df.iloc[:, :-1],
                                         weights=metrics_df.weight,
                                         axis=0)
            average_df = pd.Series(data=average_metrics,
                                   index=metric_list[:-1])
            average_df.to_csv(os.path.join(
                output_dir,
                "densedepth_{}_avg_metrics.csv".format(dataset_type)),
                              header=True)
            print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
                'd1', 'd2', 'd3', 'rel', 'rms', 'log_10'))
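            # average_metrics follows metric_list[:-1]:
            # 0=delta1, 1=delta2, 2=delta3, 3=rel_abs_diff, 4=rmse, 5=mse, 6=log10.
            # The printout below skips mse (index 5) to match the six labels above.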
            print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
                  format(average_metrics[0], average_metrics[1],
                         average_metrics[2], average_metrics[3],
                         average_metrics[4], average_metrics[6]))
        print("wrote results to {}".format(output_dir))

    else:
        input_unbatched = dataset.get_item_by_id(entry)
        # for key in ["rgb", "albedo", "rawdepth", "spad", "mask", "rawdepth_orig", "mask_orig", "albedo_orig"]:
        #     input_[key] = input_[key].unsqueeze(0)
        from torch.utils.data._utils.collate import default_collate
        data = default_collate([input_unbatched])

        # Checks
        entry = data["entry"][0]
        entry = entry if isinstance(entry, str) else entry.item()
        print("Entry: {}".format(entry))
        # print("remove_dc: ", model.remove_dc)
        # print("use_intensity: ", model.use_intensity)
        # print("use_squared_falloff: ", model.use_squared_falloff)
        pred, pred_metrics, pred_weight = model.evaluate(
            data["rgb"].to(device), data["depth_cropped"].to(device),
            torch.ones_like(data["depth_cropped"]).to(device))
        if save_outputs:
            np.save(
                os.path.join(output_dir,
                             "{}_{}_out.npy".format(dataset_type, entry)),
                pred.cpu().numpy())
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rms', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(pred_metrics["delta1"], pred_metrics["delta2"],
                     pred_metrics["delta3"], pred_metrics["rel_abs_diff"],
                     pred_metrics["rms"], pred_metrics["log10"]))
Example No. 12
        sgd_iters=400,
        sinkhorn_iters=40,
        sigma=.5,
        lam=1e-2,
        kde_eps=1e-4,
        sinkhorn_eps=1e-4,
        remove_dc=spad_config["dc_count"] > 0.,
        use_intensity=spad_config["use_intensity"],
        use_squared_falloff=spad_config["use_squared_falloff"],
        lr=1e3)
    model.to(device)
    _, _, test = load_data(**data_config, spad_config=spad_config)

    dataloader = DataLoader(test, shuffle=True)
    start = perf_counter()
    init_randomness(95290421)
    input_ = test.get_item_by_id("kitchen_0002/1121")
    for key in [
            "rgb", "rgb_orig", "rawdepth", "spad", "mask", "rawdepth_orig",
            "mask_orig"
    ]:
        input_[key] = input_[key].unsqueeze(0).to(device)
    data_load_time = perf_counter() - start
    print("dataloader: {}".format(data_load_time))
    # print(input_["entry"])
    # print(model.hints_extractor[0].weight)

    # Checks
    print(input_["entry"])
    print("remove_dc: ", model.remove_dc)
    print("use_intensity: ", model.use_intensity)