def main(model_config,
         dataset_type,
         save_outputs,
         output_dir,
         data_config,
         seed,
         small_run,
         entry,
         device):
    """Evaluate a model on a chosen split, or on one dataset entry.

    When `entry` is None the whole split is evaluated; otherwise only
    the single named entry is run.
    """
    # Build the model and move it onto the target device.
    model = make_model(**model_config)
    model.to(device)

    # load_data() returns (train, val, test); train is unused here.
    _, val, test = load_data()
    if dataset_type == "test":
        dataset = test
    else:
        dataset = val

    init_randomness(seed)
    if entry is not None:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(model, dataset, entry, device)
    else:
        print("Evaluating the model on {} ({})".format(data_config["data_name"],
                                                       dataset_type))
        evaluate_model_on_dataset(model, dataset, small_run, device, save_outputs, output_dir)
# Esempio n. 2, score: 0  (scraper page marker — not Python code)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         dataset_type, entry, device):
    """Evaluate a sinkhorn-based model on a full split or one entry.

    Single-entry mode attaches a TensorBoard writer to the model's
    sinkhorn optimizer so its progress can be inspected.
    """
    # Build the model; only its sinkhorn optimizer is moved to the device.
    model = make_model(**model_config)
    model.sinkhorn_opt.to(device)

    from tensorboardX import SummaryWriter
    from datetime import datetime

    # Choose the split to evaluate.
    train, test = load_data(dorn_mode=False)
    if dataset_type == "train":
        dataset = train
    else:
        dataset = test

    def eval_fn(input_, device):
        # All-ones weight map: every pixel contributes equally.
        return model.evaluate(input_["rgb"], input_["crop"][0, :],
                              input_["depth_cropped"],
                              torch.ones_like(input_["depth_cropped"]),
                              device)

    init_randomness(seed)

    if entry is not None:
        print("Evaluating {}".format(entry))
        # Timestamped run directory for TensorBoard logging.
        model.sinkhorn_opt.writer = SummaryWriter(
            log_dir=os.path.join("runs",
                                 datetime.now().strftime('%b%d'),
                                 datetime.now().strftime('%H-%M-%S_') +
                                 "densedepth_hist_match_wass"))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
    else:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
def main(model_config, dataset_type, save_outputs, output_dir, data_config,
         seed, small_run, device):
    """Evaluate a DORN-style model on the chosen train/test split."""
    # Build the model in eval mode; the SID discretization object
    # lives outside the module tree, so it is moved separately.
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    model.sid_obj.to(device)

    # Choose the split to evaluate.
    train, test = load_data(dorn_mode=True)
    if dataset_type == "test":
        dataset = test
    else:
        dataset = train

    # Debug aid: show the tensor fields and shapes of one sample.
    shapes = [(name, entry.shape) for name, entry in dataset[0].items()
              if isinstance(entry, torch.Tensor)]
    print(shapes)

    init_randomness(seed)

    def eval_fn(input_, device):
        # All-ones weight map: every pixel contributes equally.
        ones = torch.ones_like(input_["depth_cropped"]).to(device)
        return model.evaluate(input_["bgr"].to(device),
                              input_["bgr_orig"].to(device),
                              input_["crop"][0, :],
                              input_["depth_cropped"].to(device),
                              input_["depth"].to(device),
                              ones, device)

    print("Evaluating the model on {} ({})".format(data_config["data_name"],
                                                   dataset_type))
    evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                              save_outputs, output_dir)
# Esempio n. 4, score: 0  (scraper page marker — not Python code)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         entry, device):
    """Evaluate a SPAD-based model on the full dataset or one entry."""
    # Build the model and move it onto the target device.
    model = make_model(**model_config)
    model.to(device)

    # Imported here (matching the sibling scripts) but only needed if a
    # TensorBoard writer is re-enabled on the model.
    from tensorboardX import SummaryWriter
    from datetime import datetime

    # DORN-mode loading returns a single dataset here.
    dataset = load_data(dorn_mode=True)

    def eval_fn(input_, device):
        return model.evaluate(input_["rgb_cropped"].to(device),
                              input_["rgb_cropped_orig"].to(device),
                              input_["spad"].to(device),
                              input_["mask_orig"].to(device),
                              input_["depth_cropped_orig"].to(device),
                              device)

    init_randomness(seed)
    if entry is not None:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
    else:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         entry, device):
    """Evaluate a DORN-style model on the full dataset or one entry."""
    # Build the model in eval mode; the SID discretization object
    # lives outside the module tree, so it is moved separately.
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    model.sid_obj.to(device)

    # DORN-mode loading returns a single dataset here.
    dataset = load_data(dorn_mode=True)

    def eval_fn(input_, device):
        return model.evaluate(input_["rgb_cropped"].to(device),
                              input_["rgb_cropped_orig"].to(device),
                              input_["depth_cropped_orig"].to(device),
                              input_["mask_orig"].to(device),
                              device)

    init_randomness(seed)

    if entry is not None:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
    else:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
# Esempio n. 6, score: 0  (scraper page marker — not Python code)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         dataset_type, entry):
    """Evaluate a model (CPU-only variant: no device parameter)."""
    # Build the model; note this variant never moves it to a device.
    model = make_model(**model_config)

    # Imported here (matching the sibling scripts) but only needed if a
    # TensorBoard writer is re-enabled on the model.
    from tensorboardX import SummaryWriter
    from datetime import datetime

    # Choose the split to evaluate.
    train, test = load_data(dorn_mode=False)
    if dataset_type == "train":
        dataset = train
    else:
        dataset = test

    def eval_fn(input_, device):
        # `device` is accepted for interface parity but unused here;
        # the all-ones weight map counts every pixel equally.
        return model.evaluate(input_["rgb"], input_["crop"][0, :],
                              input_["depth_cropped"],
                              input_["rawdepth_cropped"],
                              input_["mask_cropped"],
                              torch.ones_like(input_["depth_cropped"]))

    init_randomness(seed)

    if entry is not None:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, None,
                                     save_outputs, output_dir)
    else:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, None,
                                  save_outputs, output_dir)
def main(model_config, save_outputs, output_dir, data_config, seed, small_run,
         device):
    """Evaluate a model on the entire loaded dataset (no entry mode)."""
    # Build the model in eval mode and move it onto the target device.
    model = make_model(**model_config)
    model.eval()
    model.to(device)

    dataset = load_data()

    init_randomness(seed)

    print("Evaluating the model on {}".format(data_config["data_name"]))
    evaluate_model_on_dataset(model, dataset, small_run, device, save_outputs,
                              output_dir)
# Esempio n. 8, score: 0  (scraper page marker — not Python code)
def main(model_config, dataset_type, save_outputs, output_dir, data_config,
         seed, small_run, entry, device):
    """Evaluate a DORN + sinkhorn model on a split or one entry.

    Full-split mode registers a sacred FileStorageObserver; single-entry
    mode attaches a TensorBoard writer to the sinkhorn optimizer.
    """
    # Build the model in eval mode; the SID object and sinkhorn
    # optimizer live outside the module tree and are moved separately.
    model = make_model(**model_config)
    model.eval()
    model.to(device)
    model.sid_obj.to(device)
    model.sinkhorn_opt.to(device)

    # Choose the split to evaluate.
    train, test = load_data(dorn_mode=True)
    if dataset_type == "test":
        dataset = test
    else:
        dataset = train

    from tensorboardX import SummaryWriter
    from datetime import datetime

    init_randomness(seed)

    def eval_fn(input_, device):
        # All-ones weight map: every pixel contributes equally.
        ones = torch.ones_like(input_["depth_cropped"]).to(device)
        return model.evaluate(input_["bgr"].to(device),
                              input_["bgr_orig"].to(device),
                              input_["crop"][0, :],
                              input_["depth_cropped"].to(device),
                              ones, device)

    if entry is not None:
        print("Evaluating {}".format(entry))
        # Timestamped run directory for TensorBoard logging.
        model.sinkhorn_opt.writer = SummaryWriter(
            log_dir=os.path.join("runs",
                                 datetime.now().strftime('%b%d'),
                                 datetime.now().strftime('%H-%M-%S_') +
                                 "dorn_hist_match_wass"))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)
    else:
        print("Evaluating the model on {} ({}).".format(
            data_config["data_name"], dataset_type))
        # Record this run under <output_dir>/runs via sacred's observer.
        ex.observers.append(
            FileStorageObserver.create(os.path.join(output_dir, "runs")))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
def main(model_config,
         save_outputs,
         output_dir,
         data_config,
         seed,
         small_run,
         entry,
         device):
    """Evaluate a densedepth sinkhorn-opt model on the dataset or one entry.

    A TensorBoard writer is always attached to the model so the sinkhorn
    optimization can be inspected under a timestamped run directory.
    """
    # Load the model and move it onto the target device.
    model = make_model(**model_config)
    model.to(device)

    from tensorboardX import SummaryWriter
    from datetime import datetime
    # Timestamped run directory for TensorBoard logging.
    model.writer = SummaryWriter(log_dir=os.path.join("runs",
                                                      datetime.now().strftime('%b%d'),
                                                      datetime.now().strftime('%H-%M-%S_') + \
                                                      "densedepth_sinkhorn_opt"))

    # Load the data
    dataset = load_data(dorn_mode=False)
    eval_fn = lambda input_, device: model.evaluate(input_["rgb"], # RGB input
                                                    input_["rgb_cropped"], # rgb cropped for intensity scaling
                                                    input_["crop"], # 4-tuple of crop parameters
                                                    input_["spad"],  # simulated SPAD
                                                    input_["mask"], # Cropped mask
                                                    input_["depth_cropped"], # Ground truth depth
                                                    device)
    init_randomness(seed)
    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        # BUG FIX: the original passed the undefined name `torch_cuda_device`
        # here (guaranteed NameError); the in-scope parameter is `device`.
        evaluate_model_on_dataset(eval_fn, dataset, small_run, device,
                                  save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        # BUG FIX: same undefined-name correction as above.
        evaluate_model_on_data_entry(eval_fn, dataset, entry, device,
                                     save_outputs, output_dir)