def cfg(data_config):
    model_config = {  # Load pretrained model for testing
        "model_name": "DenseDepth",
        "model_params": {
            "existing": os.path.join("models", "nyu.h5"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None  # Keep as None
    save_outputs = True
    seed = 95290421  # changing seed does not impact evaluation
    small_run = 0
    dataset_type = "test"
    entry = None

    # print(data_config.keys())
    output_dir = os.path.join(
        "results",
        data_config["data_name"],  # e.g. nyu_depth_v2
        "{}_{}".format(dataset_type, small_run),
        model_config["model_name"])  # e.g. DORN_nyu_nohints

    safe_makedir(output_dir)
    ex.observers.append(
        FileStorageObserver.create(os.path.join(output_dir, "runs")))

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
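
Each cfg() in this collection is a Sacred config scope, and they all lean on the same scaffolding: an Experiment instance named ex, Sacred's FileStorageObserver, torch, and two repo helpers, safe_makedir and load_checkpoint. A minimal sketch of that setup follows; the experiment name and the body of safe_makedir are illustrative assumptions rather than the repo's actual code (load_checkpoint is sketched after code example #7), and data_config is expected to be supplied by an earlier config scope or ingredient.

import os
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment("depth_eval")  # hypothetical experiment name

def safe_makedir(path):
    # Create the directory (and any missing parents) if it does not exist.
    os.makedirs(path, exist_ok=True)

# Each cfg(...) in this collection is then registered as a config scope:
# @ex.config
# def cfg(data_config): ...
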
Code example #2
def cfg(data_config, spad_config):
    model_config = {                            # Load pretrained model for testing
        "model_name": "SinkhornOpt",
        "model_params": {
            "sgd_iters": 300,
            "sinkhorn_iters": 40,
            "sigma": 0.5,
            "lam": 2e1,
            "kde_eps": 1e-4,
            "sinkhorn_eps": 1e-7,
            "dc_eps": 1e-5,
            "remove_dc": spad_config["dc_count"] > 0.,
            "use_intensity": spad_config["use_intensity"],
            "use_squared_falloff": spad_config["use_squared_falloff"],
            "lr": 1e5,
            "sid_bins": data_config["sid_bins"],
            "offset": data_config["offset"],
            "min_depth": data_config["min_depth"],
            "max_depth": data_config["max_depth"],
            "alpha": data_config["alpha"],
            "beta": data_config["beta"],
        },
        "model_state_dict_fn": None             # Keep as None
    }
    ckpt_file = None                            # Keep as None
    save_outputs = True
    seed = 95290421
    small_run = 0
    entry = None
    pdict = model_config["model_params"]
    comment = "_".join(["sgd_iters_{}".format(pdict["sgd_iters"]),
                        "sinkhorn_iters_{}".format(pdict["sinkhorn_iters"]),
                        "sigma_{}".format(pdict["sigma"]),
                        "lam_{}".format(pdict["lam"]),
                        "kde_eps_{}".format(pdict["kde_eps"]),
                        "sinkhorn_eps_{}".format(pdict["sinkhorn_eps"]),
                        ])
    del pdict
    # print(data_config.keys())
    fullcomment = comment + "_" + spad_config["spad_comment"]
    output_dir = os.path.join("results",
                              data_config["data_name"],    # e.g. nyu_depth_v2
                              "test_{}".format(small_run),
                              model_config["model_name"])  # e.g. DORN_nyu_nohints
    if fullcomment != "":
        output_dir = os.path.join(output_dir, fullcomment)

    safe_makedir(output_dir)
    ex.observers.append(FileStorageObserver.create(os.path.join(output_dir, "runs")))

    # Devices are for pytorch.
    cuda_device = "0"                       # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = tf_cuda_device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(device,
                                                                os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)
        del model_update, _  # So sacred doesn't collect them.
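
With the defaults above, comment evaluates to "sgd_iters_300_sinkhorn_iters_40_sigma_0.5_lam_20.0_kde_eps_0.0001_sinkhorn_eps_1e-07", so outputs land under results/<data_name>/test_0/SinkhornOpt/<comment>_<spad_comment>/, giving each hyperparameter setting its own results directory.
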
Code example #3
def cfg(data_config):
    model_config = {  # Load pretrained model for testing
        "model_name": "DORN_nyu_hints_Unet",
        "model_params": {
            "hints_len":
            68,
            "spad_weight":
            1.,
            "in_channels":
            3,
            "in_height":
            257,
            "in_width":
            353,
            "sid_bins":
            data_config["sid_bins"],
            "offset":
            data_config["offset"],
            "min_depth":
            data_config["min_depth"],
            "max_depth":
            data_config["max_depth"],
            "alpha":
            data_config["alpha"],
            "beta":
            data_config["beta"],
            "frozen":
            True,
            "pretrained":
            True,
            "state_dict_file":
            os.path.join("models", "torch_params_nyuv2_BGR.pth.tar"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = "checkpoints/Mar15/04-10-54_DORN_nyu_hints_nyu_depth_v2/checkpoint_epoch_9_name_fixed.pth.tar"
    # ckpt_file = None # Bayesian hints eval
    dataset_type = "val"
    save_outputs = True
    output_dir = os.path.join(
        "results",
        data_config["data_name"],  # e.g. nyu_depth_v2
        model_config["model_name"],  # e.g. DORN_nyu_nohints
        dataset_type)
    seed = 95290421
    small_run = False

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(
        device, os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
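
Because ckpt_file is set in this example, the model_params literals above act only as defaults: load_checkpoint's first return value is merged over model_config, so any keys stored in the checkpoint take precedence.
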
Code example #4
def cfg(data_config):
    model_config = {  # Load pretrained model for testing
        "model_name": "DenseDepthHistogramMatchingWasserstein",
        "model_params": {
            "sgd_iters": 100,
            "sinkhorn_iters": 40,
            "sigma": 0.5,
            "lam": 1e1,
            "kde_eps": 1e-4,
            "sinkhorn_eps": 1e-7,
            "dc_eps": 1e-5,
            "lr": 1e5,
            "min_depth": 0.,
            "max_depth": 10.,
            "sid_bins": 68,
            "offset": 0.,
            "alpha": 0.6569154266167957,
            "beta": 9.972175646365525,
            "existing": os.path.join("models", "nyu.h5"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None  # Keep as None
    save_outputs = True
    seed = 95290421  # changing seed does not impact evaluation
    small_run = 0
    dataset_type = "test"
    entry = None

    # print(data_config.keys())
    output_dir = os.path.join(
        "results",
        data_config["data_name"],  # e.g. nyu_depth_v2
        "{}_{}".format(dataset_type, small_run),
        model_config["model_name"])  # e.g. DORN_nyu_nohints

    safe_makedir(output_dir)
    ex.observers.append(
        FileStorageObserver.create(os.path.join(output_dir, "runs")))

    cuda_device = "0,1"  # The visible gpus. First one is the tensorflow, second one is pytorch.
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
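
Code example #4 is the one mixed-framework config: the DenseDepth Keras model (nyu.h5) runs under TensorFlow on the first visible GPU, while the histogram-matching step runs in PyTorch pinned to cuda:1, which is why cuda_device lists two devices.
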
Code example #5
def cfg(data_config):
    model_config = {  # Load pretrained model for testing
        "model_name": "DORN_nyu_nohints",
        "model_params": {
            "in_channels":
            3,
            "in_height":
            257,
            "in_width":
            353,
            "frozen":
            True,
            "pretrained":
            True,
            "state_dict_file":
            os.path.join("models", "torch_params_nyuv2_BGR.pth.tar"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None  # Keep as None
    save_outputs = True
    seed = 95290421
    small_run = 0
    entry = None

    # hyperparams = ["sgd_iters", "sinkhorn_iters", "sigma", "lam", "kde_eps", "sinkhorn_eps"]
    pdict = model_config["model_params"]
    del pdict  # Unused in this no-hints variant; deleted so sacred doesn't collect it.

    # print(data_config.keys())
    output_dir = os.path.join(
        "results",
        data_config["data_name"],  # e.g. nyu_depth_v2
        "{}_{}".format("test", small_run),
        model_config["model_name"])  # e.g. DORN_nyu_nohints

    safe_makedir(output_dir)
    ex.observers.append(
        FileStorageObserver.create(os.path.join(output_dir, "runs")))

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(
        device, os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)
        del model_update, _  # So sacred doesn't collect them.
Code example #6
def cfg(data_config):
    model_config = {                            # Load pretrained model for testing
        "model_name": "DORN_median_matching",
        "model_params": {
            "in_channels": 3,
            "in_height": 257,
            "in_width": 353,
            "sid_bins": 68,
            "offset": 0.,
            "min_depth": 0.,
            "max_depth": 10.,
            "alpha": 0.6569154266167957,
            "beta": 9.972175646365525,
            "frozen": True,
            "pretrained": True,
            "state_dict_file": os.path.join("models", "torch_params_nyuv2_BGR.pth.tar"),
        },
        "model_state_dict_fn": None
    }

    ckpt_file = None                            # Keep as None
    dataset_type = "test"
    save_outputs = True
    seed = 95290421
    small_run = 0

    # print(data_config.keys())
    output_dir = os.path.join("results",
                              data_config["data_name"],    # e.g. nyu_depth_v2
                              "{}_{}".format(dataset_type, small_run),
                              model_config["model_name"])  # e.g. DORN_nyu_nohints

    safe_makedir(output_dir)
    ex.observers.append(FileStorageObserver.create(os.path.join(output_dir, "runs")))

    cuda_device = "0"                       # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(device,
                                                                os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
Code example #7
def cfg(data_config):
    model_config = {}  # To be loaded from the checkpoint file.
    ckpt_file = "checkpoints/Mar07/02-18-30_DenoisingUnetModel_cifar10/checkpoint_epoch_0.pth.tar"
    eval_config = {
        "dataset": "val",  # {val, test}
        "mode": "save_outputs",  # {save_outputs, evaluate_metrics}
        "output_dir": "cifar10_eval"
    }
    seed = 95290421

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(
        device, os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
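
In this example model_config starts empty and is filled entirely from the checkpoint, which pins down the contract of load_checkpoint: its first return value must be a dict shaped like the model_config literals in the other examples. Below is a minimal sketch under that assumption; the checkpoint key names are guesses, not the repo's actual layout.

def load_checkpoint(ckpt_file):
    # Assumed layout: a dict saved with torch.save() carrying the model
    # config plus whatever training state the repo tracks.
    ckpt = torch.load(ckpt_file, map_location="cpu")
    return ckpt["model_config"], ckpt.get("optimizer_state"), ckpt.get("epoch")
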
Code example #8
def cfg(data_config):
    model_config = {  # Load pretrained model for testing
        "model_name": "DORN_bayesian_opt",
        "model_params": {
            "sgd_iters":
            20,
            "lr":
            1e-3,
            "hints_len":
            68,
            "spad_weight":
            1.,
            "in_channels":
            3,
            "in_height":
            257,
            "in_width":
            353,
            "sid_bins":
            data_config["sid_bins"],
            "offset":
            data_config["offset"],
            "min_depth":
            data_config["min_depth"],
            "max_depth":
            data_config["max_depth"],
            "alpha":
            data_config["alpha"],
            "beta":
            data_config["beta"],
            "frozen":
            True,
            "pretrained":
            True,
            "state_dict_file":
            os.path.join("models", "torch_params_nyuv2_BGR.pth.tar"),
        },
        "model_state_dict_fn": None  # Keep as None
    }
    ckpt_file = None  # Keep as None
    dataset_type = "val"
    eval_config = {
        "save_outputs":
        True,
        "evaluate_metrics":
        True,
        "output_dir":
        os.path.join("data", "results", model_config["model_name"],
                     dataset_type),
        "entry":
        None  # If we want to evaluate on a single entry
    }
    seed = 95290421
    small_run = False

    cuda_device = "0"  # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("using device: {} (CUDA_VISIBLE_DEVICES = {})".format(
        device, os.environ["CUDA_VISIBLE_DEVICES"]))
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)

        del model_update, _  # So sacred doesn't collect them.
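
Finally, because these are Sacred config scopes, any value defined above can be overridden from the command line with Sacred's "with" syntax instead of editing the file. A hypothetical invocation (the script name is assumed):

python eval_model.py with small_run=10 dataset_type=test cuda_device='1'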