Example #1
def init_data(args):
    # split the train set into train + val
    # for CIFAR, split 5k for val
    # for ImageNet, split 3k for val
    val_size = 5000 if 'cifar' in args.dataset else 3000
    train_loader, val_loader, _ = get_split_dataset(
        args.dataset, args.batch_size,
        args.n_worker, val_size,
        data_root=args.data_root,
        shuffle=False
    )  # shuffle=False: same sampling order every run
    return train_loader, val_loader
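A hedged usage sketch for init_data above; the argument names come from the snippet itself, the defaults are illustrative only, and init_data (with its get_split_dataset dependency) is assumed importable from the surrounding project:

import argparse

# Illustrative defaults only; real values come from the training script's CLI.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='cifar10')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--n_worker', type=int, default=4)
parser.add_argument('--data_root', default='./data')
args = parser.parse_args([])

train_loader, val_loader = init_data(args)  # CIFAR: 5000 images held out for val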
Example #2
        type=float,
        default=0.0,
        help="Distance of camera from origin; default is the average of the dataset's z_near and z_far (non-DTU only)",
    )
    parser.add_argument("--fps", type=int, default=30, help="FPS of video")
    return parser


args, conf = util.args.parse_args(extra_args)
args.resume = True

device = util.get_cuda(args.gpu_id[0])

dset = get_split_dataset(args.dataset_format,
                         args.datadir,
                         want_split=args.split,
                         training=False)

data = dset[args.subset]
data_path = data["path"]
print("Data instance loaded:", data_path)

images = data["images"]  # (NV, 3, H, W)

poses = data["poses"]  # (NV, 4, 4)
focal = data["focal"]
if isinstance(focal, float):
    # Dataset implementations are inconsistent about returning a float
    # vs. a scalar tensor when fx == fy
    focal = torch.tensor(focal, dtype=torch.float32)
focal = focal[None]
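A minimal illustration of the normalization above, assuming only that focal arrives either as a Python float or as a 1-D tensor of (fx, fy):

import torch

# Both input forms end up with a leading batch axis after focal[None].
for focal in (131.25, torch.tensor([131.25, 131.25])):
    if isinstance(focal, float):
        focal = torch.tensor(focal, dtype=torch.float32)
    focal = focal[None]
    print(focal.shape)  # torch.Size([1]) / torch.Size([1, 2])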
Example #3
    )
    parser.add_argument(
        "--fixed_test",
        action="store_true",
        default=None,
        help="Freeze encoder weights and only train MLP",
    )
    return parser


args, conf = util.args.parse_args(extra_args,
                                  training=True,
                                  default_ray_batch_size=128)
device = util.get_cuda(args.gpu_id[0])

dset, val_dset, _ = get_split_dataset(args.dataset_format, args.datadir)
print("dset z_near {}, z_far {}, lindisp {}".format(dset.z_near, dset.z_far,
                                                    dset.lindisp))

net = make_model(conf["model"]).to(device=device)
net.stop_encoder_grad = args.freeze_enc
if args.freeze_enc:
    print("Encoder frozen")
    net.encoder.eval()

renderer = NeRFRenderer.from_conf(
    conf["renderer"],
    lindisp=dset.lindisp,
).to(device=device)

# Parallelize
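The snippet ends at the parallelization step. As a hedged sketch only (the project may use its own parallel wrapper rather than plain DataParallel), multi-GPU wrapping could look like:

import torch

# Assumes args.gpu_id is a list of device indices, as in get_cuda(args.gpu_id[0]).
if len(args.gpu_id) > 1:
    net = torch.nn.DataParallel(net, device_ids=args.gpu_id)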
Example #4
    return parser


args, conf = util.args.parse_args(
    extra_args,
    default_conf="conf/resnet_fine_mv.conf",
    default_expname="shapenet",
)
args.resume = True

device = util.get_cuda(args.gpu_id)

extra_gpus = list(map(int, args.extra_gpus.split()))

dset = get_split_dataset(args.dataset_format,
                         args.datadir,
                         want_split=args.split)
data_loader = torch.utils.data.DataLoader(
    dset, batch_size=1, shuffle=False, num_workers=8, pin_memory=False
)

output_dir = args.output.strip()
has_output = len(output_dir) > 0

total_psnr = 0.0
total_ssim = 0.0
cnt = 0

if has_output:
    finish_path = os.path.join(output_dir, "finish.txt")
    os.makedirs(output_dir, exist_ok=True)
Example #5
    return parser


args, conf = util.args.parse_args(
    extra_args, default_conf="conf/resnet_fine_mv.conf", default_expname="shapenet",
)
args.resume = True

device = util.get_cuda(args.gpu_id[0])

only_load_these_ids = None
if args.viewlist:
    with open(args.viewlist) as f:
        only_load_these_ids = [line.split()[1] for line in f]
dset = get_split_dataset(
    args.dataset_format,
    args.datadir,
    want_split=args.split,
    training=False,
    only_load_these_ids=only_load_these_ids,
)
data_loader = torch.utils.data.DataLoader(
    dset, batch_size=1, shuffle=False, num_workers=8, pin_memory=False
)

output_dir = args.output.strip()
has_output = len(output_dir) > 0

total_psnr = 0.0
total_ssim = 0.0
cnt = 0

if has_output:
    finish_path = os.path.join(output_dir, "finish.txt")
    os.makedirs(output_dir, exist_ok=True)
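The accumulators above (total_psnr, total_ssim, cnt) are typically updated once per instance and averaged at the end. A hedged sketch, with evaluate_instance as a hypothetical stand-in for the per-object evaluation body:

for batch in data_loader:
    psnr, ssim = evaluate_instance(batch)  # hypothetical helper returning per-instance metrics
    total_psnr += psnr
    total_ssim += ssim
    cnt += 1
print("avg psnr {:.6f}, avg ssim {:.6f}".format(total_psnr / cnt, total_ssim / cnt))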