# Dataset
# For DTU, when we parallelise over images, we do not want to treat
# different images of the same object as different objects.
cfg['data']['split_model_for_images'] = False
dataset = config.get_dataset(cfg, mode='test', return_idx=True)

# Model
model = config.get_model(cfg, device=device, len_dataset=len(dataset))

checkpoint_io = CheckpointIO(out_dir, model=model)
checkpoint_io.load(cfg['test']['model_file'])
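# load() restores the trained weights referenced by cfg['test']['model_file']
# into the model registered with CheckpointIO above.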

# Generator
generator = config.get_generator(model, cfg, device=device)

torch.manual_seed(0)
# Loader
test_loader = torch.utils.data.DataLoader(dataset,
                                          batch_size=1,
                                          num_workers=0,
                                          shuffle=True)
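# torch.manual_seed(0) above makes the shuffled iteration order reproducible
# across runs; batch_size=1 processes one sample (object/view) at a time.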

# Statistics
time_dicts = []
vis_file_dict = {}

# Generate
model.eval()
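# eval() switches layers such as dropout and batch norm to inference behaviour,
# so generation is deterministic given the loaded weights.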
Example #2
# NOTE: the opening of this dataset-construction call is truncated in the
# source. The function name `config.get_dataset` and its first arguments are
# assumptions by analogy with Example #1; only the keyword arguments are verbatim.
dataset = config.get_dataset('test', cfg,
                             batch_size=1,
                             shuffle=False,
                             repeat_count=1,
                             epoch=1)
# Loader
dataloader = dataset.loader()

model = config.get_model(cfg, dataset=dataset)
dummy_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, epsilon=1e-08)
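# The optimizer is a placeholder: no training step runs at test time; it appears
# to exist only to satisfy the CheckpointIO constructor used below.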

checkpoint_io = CheckpointIO(model, dummy_optimizer, checkpoint_dir=out_dir)

checkpoint_io.load(cfg['test']['model_file'])

# Generator
generator = config.get_generator(model, cfg)

# Determine what to generate
generate_mesh = cfg["generation"]["generate_mesh"]
generate_pointcloud = cfg["generation"]["generate_pointcloud"]

if generate_mesh and not hasattr(generator, "generate_mesh"):
    generate_mesh = False
    print("Warning: generator does not support mesh generation.")

if generate_pointcloud and not hasattr(generator, "generate_pointcloud"):
    generate_pointcloud = False
    print("Warning: generator does not support pointcloud generation.")

# Statistics
time_dicts = []
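# ---------------------------------------------------------------------------
# Hypothetical continuation (not in the source): a minimal sketch of how the
# objects set up above are typically wired together. The generator method names
# come from the hasattr() checks; their call signatures and the `data` argument
# are assumptions.
import time

for it, data in enumerate(dataloader):
    t0 = time.time()
    if generate_mesh:
        mesh = generator.generate_mesh(data)              # assumed signature
    if generate_pointcloud:
        pointcloud = generator.generate_pointcloud(data)  # assumed signature
    # Record per-sample wall-clock time for the statistics collected above.
    time_dicts.append({'idx': it, 'time (total)': time.time() - t0})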