Example #1
0
    def __init__(self, cfg_path, pointcloud_n, dataset=None, model_file=None):
        """Load a trained PSGN model described by a YAML config.

        Args:
            cfg_path (str): path to the model's config file (merged over
                'configs/default.yaml').
            pointcloud_n (int): stored for later use; presumably the number
                of points to sample per cloud — TODO confirm against callers.
            dataset: optional dataset handed to ``config.get_model``.
            model_file (str, optional): checkpoint file name; when omitted,
                the one from the config's ``test`` section is used.
        """
        self.cfg = config.load_config(cfg_path, 'configs/default.yaml')
        self.pointcloud_n = pointcloud_n
        self.dataset = dataset
        self.model = config.get_model(self.cfg, dataset)

        # Output directory of psgn model
        out_dir = self.cfg['training']['out_dir']
        # If model_file not specified, use the one from psgn model
        if model_file is None:
            model_file = self.cfg['test']['model_file']
        # Load model weights.
        # Fix: the original passed the undefined name `model` instead of
        # the instance attribute `self.model` (NameError at runtime).
        self.checkpoint_io = CheckpointIO(model=self.model,
                                          checkpoint_dir=out_dir)
        self.checkpoint_io.load(model_file)
                                         batch_size=10,
                                         num_workers=4,
                                         shuffle=False,
                                         collate_fn=data.collate_remove_none,
                                         worker_init_fn=data.worker_init_fn)

# Loader that supplies a fixed batch of validation samples for visualization
vis_loader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=12,
    shuffle=True,
    collate_fn=data.collate_remove_none,
    worker_init_fn=data.worker_init_fn)
# Grab a single batch up front; it is reused for every visualization step
vis_iter = iter(vis_loader)
data_vis = next(vis_iter)

# Build the model for the training dataset
model = config.get_model(cfg, device=device, dataset=train_dataset)

# Training setup
npoints = 1000
optimizer = optim.Adam(model.parameters(), lr=1e-4)
trainer = config.get_trainer(model, optimizer, cfg, device=device)

# Resume from an existing checkpoint when one is available.
# NOTE(review): the project's CheckpointIO.load appears to raise
# FileExistsError (not FileNotFoundError) for a missing file — confirm
# in checkpoints.py before changing this handler.
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
    load_dict = checkpoint_io.load('model.pt')
except FileExistsError:
    load_dict = {}
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
metric_val_best = load_dict.get('loss_val_best',
# File that will hold per-class generation timings
out_time_file_class = os.path.join(generation_dir, 'time_generation.pkl')

# Generation settings pulled from the config
batch_size = cfg['generation']['batch_size']
input_type = cfg['data']['input_type']
vis_n_outputs = cfg['generation']['vis_n_outputs']
mesh_extension = cfg['generation']['mesh_extension']

# Dataset.
# For DTU, when parallelising over images, different images of the same
# object must not be treated as different objects — so disable the
# per-image model split.
cfg['data']['split_model_for_images'] = False
dataset = config.get_dataset(cfg, mode='test', return_idx=True)

# Model (gets the dataset length — presumably for per-shape parameters;
# confirm in config.get_model)
model = config.get_model(cfg, device=device, len_dataset=len(dataset))

# Restore trained weights
checkpoint_io = CheckpointIO(out_dir, model=model)
checkpoint_io.load(cfg['test']['model_file'])

# Generator wrapping the trained model
generator = config.get_generator(model, cfg, device=device)

# Fixed seed so the shuffled test-set order is reproducible
torch.manual_seed(0)
# Loader (shuffle=True but deterministic thanks to the seed above)
test_loader = torch.utils.data.DataLoader(
    dataset, batch_size=1, num_workers=0, shuffle=True)

# Statistics
Example #4
0
                                 epoch=1)
# Validation dataset used only for visualization snapshots
vis_dataset = config.get_dataset(
    'val', cfg, batch_size=12, shuffle=False, repeat_count=1, epoch=1)

# Loaders for each split
train_loader = train_dataset.loader()
val_loader = val_dataset.loader()
vis_loader = vis_dataset.loader()

# One fixed batch reused for every visualization
data_vis = next(iter(vis_loader))

# Model
model = config.get_model(cfg, dataset=train_dataset)

# Training setup
npoints = 1000
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, epsilon=1e-08)

checkpoint_io = CheckpointIO(model, optimizer, model_selection_sign, out_dir)

# Resume when a checkpoint exists; otherwise start fresh.
# NOTE(review): load presumably raises FileExistsError for a missing
# checkpoint in this project — confirm before changing the handler.
try:
    checkpoint_io.load('model')
except FileExistsError:
    print("start from scratch")

# Counters restored (or initialized) by the checkpoint object
epoch_it = checkpoint_io.ckpt.epoch_it
it = checkpoint_io.ckpt.it
Example #5
0
# Number of qualitative outputs per model; None is mapped to the
# "no limit" sentinel -1
vis_n_outputs = cfg["generation"]["vis_n_outputs"]
if vis_n_outputs is None:
    vis_n_outputs = -1

# Test-split dataset and its loader
dataset = config.get_dataset(
    'test', cfg, batch_size=1, shuffle=False, repeat_count=1, epoch=1)
dataloader = dataset.loader()

# Model plus a throwaway optimizer — apparently CheckpointIO requires an
# optimizer argument even though no training happens here (confirm)
model = config.get_model(cfg, dataset=dataset)
dummy_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, epsilon=1e-08)

# Restore trained weights
checkpoint_io = CheckpointIO(model, dummy_optimizer, checkpoint_dir=out_dir)
checkpoint_io.load(cfg['test']['model_file'])

# Generator
generator = config.get_generator(model, cfg)

# Decide which outputs to produce, gated on the generator's abilities
generate_mesh = cfg["generation"]["generate_mesh"]
generate_pointcloud = cfg["generation"]["generate_pointcloud"]

if generate_mesh and not hasattr(generator, "generate_mesh"):
    generate_mesh = False
Example #6
0
# This script only supports Occupancy Flow models
if cfg['method'] != 'oflow':
    print('This script is only available for Occupancy Flow.')
    exit(0)

# The latent-space encoding must already exist on disk
if not os.path.exists(latent_space_file_path):
    raise FileNotFoundError("Latent space encoding does not exists: Please "
                            "run encode_latent_motion_space.py before "
                            "generating a motion transfer.")

# Motion from motion_model is transferred to shape_model
model_0 = {'model': '50002_jumping_jacks', 'start_idx': 110}
model_1 = {'model': '50002_light_hopping_loose', 'start_idx': 53}

# Model
model = config.get_model(cfg, device=device)

# Checkpoint: restore weights, optionally initializing from another run
checkpoint_io = CheckpointIO(
    out_dir,
    model=model,
    initialize_from=cfg['model']['initialize_from'],
    initialization_file_name=cfg['model']['initialization_file_name'])
checkpoint_io.load(cfg['test']['model_file'])

# Generator
generator = config.get_generator(model, cfg, device=device)

# Inference only — switch off training-mode layers
model.eval()
meshes, _ = generator.generate_latent_space_interpolation(
Example #7
0
exit_after = args.exit_after

# Translate the selection mode into a comparison sign for the metric
model_selection_metric = cfg['training']['model_selection_metric']
selection_mode = cfg['training']['model_selection_mode']
if selection_mode == 'maximize':
    model_selection_sign = 1
elif selection_mode == 'minimize':
    model_selection_sign = -1
else:
    raise ValueError('model_selection_mode must be '
                     'either maximize or minimize.')

# Make sure the output directory exists
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

# Model, wrapped for multi-GPU training
model = config.get_model(cfg, device='cuda')
model = torch.nn.DataParallel(model, device_ids=gpus).to(device)

# Training setup
optimizer = optim.Adam(model.parameters(), lr=1e-4)
trainer = config.get_trainer(model, optimizer, cfg, device=device)

# Resume from an existing checkpoint when one is available.
# NOTE(review): the project's CheckpointIO.load appears to raise
# FileExistsError for a missing file — confirm before changing.
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
    load_dict = checkpoint_io.load('model.pt')
except FileExistsError:
    load_dict = {}

epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
metric_val_best = load_dict.get('loss_val_best',
Example #8
0
# Split is 70% train, 10% val, 20% test; index 2034 presumably marks the
# start of the test portion — TODO confirm against the split script
dataset = ct.CTImagesDataset(root,
                             sampled_points=cfg['data']['points_subsample'])
dataset_length = len(dataset)
test_dataset = torch_data.Subset(dataset, list(range(2034, dataset_length)))

# Loader over the held-out test subset
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=1,
    num_workers=0,
    shuffle=False,
    collate_fn=data.collate_remove_none,
    worker_init_fn=data.worker_init_fn)

# Model
model = config.get_model(cfg, device=device, dataset=test_dataset)

# Training setup
npoints = 1000
optimizer = optim.Adam(model.parameters(), lr=1e-4)
trainer = config.get_trainer(model, optimizer, cfg, device=device)

# Resume from an existing checkpoint when one is available.
# NOTE(review): CheckpointIO.load seems to raise FileExistsError for a
# missing file in this project — confirm before changing.
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
    load_dict = checkpoint_io.load('model.pt')
except FileExistsError:
    load_dict = {}
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
metric_val_best = load_dict.get('loss_val_best',
Example #9
0
# CLI options for statistical outlier removal (SOR)
parser.add_argument('--sor_k', type=int, default=2, help='KNN in SOR')
parser.add_argument('--sor_alpha', type=float, default=1.1,
                    help='Threshold = mean + alpha * std')

args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/default.yaml')

device = torch.device("cuda")

# Mirror test-time settings from the config onto the parsed args
args.threshold = cfg['test']['threshold']
args.input_npoint = cfg['data']['pointcloud_n']

# Model, restored directly from a state dict
model = config.get_model(cfg, device=device, dataset=None)
model.load_state_dict(torch.load(cfg['test']['model_file']))

# Generator
generator = config.get_generator(model, cfg, device=device)

# Inference only: freeze the model so no gradients are tracked
model.eval()
for param in model.parameters():
    param.requires_grad = False


def normalize_batch_pc(points):
    """points: [batch, K, 3]"""
    centroid = torch.mean(points, dim=1)  # [batch, 3]
    points -= centroid[:, None, :]  # center, [batch, K, 3]