Example #1
utils.makedirs(args.out_dir)
utils.makedirs(args.checkpoints_dir)

writer = writer.Writer(args)
device = torch.device('cuda', args.device_idx)
torch.set_num_threads(args.n_threads)

# deterministic
torch.manual_seed(args.seed)
cudnn.benchmark = False
cudnn.deterministic = True

# load dataset
template_fp = osp.join('template', 'template.obj')
meshdata = MeshData(args.data_fp,
                    template_fp,
                    split=args.split,
                    test_exp=args.test_exp)
train_loader = DataLoader(meshdata.train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True)
test_loader = DataLoader(meshdata.test_dataset, batch_size=args.batch_size)

# generate/load transform matrices
transform_fp = osp.join(args.data_fp, 'transform.pkl')
if not osp.exists(transform_fp):
    print('Generating transform matrices...')
    mesh = Mesh(filename=template_fp)
    ds_factors = [4, 4, 4, 4]
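    # each factor of 4 roughly quarters the vertex count at every level; the call
    # returns per-level adjacency (A), downsampling (D), upsampling (U) and face (F) matrices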
    _, A, D, U, F = mesh_sampling.generate_transform_matrices(mesh, ds_factors)
    tmp = {'face': F, 'adj': A, 'down_transform': D, 'up_transform': U}
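
    # A plausible continuation, cut off in this example: cache the dictionary at
    # transform_fp with pickle (assumed from the '.pkl' filename; import pickle needed)
    # and reload it on later runs.
    with open(transform_fp, 'wb') as f:
        pickle.dump(tmp, f)
else:
    with open(transform_fp, 'rb') as f:
        tmp = pickle.load(f)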
Example #2
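    # build the per-level spiral convolution indices and move the sparse
    # down-/up-sampling transforms onto the target device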
    spiral_indices_list = [
        utils.preprocess_spiral(tmp['face'][idx], seq_length[idx],
                                tmp['vertices'][idx], dilation[idx]).to(device)
        for idx in range(len(tmp['face']) - 1)
    ]
    down_transform_list = [
        utils.to_sparse(down_transform).to(device)
        for down_transform in tmp['down_transform']
    ]
    up_transform_list = [
        utils.to_sparse(up_transform).to(device)
        for up_transform in tmp['up_transform']
    ]

    meshdata = MeshData("data/CoMA",
                        "data/CoMA/template/template.obj",
                        split="interpolation",
                        test_exp="bareteeth")

    mean = meshdata.mean
    std = meshdata.std

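    # AE(in_channels, out_channels, latent_channels, ...): 3 input channels for the
    # xyz vertex coordinates, encoder widths [32, 32, 32, 64], and a 16-dim latent code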
    model = AE(3, [32, 32, 32, 64], 16, spiral_indices_list,
               down_transform_list, up_transform_list).to(device)
    checkpoint = torch.load(
        "out/interpolation_exp/checkpoints/checkpoint_300.pt")
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()

    print(len(meshdata.train_dataset))
    train_loader = DataLoader(meshdata.train_dataset,
                              batch_size=1)
Example #3
# TODO read model_checkpoint path from command line
if not torch.cuda.is_available():
    model.load_state_dict(
        torch.load(osp.join(args.checkpoints_dir, 'vae_checkpoint.pt'),
                   map_location=torch.device('cpu'))['model_state_dict'])
else:
    model.load_state_dict(
        torch.load(osp.join(args.checkpoints_dir,
                            'vae_checkpoint.pt'))['model_state_dict'])

model.eval()

# generate random measurement matrix
template_mesh = Mesh(filename=template_fp)
meshdata = MeshData(args.data_fp,
                    template_fp,
                    split=args.split,
                    test_exp=args.test_exp)
test_loader = DataLoader(meshdata.test_dataset, batch_size=args.batch_size)

# get a random test example
for i, data in enumerate(test_loader):
    v = torch.squeeze(data.x[0, :, :])
    if i == 42:  #61
        break

# sample a simple measurement matrix, which just subselects a set of vertices;
# this makes it possible to use a regular norm for the gradient descent
A_diag = np.random.binomial(1, 0.005, template_mesh.v.shape[0])
num_measurements = np.count_nonzero(A_diag)
A = torch.from_numpy(np.diag(A_diag)).float()
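
# A minimal sketch, not part of this example, of how such a measurement matrix can
# drive latent-space gradient descent. Assumptions: everything stays on the CPU, the
# latent size is 16, and the loaded model exposes a hypothetical decoder(z) that maps
# a latent code to an [N, 3] vertex tensor; all of these may differ in the actual code.
y = A @ v  # measurements: the selected vertex coordinates, zero rows elsewhere
z = torch.zeros(1, 16, requires_grad=True)  # assumed latent size
optimizer = torch.optim.Adam([z], lr=1e-2)
for _ in range(200):
    optimizer.zero_grad()
    loss = torch.norm(A @ model.decoder(z).squeeze() - y)  # the "regular norm" mentioned above
    loss.backward()
    optimizer.step()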
Example #4
]
del tmp['face']
del tmp['vertices']
down_transform_list = [
    utils.to_sparse(down_transform).to(device)
    for down_transform in tmp['down_transform']
]
del tmp['down_transform']
up_transform_list = [
    utils.to_sparse(up_transform).to(device)
    for up_transform in tmp['up_transform']
]
del tmp

meshdata = MeshData(args.data_fp,
                    template_fp,
                    split='interpolation',
                    test_exp='bareteeth')

test_loader = DataLoader(meshdata.test_dataset, batch_size=100, shuffle=True)
mean = meshdata.mean
std = meshdata.std
del meshdata

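# grab a single batch of 100 test meshes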
for i, (data, idx) in enumerate(test_loader):
    batch = data
    if i == 0:
        break
del test_loader

#### AE ####
model_ae = AE(args.in_channels, args.out_channels, args.latent_channels,