Example #1
import argparse

import meshplot

# Mesh, SDF, and CubeMarcher come from the overfitSDF project's own
# geometry module; the exact import path depends on that repository.


def main():
    meshplot.offline()

    parser = argparse.ArgumentParser(
        description='Geometry tools for overfitSDF')
    parser.add_argument('input_mesh', help='path to input mesh')

    args = parser.parse_args()

    mesh = Mesh(meshPath=args.input_mesh)

    # first check that the mesh loaded correctly
    mesh.show()

    sdf = SDF(mesh)

    # sample the SDF on a 64^3 grid and query it
    cubeMarcher = CubeMarcher()
    grid = cubeMarcher.createGrid(64)

    S = sdf.query(grid)

    # marchedMesh = cubeMarcher.march(grid, S)
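
A minimal entry-point sketch for running the function above as a script; the module filename in the comment is an assumption:

if __name__ == '__main__':
    main()  # e.g.: python geometry_tools.py path/to/mesh.obj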
Example #2
import torch
import meshplot as mp


def sample(network, save_manager, latent_size, name="", nb_samples=10):
    mp.offline()

    # template tensor fixing the latent shape; rand_like below draws
    # nb_samples codes uniform on [0, 1)
    example = torch.ones(latent_size)

    samples = []
    for i in range(nb_samples):
        samples.append(torch.rand_like(example))
    samples = torch.stack(samples, dim=0)

    # decode returns parallel lists of point tensors and batch-index tensors;
    # only the first entry is plotted here
    out_points_list, out_batch_list = network.decode(samples)
    out_points = out_points_list[0]
    out_batch = out_batch_list[0]

    # the first subplot call creates the figure; subsequent calls attach to it
    # via data=plot, one row per sampled cloud
    plot = None
    for i in range(nb_samples):
        cloud_out = out_points[out_batch == i].detach().numpy()

        if plot is None:
            plot = mp.subplot(cloud_out,
                              c=cloud_out[:, 0],
                              s=[nb_samples, 1, i],
                              shading={"point_size": 0.2})
        else:
            mp.subplot(cloud_out,
                       c=cloud_out[:, 0],
                       data=plot,
                       s=[nb_samples, 1, i],
                       shading={"point_size": 0.2})

    save_manager.save_mesh_plot(plot, "sample_{}".format(name))
Example #3

import meshplot as mp


def sample_dataset(save_manager, data_generator, nb_files=10):
    mp.offline()
    train_loader, val_loader = data_generator.generate_loaders()

    i = 0
    for batch_obj in train_loader:
        if i == nb_files:
            break

        points = batch_obj.pos
        batch = batch_obj.batch

        plot = None
        # batch assigns a cloud index to every point, so the max index + 1
        # gives the number of clouds in this batch
        nb_clouds = max(batch) + 1
        for j in range(nb_clouds):
            current_points = points[batch == j].detach().numpy()
            if plot is None:
                plot = mp.subplot(
                    current_points, c=current_points[:, 0],
                    s=[nb_clouds, 1, j], shading={"point_size": 0.2}
                )
            else:
                mp.subplot(
                    current_points, c=current_points[:, 0],
                    data=plot, s=[nb_clouds, 1, j], shading={"point_size": 0.2}
                )

        i += 1

        save_manager.save_mesh_plot(plot, "dataset_samples{}".format(i))
Example #4

import meshplot as mp


# save_html is a method (note the self parameter); in the original project it
# belongs to a class that also provides load_data.
def save_html(self, file_stem, output_folder):
    v, f, facet_colors = self.load_data(file_stem)
    if v is None:
        print(f"The data for {file_stem} could not be loaded. Skipping.")
        return
    output_pathname = output_folder / (file_stem + ".html")
    mp.offline()
    p = mp.plot(v, f, c=facet_colors)
    p.save(str(output_pathname))
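
The / operator in output_folder / (file_stem + ".html") assumes output_folder is a pathlib.Path. A minimal sketch of preparing such a folder (the folder name is illustrative):

from pathlib import Path

output_folder = Path("renders")
output_folder.mkdir(parents=True, exist_ok=True)  # create it if missing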
Example #5
import torch
import meshplot as mp


def vae_clouds(network, data_generator, save_manager, name=""):
    mp.offline()
    train_loader, val_loader = data_generator.generate_loaders()

    for batch in val_loader:
        in_points_list, in_batch_list, mean, variance = network.encode(batch)
        out_points_list, out_batch_list = network.decode(mean)

        batch_size = torch.max(in_batch_list[0]) + 1

        # each batch element gets six subplot rows: an input/output pair for
        # each of the three decoder resolutions
        plot = None
        for j in range(batch_size):
            cloud_in = in_points_list[0][in_batch_list[0] == j].detach().numpy()
            cloud_out = out_points_list[0][out_batch_list[0] == j].detach().numpy()

            if plot is None:
                plot = mp.subplot(
                    cloud_in, c=cloud_in[:, 0],
                    s=[6*batch_size, 1, 6*j], shading={"point_size": 0.2}
                )
            else:
                mp.subplot(
                    cloud_in, c=cloud_in[:, 0],
                    data=plot, s=[6*batch_size, 1, 6*j], shading={"point_size": 0.2}
                )
            mp.subplot(
                cloud_out, c=cloud_out[:, 0],
                data=plot, s=[6*batch_size, 1, 6*j+1], shading={"point_size": 0.2}
            )

            cloud_in = in_points_list[1][in_batch_list[1] == j].detach().numpy()
            cloud_out = out_points_list[1][out_batch_list[1] == j].detach().numpy()

            mp.subplot(
                cloud_in, c=cloud_in[:, 0],
                data=plot, s=[6 * batch_size, 1, 6 * j + 2], shading={"point_size": 0.2}
            )
            mp.subplot(
                cloud_out, c=cloud_out[:, 0],
                data=plot, s=[6 * batch_size, 1, 6 * j + 3], shading={"point_size": 0.2}
            )

            cloud_in = in_points_list[2][in_batch_list[2] == j].detach().numpy()
            cloud_out = out_points_list[2][out_batch_list[2] == j].detach().numpy()

            mp.subplot(
                cloud_in, c=cloud_in[:, 0],
                data=plot, s=[6 * batch_size, 1, 6 * j + 4], shading={"point_size": 0.2}
            )
            mp.subplot(
                cloud_out, c=cloud_out[:, 0],
                data=plot, s=[6 * batch_size, 1, 6 * j + 5], shading={"point_size": 0.2}
            )

        # note: the same output name is reused for every validation batch
        save_manager.save_mesh_plot(plot, "auto_encode_clouds_{}".format(name))
Example #6
import numpy as np
import torch
import meshplot as mp


def morph(network, data_generator, save_manager, name="", nb_steps=5):
    mp.offline()
    train_loader, val_loader = data_generator.generate_loaders()

    for batch in val_loader:
        in_points_list, in_batch_list, mean, variance = network.encode(batch)

        print(mean.size(0))
        for i in range(mean.size(0) - 1):
            mean1 = mean[i]
            for j in range(mean.size(0) - 1 - i):
                mean2 = mean[i + 1 + j]

                # linearly interpolate between the two latent means,
                # endpoints included
                interpol_means = []
                for alpha in np.arange(0.0, 1.0 + 1 / nb_steps, 1 / nb_steps):
                    if alpha >= 1.001:  # guard against floating-point overshoot
                        break

                    interpol_means.append((1 - alpha) * mean1 + alpha * mean2)

                interpol_stack = torch.stack(interpol_means, dim=0)

                out_points_list, out_batch_list = network.decode(
                    interpol_stack)
                batch_size = torch.max(out_batch_list[0]) + 1
                print(batch_size)

                plot = None
                for plot_nb in range(batch_size):
                    cloud_out = out_points_list[0][out_batch_list[0] ==
                                                   plot_nb].detach().numpy()

                    if plot is None:
                        plot = mp.subplot(cloud_out,
                                          c=cloud_out[:, 0],
                                          s=[batch_size, 1, plot_nb],
                                          shading={"point_size": 0.2})
                    else:
                        mp.subplot(cloud_out,
                                   c=cloud_out[:, 0],
                                   data=plot,
                                   s=[batch_size, 1, plot_nb],
                                   shading={"point_size": 0.2})

                print(i)
                print(j)
                nb = (mean.size(0) - 1) * i + j
                print(nb)
                save_manager.save_mesh_plot(plot,
                                            "morph_{}_{}".format(name, nb))

        break  # only the first validation batch is morphed
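
The alpha loop above builds a straight line in latent space, endpoints included. An equivalent, more compact construction (mean1 and mean2 below are stand-ins for two latent codes):

import torch

mean1 = torch.zeros(8)
mean2 = torch.ones(8)
nb_steps = 5
alphas = torch.linspace(0.0, 1.0, nb_steps + 1)  # endpoints included
interpol_stack = torch.stack(
    [(1 - a) * mean1 + a * mean2 for a in alphas], dim=0)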
Example #7
import numpy as np
import igl
import meshplot as mp
import wildmeshing as wm

mp.offline()

# eight corners of a thin box (20 wide, 1 high, 100 long)
v = np.array([
    [-10., -0.5, -50],
    [-10., 0.5, -50],
    [10., 0.5, -50],
    [10., -0.5, -50],
    #
    [-10., -0.5, 50],
    [-10., 0.5, 50],
    [10., 0.5, 50],
    [10., -0.5, 50]
])

# twelve triangles covering the box surface
f = np.array([
    [0, 1, 2], [2, 3, 0], [4, 6, 5], [6, 4, 7],
    [2, 6, 7], [3, 2, 7], [1, 5, 6], [1, 6, 2],
    [1, 4, 5], [4, 1, 0], [0, 7, 4], [7, 0, 3]
])

# igl.write_triangle_mesh("bbb.obj", v, f)
# p = mp.plot(v, f, shading={"wireframe": True, "point_size": 5}, return_plot=True, filename="plot.html")

# wm.tetrahedralize("bbb.obj", "test.mesh", edge_length_r=0.0263)

n_v = -1
index = -1
with open("test.mesh", "r") as in_file:
    pass  # the .mesh parsing code is truncated in the source listing
Example #8

import os

import igl
import meshplot
import numpy as np

meshplot.offline()

root_folder = os.getcwd()

v, f = igl.read_triangle_mesh(
    os.path.join(root_folder, "data", "armadillo.obj"))

# sample points on a 64 x 64 x 64 grid
n = 64
K = np.linspace(-1.0, 1.0, n)
pts = np.array([[x, y, z] for x in K for y in K for z in K])

S, _, _ = igl.signed_distance(
    pts, v, f, sign_type=igl.SIGNED_DISTANCE_TYPE_FAST_WINDING_NUMBER)

nV, nF = igl.marching_cubes(S, pts, n, n, n, 0.0)

meshplot.plot(nV, nF)
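
To keep the reconstructed surface, the mesh can be written back to disk with the same igl bindings (the output filename is an assumption):

igl.write_triangle_mesh(
    os.path.join(root_folder, "armadillo_marching_cubes.obj"), nV, nF)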
Example #9
import argparse

import numpy as np
import torch
import meshplot
from torch.utils.data import DataLoader, TensorDataset, random_split

# SingleShapeSDF, deepsdfloss, get_balancedsampler, test_training,
# plot_training_curve, visualize_voxels, and visualize_marchingcubes come
# from this project's own modules.


def main():
    meshplot.offline()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    parser = argparse.ArgumentParser()
    # TODO: change default values
    parser.add_argument("-i", "--input", help="Path to .npy file. Default: 'data/chair.npy'",
                        default='data/chair.npy')
    parser.add_argument("-e", "--epochs", type=int, help="Number of training epochs. Default: 50", default=100)
    parser.add_argument("-b", "--batch", type=int, help="Batch size. Default: 5000", default=16384)
    parser.add_argument("-r", "--rate", type=float, help="learning rate. Default: 1e-4", default=1e-4)
    args = parser.parse_args()

    file_path = args.input
    n_epochs = args.epochs
    lr = args.rate
    bs = args.batch

    # the .npy file stores two arrays back to back: points, then labels
    with open(file_path, 'rb') as f:
        xyz = np.load(f)
        dataset_size = xyz.shape[0]
        features = torch.from_numpy(xyz)
        labels = np.load(f)
    # balanced sampling (note: the sampler is built here but never passed
    # to the DataLoaders below)
    sampler = get_balancedsampler(labels)
    dataset = TensorDataset(features, torch.from_numpy(labels))
    # the fixed split sizes assume exactly 300,000 samples in the file
    trainset, valset, testset = random_split(dataset, [250000, 10000, 40000])

    train_loader = DataLoader(
        trainset,
        shuffle=True,
        batch_size=bs)

    validation_loader = DataLoader(
        valset,
        shuffle=False,
        batch_size=bs
    )

    test_loader = DataLoader(
        testset, shuffle=False, batch_size=bs
    )

    model = SingleShapeSDF([512, 512, 512]).to(device)

    # loss_fn = torch.nn.MSELoss(reduction='sum')
    loss_fn = deepsdfloss
    # test_overfitting(model, train_loader, loss_fn)
    train_history, validation_history = test_training(model, train_loader, validation_loader, loss_fn,
                                                      n_epochs=n_epochs, learning_rate=lr)

    model.eval()
    test_loss = 0
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            x, y = data[0].to(device), data[1].unsqueeze(1).to(device)
            y_pred = model.forward(x)
            loss = loss_fn(y_pred, y)
            test_loss += loss.item()
        # plot one test batch: predicted vs. ground-truth SDF values as colors
        data = next(iter(test_loader))
        x, y = data[0].to(device), data[1].unsqueeze(1).to(device)
        y_pred = model.forward(x)
        meshplot.plot(x.cpu().numpy(), c=y_pred.cpu().numpy(), shading={"point_size": 0.2}, filename="debug/predicted.html")
        meshplot.plot(x.cpu().numpy(), c=y.cpu().numpy(), shading={"point_size": 0.2}, filename="debug/target.html")
    print(f"TEST LOSS: {test_loss/4}")





    plot_training_curve(train_history, validation_history, test_loss)

    # TODO: validation with another metric (not deepsdf loss)
    # TODO: what metric do they use in the paper?

    visualize_voxels(model, grid_res=20)
    visualize_marchingcubes(model, grid_res=100)

    torch.save(model.state_dict(), "SingleShapeSDF-512.pt")
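
A hedged sketch of reloading the checkpoint saved above; the constructor arguments mirror the call in main():

model = SingleShapeSDF([512, 512, 512])
model.load_state_dict(torch.load("SingleShapeSDF-512.pt", map_location="cpu"))
model.eval()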