Example 1
import time
import torch

# SAMPLE_COUNT, ROTATE_MODEL, SURFACE_LEVEL, spline, sdf_net, device and
# MeshRenderer are assumed to be defined at module level in the original file.
def show_models():
    # Cycle through the latent-code spline indefinitely, rendering the mesh of
    # the interpolated code in a live viewer.
    TRANSITION_TIME = 2
    viewer = MeshRenderer()

    while True:
        for sample_index in range(SAMPLE_COUNT):
            try:
                start = time.perf_counter()
                end = start + TRANSITION_TIME
                while time.perf_counter() < end:
                    progress = min(
                        (time.perf_counter() - start) / TRANSITION_TIME, 1.0)
                    if ROTATE_MODEL:
                        viewer.rotation = (
                            147 +
                            (sample_index + progress) / SAMPLE_COUNT * 360 * 6,
                            40)
                    # Evaluate the latent-code spline at a fractional sample
                    # index to get the interpolated code for this moment.
                    code = torch.tensor(spline(float(sample_index) + progress),
                                        dtype=torch.float32,
                                        device=device)
                    viewer.set_mesh(
                        sdf_net.get_mesh(code,
                                         voxel_resolution=64,
                                         sphere_only=False,
                                         level=SURFACE_LEVEL))

            except KeyboardInterrupt:
                viewer.stop()
                return
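Example 1 (and Example 2 below) relies on a spline function that maps a fractional sample index to a latent code, which is not shown here. A minimal sketch of one way such a spline could be built, assuming scipy's CubicSpline over randomly sampled key codes (SAMPLE_COUNT, LATENT_CODE_SIZE and the Gaussian sampling are illustrative assumptions, not values from the original code):

import numpy as np
from scipy.interpolate import CubicSpline

SAMPLE_COUNT = 16        # hypothetical number of key latent codes
LATENT_CODE_SIZE = 128   # hypothetical latent dimensionality

# Sample key codes and repeat the first one at the end so the loop is seamless.
sample_codes = np.random.normal(0, 1, (SAMPLE_COUNT + 1, LATENT_CODE_SIZE))
sample_codes[-1, :] = sample_codes[0, :]

# spline(t) then returns an interpolated latent code for any fractional index t,
# which is what spline(float(sample_index) + progress) relies on above.
spline = CubicSpline(np.arange(SAMPLE_COUNT + 1), sample_codes,
                     bc_type='periodic')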
Example 2
import cv2
import torch
from tqdm import tqdm

# SAMPLE_COUNT, TRANSITION_FRAMES, ROTATE_MODEL, SURFACE_LEVEL, spline,
# sdf_net, device, ensure_directory and MeshRenderer are assumed to be defined
# at module level in the original file.
def create_image_sequence():
    # Render one PNG frame per interpolation step so the sequence can be
    # assembled into a video with ffmpeg (see the command printed below).
    ensure_directory('images')
    frame_index = 0
    viewer = MeshRenderer(size=1080, start_thread=False)
    progress_bar = tqdm(total=SAMPLE_COUNT * TRANSITION_FRAMES)

    for sample_index in range(SAMPLE_COUNT):
        for step in range(TRANSITION_FRAMES):
            code = torch.tensor(
                spline(float(sample_index) + step / TRANSITION_FRAMES),
                dtype=torch.float32,
                device=device)
            if ROTATE_MODEL:
                viewer.rotation = (
                    147 + frame_index /
                    (SAMPLE_COUNT * TRANSITION_FRAMES) * 360 * 6, 40)
            viewer.set_mesh(
                sdf_net.get_mesh(code,
                                 voxel_resolution=128,
                                 sphere_only=False,
                                 level=SURFACE_LEVEL))
            image = viewer.get_image(flip_red_blue=True)
            cv2.imwrite("images/frame-{:05d}.png".format(frame_index), image)
            frame_index += 1
            progress_bar.update()

    progress_bar.close()
    print("\n\nUse this command to create a video:\n")
    print(
        'ffmpeg -framerate 30 -i images/frame-%05d.png -c:v libx264 -profile:v high -crf 19 -pix_fmt yuv420p video.mp4'
    )
Example 3
                                                 device=device)
        # The second half of the batch is drawn from negative_indices
        # (presumably points with a negative SDF value); the matching first
        # half is assigned in code that precedes this excerpt.
        indices[BATCH_SIZE // 2:] = torch.tensor(np.random.choice(
            negative_indices, BATCH_SIZE // 2),
                                                 device=device)

        # One optimization step: minimize the mean absolute error between the
        # predicted SDF and the sampled ground-truth SDF at the chosen points.
        sdf_net.zero_grad()
        predicted_sdf = sdf_net(points[indices, :], latent_code)
        batch_sdf = sdf[indices]
        loss = torch.mean(torch.abs(predicted_sdf - batch_sdf))
        loss.backward()
        optimizer.step()

        # Each time the loss drops below the current error target, render the
        # reconstruction (and optionally save a frame), then advance to the
        # next target.
        if loss.item() < error_targets[image_index]:
            try:
                viewer.set_mesh(
                    sdf_net.get_mesh(latent_code[0, :],
                                     voxel_resolution=64,
                                     raise_on_empty=True))
                if save_images:
                    image = viewer.get_image(flip_red_blue=True)
                    cv2.imwrite("images/frame-{:05d}.png".format(image_index),
                                image)
                image_index += 1
            except ValueError:
                # raise_on_empty=True: no surface could be extracted at this
                # resolution yet, so keep optimizing.
                pass
        step += 1
        print('Step {:04d}, Image {:04d}, loss: {:.6f}, target: {:.6f}'.format(
            step, image_index, loss.item(),
            error_targets[min(image_index, len(error_targets) - 1)]))
    except KeyboardInterrupt:
        viewer.stop()
        break
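The excerpt above does not show how error_targets is built. A minimal sketch of one plausible schedule, assuming a fixed frame count and a geometrically decreasing sequence of loss thresholds (the count and the threshold range are illustrative assumptions, not values from the original code):

import numpy as np

# Hypothetical: 200 loss thresholds decaying from 3e-2 down to 5e-4, so that a
# frame is captured each time the reconstruction loss crosses the next value.
FRAME_COUNT = 200
error_targets = np.geomspace(0.03, 0.0005, num=FRAME_COUNT)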
Example 4
def create_tsne_plot(codes,
                     voxels=None,
                     labels=None,
                     filename="plot.pdf",
                     indices=None):
    from sklearn.manifold import TSNE
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    # os, numpy as np, matplotlib.pyplot as plt, tqdm and the dataset object
    # are assumed to be imported at module level in the original file.

    width, height = 40, 52

    print("Calculating t-sne embedding...")
    tsne = TSNE(n_components=2)
    embedded = tsne.fit_transform(codes)

    print("Plotting...")
    fig, ax = plt.subplots()
    plt.axis('off')
    margin = 0.0128
    plt.margins(margin * height / width, margin)

    x = embedded[:, 0]
    y = embedded[:, 1]
    x = np.interp(x, (x.min(), x.max()), (0, 1))
    y = np.interp(y, (y.min(), y.max()), (0, 1))

    ax.scatter(x, y, c=labels, s=40, cmap='Set1')
    fig.set_size_inches(width, height)

    if voxels is not None:
        print("Creating images...")
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        for i in tqdm(range(voxels.shape[0])):
            viewer.set_voxels(voxels[i, :, :, :].cpu().numpy())
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]),
                                 frameon=True)
            ax.add_artist(box)

    if indices is not None:
        print("Creating images...")
        with open('data/models.txt', 'r') as file:
            dataset_directories = file.readlines()
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        import trimesh
        import logging
        # Effectively silence trimesh's logging output while meshes are loaded.
        logging.getLogger('trimesh').setLevel(1000000)
        for i in tqdm(range(len(indices))):
            mesh = trimesh.load(
                os.path.join(dataset_directories[indices[i]].strip(),
                             'model_normalized.obj'))
            viewer.set_mesh(mesh, center_and_scale=True)
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]),
                                 frameon=True)
            ax.add_artist(box)

    print("Saving PDF...")

    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    plt.savefig(filename, bbox_inches=extent, dpi=200)
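A hypothetical invocation, assuming latent_codes is an (N, latent_size) tensor of shape codes and dataset.labels holds one class index per model (the file path below is an assumption for illustration only):

import torch
from dataset import dataset

latent_codes = torch.load('data/latent_codes.pt')   # hypothetical path
create_tsne_plot(latent_codes.detach().cpu().numpy(),
                 labels=dataset.labels.cpu().numpy(),
                 filename='plots/tsne.pdf')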
Example 5
    # Pick one random object index per class so that every class appears
    # exactly once in the plot.
    indices = []
    for label in range(COUNT):
        objects = (dataset.labels == label).nonzero()
        indices.append(objects[random.randint(0, objects.shape[0] - 1)].item())

    latent_codes = latent_codes[indices, :]

    plot = ImageGrid(COUNT, 2, create_viewer=False)
    with open('data/models.txt', 'r') as file:
        dataset_directories = file.readlines()

    for i in range(COUNT):
        mesh = trimesh.load(
            os.path.join(dataset_directories[indices[i]].strip(),
                         'model_normalized.obj'))
        viewer.set_mesh(mesh, center_and_scale=True)
        viewer.model_color = dataset.get_color(i)
        image = viewer.get_image(crop=True)
        plot.set_image(image, i, 0)

        image = render_image(sdf_net,
                             latent_codes[i, :],
                             color=dataset.get_color(i),
                             crop=True)
        plot.set_image(image, i, 1)
    viewer.delete_buffers()
    plot.save("plots/deepsdf-reconstruction-classes.pdf")

if "autoencoder" in sys.argv:
    from dataset import dataset
    dataset.load_voxels(device)