Example #1
def create_image_sequence():
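    """Render a latent-space spline interpolation as numbered PNG frames in images/."""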
    ensure_directory('images')
    frame_index = 0
    viewer = MeshRenderer(size=1080, start_thread=False)
    progress_bar = tqdm(total=SAMPLE_COUNT * TRANSITION_FRAMES)

    for sample_index in range(SAMPLE_COUNT):
        for step in range(TRANSITION_FRAMES):
            code = torch.tensor(
                spline(float(sample_index) + step / TRANSITION_FRAMES),
                dtype=torch.float32,
                device=device)
            if ROTATE_MODEL:
                viewer.rotation = (
                    147 + frame_index /
                    (SAMPLE_COUNT * TRANSITION_FRAMES) * 360 * 6, 40)
            viewer.set_mesh(
                sdf_net.get_mesh(code,
                                 voxel_resolution=128,
                                 sphere_only=False,
                                 level=SURFACE_LEVEL))
            image = viewer.get_image(flip_red_blue=True)
            cv2.imwrite("images/frame-{:05d}.png".format(frame_index), image)
            frame_index += 1
            progress_bar.update()

    print("\n\nUse this command to create a video:\n")
    print(
        'ffmpeg -framerate 30 -i images/frame-%05d.png -c:v libx264 -profile:v high -crf 19 -pix_fmt yuv420p video.mp4'
    )
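Example #1 relies on an ensure_directory helper that is not shown in these snippets. A minimal sketch, assuming it only needs to create the output folder if it does not already exist (the project's real helper may differ):

import os

def ensure_directory(directory):
    # Create the target directory (and any parents) if it is missing,
    # so that cv2.imwrite has somewhere to put the frames.
    os.makedirs(directory, exist_ok=True)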
Example #2
class ImageGrid():
    def __init__(self,
                 width,
                 height=1,
                 cell_width=3,
                 cell_height=None,
                 margin=0.2,
                 create_viewer=True,
                 crop=True):
        print("Plotting...")
        self.width = width
        self.height = height
        cell_height = cell_height if cell_height is not None else cell_width

        self.figure, self.axes = plt.subplots(height,
                                              width,
                                              figsize=(width * cell_width,
                                                       height * cell_height),
                                              gridspec_kw={
                                                  'left': 0,
                                                  'right': 1,
                                                  'top': 1,
                                                  'bottom': 0,
                                                  'wspace': margin,
                                                  'hspace': margin
                                              })
        self.figure.patch.set_visible(False)

        self.crop = crop
        if create_viewer:
            from rendering import MeshRenderer
            self.viewer = MeshRenderer(start_thread=False)
        else:
            self.viewer = None

    def set_image(self, image, x=0, y=0):
        if self.height > 1 and self.width > 1:
            cell = self.axes[y, x]
        else:
            cell = self.axes[x + y]
        cell.imshow(image)
        cell.axis('off')
        cell.patch.set_visible(False)

    def set_voxels(self, voxels, x=0, y=0, color=None):
        if color is not None:
            self.viewer.model_color = color
        self.viewer.set_voxels(voxels)
        image = self.viewer.get_image(crop=self.crop)
        self.set_image(image, x, y)

    def save(self, filename):
        plt.axis('off')
        extent = self.figure.get_window_extent().transformed(
            self.figure.dpi_scale_trans.inverted())
        plt.savefig(filename, bbox_inches=extent, dpi=400)
        if self.viewer is not None:
            self.viewer.delete_buffers()
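A short usage sketch for the ImageGrid class above, assuming matplotlib is imported as plt in the surrounding module and that the images passed in are plain numpy arrays; the grid size and filename are arbitrary:

import numpy as np

# Hypothetical usage: a 1x3 grid of placeholder grayscale images.
grid = ImageGrid(3, 1, cell_width=3, create_viewer=False)
for column in range(3):
    image = np.random.rand(128, 128)  # stand-in for a rendered image
    grid.set_image(image, x=column, y=0)
grid.save('example-grid.png')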
Example #3
        indices[BATCH_SIZE // 2:] = torch.tensor(
            np.random.choice(negative_indices, BATCH_SIZE // 2),
            device=device)

        sdf_net.zero_grad()
        predicted_sdf = sdf_net(points[indices, :], latent_code)
        batch_sdf = sdf[indices]
        loss = torch.mean(torch.abs(predicted_sdf - batch_sdf))
        loss.backward()
        optimizer.step()

        if loss.item() < error_targets[image_index]:
            try:
                viewer.set_mesh(
                    sdf_net.get_mesh(latent_code[0, :],
                                     voxel_resolution=64,
                                     raise_on_empty=True))
                if save_images:
                    image = viewer.get_image(flip_red_blue=True)
                    cv2.imwrite("images/frame-{:05d}.png".format(image_index),
                                image)
                image_index += 1
            except ValueError:
                pass
        step += 1
        print('Step {:04d}, Image {:04d}, loss: {:.6f}, target: {:.6f}'.format(
            step, image_index, loss.item(), error_targets[image_index]))
    except KeyboardInterrupt:
        viewer.stop()
        break
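The fragment in Example #3 starts in the middle of a reconstruction loop, so its surrounding structure is not shown. A hypothetical skeleton of the context it appears to assume, built only from the variables used above (positive_indices and the batch setup are guesses):

# Hypothetical outer structure; variable names and setup are assumptions.
step = 0
image_index = 0
while True:
    try:
        indices = torch.zeros(BATCH_SIZE, dtype=torch.int64, device=device)
        indices[:BATCH_SIZE // 2] = torch.tensor(
            np.random.choice(positive_indices, BATCH_SIZE // 2), device=device)
        # ... the fragment shown in Example #3 continues from here ...
    except KeyboardInterrupt:
        viewer.stop()
        break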
Example #4
def create_tsne_plot(codes,
                     voxels=None,
                     labels=None,
                     filename="plot.pdf",
                     indices=None):
    from sklearn.manifold import TSNE
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox

    width, height = 40, 52

    print("Calculating t-sne embedding...")
    tsne = TSNE(n_components=2)
    embedded = tsne.fit_transform(codes)

    print("Plotting...")
    fig, ax = plt.subplots()
    plt.axis('off')
    margin = 0.0128
    plt.margins(margin * height / width, margin)

    x = embedded[:, 0]
    y = embedded[:, 1]
    x = np.interp(x, (x.min(), x.max()), (0, 1))
    y = np.interp(y, (y.min(), y.max()), (0, 1))

    ax.scatter(x, y, c=labels, s=40, cmap='Set1')
    fig.set_size_inches(width, height)

    if voxels is not None:
        print("Creating images...")
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        for i in tqdm(range(voxels.shape[0])):
            viewer.set_voxels(voxels[i, :, :, :].cpu().numpy())
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]),
                                 frameon=True)
            ax.add_artist(box)

    if indices is not None:
        print("Creating images...")
        dataset_directories = open('data/models.txt', 'r').readlines()
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        import trimesh
        import logging
        logging.getLogger('trimesh').setLevel(1000000)
        for i in tqdm(range(len(indices))):
            mesh = trimesh.load(
                os.path.join(dataset_directories[indices[i]].strip(),
                             'model_normalized.obj'))
            viewer.set_mesh(mesh, center_and_scale=True)
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]),
                                 frameon=True)
            ax.add_artist(box)

    print("Saving PDF...")

    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    plt.savefig(filename, bbox_inches=extent, dpi=200)
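A hedged usage sketch for create_tsne_plot, assuming latent codes and the dataset object exist in the surrounding module as in the other examples; the output filename is arbitrary:

# Hypothetical call: embed the latent codes and colour the points by class label.
codes = latent_codes.detach().cpu().numpy()   # (N, latent_dim), assumed to exist
labels = dataset.labels.cpu().numpy()         # integer class labels, assumed
create_tsne_plot(codes, labels=labels, filename='plots/tsne.pdf')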
Example #5
        objects = (dataset.labels == label).nonzero()
        indices.append(objects[random.randint(0, objects.shape[0] - 1)].item())

    latent_codes = latent_codes[indices, :]

    plot = ImageGrid(COUNT, 2, create_viewer=False)
    dataset_directories = open('data/models.txt', 'r').readlines()

    for i in range(COUNT):
        mesh = trimesh.load(
            os.path.join(dataset_directories[indices[i]].strip(),
                         'model_normalized.obj'))
        viewer.set_mesh(mesh, center_and_scale=True)
        viewer.model_color = dataset.get_color(i)
        image = viewer.get_image(crop=True)
        plot.set_image(image, i, 0)

        image = render_image(sdf_net,
                             latent_codes[i, :],
                             color=dataset.get_color(i),
                             crop=True)
        plot.set_image(image, i, 1)
    viewer.delete_buffers()
    plot.save("plots/deepsdf-reconstruction-classes.pdf")

if "autoencoder" in sys.argv:
    from dataset import dataset as dataset
    dataset.load_voxels(device)
    dataset.load_labels(device)
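Example #5 also calls a render_image helper that is not defined in any of the snippets above. A minimal sketch of what such a helper could look like, using only the MeshRenderer and sdf_net calls already seen in these examples; the project's actual implementation may differ:

def render_image(sdf_net, latent_code, color=None, crop=True):
    # Hypothetical helper: decode a latent code to a mesh and render it,
    # reusing the module-level viewer the way Example #5 does.
    if color is not None:
        viewer.model_color = color
    viewer.set_mesh(sdf_net.get_mesh(latent_code, voxel_resolution=64))
    return viewer.get_image(crop=crop)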