Ejemplo n.º 1
0
    def __init__(self,
                 width,
                 height=1,
                 cell_width=3,
                 cell_height=None,
                 margin=0.2,
                 create_viewer=True,
                 crop=True):
        """Build a width x height grid of matplotlib axes.

        Each cell is cell_width x cell_height inches; cell_height defaults
        to cell_width (square cells). margin sets the spacing between cells,
        and create_viewer optionally attaches a MeshRenderer for voxel
        rendering. crop is stored and used when grabbing viewer images.
        """
        print("Plotting...")
        self.width = width
        self.height = height
        if cell_height is None:
            cell_height = cell_width

        figure_size = (width * cell_width, height * cell_height)
        # Use the whole figure area; margin only controls inter-cell gaps.
        grid_spec = {
            'left': 0,
            'right': 1,
            'top': 1,
            'bottom': 0,
            'wspace': margin,
            'hspace': margin
        }
        self.figure, self.axes = plt.subplots(height,
                                              width,
                                              figsize=figure_size,
                                              gridspec_kw=grid_spec)
        self.figure.patch.set_visible(False)

        self.crop = crop
        if not create_viewer:
            self.viewer = None
        else:
            from rendering import MeshRenderer
            self.viewer = MeshRenderer(start_thread=False)
Ejemplo n.º 2
0
    def show(self):
        """Display every item of this collection as voxels, 0.5 s apart."""
        import time

        from rendering import MeshRenderer
        from tqdm import tqdm

        renderer = MeshRenderer()
        for voxels in tqdm(self):
            renderer.set_voxels(voxels.numpy())
            time.sleep(0.5)
Ejemplo n.º 3
0
class ImageGrid():
    """A width x height grid of matplotlib cells for displaying images.

    With create_viewer=True the grid owns an offscreen-style MeshRenderer
    (start_thread=False) so voxel grids can be rendered directly into cells
    via set_voxels().
    """

    def __init__(self,
                 width,
                 height=1,
                 cell_width=3,
                 cell_height=None,
                 margin=0.2,
                 create_viewer=True,
                 crop=True):
        """Create the figure with height x width axes.

        cell_width / cell_height are the cell dimensions in inches;
        cell_height defaults to cell_width. margin is the relative spacing
        between cells. crop is forwarded to the viewer's get_image() call.
        """
        print("Plotting...")
        self.width = width
        self.height = height
        cell_height = cell_height if cell_height is not None else cell_width

        self.figure, self.axes = plt.subplots(height,
                                              width,
                                              figsize=(width * cell_width,
                                                       height * cell_height),
                                              gridspec_kw={
                                                  'left': 0,
                                                  'right': 1,
                                                  'top': 1,
                                                  'bottom': 0,
                                                  'wspace': margin,
                                                  'hspace': margin
                                              })
        self.figure.patch.set_visible(False)

        self.crop = crop
        if create_viewer:
            from rendering import MeshRenderer
            self.viewer = MeshRenderer(start_thread=False)
        else:
            self.viewer = None

    def set_image(self, image, x=0, y=0):
        """Show image in the cell at column x, row y."""
        # plt.subplots squeezes its result: a 2-D array for a true grid, a
        # 1-D array when only one dimension is > 1 (the other index is then
        # always 0, so x + y selects the right cell), and a bare Axes object
        # for a 1x1 grid. The original code crashed on the 1x1 case because
        # a single Axes is not subscriptable; handle it explicitly.
        if self.height > 1 and self.width > 1:
            cell = self.axes[y, x]
        elif self.height > 1 or self.width > 1:
            cell = self.axes[x + y]
        else:
            cell = self.axes
        cell.imshow(image)
        cell.axis('off')
        cell.patch.set_visible(False)

    def set_voxels(self, voxels, x=0, y=0, color=None):
        """Render a voxel grid with the attached viewer into cell (x, y).

        Requires create_viewer=True; color optionally overrides the
        viewer's model color before rendering.
        """
        if color is not None:
            self.viewer.model_color = color
        self.viewer.set_voxels(voxels)
        image = self.viewer.get_image(crop=self.crop)
        self.set_image(image, x, y)

    def save(self, filename):
        """Save the whole grid to filename (400 dpi) and release the
        viewer's buffers if a viewer was created."""
        plt.axis('off')
        extent = self.figure.get_window_extent().transformed(
            self.figure.dpi_scale_trans.inverted())
        plt.savefig(filename, bbox_inches=extent, dpi=400)
        if self.viewer is not None:
            self.viewer.delete_buffers()
Ejemplo n.º 4
0
def show_models():
    """Continuously morph through the latent-code spline in the viewer.

    Each sample takes TRANSITION_TIME seconds; runs until interrupted with
    Ctrl+C, which stops the viewer and returns.
    """
    TRANSITION_TIME = 2
    viewer = MeshRenderer()

    while True:
        for sample_index in range(SAMPLE_COUNT):
            try:
                start = time.perf_counter()
                end = start + TRANSITION_TIME
                while time.perf_counter() < end:
                    elapsed = time.perf_counter() - start
                    progress = min(elapsed / TRANSITION_TIME, 1.0)
                    if ROTATE_MODEL:
                        angle = 147 + (sample_index +
                                       progress) / SAMPLE_COUNT * 360 * 6
                        viewer.rotation = (angle, 40)
                    latent_code = torch.tensor(
                        spline(float(sample_index) + progress),
                        dtype=torch.float32,
                        device=device)
                    mesh = sdf_net.get_mesh(latent_code,
                                            voxel_resolution=64,
                                            sphere_only=False,
                                            level=SURFACE_LEVEL)
                    viewer.set_mesh(mesh)

            except KeyboardInterrupt:
                viewer.stop()
                return
Ejemplo n.º 5
0
def create_image_sequence():
    """Render the latent-space spline to numbered PNG frames in images/.

    Produces SAMPLE_COUNT * TRANSITION_FRAMES frames and prints the ffmpeg
    command that turns them into a video.
    """
    ensure_directory('images')
    viewer = MeshRenderer(size=1080, start_thread=False)
    total_frames = SAMPLE_COUNT * TRANSITION_FRAMES
    progress_bar = tqdm(total=total_frames)

    frame_index = 0
    for sample_index in range(SAMPLE_COUNT):
        for step in range(TRANSITION_FRAMES):
            latent_code = torch.tensor(
                spline(float(sample_index) + step / TRANSITION_FRAMES),
                dtype=torch.float32,
                device=device)
            if ROTATE_MODEL:
                # Six full turns over the whole sequence, starting at 147°.
                angle = 147 + frame_index / total_frames * 360 * 6
                viewer.rotation = (angle, 40)
            viewer.set_mesh(
                sdf_net.get_mesh(latent_code,
                                 voxel_resolution=128,
                                 sphere_only=False,
                                 level=SURFACE_LEVEL))
            image = viewer.get_image(flip_red_blue=True)
            cv2.imwrite("images/frame-{:05d}.png".format(frame_index), image)
            frame_index += 1
            progress_bar.update()

    print("\n\nUse this command to create a video:\n")
    print(
        'ffmpeg -framerate 30 -i images/frame-%05d.png -c:v libx264 -profile:v high -crf 19 -pix_fmt yuv420p video.mp4'
    )
Ejemplo n.º 6
0
import torch.optim as optim
import torch.nn.functional as F

import numpy as np
from itertools import count
import time
import random
from tqdm import tqdm
import sys

from model.sdf_net import SDFNet, LATENT_CODE_SIZE, LATENT_CODES_FILENAME
from util import device

# Create an interactive viewer unless the script was launched with "nogui".
if "nogui" not in sys.argv:
    from rendering import MeshRenderer
    viewer = MeshRenderer()

# Number of SDF sample points stored per model in the dataset tensors.
POINTCLOUD_SIZE = 200000

# Precomputed SDF sample positions and values for all models, concatenated
# into one tensor each and moved to the compute device.
points = torch.load('data/sdf_points.to').to(device)
sdf = torch.load('data/sdf_values.to').to(device)

MODEL_COUNT = points.shape[0] // POINTCLOUD_SIZE  # models in the dataset
BATCH_SIZE = 20000
SDF_CUTOFF = 0.1
# Clamp SDF targets in place so training focuses on values near the surface.
sdf.clamp_(-SDF_CUTOFF, SDF_CUTOFF)
signs = sdf.cpu().numpy() > 0  # per-point inside/outside boolean mask

# NOTE(review): presumably the latent-code regularization / noise scale —
# confirm where SIGMA is used further down in the file.
SIGMA = 0.01

LOG_FILE_NAME = "plots/sdf_net_training.csv"
Ejemplo n.º 7
0
               linewidths=1,
               edgecolors=(0.1, 0.1, 0.1, 1.0),
               zorder=3)

    fig.savefig(filename,
                bbox_inches=Bbox([[0, 0], [size_inches, size_inches]]),
                dpi=dpi)
    plt.close(fig)


# Per-frame latent codes, converted to a float tensor on the compute device.
frame_latent_codes = torch.tensor(frame_latent_codes,
                                  dtype=torch.float32,
                                  device=device)

print("Rendering...")
# start_thread=False: the renderer is driven from this thread (images are
# grabbed frame by frame below).
viewer = MeshRenderer(size=1080, start_thread=False)


def render_frame(frame_index):
    # Render the model for one video frame: set its color, decode its latent
    # code to a shape (VAE voxels or an SDF mesh), and grab the image.
    viewer.model_color = frame_colors[frame_index, :]
    with torch.no_grad():  # inference only — no gradients needed
        if USE_VAE:
            viewer.set_voxels(vae.decode(frame_latent_codes[frame_index, :]))
        else:
            viewer.set_mesh(
                sdf_net.get_mesh(frame_latent_codes[frame_index, :],
                                 voxel_resolution=128,
                                 sphere_only=True,
                                 level=SURFACE_LEVEL))
    image_mesh = viewer.get_image(flip_red_blue=True)
Ejemplo n.º 8
0
from model.sdf_net import SDFNet
from rendering import MeshRenderer
import sys
import cv2
# Overfitting a single shape: no latent code is needed, so its size is 0.
LATENT_CODE_SIZE = 0

MODEL_PATH = 'examples/chair.obj'

# Sample training data: points near the mesh surface with their SDF values.
mesh = trimesh.load(MODEL_PATH)
points, sdf = sample_sdf_near_surface(mesh)

save_images = 'save' in sys.argv

if save_images:
    viewer = MeshRenderer(start_thread=False, size=1080)
    ensure_directory('images')
else:
    viewer = MeshRenderer()

points = torch.tensor(points, dtype=torch.float32, device=device)
sdf = torch.tensor(sdf, dtype=torch.float32, device=device)
# Clamp targets so the network only learns values close to the surface.
sdf.clamp_(-0.1, 0.1)

sdf_net = SDFNet(latent_code_size=LATENT_CODE_SIZE).to(device)
optimizer = torch.optim.Adam(sdf_net.parameters(), lr=1e-5)

BATCH_SIZE = 20000
# Dummy (size-0) latent codes and index placeholders for the batch.
latent_code = torch.zeros((BATCH_SIZE, LATENT_CODE_SIZE), device=device)
indices = torch.zeros(BATCH_SIZE, dtype=torch.int64, device=device)
Ejemplo n.º 9
0
def create_tsne_plot(codes,
                     voxels=None,
                     labels=None,
                     filename="plot.pdf",
                     indices=None):
    """Save a 2-D t-SNE scatter plot of latent codes to `filename`.

    codes: latent vectors to embed, one row per object.
    voxels: optional voxel grids; when given, a rendered thumbnail of each
        object is placed at its embedded position.
    labels: optional per-object class labels, used for scatter/thumbnail colors.
    indices: optional dataset indices; when given, the meshes listed in
        data/models.txt are rendered as thumbnails instead of voxels.
    """
    from sklearn.manifold import TSNE
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox

    width, height = 40, 52  # figure size in inches

    print("Calculating t-sne embedding...")
    tsne = TSNE(n_components=2)
    embedded = tsne.fit_transform(codes)

    print("Plotting...")
    fig, ax = plt.subplots()
    plt.axis('off')
    margin = 0.0128
    plt.margins(margin * height / width, margin)

    # Normalize both embedding axes to [0, 1].
    x = embedded[:, 0]
    y = embedded[:, 1]
    x = np.interp(x, (x.min(), x.max()), (0, 1))
    y = np.interp(y, (y.min(), y.max()), (0, 1))

    ax.scatter(x, y, c=labels, s=40, cmap='Set1')
    fig.set_size_inches(width, height)

    if voxels is not None:
        print("Creating images...")
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        for i in tqdm(range(voxels.shape[0])):
            viewer.set_voxels(voxels[i, :, :, :].cpu().numpy())
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]),
                                 frameon=True)
            ax.add_artist(box)

    if indices is not None:
        print("Creating images...")
        # Close the file handle instead of leaking it.
        with open('data/models.txt', 'r') as models_file:
            dataset_directories = models_file.readlines()
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        import trimesh
        import logging
        logging.getLogger('trimesh').setLevel(1000000)  # silence trimesh
        for i in tqdm(range(len(indices))):
            # BUG FIX: the original indexed with the undefined name `index`
            # (NameError); use the dataset index of the i-th object.
            mesh = trimesh.load(
                os.path.join(dataset_directories[indices[i]].strip(),
                             'model_normalized.obj'))
            viewer.set_mesh(mesh, center_and_scale=True)
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]),
                                 frameon=True)
            ax.add_artist(box)

    print("Saving PDF...")

    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    plt.savefig(filename, bbox_inches=extent, dpi=200)
Ejemplo n.º 10
0
    for i in range(COUNT):
        plot.set_voxels(voxels[i, :, :, :], i, 0, color=dataset.get_color(i))
        plot.set_voxels(reconstructed_vae[i, :, :, :], i, 1)

    plot.save("plots/vae-reconstruction-classes.pdf")

if "autodecoder-classes" in sys.argv:
    # Plot one randomly chosen example object per class label, decoded from
    # the autodecoder's learned latent codes.
    from dataset import dataset as dataset
    dataset.load_labels(device='cpu')
    from rendering.raymarching import render_image
    from rendering import MeshRenderer
    import logging
    logging.getLogger('trimesh').setLevel(1000000)  # silence trimesh logging

    viewer = MeshRenderer(start_thread=False)

    COUNT = dataset.label_count

    sdf_net, latent_codes = load_sdf_net(return_latent_codes=True)
    # Pick one random object index for every class label.
    indices = []
    for label in range(COUNT):
        objects = (dataset.labels == label).nonzero()
        indices.append(objects[random.randint(0, objects.shape[0] - 1)].item())

    # Keep only the latent codes of the chosen objects.
    latent_codes = latent_codes[indices, :]

    plot = ImageGrid(COUNT, 2, create_viewer=False)
    dataset_directories = directories = open('data/models.txt',
                                             'r').readlines()
Ejemplo n.º 11
0
import torch
import time
import numpy as np
import sys

from rendering import MeshRenderer
from model.gan import Generator, LATENT_CODE_SIZE
from util import device, standard_normal_distribution

# Load the GAN generator in inference mode; "wgan" on the command line
# switches to the Wasserstein-GAN checkpoint file.
generator = Generator()
if "wgan" in sys.argv:
    generator.filename = "wgan-generator.to"
generator.load()
generator.eval()

viewer = MeshRenderer()

# NOTE(review): presumably the number of interpolation steps between two
# latent codes — confirm against the loop further down in the file.
STEPS = 20

TRANSITION_TIME = 0.4  # seconds spent morphing between models
WAIT_TIME = 0.8  # seconds to hold each model


def get_random():
    """Sample a random latent code from the standard normal distribution."""
    return standard_normal_distribution.sample(
        sample_shape=(LATENT_CODE_SIZE, )).to(device)


# State for the morphing loop: the code being left and the one approached.
previous_model = None
next_model = get_random()
Ejemplo n.º 12
0
import random
import numpy as np
import sys

from rendering import MeshRenderer
from model.autoencoder import Autoencoder, LATENT_CODE_SIZE
from util import device
from datasets import VoxelDataset

# 32^3 voxel grids of chairs, one .npy file per model.
dataset = VoxelDataset.glob('data/chairs/voxels_32/**.npy')

# "classic" on the command line selects a plain (non-variational) autoencoder.
autoencoder = Autoencoder(is_variational='classic' not in sys.argv)
autoencoder.load()
autoencoder.eval()

viewer = MeshRenderer()

# NOTE(review): presumably the number of interpolation steps between two
# latent codes — confirm against the loop further down in the file.
STEPS = 40

SHAPE = (LATENT_CODE_SIZE, )  # shape of one latent code sample

TRANSITION_TIME = 1.2  # seconds spent morphing between models
WAIT_TIME = 1.2  # seconds to hold each model

# "sample" on the command line: draw codes from the measured latent
# distribution instead of encoding dataset items directly.
SAMPLE_FROM_LATENT_DISTRIBUTION = 'sample' in sys.argv


def get_latent_distribution():
    print("Calculating latent distribution...")
    indices = random.sample(list(range(len(dataset))), min(1000, len(dataset)))
    voxels = torch.stack([dataset[i] for i in indices]).to(device)