def show(self):
    """Display every item of this collection in a MeshRenderer window,
    advancing to the next voxel volume every 0.5 seconds.

    Assumes each item yielded by ``self`` is a tensor-like object with a
    ``.numpy()`` method (presumably a torch tensor — TODO confirm).
    """
    from rendering import MeshRenderer
    import time
    from tqdm import tqdm

    renderer = MeshRenderer()
    for sample in tqdm(self):
        renderer.set_voxels(sample.numpy())
        time.sleep(0.5)
class ImageGrid():
    """A width x height grid of matplotlib image cells, optionally backed
    by a ``rendering.MeshRenderer`` so voxel volumes can be rendered
    straight into grid cells.

    The figure fills its whole canvas (no outer margins); ``margin``
    controls only the spacing between cells.
    """

    def __init__(self, width, height=1, cell_width=3, cell_height=None,
                 margin=0.2, create_viewer=True, crop=True):
        """Create the figure and axes grid.

        :param width: number of columns
        :param height: number of rows
        :param cell_width: width of one cell in inches
        :param cell_height: height of one cell in inches; defaults to
            ``cell_width`` (square cells)
        :param create_viewer: whether to create a MeshRenderer for
            ``set_voxels`` (skip when only ``set_image`` is used)
        :param crop: passed to the renderer's ``get_image`` when rendering voxels
        """
        print("Plotting...")
        self.width = width
        self.height = height
        cell_height = cell_height if cell_height is not None else cell_width
        self.figure, self.axes = plt.subplots(
            height, width,
            figsize=(width * cell_width, height * cell_height),
            gridspec_kw={'left': 0, 'right': 1, 'top': 1, 'bottom': 0,
                         'wspace': margin, 'hspace': margin})
        self.figure.patch.set_visible(False)
        self.crop = crop
        if create_viewer:
            from rendering import MeshRenderer
            self.viewer = MeshRenderer(start_thread=False)
        else:
            self.viewer = None

    def set_image(self, image, x=0, y=0):
        """Show ``image`` in the cell at column ``x``, row ``y``."""
        # plt.subplots squeezes its return value: a 2-D array when both
        # dimensions are > 1, a 1-D array when exactly one dimension is 1,
        # and a bare Axes object for a 1x1 grid.
        if self.height > 1 and self.width > 1:
            cell = self.axes[y, x]
        elif self.height == 1 and self.width == 1:
            # BUG FIX: a 1x1 grid yields a bare Axes; the old
            # ``self.axes[x + y]`` raised TypeError here.
            cell = self.axes
        else:
            # 1-D case: exactly one of x, y is always 0, so x + y is the
            # index along the non-trivial dimension.
            cell = self.axes[x + y]
        cell.imshow(image)
        cell.axis('off')
        cell.patch.set_visible(False)

    def set_voxels(self, voxels, x=0, y=0, color=None):
        """Render a voxel volume with the internal viewer and place the
        resulting image in cell (x, y). Requires ``create_viewer=True``.

        :param color: optional model color forwarded to the renderer
        """
        if color is not None:
            self.viewer.model_color = color
        self.viewer.set_voxels(voxels)
        image = self.viewer.get_image(crop=self.crop)
        self.set_image(image, x, y)

    def save(self, filename):
        """Save the figure to ``filename`` at 400 dpi and release the
        renderer's GPU buffers if a viewer was created."""
        plt.axis('off')
        extent = self.figure.get_window_extent().transformed(
            self.figure.dpi_scale_trans.inverted())
        plt.savefig(filename, bbox_inches=extent, dpi=400)
        if self.viewer is not None:
            self.viewer.delete_buffers()
def create_tsne_plot(codes, voxels=None, labels=None, filename="plot.pdf",
                     indices=None):
    """Embed latent ``codes`` in 2-D with t-SNE and save a scatter plot.

    :param codes: array of latent vectors, one row per sample
    :param voxels: optional tensor of voxel volumes; when given, each point
        is decorated with a rendered image of its volume
    :param labels: per-sample class labels, used for scatter colors and
        (via ``dataset.get_color``) for render colors
    :param filename: output file for the plot
    :param indices: optional dataset indices; when given, each point is
        decorated with a render of the corresponding mesh listed in
        ``data/models.txt``
    """
    from sklearn.manifold import TSNE
    from matplotlib.offsetbox import OffsetImage, AnnotationBbox
    width, height = 40, 52

    print("Calculating t-sne embedding...")
    tsne = TSNE(n_components=2)
    embedded = tsne.fit_transform(codes)

    print("Plotting...")
    fig, ax = plt.subplots()
    plt.axis('off')
    margin = 0.0128
    plt.margins(margin * height / width, margin)

    # Normalize both embedding axes to [0, 1].
    x = embedded[:, 0]
    y = embedded[:, 1]
    x = np.interp(x, (x.min(), x.max()), (0, 1))
    y = np.interp(y, (y.min(), y.max()), (0, 1))
    ax.scatter(x, y, c=labels, s=40, cmap='Set1')
    fig.set_size_inches(width, height)

    if voxels is not None:
        print("Creating images...")
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        for i in tqdm(range(voxels.shape[0])):
            viewer.set_voxels(voxels[i, :, :, :].cpu().numpy())
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]), frameon=True)
            ax.add_artist(box)

    if indices is not None:
        print("Creating images...")
        # Close the file handle instead of leaking it.
        with open('data/models.txt', 'r') as model_list:
            dataset_directories = model_list.readlines()
        from rendering import MeshRenderer
        viewer = MeshRenderer(start_thread=False)
        import trimesh
        import logging
        logging.getLogger('trimesh').setLevel(1000000)
        for i in tqdm(range(len(indices))):
            # BUG FIX: the loop previously read ``dataset_directories[index]``
            # with ``index`` undefined (NameError); the intended lookup is
            # the dataset index of the i-th plotted sample.
            mesh = trimesh.load(
                os.path.join(dataset_directories[indices[i]].strip(),
                             'model_normalized.obj'))
            viewer.set_mesh(mesh, center_and_scale=True)
            viewer.model_color = dataset.get_color(labels[i])
            image = viewer.get_image(crop=True, output_size=128)
            box = AnnotationBbox(OffsetImage(image, zoom=0.5, cmap='gray'),
                                 (x[i], y[i]), frameon=True)
            ax.add_artist(box)

    print("Saving PDF...")
    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    plt.savefig(filename, bbox_inches=extent, dpi=200)
# Script section: render autoencoder reconstructions for 20 random dataset
# items as a (20 x 3) figure — original render | latent code bars | reconstruction.
# Relies on module-level names defined elsewhere in this file:
# random, torch, plt, sys, dataset, load_autoencoder.
from rendering import MeshRenderer
viewer = MeshRenderer(start_thread=False)

# Pick 20 random dataset entries.
indices = random.sample(list(range(dataset.size)), 20)
voxels = dataset.voxels[indices, :, :, :]
autoencoder = load_autoencoder()
print("Generating codes...")
with torch.no_grad():
    codes = autoencoder.encode(voxels)
    reconstructed = autoencoder.decode(codes).cpu().numpy()
    codes = codes.cpu().numpy()
print("Plotting...")
fig, axs = plt.subplots(len(indices), 3, figsize=(10, 32))
for i in range(len(indices)):
    # Column 0: render of the original voxel volume.
    viewer.set_voxels(voxels[i, :, :, :].cpu().numpy())
    image = viewer.get_image(output_size=512)
    axs[i, 0].imshow(image, cmap='gray')
    axs[i, 0].axis('off')
    # Column 1: bar chart of the latent code, clamped to a fixed y-range
    # so rows are visually comparable.
    axs[i, 1].bar(range(codes.shape[1]), codes[i, :])
    axs[i, 1].set_ylim((-3, 3))
    # Column 2: render of the decoded reconstruction.
    viewer.set_voxels(reconstructed[i, :, :, :])
    image = viewer.get_image(output_size=512)
    axs[i, 2].imshow(image, cmap='gray')
    axs[i, 2].axis('off')
plt.savefig("plots/autoencoder-examples.pdf", bbox_inches='tight', dpi=400)

# Start of the next CLI-selected section; its body presumably continues
# beyond this chunk — TODO confirm against the full file.
if "autoencoder_examples_2" in sys.argv: from dataset import dataset as dataset
def get_random():
    """Draw one fresh latent code from the standard normal prior on ``device``."""
    sample = standard_normal_distribution.sample(
        sample_shape=(LATENT_CODE_SIZE, ))
    return sample.to(device)


# Endless display loop: smoothly interpolate between successive random
# latent codes, rendering the generator output at each step.
previous_model = None
next_model = get_random()
for epoch in count():
    try:
        previous_model, next_model = next_model, get_random()
        for step in range(STEPS + 1):
            progress = step / STEPS
            # Snap exactly to the target on the final step; blend otherwise.
            if step == STEPS:
                model = next_model
            else:
                model = previous_model * (1 - progress) + next_model * progress
            viewer.set_voxels(generator(model).squeeze().detach().cpu())
            time.sleep(TRANSITION_TIME / STEPS)
        time.sleep(WAIT_TIME)
    except KeyboardInterrupt:
        viewer.stop()
        break
def get_random():
    """Return a latent code — sampled from the latent prior, or encoded
    from a randomly chosen dataset item, depending on configuration."""
    if SAMPLE_FROM_LATENT_DISTRIBUTION:
        return latent_distribution.sample(sample_shape=SHAPE).to(device)
    index = random.randint(0, len(dataset) - 1)
    return autoencoder.encode(dataset[index].to(device))


# Endless display loop: wall-clock-driven interpolation between successive
# latent codes, decoding and rendering as fast as frames complete.
previous_model = None
next_model = get_random()
for epoch in count():
    try:
        previous_model, next_model = next_model, get_random()
        start = time.perf_counter()
        deadline = start + TRANSITION_TIME
        while time.perf_counter() < deadline:
            elapsed = time.perf_counter() - start
            progress = min(elapsed / TRANSITION_TIME, 1.0)
            blended = previous_model * (1 - progress) + next_model * progress
            voxels = autoencoder.decode(blended).detach().cpu()
            viewer.set_voxels(voxels)
        time.sleep(WAIT_TIME)
    except KeyboardInterrupt:
        viewer.stop()
        break