Example #1
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np

from autoencoder import Autoencoder, LATENT_CODE_SIZE
from config import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

from image_loader import ImageDataset
dataset = ImageDataset(quality=(1, 2))
BATCH_SIZE = 256

data_loader = DataLoader(dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False,
                         num_workers=8)

autoencoder = Autoencoder(is_variational=USE_VARIATIONAL_AUTOENCODER)
autoencoder.load_state_dict(torch.load(AUTOENCODER_FILENAME, map_location=device))
autoencoder.to(device)  # batches are moved to `device` below, so the model must be too
autoencoder.eval()

latent_codes = np.zeros((len(dataset), LATENT_CODE_SIZE), dtype=np.float32)
position = 0

with torch.no_grad():
    for batch in tqdm(data_loader):
        current = autoencoder.encode(batch.to(device))
        latent_codes[position:position + current.shape[0], :] = current.cpu().numpy()
        position += current.shape[0]  # advance the write offset past this batch
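# The snippet ends here in the original. Since Example #2 below loads the
# codes from 'data/latent_codes.npy', a plausible (assumed) final step is:
np.save('data/latent_codes.npy', latent_codes)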
Example #2
import json
import os

import numpy as np
from PIL import Image

latent_codes = np.load('data/latent_codes.npy')
codes = np.load('data/latent_codes_embedded_moved.npy')
TILE_FILE_FORMAT = 'data/tiles/{:d}/{:d}/{:d}.jpg'

DEPTH_OFFSET = 8

TILE_SIZE = 256
IMAGE_SIZE = 128
TILE_DEPTH = 8
SHADOW_RADIUS = 12
SHADOW_VALUE = 0.8

from image_loader import ImageDataset
dataset = ImageDataset()

codes_by_depth = []
hashes_by_depth = []


def create_tile(depth, x, y):
    # Render one map tile at this zoom depth; skip work if it already exists.
    tile_file_name = TILE_FILE_FORMAT.format(depth + DEPTH_OFFSET, x, y)
    if os.path.exists(tile_file_name):
        return

    tile = Image.new("RGB", (TILE_SIZE, TILE_SIZE), (255, 255, 255))
    is_empty = True

    if depth < TILE_DEPTH:
        for a in range(2):
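            # The original function is cut off here; everything below is an
            # assumed completion of the standard quadtree tile pattern, not
            # recovered code: recurse into the four child tiles and composite
            # them into this tile at half size.
            for b in range(2):
                child_x, child_y = x * 2 + a, y * 2 + b
                create_tile(depth + 1, child_x, child_y)
                child_file_name = TILE_FILE_FORMAT.format(
                    depth + 1 + DEPTH_OFFSET, child_x, child_y)
                if os.path.exists(child_file_name):
                    child = Image.open(child_file_name).resize(
                        (TILE_SIZE // 2, TILE_SIZE // 2), Image.LANCZOS)
                    tile.paste(child, (a * TILE_SIZE // 2, b * TILE_SIZE // 2))
                    is_empty = False
    else:
        # At the deepest level the original presumably draws the embedded
        # images from `codes` directly; that part is not recoverable here.
        pass

    if not is_empty:
        os.makedirs(os.path.dirname(tile_file_name), exist_ok=True)
        tile.save(tile_file_name)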
Example #3
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from skimage import io
import numpy as np

from autoencoder import Autoencoder, LATENT_CODE_SIZE
from config import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

from image_loader import ImageDataset

dataset = ImageDataset(return_hashes=True)

SAMPLE_SIZE = 400
indices = [int(i / SAMPLE_SIZE * len(dataset)) for i in range(SAMPLE_SIZE)]  # evenly spaced indices across the dataset
dataset.hashes = [dataset.hashes[i] for i in indices]

data_loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)

autoencoder = Autoencoder(is_variational=USE_VARIATIONAL_AUTOENCODER)
autoencoder.load_state_dict(torch.load(AUTOENCODER_FILENAME, map_location=device))
autoencoder.to(device)  # match the device the images are moved to
autoencoder.eval()

STEPS = 5

with torch.no_grad():
    for sample in tqdm(data_loader):
        image, image_hash = sample  # avoid shadowing the built-in hash()
        image_hash = image_hash[0]
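        # The original loop body ends here. Everything below is an assumed
        # continuation, not part of the original: decode STEPS frames that
        # interpolate between this image's latent code and a random code,
        # saving each frame under the sample's hash via skimage.io.
        code = autoencoder.encode(image.to(device))
        random_code = torch.randn_like(code)
        for step in range(STEPS):
            alpha = step / (STEPS - 1)
            mixed = code * (1 - alpha) + random_code * alpha
            frame = autoencoder.decode(mixed).squeeze(0)
            frame = frame.permute(1, 2, 0).clamp(0, 1).cpu().numpy()
            io.imsave('data/interpolation/{:s}_{:d}.jpg'.format(image_hash, step),
                      (frame * 255).astype(np.uint8))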
Example #4
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import utils

from tqdm import tqdm

from autoencoder import Autoencoder

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
AUTOENCODER_FILENAME = 'trained_models/autoencoder.to'

from image_loader import ImageDataset
dataset = ImageDataset(return_hashes=True)

data_loader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)

autoencoder = Autoencoder()
autoencoder.load_state_dict(torch.load(AUTOENCODER_FILENAME, map_location=device))
autoencoder.to(device)  # images are moved to `device` below
autoencoder.eval()

with torch.no_grad():
    for sample in tqdm(data_loader):
        image, image_hash = sample  # avoid shadowing the built-in hash()
        image_hash = image_hash[0]

        # the DataLoader already adds the batch dimension, so no unsqueeze is needed
        output = autoencoder.decode(autoencoder.encode(image.to(device)))
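        # The original snippet ends here. A hedged guess at the final step
        # (not in the original), using the torchvision.utils import above to
        # save the input next to its reconstruction as one image grid:
        comparison = torch.cat([image.to(device), output], dim=0)
        utils.save_image(comparison,
                         'data/reconstruction/{:s}.jpg'.format(image_hash))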
Example #5
from itertools import count
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import os
from collections import deque

from autoencoder import Autoencoder
from config import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

from image_loader import ImageDataset
dataset = ImageDataset(quality=2)
BATCH_SIZE = 32

data_loader = DataLoader(dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=True,
                         num_workers=4)

autoencoder = Autoencoder(is_variational=USE_VARIATIONAL_AUTOENCODER).to(device)

if os.path.exists(AUTOENCODER_FILENAME):
    print("Found autoencoder model, resuming training on existing model.")
    autoencoder.load_state_dict(
        torch.load(AUTOENCODER_FILENAME, map_location=device), strict=False)
else:
    print("Found no autoencoder model, training a new one.")