import imageio
import numpy as np
import sys
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime

from load_dataset import load_train_patch, load_val_data
from model import PUNET
import utils
import vgg

# Processing command arguments
dataset_dir, model_dir, result_dir, vgg_dir, dslr_dir, phone_dir,\
    arch, LEVEL, inst_norm, num_maps_base, restore_iter, patch_w, patch_h,\
        batch_size, train_size, learning_rate, eval_step, num_train_iters, save_mid_imgs = \
            utils.process_command_args(sys.argv)

# Defining the size of the input and target image patches
PATCH_WIDTH, PATCH_HEIGHT = patch_w//2, patch_h//2

DSLR_SCALE = float(1) / (2 ** (max(LEVEL,0) - 1))
TARGET_WIDTH = int(PATCH_WIDTH * DSLR_SCALE)
TARGET_HEIGHT = int(PATCH_HEIGHT * DSLR_SCALE)
TARGET_DEPTH = 3
TARGET_SIZE = TARGET_WIDTH * TARGET_HEIGHT * TARGET_DEPTH
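# Worked example (illustrative values, not from the original script): with
# patch_w = patch_h = 256 and LEVEL = 3, PATCH_WIDTH = PATCH_HEIGHT = 128,
# DSLR_SCALE = 1 / 2**2 = 0.25, so TARGET_WIDTH = TARGET_HEIGHT = 32 and
# TARGET_SIZE = 32 * 32 * 3 = 3072.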

np.random.seed(0)

# Defining the model architecture
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
    time_start = datetime.now()
Example #2
import sys
from torch.autograd import Variable
from torch.utils.data import DataLoader
import numpy
from load_data import *
import loss
import utils

# defining size of the training image patches

PATCH_WIDTH = 100
PATCH_HEIGHT = 100
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments

phone, batch_size, train_size, learning_rate, num_train_iters, \
w_content, w_color, w_texture, w_tv, w_ssim,\
dped_dir, vgg_dir, eval_step = utils.process_command_args(sys.argv)

#np.random.seed(0)

# loading training and test data

print("Loading test data...")
test_data, test_answ = load_test_data(phone, dped_dir, PATCH_SIZE)
print("Test data was loaded\n")

print("Loading training data...")
train_data = trainset(dped_dir + '/iphone/', dped_dir + '/canon/')
#train_data  = trainset(dped_dir + '/train/raw/', dped_dir + '/train/target/')
trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
print("Training data was loaded\n")
Example #3
# <resolution> = {orig, high, medium, small, tiny}
# <use_gpu> = {true, false}
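# Example invocation (hypothetical script name; the exact argument syntax is
# whatever utils.process_command_args expects):
#   python test_model.py iphone orig false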

from scipy import misc
import numpy as np
import tensorflow as tf
from model import resnet
import utils
import os
import sys
from PIL import Image
import pywt
import cv2

# process command arguments
phone, resolution, use_gpu = utils.process_command_args(sys.argv)

# get all available image resolutions
res_sizes = utils.get_resolutions()

# get the specified image resolution
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(
    res_sizes, phone, resolution)

# disable gpu if specified
config = tf.compat.v1.ConfigProto(
    device_count={'GPU': 0}) if use_gpu == "false" else None

# create placeholders for input images
x_ = tf.compat.v1.placeholder(tf.float32, [None, IMAGE_SIZE])
x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
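# A minimal sketch of the usual next step, not part of the original excerpt:
# apply the imported resnet() generator (assumed to map the input batch to an
# enhanced image tensor) and run it once. The checkpoint path and the
# zero-filled input are placeholders.
enhanced = resnet(x_image)

with tf.compat.v1.Session(config=config) as sess:
    saver = tf.compat.v1.train.Saver()
    saver.restore(sess, "models/" + phone)  # hypothetical checkpoint location
    image_batch = np.zeros((1, IMAGE_SIZE), dtype=np.float32)  # stand-in for a real photo
    enhanced_image = sess.run(enhanced, feed_dict={x_: image_batch})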
Example #4
import sys
import numpy as np
import torch
from torchvision import transforms
from load_data import LoadData, LoadVisualData
from msssim import MSSSIM
from model import PyNET
from vgg import vgg_19
from utils import normalize_batch, process_command_args

to_image = transforms.Compose([transforms.ToPILImage()])

np.random.seed(0)
torch.manual_seed(0)

# Processing command arguments

level, batch_size, learning_rate, restore_epoch, num_train_epochs, dataset_dir = process_command_args(
    sys.argv)
dslr_scale = float(1) / (2**(level - 1))
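# e.g. (illustrative) level = 2 gives dslr_scale = 1 / 2**1 = 0.5, i.e. the
# DSLR targets are used at half the full resolution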

# Dataset size

TRAIN_SIZE = 46839
TEST_SIZE = 1204


def train_model():

    torch.backends.cudnn.deterministic = True
    device = torch.device("cuda")

    print("CUDA visible devices: " + str(torch.cuda.device_count()))
    print("CUDA Device Name: " + str(torch.cuda.get_device_name(device)))
Example #5
import sys
import numpy as np
import utils
import vgg
from load_dataset import load_test_data, load_batch
from ssim import MultiScaleSSIM

# defining size of the training image patches

PATCH_WIDTH = 100
PATCH_HEIGHT = 100
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments

phone, batch_size, train_size, learning_rate, num_train_iters, \
w_content, w_color, w_texture, w_tv, \
dped_dir, vgg_dir, eval_step, run, kernel_size, depth, blocks, \
parametric, s_conv, convdeconv = utils.process_command_args(sys.argv)

dirname = phone if run == "" else phone + "_" + run

np.random.seed(0)

# loading training and test data

print("Loading testing data...")
test_data, test_answ = load_test_data(phone, dped_dir, PATCH_SIZE)

print("Loading training data...")
train_data, train_answ = load_batch(phone, dped_dir, train_size, PATCH_SIZE)
Example #6
import sys
import numpy as np

from load_dataset import load_test_data, load_batch
import models
import utils

# defining size of the training image patches

PATCH_WIDTH = 360
PATCH_HEIGHT = 240
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments

batch_size, train_size, learning_rate, num_train_iters, \
w_color, dped_dir, eval_step = utils.process_command_args(sys.argv)

np.random.seed(0)

# loading training and test data

print("Loading test data...")
test_data, test_answ = load_test_data(PATCH_SIZE)
print("Test data was loaded\n")

print("Loading training data...")
train_data, train_answ = load_batch(dped_dir, train_size, PATCH_SIZE)
print("Training data was loaded\n")

TEST_SIZE = test_data.shape[0]
num_test_batches = int(test_data.shape[0] / batch_size)
Example #7
import sys
import numpy as np

# load_* helpers assumed to live in load_dataset, as in the other examples
from load_dataset import load_test_data, load_batch, load_valid_data
from ssim import MultiScaleSSIM
import models
import utils
import vgg
from lpips.lpips_tf import lpips
# defining size of the training image patches

PATCH_WIDTH = 100
PATCH_HEIGHT = 100
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments

phone, batch_size, train_size, starter_learning_rate, num_train_iters, \
w_content, w_color, w_gray, w_gradient, w_tv, w_laplacian, \
dped_dir, vgg_dir, eval_step, log_step, name = utils.process_command_args(sys.argv)

np.random.seed(0)

# loading training and test data

print("Loading test data...")
test_data, test_answ = load_test_data(phone, dped_dir, PATCH_SIZE)
print("Test data was loaded\n")

print("Loading training data...")
train_data, train_answ, num_of_image = load_batch(phone, dped_dir, train_size, PATCH_SIZE)
print("Training data was loaded\n")

print("Loading validation data...")
valid_data, valid_answ = load_valid_data(phone, dped_dir, PATCH_SIZE, num_of_image)