Example #1
def __init__(self, scale=4, num_res_blocks=32, pretrained_weights=None, name=None):
    self.scale = scale
    self.num_res_blocks = num_res_blocks
    # Build the WDSR-B network and compile it with weight-normalized Adam,
    # MAE as the loss and PSNR as the reported metric.
    self.model = wdsr_b(scale=scale, num_res_blocks=num_res_blocks)
    self.model.compile(optimizer=AdamWithWeightsNormalization(lr=0.001),
                       loss=self.mae, metrics=[self.psnr])
    if pretrained_weights is not None:
        self.model.load_weights(pretrained_weights)
        print("[OK] weights loaded.")
    self.data_loader = DataLoader(scale=scale, crop_size=256)
    self.pretrained_weights = pretrained_weights
    self.default_weights_save_path = ('/content/Anime-Super-Resolution/weights/wdsr-b-'
                                      + str(self.num_res_blocks) + '-x' + str(self.scale) + '.h5')
    self.name = name
Example #2
def __init__(self,
             scale=4,
             num_res_blocks=32,
             pretrained_weights=None,
             name=None):
    # def __init__(self, scale=4, num_res_blocks=48, pretrained_weights=None, name=None):
    self.scale = scale
    self.num_res_blocks = num_res_blocks
    # Build the WDSR-B network; this variant uses a smaller learning rate (1e-4)
    # and a larger crop size than Example #1.
    self.model = wdsr_b(scale=scale, num_res_blocks=num_res_blocks)
    self.model.compile(optimizer=AdamWithWeightsNormalization(lr=0.0001),
                       loss=self.mae,
                       metrics=[self.psnr])
    # Alternative loss configurations kept from the original source:
    # self.model.compile(optimizer=AdamWithWeightsNormalization(lr=0.0001), loss=self.ssim_loss, metrics=[self.psnr])
    # self.model.compile(optimizer=AdamWithWeightsNormalization(lr=0.001), loss=self.psnr_loss, metrics=[self.psnr])
    if pretrained_weights is not None:
        self.model.load_weights(pretrained_weights)
        print("[OK] weights loaded.")
    self.data_loader = DataLoader(scale=scale, crop_size=1024)
    self.pretrained_weights = pretrained_weights
    self.default_weights_save_path = ('experiments/ckpt/wdsr-b-'
                                      + str(self.num_res_blocks) + '-x' + str(self.scale) + '.h5')
    self.name = name
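Both __init__ snippets above come from a trainer-style wrapper class around wdsr_b; the class name itself is not shown in these excerpts. A minimal usage sketch, assuming a hypothetical class name WDSRTrainer for the enclosing class:

# Hypothetical class name; the excerpts above only show its __init__ method.
trainer = WDSRTrainer(scale=4, num_res_blocks=32,
                      pretrained_weights='./weights/wdsr-b-32-x4.h5')
# With these arguments, default_weights_save_path ends in 'wdsr-b-32-x4.h5',
# matching the checkpoint file loaded in Example #3 below.
trainer.model.summary()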
"""

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from keras import backend as K
from keras.losses import mean_absolute_error, mean_squared_error
from keras.models import load_model
from keras.optimizers import Adam
import random
import os
from model import wdsr_a, wdsr_b
from utils import DataLoader

model = wdsr_b(scale=4, num_res_blocks=32)
model.load_weights('./weights/wdsr-b-32-x4.h5')

data_loader = DataLoader(scale=4)


def evaluate_test(model,
                  setpath='datasets/train',
                  difficulty='easy',
                  name='evaluate'):
    # Pick a random image from the set and treat it as the HR ground truth;
    # the LR input size is derived from the data loader's scale factor.
    images = data_loader.search(setpath)
    image = random.choice(images)
    hr = data_loader.imread(image)
    resize = (hr.size[0] // data_loader.scale, hr.size[1] // data_loader.scale)
    # Random degradation parameters for synthesizing the LR test input.
    hidden_scale = random.uniform(1, 3)
    radius = random.uniform(1, 3)
Example #4
# Imports used by this example; `utils` provides pair() and normalization(),
# and `wdsr_b` comes from the same model module as in Example #3.
import os
import numpy as np
import utils
from keras.losses import mean_absolute_error
from model import wdsr_b


def data_generator(path, batch_size=8, input_shape=96, scale=2):
    '''data generator for fit_generator'''
    fns = os.listdir(path)
    n = len(fns)
    i = 0
    while True:
        lrs, hrs = [], []
        for b in range(batch_size):
            if i == 0:
                # Reshuffle the file list at the start of every pass over the dataset.
                np.random.shuffle(fns)
            fn = os.path.join(path, fns[i])
            # Cut a matching LR/HR crop pair and normalize pixel values.
            lr, hr = utils.pair(fn, input_shape, scale)
            lr = utils.normalization(lr)
            hr = utils.normalization(hr)
            lrs.append(lr)
            hrs.append(hr)
            i = (i + 1) % n
        yield np.array(lrs), np.array(hrs)
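A hypothetical check of a single batch from data_generator, assuming utils.pair returns LR crops of size input_shape and HR crops of size input_shape * scale:

gen = data_generator('./datasets/train/', batch_size=8, input_shape=96, scale=2)
lrs, hrs = next(gen)
# Under the assumption above: lrs.shape == (8, 96, 96, 3), hrs.shape == (8, 192, 192, 3)
print(lrs.shape, hrs.shape)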


# `psnr` is a custom metric defined elsewhere in the source project and is not
# shown in this excerpt (see the sketch after this example).
model = wdsr_b()
model.compile(optimizer='adam',
              loss=mean_absolute_error,
              metrics=[psnr])
model.fit_generator(data_generator('./datasets/train/'),
                    steps_per_epoch=50,
                    epochs=1250)
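The training call above references a psnr metric that is not defined in the excerpt. A minimal sketch of such a metric, assuming pixel values are normalized to [0, 1] by utils.normalization (the project's own definition may differ):

import tensorflow as tf

def psnr(y_true, y_pred):
    # Peak signal-to-noise ratio; max_val=1.0 assumes inputs scaled to [0, 1].
    return tf.image.psnr(y_true, y_pred, max_val=1.0)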