Example #1
def main(_):
    with tf.Session() as sess:
        rdn = RDN(sess,
                  image_size=FLAGS.image_size,
                  is_train=FLAGS.is_train,
                  scale=FLAGS.scale,
                  c_dim=FLAGS.c_dim,
                  batch_size=FLAGS.batch_size,
                  test_img=FLAGS.test_img,
                  D=FLAGS.D,
                  C=FLAGS.C,
                  G=FLAGS.G,
                  G0=FLAGS.G0,
                  kernel_size=FLAGS.kernel_size)

        rdn.train(FLAGS)
Example #2
def main(_):
    rdn = RDN(tf.Session(),
              is_train=FLAGS.is_train,
              is_eval=FLAGS.is_eval,
              image_size=FLAGS.image_size,
              c_dim=FLAGS.c_dim,
              scale=FLAGS.scale,
              batch_size=FLAGS.batch_size,
              D=FLAGS.D,
              C=FLAGS.C,
              G=FLAGS.G,
              G0=FLAGS.G0,
              kernel_size=FLAGS.kernel_size)

    if rdn.is_train:
        rdn.train(FLAGS)
    else:
        if rdn.is_eval:
            rdn.eval(FLAGS)
        else:
            rdn.test(FLAGS)
Example #3
'''
Output the predicted images and save them
'''
from model import RDN
import skimage.io
import glob
import os
import numpy as np
from train import modelsavedir
if __name__ == '__main__':
    #xdir='../Data/Set5_test_LR'
    xdir = '../Data/Urban_test_LR'
    xlist = glob.glob(os.path.join(xdir, '*.png'))
    myRDN = RDN()
    #save_dir='./result'
    save_dir = './result2'
    modellist = glob.glob(modelsavedir + '*RDN*.hdf5')
    if len(modellist) > 0:
        modellist.sort(key=lambda x: float(x[len(modelsavedir) + 9:len(
            modelsavedir) + 13]))
        model = myRDN.load_weight(modellist[0])
        print('Loaded', modellist[0])
    else:
        model = myRDN.load_weight()
    # read the images
    for imgname in xlist:
        print(imgname)
        img = skimage.io.imread(imgname)
        Y = model.predict(np.array([img]), 1)[0]
        Y = np.clip(Y, 0, 255)
        Y = Y.astype(np.uint8)
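The loop above is cut off before the prediction is written anywhere. A minimal sketch of the missing save step, reusing the save_dir defined earlier and skimage.io.imsave (the output simply keeps the input file name):

        # write the upscaled prediction under save_dir, keeping the input file name
        os.makedirs(save_dir, exist_ok=True)
        skimage.io.imsave(os.path.join(save_dir, os.path.basename(imgname)), Y)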
Example #4
parser.add_argument('--crop_size', default=88, type=int, help='training images crop size')
parser.add_argument('--upscale_factor', default=2, type=int, choices=[2, 4, 8],
                    help='super resolution upscale factor')
parser.add_argument('--num_epochs', default=100, type=int, help='train epoch number')


if __name__ == '__main__':
    opt = parser.parse_args()
    
    CROP_SIZE = opt.crop_size
    UPSCALE_FACTOR = opt.upscale_factor
    NUM_EPOCHS = opt.num_epochs
    
    train_set = TrainDatasetFromFolder('VOC2012/train', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)
    train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=32, shuffle=True)
    netG = RDN(scale_factor=UPSCALE_FACTOR, num_channels=3, num_features=64,
               growth_rate=16, num_blocks=4, num_layers=6, dual_attention=True)
    netD = Discriminator()
    generator_criterion = GeneratorLoss()
    
    if torch.cuda.is_available():
        netG.cuda()
        netD.cuda()
        generator_criterion.cuda()
    
    optimizerG = optim.Adam(netG.parameters())
    optimizerD = optim.Adam(netD.parameters())
    
    results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': []}
    
    for epoch in range(1, NUM_EPOCHS + 1):
        train_bar = tqdm(train_loader)
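The example breaks off right after wrapping the loader in tqdm. A generic sketch of one possible GAN training step over train_bar, using only the objects defined above; the argument order of generator_criterion is an assumption, not the repository's actual loss signature:

        for data, target in train_bar:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()

            fake_img = netG(data)

            # discriminator step: real HR patches vs. detached generator output
            netD.zero_grad()
            d_loss = 1 - netD(target).mean() + netD(fake_img.detach()).mean()
            d_loss.backward()
            optimizerD.step()

            # generator step with the combined adversarial/content loss
            # (GeneratorLoss argument order is assumed here)
            netG.zero_grad()
            g_loss = generator_criterion(netD(fake_img).mean(), fake_img, target)
            g_loss.backward()
            optimizerG.step()

            # per-batch logging into the results dict defined above
            results['d_loss'].append(d_loss.item())
            results['g_loss'].append(g_loss.item())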
Example #5
init_lr = 1e-4


def scheduler(epoch):
    # lr = K.get_value(myrdn.model.optimizer.lr)
    lr = init_lr * 0.8**(epoch // 50)
    return lr


def gen(x, y):
    while True:
        yield (next(x), next(y))


if __name__ == '__main__':
    myrdn = RDN()
    lr_decay = LearningRateScheduler(scheduler, verbose=1)
    tfboard = TensorBoard()
    modelcp = ModelCheckpoint(modelsavedir +
                              '{epoch:04d}-RDN-{val_loss:.2f}-weights.hdf5',
                              verbose=1,
                              period=1,
                              save_weights_only=True,
                              save_best_only=True)
    gen_tx, gen_ty, gen_vx, gen_vy = get_train_data(Batch_size)
    train_gen = gen(gen_tx, gen_ty)
    valid_gen = gen(gen_vx, gen_vy)
    myrdn.setting_train()

    # load a previously saved model
    modellist = glob.glob(modelsavedir + '*RDN*.hdf5')
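The example stops after collecting previously saved checkpoints. A sketch of how the run could continue, resuming from the checkpoint with the lowest recorded val_loss (the sort mirrors Example #3) and then fitting with the callbacks defined above; steps_per_epoch, epochs, and validation_steps are placeholder values, not the project's real settings:

    if len(modellist) > 0:
        # sort by the val_loss encoded in the checkpoint file name (same trick as Example #3)
        modellist.sort(key=lambda x: float(x[len(modelsavedir) + 9:len(modelsavedir) + 13]))
        myrdn.load_weight(modellist[0])

    # placeholder step counts; the real values depend on the dataset size
    myrdn.model.fit_generator(train_gen,
                              steps_per_epoch=1000,
                              epochs=200,
                              validation_data=valid_gen,
                              validation_steps=100,
                              callbacks=[lr_decay, tfboard, modelcp])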
Example #6
import argparse
import os

import numpy as np
import scipy.misc
from PIL import Image

from model import RDN  # as in Example #8

parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="data/test5")
parser.add_argument("--imgsize", default=128, type=int)
parser.add_argument("--scale", default=4, type=int)
parser.add_argument("--globallayers", default=16, type=int)
parser.add_argument("--locallayers", default=8, type=int)
parser.add_argument("--featuresize", default=64, type=int)
parser.add_argument("--savedir", default="saved_models")
parser.add_argument("--outdir", default="out")
parser.add_argument("--image")

def predict(x, image):
    outputs = network.predict(x)
    scipy.misc.imsave(args.outdir + "/input_" + image, x)
    scipy.misc.imsave(args.outdir + "/output_" + image, outputs[0])

args = parser.parse_args()
if not os.path.exists(args.outdir):
    os.mkdir(args.outdir)
down_size = args.imgsize//args.scale
network = RDN(down_size, args.globallayers, args.locallayers, args.featuresize,
              scale=args.scale)
network.resume(args.savedir)
if args.image:
    x = Image.open(args.image).convert('RGB')
    predict(np.array(x), os.path.basename(args.image))
else:
    for filename in os.listdir(args.dataset):
        x = Image.open(args.dataset+'/'+filename).convert('RGB')
        predict(np.array(x), filename)

Example #7
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate, GaussianNoise, Lambda, ConvLSTM2D, Bidirectional
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Add
from keras.layers.advanced_activations import PReLU, LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.applications import VGG19
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.models import load_model
from keras.losses import mean_squared_error, mean_absolute_error
from keras.initializers import RandomNormal
import keras.backend as K
import datetime
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import sys
from data_loader import DataLoader
import h5py
from model import RDN
from glob import glob
if __name__ == '__main__':
    test = ['test_x3', 'test_x2', 'test_x4', 'test_x3_BN', 'test_x3_DN']
    for j in range(17, 18):
        rdn = RDN(load=1, rfi=j, lfi=j)
        paths = glob(
            '/scratch/gilbreth/li3120/dataset/DIV2K_train_HR/Test/*/%s.h5' %
            (test[j - 16]))
        for path in paths:
            rdn.predict_process(path=path)
        print(path)
        del rdn
Example #8
import data
import argparse
from model import RDN
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="data/General-100")
parser.add_argument("--testset", default="")
parser.add_argument("--imgsize", default=128, type=int)
parser.add_argument("--scale", default=4, type=int)
parser.add_argument("--globallayers", default=16, type=int)
parser.add_argument("--locallayers", default=8, type=int)
parser.add_argument("--featuresize", default=64, type=int)
parser.add_argument("--batchsize", default=16, type=int)
parser.add_argument("--savedir", default='saved_models')
parser.add_argument("--iterations", default=10000, type=int)
parser.add_argument("--usepre", default=0, type=int)
args = parser.parse_args()
data.load_dataset(args.dataset, args.testset, args.imgsize)
down_size = args.imgsize // args.scale
network = RDN(down_size, args.globallayers, args.locallayers, args.featuresize,
              args.scale)
network.set_data_fn(data.get_batch, (args.batchsize, args.imgsize, down_size),
                    data.get_test_set, (args.imgsize, args.scale))
network.train(args.iterations, args.savedir, args.usepre)
Example #9
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
from keras.preprocessing import image
import matplotlib.pyplot as plt
from PIL import Image
import os
import time
import threading
import random
import cv2
from model import RDN
datagen = ImageDataGenerator(horizontal_flip=True, vertical_flip=True)
myrdn = RDN()
scale = myrdn.scale  # upscaling factor
LR_shape = 64
HR_shape = LR_shape * scale
train, test = True, False
n = 100  # parameter used when testing the code; use None to take everything

bicubic = True  # assumed default; switches between the bicubic and "unknown" LR sets
if bicubic:
    train_x_dir = './DIV2K_train_LR_bicubic/X2'  #tx
    valid_x_dir = './DIV2K_valid_LR_bicubic/X2'  #vx
else:
    valid_x_dir = './DIV2K_valid_LR_unknown/X2'  #vx1
    train_x_dir = './DIV2K_train_LR_unknown/X2'  #tx1

train_y_dir = './DIV2K_train_HR'  #ty ty1
valid_y_dir = './DIV2K_valid_HR'  #vy vy1