Example #1
def ed(file_dir, outdir):

    # Relies on a module-level argparse parser (see the sketch after this example).
    args = parser.parse_args()
    for item in os.listdir(file_dir):
        file_dir1 = os.path.join(file_dir, item)
        outdir1 = os.path.join(outdir, item)
        if not os.path.exists(outdir1):
            os.mkdir(outdir1)
        if os.path.isdir(file_dir1):
            for item1 in os.listdir(file_dir1):
                tf.reset_default_graph()  # rebuild the TF1 graph for each image
                final_dir = os.path.join(outdir1, item1)
                image = os.path.join(file_dir1, item1)
                down_size = args.imgsize // args.scale
                network = EDSR(down_size,
                               args.layers,
                               args.featuresize,
                               scale=args.scale)

                network.resume(args.savedir)
                x = scipy.misc.imread(image)
                t0 = time.time()
                outputs = network.predict(x)
                print("inference time: %.3fs" % (time.time() - t0))
                scipy.misc.imsave(final_dir, outputs)
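This snippet reads a module-level `parser` and assumes `os`, `time`, `tf`, `scipy.misc`, and `EDSR` are already imported. A minimal sketch of the parser it expects, inferred from the attributes used; the defaults are assumptions that mirror the other examples in this collection:

import argparse

# Hypothetical parser matching the attributes ed() reads; defaults are guesses.
parser = argparse.ArgumentParser()
parser.add_argument("--imgsize", default=100, type=int)
parser.add_argument("--scale", default=2, type=int)
parser.add_argument("--layers", default=32, type=int)
parser.add_argument("--featuresize", default=256, type=int)
parser.add_argument("--savedir", default="saved_models")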
Example #2
def main(self):
    global model
    print("EDSR ==> Data loading .. ")
    loader = data.Data(self.args)
    print("EDSR ==> Check run type .. ")
    if self.args.run_type == 'train':
        train_data_loader = loader.loader_train
        test_data_loader = loader.loader_test
        print("EDSR ==> Load model .. ")
        model = EDSR.EDSR()
        print("EDSR ==> Setting optimizer .. [ ", self.args.optimizer,
              " ] , lr [ ", self.args.lr, " ] , Loss [ MSE ]")
        optimizer = optim.Adam(model.parameters(), self.args.lr)
        if self.args.cuda:
            model.cuda()
        self.train(model, optimizer, self.args.epochs, train_data_loader,
                   test_data_loader)
    elif self.args.run_type == 'test':
        print("EDSR ==> Testing .. ")
        if os.path.exists(self.args.pre_model_dir):
            if not os.path.exists(self.args.dir_data_test_lr):
                print("EDSR ==> Fail [ Test data does not exist ]")
            else:
                test_data_loader = loader.loader_test
                model = EDSR.EDSR()  # instantiate before loading weights
                loaded = torch.load(self.args.pre_model_dir)
                model.load_state_dict(loaded)
                if self.args.cuda:
                    model.cuda()
                self.test(self.args, test_data_loader, model)
        else:
            print("EDSR ==> Fail [ Pretrained model directory does not exist ]")
Example #3
def main():

    if not os.path.exists(Config.checkpoint_dir):
        os.makedirs(Config.checkpoint_dir)

    with tf.Session() as sess:
        trysr = EDSR(sess,
                     image_size=Config.image_size,
                     label_size=Config.label_size,
                     batch_size=Config.batch_size,
                     c_dim=Config.c_dim,
                     checkpoint_dir=Config.checkpoint_dir,
                     scale=Config.scale,
                     feature_size=Config.feature_size,
                     scaling_factor=Config.scaling_factor)

        trysr.train(Config)
Example #4
def Test(MODEL_NAME,
         UPSCALE_FACTOR,
         is_save=False,
         IMAGE_DIR=r'data\testing_lr_images',
         TEST_MODE=True):
    if isinstance(MODEL_NAME, (EDSR, WDSR, SRResnet)):
        model = MODEL_NAME

    else:
        model = EDSR(UPSCALE_FACTOR).eval()
        if TEST_MODE:
            model.cuda()
            model.load_state_dict(torch.load('epochs/' + MODEL_NAME))
        else:
            model.load_state_dict(
                torch.load('epochs/' + MODEL_NAME,
                           map_location=lambda storage, loc: storage))

    print('\n----------------------------------------------------------')
    imgs = []
    with torch.no_grad():
        model.eval()
        for image_name in glob.glob(os.path.join(IMAGE_DIR, '*.*')):
            image = Image.open(image_name)
            image = ToTensor()(image).unsqueeze(0)
            if TEST_MODE:
                image = image.cuda()

            start = time.time()
            out = model(image)
            elapsed = (time.time() - start)

            out_img = ToPILImage()(torch.clip(out[0], 0, 1))
            if is_save:
                out_img.save(
                    f'data/testing_sr_images/{os.path.basename(image_name)}')

            sr_img = gpu_to_numpy(out[0], is_squeeze=False)
            imgs.append(sr_img)
            plot_hr_lr(sr_img, image)
            print('cost time: ' + str(elapsed) + 's')

    return imgs
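A usage sketch for Test; the checkpoint filename is hypothetical, and the default TEST_MODE=True path requires a CUDA device:

# Hypothetical checkpoint name; expects a weights file at epochs/edsr_x4_best.pth.
sr_images = Test('edsr_x4_best.pth', UPSCALE_FACTOR=4, is_save=True)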
Example #5
def get_model(model_type, scale_list, model_path):
    model_type = model_type.lower()
    if model_type == 'edsr':
        return EDSR.EDSR(scale_list, model_path)
    elif model_type == 'mdsr':
        return MDSR.MDSR(scale_list, model_path)
    elif model_type == 'newnet':
        return NewNet.NewNet(scale_list, model_path)
    else:
        print("no such model_type: " + model_type)
        exit(-1)
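A usage sketch for get_model; the scale list and checkpoint path are hypothetical:

# Hypothetical arguments: an EDSR model for x2 upscaling.
model = get_model('EDSR', scale_list=[2], model_path='checkpoints/edsr_x2.pth')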
Example #6
    def _save_model():
        model = EDSR(num_blocks=args.num_blocks, channels=args.num_channels)
        assert args.checkpoint != '', 'checkpoint needs to be specified'
        model.load_weights(args.checkpoint)

        inputs = tf.zeros(shape=(1, 256, 256, 3))
        model(inputs)
        model.save(args.result_dir)
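Calling the model on a dummy input builds its variables so save() can export them. Assuming model.save writes a Keras SavedModel, reloading would presumably look like:

import tensorflow as tf

# Assumes args.result_dir holds the SavedModel exported by _save_model above.
restored = tf.keras.models.load_model(args.result_dir)
sr = restored(tf.zeros(shape=(1, 256, 256, 3)))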
Example #7
File: test.py Project: Nansae/EDSR
def test(args):
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    model = EDSR(num_layers=args.layers,
                 feature_size=args.featuresize).to(device)
    model = nn.DataParallel(model, device_ids=range(
        torch.cuda.device_count())).to(device)
    model.load_state_dict(torch.load(args.savedir))
    model.eval()

    test_dataset = Patches(root=args.path, phase='train')  # note: reuses the 'train' split
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batchsize)

    for data in test_loader:
        data50, data100 = data
        data50 = data50.to(device)

        _, out_imgs = model(data50)

        res = []
        for idx in range(len(out_imgs)):
            images = []
            ## input
            data50_cpu = torch.squeeze(data50[idx]).cpu()
            images.append(transforms.ToPILImage()(data50_cpu).convert("RGB"))
            ## output
            output = torch.squeeze(out_imgs[idx]).cpu()
            images.append(transforms.ToPILImage()(output).convert("RGB"))
            # origin
            data100_cpu = torch.squeeze(data100[idx])
            images.append(transforms.ToPILImage()(data100_cpu).convert("RGB"))
            print(idx, len(images))
            res.append(images)

        fig = plt.figure(figsize=(7, 8))
        rows = args.batchsize
        cols = 3

        titles = ['input', 'output', 'origin']
        axes = []
        for r in range(rows):
            for c in range(cols):
                axes.append(fig.add_subplot(rows, cols, (r * cols + c) + 1))
                axes[-1].set_title(titles[c])
                plt.imshow(res[r][c])

        # Save once after the grid is filled instead of on every subplot.
        plt.savefig('res.png', dpi=300)
        plt.show()
Example #8
def main():
    args = loadArgu()
    data.load_dataset(args.dataset)  # get two list of train images and test images
    down_size = args.imgsize // args.scale
    network = EDSR(down_size, args.layers, args.featuresize, args.scale)
    network.set_data_fn(data.get_batch, (args.batchsize, args.imgsize, down_size), data.get_test_set,
                        (args.imgsize, down_size))
    network.train(args.iterations, args.savedir)

    return 1
Example #9
def train(args):
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    mean_value = 0.5

    # root = 'data/train'
    train_dataset = Patches(root=args.path, phase='train')
    trainloader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=args.batchsize)

    model = EDSR(num_layers=args.layers, feature_size=args.featuresize)
    model = nn.DataParallel(model, device_ids=range(
        torch.cuda.device_count())).to(device)
    model.train()

    optimizer = optim.Adam(model.parameters(), lr=0.0001)

    for epoch in range(1, args.epochs + 1):
        pbar = tqdm(trainloader)
        for data in pbar:
            data50, data100 = data
            data50, data100 = data50.to(device), data100.to(device)

            optimizer.zero_grad()
            output, _ = model(data50)
            loss = F.l1_loss(output, data100 - mean_value)
            loss.backward()
            optimizer.step()
            pbar.set_description("epoch: %d train_loss: %.4f" %
                                 (epoch, loss.item()))

        if epoch % 50 == 0:
            torch.save(model.state_dict(),
                       args.savedir + '/edsr_step_{}.pth'.format(epoch))
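Because the L1 target above is data100 - mean_value, the network learns a mean-shifted output, so inference presumably has to add the shift back. A minimal sketch, assuming the same mean_value of 0.5 and the (output, features) return signature used in the training loop:

import torch

# Hypothetical inference helper: undo the mean shift baked into the training loss.
def predict(model, lr_batch, mean_value=0.5):
    model.eval()
    with torch.no_grad():
        output, _ = model(lr_batch)
        return (output + mean_value).clamp(0, 1)  # shift back and clip to image range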
Example #10
import data
import argparse
from model import EDSR
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="data/General-100")
parser.add_argument("--imgsize", default=100, type=int)
parser.add_argument("--scale", default=2, type=int)
parser.add_argument("--layers", default=32, type=int)
parser.add_argument("--featuresize", default=256, type=int)
parser.add_argument("--batchsize", default=32, type=int)
parser.add_argument("--savedir", default='saved_models')
parser.add_argument("--iterations", default=10000, type=int)
args = parser.parse_args()
#data.load_dataset(args.dataset,args.imgsize)
if args.imgsize % args.scale != 0:
    print(f"Image size {args.imgsize} is not evenly divisible by scale {args.scale}")
    exit()
down_size = args.imgsize // args.scale
network = EDSR(down_size, args.layers, args.featuresize, args.scale)
#network.set_data_fn(data.get_batch,(args.batchsize,args.imgsize,down_size))#,data.get_test_set,(args.imgsize,down_size))
network.train(args.iterations, args.savedir)
Example #11
    os.environ['CUDA_VISIBLE_DEVICES'] = DEVICE_GPU_ID


def restore_session_from_checkpoint(sess, saver):
    checkpoint = tf.train.latest_checkpoint(TRAIN_DIR)
    if checkpoint:
        saver.restore(sess, checkpoint)
        return True
    else:
        return False


if MODEL == 'VDSR':
    model = VDSR(scale=SCALE)
else:
    model = EDSR(scale=SCALE)

data_loader = DataLoader(data_dir=TRAIN_PNG_PATH,
                         batch_size=BATCH_SIZE,
                         shuffle_num=SHUFFLE_NUM,
                         prefetch_num=PREFETCH_NUM,
                         scale=SCALE)

if DATA_LOADER_MODE == 'TFRECORD':
    if len(os.listdir(TRAIN_TFRECORD_PATH)) == 0:
        data_loader.gen_tfrecords(TRAIN_TFRECORD_PATH)
    lrs, bics, gts = data_loader.read_tfrecords(TRAIN_TFRECORD_PATH)
else:
    lrs, bics, gts = data_loader.read_pngs()

res = model(lrs, bics)
Example #12
parser.add_argument("--imgsize",default=320,type=int)
parser.add_argument("--scale",default=2,type=int)
parser.add_argument("--layers",default=16,type=int)
parser.add_argument("--featuresize",default=128,type=int)
parser.add_argument("--batchsize",default=10,type=int)
parser.add_argument("--savedir",default="saved_models")
parser.add_argument("--iterations",default=400,type=int)
parser.add_argument("--numimgs",default=5,type=int)
parser.add_argument("--outdir",default="out")
parser.add_argument("--image")
args = parser.parse_args()
if not os.path.exists(args.outdir):
	os.mkdir(args.outdir)
data.load_dataset(args.dataset)
down_size = args.imgsize//args.scale
network = EDSR(down_size,args.layers,args.featuresize,scale=args.scale)
network.resume(args.savedir)
if args.image:
	x = scipy.misc.imread(args.image)
else:
	print("No image argument given")
	exit()
inputs = x
x = np.array(x)
x = x.reshape(x.shape + (1,))
outputs = network.predict(x)
shape = outputs.shape
outputs = outputs.reshape((shape[0], shape[1]))
if args.image:
	scipy.misc.imsave(args.outdir+"/input_"+args.image,inputs)
	scipy.misc.imsave(args.outdir+"/output_"+args.image,outputs)
Example #13
import data
import argparse
from model import EDSR
parser = argparse.ArgumentParser()
parser.add_argument("--dataset")
parser.add_argument("--imgsize")
parser.add_argument("--scale")
parser.add_argument("--layers")
parser.add_argument("--featuresize")
parser.add_argument("--batchsize")
args = parser.parse_args()
if args.dataset:
    dataset = args.dataset
else:
    dataset = "data/General-100"
data.load_dataset(dataset)
img_size = int(args.imgsize) if args.imgsize else 100
scale = int(args.scale) if args.scale else 2
down_size = img_size // scale  # integer division, matching the other examples
layers = int(args.layers) if args.layers else 32
feature_size = int(args.featuresize) if args.featuresize else 256
batch_size = int(args.batchsize) if args.batchsize else 10
network = EDSR(down_size, layers, feature_size, scale)
network.set_data_fn(data.get_batch, (batch_size, img_size, down_size),
                    data.get_test_set, (img_size, down_size))
network.train()
Example #14
parser.add_argument("--layers", default=32, type=int)
parser.add_argument("--featuresize", default=256, type=int)
parser.add_argument("--batchsize", default=16, type=int)  #10
parser.add_argument("--savedir", default='saved_models')
parser.add_argument("--iterations", default=1000, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--scaling_factor", default=0.5, type=float)
parser.add_argument("--load_model", default='', type=str)

args = parser.parse_args()
# get the training data
print('start loading...')
epoch_has_step = data.load_dataset(args.dataset, args.imgsize, args.batchsize)
print('load dataset complete...')

down_size = args.imgsize // args.scale

network = EDSR(down_size,
               args.layers,
               args.featuresize,
               args.scale,
               output_channels=3,
               sc_factor=args.scaling_factor)

# feed the data pipeline: the batch function plus its 4 arguments
network.set_data_fn(data.get_batch, (args.batchsize, args.imgsize, down_size),
                    data.get_test_set, (args.imgsize, down_size))

network.train(args.iterations, args.savedir, args.lr, args.load_model,
              epoch_has_step)
Example #15
def main(args):
    cfg = cfg_dict[args.cfg_name]
    writer = SummaryWriter(os.path.join("runs", args.cfg_name))
    train_loader = get_data_loader(cfg, cfg["train_dir"])

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = EDSR(cfg).to(device)
    criterion = torch.nn.L1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg["init_lr"],
                                 betas=(0.9, 0.999), eps=1e-8)

    global_batches = 0
    if args.train:
        for epoch in range(cfg["n_epoch"]):
            model.train()
            running_loss = 0.0
            for i, batch in enumerate(train_loader):
                lr, hr = batch[0].to(device), batch[1].to(device)
                optimizer.zero_grad()
                sr = model(lr)
                loss = model.loss(sr, hr)
                # loss = criterion(model(lr), hr)
                running_loss += loss.item()
                loss.backward()
                optimizer.step()
                global_batches += 1
                if global_batches % cfg["lr_decay_every"] == 0:
                    for param_group in optimizer.param_groups:
                        print(f"decay lr to {param_group['lr'] / 10}")
                        param_group["lr"] /= 10

            if epoch % args.log_every == 0:
                model.eval()
                with torch.no_grad():
                    batch_samples = {"lr": batch[0], "hr": batch[1], 
                                     "sr": sr.cpu()}
                    writer.add_scalar("training-loss", 
                                      running_loss / len(train_loader),
                                      global_step=global_batches)
                    writer.add_scalar("PSNR", compute_psnr(batch_samples), 
                                      global_step=global_batches)
                    samples = {k: v[:3] for k, v in batch_samples.items()}
                    fig = visualize_samples(samples, f"epoch-{epoch}")
                    writer.add_figure("sample-visualization", fig, 
                                      global_step=global_batches)

            if epoch % args.save_every == 0:
                state = {"net": model.state_dict(), 
                         "optim": optimizer.state_dict()}
                checkpoint_dir = args.checkpoint_dir
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                path = os.path.join(checkpoint_dir, args.cfg_name)
                torch.save(state, path)
    
    # eval
    if args.eval:
        assert args.model_path and args.lr_img_path
        print(f"evaluating {args.lr_img_path}")
        state = torch.load(args.model_path, map_location=device)
        model.load_state_dict(state["net"])
        optimizer.load_state_dict(state["optim"])

        with torch.no_grad():
            lr = img2tensor(args.lr_img_path)
            sr = model(lr.clone().to(device)).cpu()
            samples = {"lr": lr, "sr": sr}
            if args.hr_img_path:
                samples["hr"] = img2tensor(args.hr_img_path)
                print(f"PSNR: {compute_psnr(samples)}")
            directory = os.path.dirname(args.lr_img_path)
            name = f"eval-{args.cfg_name}-{args.lr_img_path.split('/')[-1]}"
            visualize_samples(samples, name, save=True, 
                              directory=directory, size=6)
Example #16
import data
import argparse
from model import EDSR
from DIV2K import *
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="data/General-100")
parser.add_argument("--imgsize", default=100, type=int)
parser.add_argument("--scale", default=2, type=int)
parser.add_argument("--layers", default=32, type=int)
parser.add_argument("--featuresize", default=256, type=int)
parser.add_argument("--batchsize", default=10, type=int)
parser.add_argument("--savedir", default='saved_models')
parser.add_argument("--iterations", default=1000, type=int)
args = parser.parse_args()
# data.load_dataset(args.dataset)
down_size = args.imgsize // args.scale
network = EDSR(down_size, args.layers, args.featuresize, args.scale)
network.test_data = DIV2K().get_test()
# network.set_data_fn(data.get_batch,(args.batchsize,args.imgsize,down_size),data.get_test_set,(args.imgsize,down_size))
network.train(args.iterations, args.savedir)
Example #17
parser.add_argument("--dataset", default="/Test1/6_72_0.3039480800432666.png")
parser.add_argument("--imgsize", default=300, type=int)
parser.add_argument("--scale", default=3, type=int)
parser.add_argument("--layers", default=32, type=int)
parser.add_argument("--featuresize", default=64, type=int)
parser.add_argument("--batchsize", default=32, type=int)
parser.add_argument("--savedir", default="save_model_200")
parser.add_argument("--iterations", default=1000, type=int)
parser.add_argument("--numimgs", default=5, type=int)
parser.add_argument("--outdir", default="out")
parser.add_argument("--image", default="guqinying/*_brain.nii.gz")
args = parser.parse_args()
if not os.path.exists(args.outdir):
    os.mkdir(args.outdir)
down_size = args.imgsize // args.scale
network = EDSR(down_size, args.layers, args.featuresize, scale=args.scale)
network.resume(args.savedir)
li_data = glob.glob(args.image)
x1 = sitk.ReadImage(li_data[0])
# x = cv2.imread(args.image)
# x = np.array(x, dtype=float)
tmp = sitk.GetArrayFromImage(x1)
# x_trans = np.transpose(tmp, (1, 2, 0))
x_1 = tmp[0:155, 0:351, 0:351]

# input=x_1[120,:,:]

sum_ssim = 0
sum_psnr = 0
bh_sum_ssim = 0
bh_sum_psnr = 0
Example #18
def main():
    args = get_args()

    writer = SummaryWriter(args.work_dir)
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(args.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file)

    train_dataset = Dataset(dataset=args.train_dataset,
                            split='train',
                            crop_cfg=dict(type='random',
                                          patch_size=args.patch_size),
                            flip_and_rotate=True)
    val_dataset = Dataset(dataset=args.valid_dataset,
                          split='valid',
                          override_length=args.num_valids,
                          crop_cfg=None)
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    model = EDSR(num_blocks=args.num_blocks, channels=args.num_channels)
    loss_fn = tf.keras.losses.MeanAbsoluteError()
    optimizer = tf.keras.optimizers.Adam(args.learning_rate)

    best_psnr = 0

    for epoch in range(1, args.num_epochs + 1):
        losses = []
        for lr, hr in tqdm(train_loader):
            lr = tf.constant(lr, dtype=tf.float32)
            hr = tf.constant(hr, dtype=tf.float32)
            with tf.GradientTape() as tape:
                sr = model(lr)
                loss = loss_fn(hr, sr)
            gradients = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))

            losses.append(loss.numpy())
        logger.info(f'Epoch {epoch} - loss: {np.mean(losses)}')
        writer.add_scalar('loss', np.mean(losses), epoch)

        # eval
        if epoch % args.eval_freq == 0 or epoch == args.num_epochs:
            logger.info('Evaluating...')
            psnrs = []
            for i, (lr, hr) in enumerate(val_loader):
                lr = tf.constant(lr, dtype=tf.float32)
                hr = tf.constant(hr, dtype=tf.float32)
                sr = model(lr)
                cur_psnr = compute_psnr(sr, hr)
                psnrs.append(cur_psnr)
                update_tfboard(writer, i, lr, hr, sr, epoch)
            psnr = np.mean(psnrs)
            if psnr > best_psnr:
                best_psnr = psnr
            model.save_weights(osp.join(args.work_dir, f'epoch_{epoch}'))
            logger.info('psnr: {:.2f} (best={:.2f})'.format(psnr, best_psnr))
            writer.add_scalar('psnr', psnr, epoch)
            writer.flush()
Example #19
parser.add_argument("--layers",default=32,type=int)
parser.add_argument("--featuresize",default=256,type=int)
parser.add_argument("--batchsize",default=10,type=int)
parser.add_argument("--load_model",default="saved_models")
parser.add_argument("--iterations",default=1000,type=int)
parser.add_argument("--numimgs",default=5,type=int)
parser.add_argument("--outdir",default="out")
parser.add_argument("--image")
parser.add_argument("--bicubic")

args = parser.parse_args()
if not os.path.exists(args.outdir):
	os.mkdir(args.outdir)

down_size = args.imgsize//args.scale
network = EDSR(down_size,args.layers,args.featuresize,scale=args.scale)
network.resume(args.load_model)


if args.image:
	start_time = time.time()
	x = scipy.misc.imread(args.image)	
	
	#bicubic = scipy.misc.imread(args.bicubic)
	bicubic = scipy.misc.imresize(x,(x.shape[0]*args.scale,x.shape[1]*args.scale),'bicubic')
	print(x.shape,bicubic.shape)

else:
	print("No image argument given")

Example #20
from model import EDSR
import scipy.misc
import os

dataset = "data/General-100"
imgsize = 100
scale = 2
layers = 32
featuresize = 256
batchsize = 1
savedir = "saved_models"
iterations = 1000
numimgs = 5
outdir = "dataout"
image = "1.jpg"

if not os.path.exists(outdir):
    os.mkdir(outdir)
down_size = imgsize // scale
network = EDSR(down_size, layers, featuresize, scale=scale)
network.resume(savedir)
x = scipy.misc.imread(image)  # 'image' is hardcoded above, so no argument check is needed
inputs = x
outputs = network.predict(x)
scipy.misc.imsave(outdir + "/input_" + image, inputs)
scipy.misc.imsave(outdir + "/output_" + image, outputs)
Example #21
import data
import argparse
from model import EDSR
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",default="images_register")
parser.add_argument("--imgsize",default=320,type=int)
parser.add_argument("--scale",default=2,type=int)
parser.add_argument("--layers",default=16,type=int)
parser.add_argument("--featuresize",default=128,type=int)
parser.add_argument("--batchsize",default=10,type=int)
parser.add_argument("--savedir",default='saved_models')
parser.add_argument("--iterations",default=400,type=int)
args = parser.parse_args()
data.load_dataset(args.dataset)
down_size = args.imgsize//args.scale
network = EDSR(down_size,args.layers,args.featuresize,args.scale, output_channels=1)
network.set_data_fn(data.get_batch,(args.batchsize,args.imgsize,down_size),data.get_test_set,(args.imgsize,down_size))
network.train(args.iterations,args.savedir)
Example #22
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True


def loss_fn(sr, gt):
    loss = nn.MSELoss(reduction='sum')
    output = loss(sr, gt)
    return output

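With reduction='sum', this loss scales with batch and image size. If a PSNR-style metric is also wanted, the usual starting point is a mean-reduced MSE; a minimal sketch (helper name hypothetical):

import torch
import torch.nn.functional as F

# Hypothetical metric helper: PSNR = 10 * log10(max_val^2 / MSE) with mean-reduced MSE.
def psnr(sr, gt, max_val=1.0):
    mse = F.mse_loss(sr, gt, reduction='mean')
    return 10 * torch.log10(max_val ** 2 / mse)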

if __name__ == '__main__':
    set_seed(2019)  # Set seed to produce the same training results

    model = EDSR(upscale=scale)
    model = nn.DataParallel(model, device_ids=[0])
    model = model.to(device)
    model.train()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=lr_step,
                                                gamma=0.1)

    writer = SummaryWriter(os.path.join(checkpoint_dir, 'tensorboard_log'),
                           flush_secs=10)
    step = 0
    dataset = DIV2K_Dataset(dataset_dir, patch_size, scale, crop_num_per_image)
Example #23
import data
import argparse
from model import EDSR
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="data/General-100")
parser.add_argument("--imgsize", default=100, type=int)
parser.add_argument("--scale", default=2, type=int)
parser.add_argument("--layers", default=32, type=int)
parser.add_argument("--featuresize", default=256, type=int)
parser.add_argument("--batchsize", default=10, type=int)
parser.add_argument("--savedir", default='saved_models')
parser.add_argument("--iterations", default=1000, type=int)
args = parser.parse_args()
data.load_dataset(args.dataset, args.imgsize)
if args.imgsize % args.scale != 0:
    print(f"Image size {args.imgsize} is not evenly divisible by scale {args.scale}")
    exit()
down_size = args.imgsize//args.scale
network = EDSR(down_size, args.layers, args.featuresize, args.scale)
network.set_data_fn(data.get_batch, (args.batchsize, args.imgsize,
                                     down_size), data.get_test_set, (args.imgsize, down_size))
network.train(args.iterations, args.savedir)
Example #24
import argparse
import data
import os
import scipy.misc
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",default="data/General-100")
parser.add_argument("--imgsize",default=100,type=int)
parser.add_argument("--scale",default=2,type=int)
parser.add_argument("--layers",default=32,type=int)
parser.add_argument("--featuresize",default=256,type=int)
parser.add_argument("--batchsize",default=10,type=int)
parser.add_argument("--savedir",default="saved_models")
parser.add_argument("--iterations",default=1000,type=int)
parser.add_argument("--numimgs",default=5,type=int)
parser.add_argument("--outdir",default="out")
parser.add_argument("--image")
args = parser.parse_args()
if not os.path.exists(args.outdir):
	os.mkdir(args.outdir)
data.load_dataset(args.dataset)
down_size = args.imgsize//args.scale
network = EDSR(down_size,args.layers,args.featuresize,scale=args.scale)
network.resume(args.savedir)
if args.image:
	x = scipy.misc.imread(args.image)
else:
	print("No image argument given")
	exit()
inputs = x
outputs = network.predict(x)
if args.image:
	scipy.misc.imsave(args.outdir+"/input_"+args.image,inputs)
	scipy.misc.imsave(args.outdir+"/output_"+args.image,outputs)
Example #25
device_gpu_id = config['device_gpu_id']

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

if device_mode == 'CPU':
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    device = torch.device("cpu")
else:
    os.environ['CUDA_VISIBLE_DEVICES'] = device_gpu_id
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

with torch.no_grad():
    checkpoint = torch.load(checkpoint_path)

    model = EDSR(upscale=scale)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to(device)
    model.eval()

    fs = glob.glob(os.path.join(input_dir, input_suffix))
    psnrs = []
    for f in fs:
        img = misc.imread(f)
        lr_img = misc.imresize(img, 1.0 / scale, 'bicubic')
        bic_img = misc.imresize(lr_img, scale * 1.0, 'bicubic')
        lr_y = utils.rgb2ycbcr(lr_img)[:, :, 0]
        bic_ycbcr = utils.rgb2ycbcr(bic_img)
        bic_y = bic_ycbcr[:, :, 0]

        lr_y = torch.from_numpy(lr_y).unsqueeze(0).unsqueeze(0).float().to(device)