Example #1
def __init__(self, network=default_values['g_network'], model_path=None,
             device='cuda:0', weights=default_values['weights'], activation='PReLU', funit=32,
             beta1=default_values['beta1'], lr=default_values['lr'], printer=None, compute_SSIM_anyway=False,
             save_dict=True, patience=default_values['patience'], debug_options=[]):
    Model.__init__(self, save_dict, device, printer, debug_options=debug_options)
    self.weights = weights
    # Only instantiate the loss terms that will actually be used
    if weights['SSIM'] > 0 or compute_SSIM_anyway:
        self.criterion_SSIM = pytorch_ssim.SSIM().to(device)
    if weights['L1'] > 0:
        self.criterion_L1 = nn.L1Loss().to(device)
    if weights['D1'] > 0:
        self.criterion_D = nn.MSELoss().to(device)
    if weights['D2'] > 0:
        self.criterion_D2 = nn.MSELoss().to(device)
    self.model = self.instantiate_model(model_path=model_path, network=network, pfun=self.print, device=device, funit=funit, keyword='generator')
    self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(beta1, 0.999))
    self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=0.75, verbose=True, threshold=1e-8, patience=patience)
    self.device = device
    self.loss = {'SSIM': 1, 'L1': 1, 'D': 1, 'weighted': 1}
    self.compute_SSIM_anyway = compute_SSIM_anyway
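
For context, a minimal self-contained sketch of how criteria like the ones configured above are typically combined into one weighted objective; the tensors, shapes, and weight values below are illustrative assumptions, not taken from the original class.

import torch
import torch.nn as nn
import pytorch_ssim  # the SSIM module used throughout these examples

# Hypothetical batch of denoised outputs and ground-truth images (N, C, H, W)
denoised = torch.rand(4, 3, 64, 64, requires_grad=True)
ground_truth = torch.rand(4, 3, 64, 64)

weights = {'SSIM': 0.8, 'L1': 0.2}   # illustrative weights, not the defaults above
criterion_SSIM = pytorch_ssim.SSIM()
criterion_L1 = nn.L1Loss()

# SSIM is a similarity in [0, 1], so it enters the objective as 1 - SSIM
loss = (weights['SSIM'] * (1 - criterion_SSIM(denoised, ground_truth))
        + weights['L1'] * criterion_L1(denoised, ground_truth))
loss.backward()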
Example #2
    print('resuming by loading epoch %03d' % initial_epoch)
    # model.load_state_dict(torch.load(os.path.join(save_dir, 'model_%03d.pth' % initial_epoch)))
    model = torch.load(
        os.path.join(save_dir, 'model_%03d.pth' % initial_epoch))
if args.load_g_state_dict_path:
    model.load_state_dict(torch.load(args.load_g_state_dict_path))
#elif args.model != 'DnCNN':
#    model.apply(nnModules.init_weights)
model.train()
# Loss function
#if args.lossf == 'MSSSIM':
#    criterion = pytorch_msssim.MSSSIM(channel=3)
#elif args.lossf == 'MSSSIMandMSE':
#    criterion = pytorch_msssim.MSSSIMandMSE()
if args.lossf == 'SSIM':
    criterion = pytorch_ssim.SSIM()
elif args.lossf == 'MSE':
    criterion = torch.nn.MSELoss()
else:
    exit('Error: requested loss function ' + args.lossf +
         ' has not been implemented.')
if cuda:
    model = model.cuda()
    criterion = criterion.cuda()
else:
    print("Warning: running on CPU is not sane")
# Dataset
#TODO replace num_workers
DDataset = DenoisingDataset(train_data,
                            compressionmin=args.compressionmin,
                            compressionmax=args.compressionmax,
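
As a hedged illustration of how the criterion selected above is applied in a training step (the excerpt does not show the loop itself), here is a toy, self-contained version; the model, optimizer, and batch below are stand-ins, not the script's real objects.

import torch
import torch.nn as nn
import pytorch_ssim

# Stand-ins for the pieces not shown in the excerpt: a toy model, its
# optimizer, and one batch of noisy/clean patches.
model = nn.Conv2d(3, 3, kernel_size=3, padding=1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = pytorch_ssim.SSIM()           # the args.lossf == 'SSIM' branch
noisy = torch.rand(2, 3, 64, 64)
clean = torch.rand(2, 3, 64, 64)

denoised = model(noisy)
loss = 1 - criterion(denoised, clean)     # SSIM is a similarity, so minimize its complement
optimizer.zero_grad()
loss.backward()
optimizer.step()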
Example #3
# Loss utils used by denoise_dir.py
import torch
import torchvision
import os
from PIL import Image
from lib import pytorch_ssim
from dataset_torch_3 import sortISOs
import argparse

totensor = torchvision.transforms.ToTensor()
MSE = torch.nn.MSELoss().cuda()
SSIM = pytorch_ssim.SSIM().cuda()


def find_gt_path(denoised_fn, gt_dir):
    dsname, setdir = denoised_fn.split('_')[0:2]
    setfiles = os.listdir(os.path.join(gt_dir, setdir))
    isos = [fn.split('_')[2][:-4] for fn in setfiles]
    baseiso = sortISOs(isos)[0][0]
    baseiso_fn = dsname + '_' + setdir + '_' + baseiso + '.' + denoised_fn.split(
        '.')[-1]
    return os.path.join(gt_dir, setdir, baseiso_fn)


def files(path):
    for fn in os.listdir(path):
        if os.path.isfile(os.path.join(path, fn)) and fn != 'res.txt':
            yield fn


def gen_score(noisy_dir, gt_dir='datasets/test/ds_fs'):
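
The body of gen_score is not included above; purely as an illustration, this is how the module-level MSE and SSIM helpers might be applied to a single denoised/ground-truth pair. The file paths are hypothetical placeholders, and a CUDA device is required because the criteria were moved to the GPU at import time.

den_path = 'denoised/ds_set1_00100.png'   # hypothetical denoised file
gt_path = find_gt_path(os.path.basename(den_path), 'datasets/test/ds_fs')
denoised = totensor(Image.open(den_path)).unsqueeze(0).cuda()
gt = totensor(Image.open(gt_path)).unsqueeze(0).cuda()
print('MSE: %.5f  SSIM: %.5f' % (MSE(denoised, gt).item(), SSIM(denoised, gt).item()))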
Example #4
def main(opt, _run):
    cuda = opt.gpu_mode
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)
        cudnn.benchmark = True
    gpus_list = range(opt.gpus)

    # =============================#
    #   Prepare training data     #
    # =============================#
    # first use the synthetic data (from VOC 2007) to train the model, then use the LOL real data for fine-tuning
    print('===> Prepare training data')
    train_set = get_Low_light_training_set(upscale_factor=1, patch_size=opt.patch_size, data_augmentation=True)
    #train_set = get_training_set("datasets/LOL/train", 1, opt.patch_size, True) # uncomment it to do the fine tuning
    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize,
                                      pin_memory=True, shuffle=True, drop_last=True)
    # =============================#
    #          Build model        #
    # =============================#
    print('===> Build model')
    lighten = DLN(input_dim=3, dim=64)
    lighten = torch.nn.DataParallel(lighten)
    #lighten.load_state_dict(torch.load("DLN_journal.pth", map_location=lambda storage, loc: storage), strict=True)

    print('---------- Networks architecture -------------')
    print_network(lighten)

    print('----------------------------------------------')
    if cuda:
        lighten = lighten.cuda()

    # =============================#
    #         Loss function       #
    # =============================#
    L1_criterion = nn.L1Loss()
    TV_loss = TVLoss()
    mse_loss = torch.nn.MSELoss()
    ssim = pytorch_ssim.SSIM()
    if cuda:
        gpus_list = range(opt.gpus)
        mse_loss = mse_loss.cuda()
        L1_criterion = L1_criterion.cuda()
        TV_loss = TV_loss.cuda()
        ssim = ssim.cuda(gpus_list[0])

    # =============================#
    #         Optimizer            #
    # =============================#
    parameters = [lighten.parameters()]
    optimizer = optim.Adam(itertools.chain(*parameters), lr=opt.lr, betas=(0.9, 0.999), eps=1e-8)

    # =============================#
    #         Training             #
    # =============================#
    psnr_score, ssim_score = eval(lighten, 0)
    print(psnr_score)
    for epoch in range(opt.start_iter, opt.nEpochs + 1):
        print('===> training epoch %d' % epoch)
        epoch_loss = 0
        lighten.train()

        tStart_epoch = time.time()
        for iteration, batch in enumerate(training_data_loader, 1):
            over_Iter = epoch * len(training_data_loader) + iteration
            optimizer.zero_grad()

            LL_t, NL_t = batch[0], batch[1]
            if cuda:
                LL_t = LL_t.cuda()
                NL_t = NL_t.cuda()

            t0 = time.time()

            pred_t = lighten(LL_t)

            ssim_loss = 1 - ssim(pred_t, NL_t)
            tv_loss = TV_loss(pred_t)
            loss = ssim_loss + 0.001 * tv_loss

            loss.backward()
            optimizer.step()
            t1 = time.time()

            epoch_loss += loss.item()  # accumulate a plain float, not a graph-holding tensor

            if iteration % 10 == 0:
                print("Epoch: %d/%d || Iter: %d/%d " % (epoch, opt.nEpochs, iteration, len(training_data_loader)),
                      end=" ==> ")
                logs = {
                    "loss": loss.data,
                    "ssim_loss": ssim_loss.data,
                    "tv_loss": tv_loss.data,
                }
                log_metrics(_run, logs, over_Iter)
                print("time: {:.4f} s".format(t1 - t0))

        print("===> Epoch {} Complete: Avg. Loss: {:.4f}; ==> {:.2f} seconds".format(epoch, epoch_loss / len(
            training_data_loader), time.time() - tStart_epoch))
        _run.log_scalar("epoch_loss", float(epoch_loss / len(training_data_loader)), epoch)

        if epoch % (opt.snapshots) == 0:
            file_checkpoint = checkpoint(lighten, epoch, opt)
            exp.add_artifact(file_checkpoint)

            psnr_score, ssim_score = eval(lighten, epoch)
            logs = {
                "psnr": psnr_score,
                "ssim": ssim_score,
            }
            log_metrics(_run, logs, epoch, end_str="\n")

        if (epoch + 1) % (opt.nEpochs * 2 // 3) == 0:  # integer division so the modulo test can actually fire
            for param_group in optimizer.param_groups:
                param_group['lr'] /= 10.0
            print('G: Learning rate decay: lr={}'.format(optimizer.param_groups[0]['lr']))
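
The block above divides the learning rate by 10 by hand at two-thirds of the run; roughly the same schedule can be written with PyTorch's built-in StepLR scheduler. The sketch below is self-contained with a placeholder network and epoch count, and is not how the original script does it.

import torch

net = torch.nn.Linear(4, 4)               # placeholder for the lighten model
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
nEpochs = 30                              # assumed epoch budget
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=max(1, nEpochs * 2 // 3), gamma=0.1)
for epoch in range(nEpochs):
    optimizer.step()                      # placeholder for one real training epoch
    scheduler.step()                      # lr is divided by 10 every 2/3 of the run
print(optimizer.param_groups[0]['lr'])    # final lr: 1e-3 * 0.1 = 1e-4 (one decay within 30 epochs)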
Example #5
else:
    net_d = define_D(D_n_layers, args.ndf, args.netD, gpu_id=device, out_activation=dout_activation, finalpool=args.finalpool, funit=args.funit_D)

if args.weight_L1_0 > 0 or weight_L1_1 > 0:
    use_L1 = True
    criterionL1 = nn.L1Loss().to(device)
else:
    use_L1 = False

# load state dict for compatibility
if args.load_g_state_dict_path:
    net_g.load_state_dict(torch.load(args.load_g_state_dict_path))
if args.load_d_state_dict_path:
    net_d.load_state_dict(torch.load(args.load_d_state_dict_path))

criterionSSIM = pytorch_ssim.SSIM().to(device)
assert args.weight_ssim_0 > 0 # not implemented

# setup optimizer

optimizer_g = optim.Adam(net_g.parameters(), lr=args.lr, betas=(args.beta1, 0.999))
net_g_scheduler = get_scheduler(optimizer_g, args, generator=True)
optimizer_d = optim.Adam(net_d.parameters(), lr=args.lr, betas=(args.beta1, 0.999))
net_d_scheduler = get_scheduler(optimizer_d, args, generator=False)

loss_crop_lb, loss_crop_up = get_crop_boundaries(DDataset.cs, DDataset.ucs, args.model, args.netD)

use_D = False
useful_discriminator = False
generator_learns = not args.generator_waits
if not generator_learns: